hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2b4f13cf30e0fd163c8da85664116fd4d706a116 | 457 | py | Python | files/exercises/errors-identifying-syntax-errors.py | mforneris/introduction_to_python_course | 8075973ee89a921a5e2693f649adbf1fc0e0b2cb | [
"CC-BY-4.0"
] | null | null | null | files/exercises/errors-identifying-syntax-errors.py | mforneris/introduction_to_python_course | 8075973ee89a921a5e2693f649adbf1fc0e0b2cb | [
"CC-BY-4.0"
] | null | null | null | files/exercises/errors-identifying-syntax-errors.py | mforneris/introduction_to_python_course | 8075973ee89a921a5e2693f649adbf1fc0e0b2cb | [
"CC-BY-4.0"
] | 1 | 2020-01-09T10:58:56.000Z | 2020-01-09T10:58:56.000Z | # Identifying Syntax Errors
# 1. Read the code below, and (without running it) try to identify what the errors are.
# 2. Run the code, and read the error message. Is it a SyntaxError or an IndentationError?
# 3. Fix the error.
# 4. Repeat steps 2 and 3, until you have fixed all the errors.
def another_function
print("Syntax errors are annoying.")
print("But at least Python tells us about them!")
print("So they are usually not too hard to fix.") | 41.545455 | 90 | 0.733042 |
5812fd99ab1191a2e465069cca70fbdcd49183dd | 7,887 | py | Python | holoviews/tests/core/data/test_pandasinterface.py | TheoMathurin/holoviews | 0defcef994d6dd6d2054f75a0e332d02d121f8b0 | [
"BSD-3-Clause"
] | 864 | 2019-11-13T08:18:27.000Z | 2022-03-31T13:36:13.000Z | holoviews/tests/core/data/test_pandasinterface.py | chrinide/holoviews | e1234a60ae0809ac561c204b1998dff0452b2bf0 | [
"BSD-3-Clause"
] | 1,117 | 2019-11-12T16:15:59.000Z | 2022-03-30T22:57:59.000Z | holoviews/tests/core/data/test_pandasinterface.py | chrinide/holoviews | e1234a60ae0809ac561c204b1998dff0452b2bf0 | [
"BSD-3-Clause"
] | 180 | 2019-11-19T16:44:44.000Z | 2022-03-28T22:49:18.000Z | from unittest import SkipTest
import numpy as np
try:
import pandas as pd
except:
raise SkipTest("Could not import pandas, skipping PandasInterface tests.")
from holoviews.core.dimension import Dimension
from holoviews.core.data import Dataset
from holoviews.core.data.interface import DataError
from holoviews.core.spaces import HoloMap
from holoviews.element import Scatter, Points, Distribution
from .base import HeterogeneousColumnTests, InterfaceTests
class BasePandasInterfaceTests(HeterogeneousColumnTests, InterfaceTests):
"""
Test for the PandasInterface.
"""
__test__ = False
def test_duplicate_dimension_constructor(self):
ds = Dataset(([1, 2, 3], [1, 2, 3]), ['A', 'B'], ['A'])
self.assertEqual(list(ds.data.columns), ['A', 'B'])
def test_dataset_empty_list_init_dtypes(self):
dataset = Dataset([], kdims=['x'], vdims=['y'])
for d in 'xy':
self.assertEqual(dataset.dimension_values(d).dtype, np.float64)
def test_dataset_series_construct(self):
ds = Scatter(pd.Series([1, 2, 3], name='A'))
self.assertEqual(ds, Scatter(([0, 1, 2], [1, 2, 3]), 'index', 'A'))
def test_dataset_df_construct_autoindex(self):
ds = Scatter(pd.DataFrame([1, 2, 3], columns=['A'], index=[1, 2, 3]), 'test', 'A')
self.assertEqual(ds, Scatter(([0, 1, 2], [1, 2, 3]), 'test', 'A'))
def test_dataset_df_construct_not_autoindex(self):
ds = Scatter(pd.DataFrame([1, 2, 3], columns=['A'], index=[1, 2, 3]), 'index', 'A')
self.assertEqual(ds, Scatter(([1, 2, 3], [1, 2, 3]), 'index', 'A'))
def test_dataset_single_column_construct(self):
ds = Scatter(pd.DataFrame([1, 2, 3], columns=['A']))
self.assertEqual(ds, Scatter(([0, 1, 2], [1, 2, 3]), 'index', 'A'))
def test_dataset_df_duplicate_columns_raises(self):
df = pd.DataFrame(np.random.randint(-100,100, size=(100, 2)), columns=list("AB"))
with self.assertRaises(DataError):
Dataset(df[['A', 'A']])
def test_dataset_extract_vdims(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Dataset(df, kdims=['x'])
self.assertEqual(ds.vdims, [Dimension('y'), Dimension('z')])
def test_dataset_process_index(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Dataset(df, 'index')
self.assertEqual(ds.kdims, [Dimension('index')])
self.assertEqual(ds.vdims, [Dimension('x'), Dimension('y'), Dimension('z')])
def test_dataset_extract_kdims_and_vdims_no_bounds(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Dataset(df)
self.assertEqual(ds.kdims, [Dimension('x'), Dimension('y'), Dimension('z')])
self.assertEqual(ds.vdims, [])
def test_dataset_extract_kdims(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Distribution(df)
self.assertEqual(ds.kdims, [Dimension('x')])
def test_dataset_extract_kdims_and_vdims(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Points(df)
self.assertEqual(ds.kdims, [Dimension('x'), Dimension('y')])
self.assertEqual(ds.vdims, [Dimension('z')])
def test_dataset_element_allowing_two_kdims_with_one_default_kdim(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Scatter(df)
self.assertEqual(ds.kdims, [Dimension('x')])
self.assertEqual(ds.vdims, [Dimension('y'), Dimension('z')])
def test_dataset_extract_kdims_with_vdims_defined(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Points(df, vdims=['x'])
self.assertEqual(ds.kdims, [Dimension('y'), Dimension('z')])
self.assertEqual(ds.vdims, [Dimension('x')])
def test_dataset_extract_all_kdims_with_vdims_defined(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Dataset(df, vdims=['x'])
self.assertEqual(ds.kdims, [Dimension('y'), Dimension('z')])
self.assertEqual(ds.vdims, [Dimension('x')])
def test_dataset_extract_kdims_declare_no_vdims(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Points(df, vdims=[])
self.assertEqual(ds.kdims, [Dimension('x'), Dimension('y')])
self.assertEqual(ds.vdims, [])
def test_dataset_extract_no_kdims_extract_only_vdims(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Dataset(df, kdims=[])
self.assertEqual(ds.kdims, [])
self.assertEqual(ds.vdims, [Dimension('x'), Dimension('y'), Dimension('z')])
def test_dataset_extract_vdims_with_kdims_defined(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3], 'z': [1, 2, 3]},
columns=['x', 'y', 'z'])
ds = Points(df, kdims=['x', 'z'])
self.assertEqual(ds.kdims, [Dimension('x'), Dimension('z')])
self.assertEqual(ds.vdims, [Dimension('y')])
def test_multi_dimension_groupby(self):
x, y, z = list('AB'*10), np.arange(20)%3, np.arange(20)
ds = Dataset((x, y, z), kdims=['x', 'y'], vdims=['z'], datatype=[self.datatype])
keys = [('A', 0), ('B', 1), ('A', 2), ('B', 0), ('A', 1), ('B', 2)]
grouped = ds.groupby(['x', 'y'])
self.assertEqual(grouped.keys(), keys)
group = Dataset({'z': [5, 11, 17]}, vdims=['z'])
self.assertEqual(grouped.last, group)
def test_dataset_simple_dict_sorted(self):
dataset = Dataset({2: 2, 1: 1, 3: 3}, kdims=['x'], vdims=['y'])
self.assertEqual(dataset, Dataset([(i, i) for i in range(1, 4)],
kdims=['x'], vdims=['y']))
def test_dataset_conversion_with_index(self):
df = pd.DataFrame({'y': [1, 2, 3]}, index=[0, 1, 2])
scatter = Dataset(df).to(Scatter, 'index', 'y')
self.assertEqual(scatter, Scatter(([0, 1, 2], [1, 2, 3]), 'index', 'y'))
def test_dataset_conversion_groupby_with_index(self):
df = pd.DataFrame({'y': [1, 2, 3], 'x': [0, 0, 1]}, index=[0, 1, 2])
scatters = Dataset(df).to(Scatter, 'index', 'y')
hmap = HoloMap({0: Scatter(([0, 1], [1, 2]), 'index', 'y'),
1: Scatter([(2, 3)], 'index', 'y')}, 'x')
self.assertEqual(scatters, hmap)
def test_dataset_from_multi_index(self):
df = pd.DataFrame({'x': np.arange(10), 'y': np.arange(10), 'z': np.random.rand(10)})
ds = Dataset(df.groupby(['x', 'y']).mean(), ['x', 'y'])
self.assertEqual(ds, Dataset(df, ['x', 'y']))
def test_dataset_from_multi_index_tuple_dims(self):
df = pd.DataFrame({'x': np.arange(10), 'y': np.arange(10), 'z': np.random.rand(10)})
ds = Dataset(df.groupby(['x', 'y']).mean(), [('x', 'X'), ('y', 'Y')])
self.assertEqual(ds, Dataset(df, [('x', 'X'), ('y', 'Y')]))
def test_dataset_with_interface_column(self):
df = pd.DataFrame([1], columns=['interface'])
ds = Dataset(df)
self.assertEqual(list(ds.data.columns), ['interface'])
class PandasInterfaceTests(BasePandasInterfaceTests):
datatype = 'dataframe'
data_type = pd.DataFrame
__test__ = True
| 43.816667 | 92 | 0.548371 |
8a32da9533e8cbf2df44e287b34e286cffee0d00 | 6,775 | py | Python | tests/nightly/models/test_model_nightly.py | ashwinvaidya17/anomalib | 3f7de1b2f1994cf6d9a389293b4831f679abed58 | [
"Apache-2.0"
] | null | null | null | tests/nightly/models/test_model_nightly.py | ashwinvaidya17/anomalib | 3f7de1b2f1994cf6d9a389293b4831f679abed58 | [
"Apache-2.0"
] | null | null | null | tests/nightly/models/test_model_nightly.py | ashwinvaidya17/anomalib | 3f7de1b2f1994cf6d9a389293b4831f679abed58 | [
"Apache-2.0"
] | null | null | null | """Test Models on all MVTec AD Categories."""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import itertools
import math
import multiprocessing
import random
import tempfile
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Union
import numpy as np
import pandas as pd
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning import seed_everything
from anomalib.utils.sweep.config import flatten_sweep_params
from tests.helpers.dataset import get_dataset_path
from tests.helpers.model import model_load_test, setup_model_train
def get_model_nncf_cat() -> List:
"""Test helper for getting cartesian product of models and categories.
Returns:
List: Returns a combination of models with their nncf support for each category.
"""
model_support = [
("padim", False),
("dfkde", False),
("dfm", False),
("stfpm", False),
# ("stfpm", True),
("patchcore", False),
("cflow", False),
("ganomaly", False),
]
categories = random.sample(
[
"bottle",
"cable",
"capsule",
"carpet",
"grid",
"hazelnut",
"leather",
"metal_nut",
"pill",
"screw",
"tile",
"toothbrush",
"transistor",
"wood",
"zipper",
],
k=3,
)
return [
(model, nncf, category) for ((model, nncf), category) in list(itertools.product(*[model_support, categories]))
]
class TestModel:
"""Run Model on all categories."""
def _test_metrics(self, trainer, config, model, datamodule):
"""Tests the model metrics but also acts as a setup."""
results = trainer.test(model=model, datamodule=datamodule)[0]
thresholds = OmegaConf.load("tests/nightly/models/performance_thresholds.yaml")
threshold = thresholds[config.model.name][config.dataset.category]
if "optimization" in config.keys() and config.optimization.nncf.apply:
threshold = threshold.nncf
if not (np.isclose(results["image_AUROC"], threshold["image_AUROC"], rtol=0.02) or (results["image_AUROC"] >= threshold["image_AUROC"])):
raise AssertionError(
f"results['image_AUROC']:{results['image_AUROC']} >= threshold['image_AUROC']:{threshold['image_AUROC']}"
)
if config.dataset.task == "segmentation":
if not (np.isclose(results["pixel_AUROC"] ,threshold["pixel_AUROC"], rtol=0.02) or (results["pixel_AUROC"] >= threshold["pixel_AUROC"])):
raise AssertionError(
f"results['pixel_AUROC']:{results['pixel_AUROC']} >= threshold['pixel_AUROC']:{threshold['pixel_AUROC']}"
)
return results
def _save_to_csv(self, config: Union[DictConfig, ListConfig], results: Dict):
"""Save model results to csv. Useful for tracking model drift.
Args:
config (Union[DictConfig, ListConfig]): Model config which is also added to csv for complete picture.
results (Dict): Metrics from trainer.test
"""
# Save results in csv for tracking model drift
model_metrics = flatten_sweep_params(config)
# convert dict, list values to string
for key, val in model_metrics.items():
if isinstance(val, (list, dict, ListConfig, DictConfig)):
model_metrics[key] = str(val)
for metric, value in results.items():
model_metrics[metric] = value
model_metrics_df = pd.DataFrame([model_metrics])
result_path = Path(f"tests/artifacts/{datetime.now().strftime('%m_%d_%Y')}.csv")
result_path.parent.mkdir(parents=True, exist_ok=True)
if not result_path.is_file():
model_metrics_df.to_csv(result_path)
else:
model_metrics_df.to_csv(result_path, mode="a", header=False)
def runner(self, run_configs, path, score_type, device_id):
for model_name, nncf, category in run_configs:
try:
with tempfile.TemporaryDirectory() as project_path:
# Fix seed
seed_everything(42, workers=True)
config, datamodule, model, trainer = setup_model_train(
model_name=model_name,
dataset_path=path,
nncf=nncf,
project_path=project_path,
category=category,
score_type=score_type,
device=[device_id],
)
# test model metrics
results = self._test_metrics(trainer=trainer, config=config, model=model, datamodule=datamodule)
# test model load
model_load_test(config=config, datamodule=datamodule, results=results)
self._save_to_csv(config, results)
except AssertionError as assertion_error:
raise Exception(f"Model: {model_name} NNCF:{nncf} Category:{category}") from assertion_error
def test_model(self, path=get_dataset_path(), score_type=None):
run_configs = get_model_nncf_cat()
with ProcessPoolExecutor(
max_workers=torch.cuda.device_count(), mp_context=multiprocessing.get_context("spawn")
) as executor:
jobs = []
for device_id, run_split in enumerate(
range(0, len(run_configs), math.ceil(len(run_configs) / torch.cuda.device_count()))
):
jobs.append(
executor.submit(
self.runner,
run_configs[run_split : run_split + math.ceil(len(run_configs) / torch.cuda.device_count())],
path,
score_type,
device_id,
)
)
for job in jobs:
try:
job.result()
except Exception as e:
raise e
| 38.061798 | 149 | 0.599114 |
2cd0c0bf455d82aca727a08db6c837125a323abb | 7,830 | py | Python | VSR/DataLoader/VirtualFile.py | johnnylili/VideoSuperResolution | 3f7142167b521ae739e7e0414c3c1cb3a82d9041 | [
"MIT"
] | 1 | 2020-03-28T12:41:29.000Z | 2020-03-28T12:41:29.000Z | VSR/DataLoader/VirtualFile.py | ryujaehun/VideoSuperResolution | b3cb9130ecdb8830b0b3a0bb98264b901a37f4c0 | [
"MIT"
] | null | null | null | VSR/DataLoader/VirtualFile.py | ryujaehun/VideoSuperResolution | b3cb9130ecdb8830b0b3a0bb98264b901a37f4c0 | [
"MIT"
] | 1 | 2020-02-25T16:12:05.000Z | 2020-02-25T16:12:05.000Z | """
Copyright: Intel Corp. 2018
Author: Wenyi Tang
Email: wenyi.tang@intel.com
Created Date: May 9th 2018
Updated Date: May 9th 2018
virtual file is an abstraction of a file or
a collection of ordered frames
"""
from pathlib import Path
from io import SEEK_END, BytesIO
from PIL import Image
import numpy as np
from ..Util.Utility import to_list
class File:
def __init__(self, path, rewind=False):
"""
If path is a file, File opens it and calculates its length.
If path is a folder, File organize each file in the folder as alphabet order
Args:
path: path to a node (can be a file or just a folder)
rewind: rewind the file when reaches EOF
"""
self.path = Path(path)
self.file = []
self.length = dict()
mode = 'rb' # mode must be 'rb'
if self.path.is_file():
self.name = self.path.stem
self.file = [self.path]
with self.path.open(mode) as fd:
fd.seek(0, SEEK_END)
self.length[self.path.name] = fd.tell()
elif self.path.is_dir():
self.name = self.path.stem # TODO: is this right?
for _file in self.path.glob('*'):
self.file.append(_file)
with _file.open(mode) as fd:
fd.seek(0, SEEK_END)
self.length[_file.name] = fd.tell()
self.read_file = []
self.read_pointer = 0
self.end_pointer = sum(self.length.values())
self.cur_fd = None
self.rewind = rewind
def __len__(self):
return self.end_pointer
def reopen(self):
self.file = self.read_file + self.file
self.read_file.clear()
self.read_pointer = 0
self.cur_fd = None
def split(self, depth):
pass
def read(self, count=None):
"""
Read `count` bytes
Args:
count: number of bytes to be read, if None, read all bytes of **1** file
Return:
bytes read
"""
if count == 0:
return b''
if not self.cur_fd and self.file:
self.cur_fd = self.file[0].open('rb')
self.read_file.append(self.file[0])
self.file.pop(0)
elif not self.cur_fd:
raise FileNotFoundError('No frames in File')
read_bytes = self.cur_fd.read(count)
if read_bytes:
self.read_pointer += len(read_bytes)
if count and count > len(read_bytes):
return read_bytes + self.read(count - len(read_bytes))
else:
return read_bytes
else:
if self.file:
self.cur_fd.close()
self.cur_fd = self.file[0].open('rb')
self.read_file.append(self.file[0])
self.file.pop(0)
return self.read(count)
elif self.rewind and self.read_file:
self.file = self.read_file.copy()
self.read_file.clear()
self.cur_fd = None
return self.read(count)
else:
raise EOFError('End of File!')
def read_frame(self, frames=1, *args):
pass
def seek(self, offset, where):
"""
Seek the position by `offset` relative to `where`
Args:
offset: move the read pointer by `offset` bytes
where: could be io.SEEK_END, io.SEEK_CUR, io.SEEK_SET
"""
pass
def tell(self):
"""
Tell the current position of the read pointer
"""
return self.read_pointer
def size(self, name):
"""
Get the length of the file named `name`
Return:
length in bytes
"""
path = Path(name)
name = path.stem if path.exists() else name
return self.length.get(name)
_ALLOWED_RAW_FORMAT = [
'YV12',
'YV21',
'NV12',
'NV21',
'RGB4',
'BGR4'
]
class RawFile(File):
def __init__(self, path, mode, size, rewind=False):
"""
Initiate Raw object. The file is lazy loaded, which means
the file is opened but not loaded into memory.
Arguments:
path: file path or handle
mode: since raw file has no headers, type must be explicitly given
size: a tuple of (width, height), must be explicitly given
rewind: rewind the file when reaches EOF
Raise:
TypeError
"""
if not mode.upper() in _ALLOWED_RAW_FORMAT:
raise TypeError('unknown mode: ' + mode)
self.mode = mode.upper()
self.size = to_list(size)
self.pitch, self.channel_pitch = self._get_frame_pitch()
super(RawFile, self).__init__(path, rewind)
def _get_frame_pitch(self):
"""Get bytes length of one frame.
For the detail of mode fourcc, please see https://www.fourcc.org/
RGB, BGR, and UV channel of NV12, NV21 is packed, while YV12 and YV21 is planar, hence we have:
- **channel0** of YV12, YV21, NV12, NV21 if Y
- **channel1** of YV12 is U, of YV21 is V, of NV12 is UV, of NV21 is VU
- **channel2** of YV12 is V, of YV21 is U
"""
mode = self.mode
width, height = self.size
if mode in ('YV12', 'YV21'):
return height * width * 3 // 2, [height * width, height * width // 4, height * width // 4]
if mode in ('NV12', 'NV21'):
return height * width * 3 // 2, [height * width, height * width // 2]
if mode in ('RGB', 'BGR'):
return height * width * 3, [height * width * 3]
def _get_frame_channel_shape(self):
"""Get each channel's shape according to mode and frame length.
For the detail of mode fourcc, please see https://www.fourcc.org/
"""
mode = self.mode
width, height = self.size
if mode in ('YV12', 'YV21'):
return np.array([1, height, width]), np.array([1, height // 2, width // 2]), np.array(
[1, height // 2, width // 2])
if mode in ('NV12', 'NV21'):
return np.array([1, height, width]), np.array([2, height // 2, width // 2])
if mode in ('RGB', 'BGR'):
return np.array([height, width, 3])
def read_frame(self, frames=1, *args):
"""
read number `frames` of the file.
Arguments:
frames: number of frames to be loaded
id: specify frame format to store (default gray-scale)
Raise:
"""
if self.mode in ('YV12', 'YV21', 'NV12', 'NV21',):
# discard uv plain for acceleration
_image_mode = 'L'
else:
_image_mode = 'RGB'
return [Image.frombytes(_image_mode, self.size, self.read(self.pitch)) for _ in range(frames)]
@property
def shape(self):
return self.size
@property
def frames(self):
return (self.end_pointer - self.read_pointer) // self.pitch
class ImageFile(File):
def __init__(self, path, rewind):
"""Open image1 file or a sequence of image1 frames
Args:
path: file path or handle
rewind: rewind the file when reaches EOF
"""
super(ImageFile, self).__init__(path, rewind)
def read_frame(self, frames=1, *args):
"""read number `frames` of the file.
Args:
frames: number of frames to be loaded
"""
image_bytes = [BytesIO(self.read()) for _ in range(frames)]
return [Image.open(fp) for fp in image_bytes]
@property
def shape(self):
with Image.open(self.file[0]) as img:
return img.width, img.height
@property
def frames(self):
return len(self.file)
| 30.466926 | 103 | 0.550702 |
fffad3a2ab65d0103697f2818dcbb4669cf40718 | 14,896 | py | Python | python/ccxt/async_support/__init__.py | brandsimon/ccxt | 4ba548cf253c04e23d8b8e89aa69a3d6001e6918 | [
"MIT"
] | 2 | 2019-07-15T22:39:54.000Z | 2021-05-15T16:13:00.000Z | python/ccxt/async_support/__init__.py | brandsimon/ccxt | 4ba548cf253c04e23d8b8e89aa69a3d6001e6918 | [
"MIT"
] | null | null | null | python/ccxt/async_support/__init__.py | brandsimon/ccxt | 4ba548cf253c04e23d8b8e89aa69a3d6001e6918 | [
"MIT"
] | 2 | 2020-09-08T01:41:24.000Z | 2021-04-30T00:07:59.000Z | # -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.55.48'
# -----------------------------------------------------------------------------
from ccxt.async_support.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import TICK_SIZE # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadSymbol # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RateLimitExceeded # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import OnMaintenance # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import error_hierarchy # noqa: F401
from ccxt.async_support.aax import aax # noqa: F401
from ccxt.async_support.aofex import aofex # noqa: F401
from ccxt.async_support.ascendex import ascendex # noqa: F401
from ccxt.async_support.bequant import bequant # noqa: F401
from ccxt.async_support.bibox import bibox # noqa: F401
from ccxt.async_support.bigone import bigone # noqa: F401
from ccxt.async_support.binance import binance # noqa: F401
from ccxt.async_support.binancecoinm import binancecoinm # noqa: F401
from ccxt.async_support.binanceus import binanceus # noqa: F401
from ccxt.async_support.binanceusdm import binanceusdm # noqa: F401
from ccxt.async_support.bit2c import bit2c # noqa: F401
from ccxt.async_support.bitbank import bitbank # noqa: F401
from ccxt.async_support.bitbay import bitbay # noqa: F401
from ccxt.async_support.bitbns import bitbns # noqa: F401
from ccxt.async_support.bitcoincom import bitcoincom # noqa: F401
from ccxt.async_support.bitfinex import bitfinex # noqa: F401
from ccxt.async_support.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async_support.bitflyer import bitflyer # noqa: F401
from ccxt.async_support.bitforex import bitforex # noqa: F401
from ccxt.async_support.bitget import bitget # noqa: F401
from ccxt.async_support.bithumb import bithumb # noqa: F401
from ccxt.async_support.bitmart import bitmart # noqa: F401
from ccxt.async_support.bitmex import bitmex # noqa: F401
from ccxt.async_support.bitpanda import bitpanda # noqa: F401
from ccxt.async_support.bitso import bitso # noqa: F401
from ccxt.async_support.bitstamp import bitstamp # noqa: F401
from ccxt.async_support.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async_support.bittrex import bittrex # noqa: F401
from ccxt.async_support.bitvavo import bitvavo # noqa: F401
from ccxt.async_support.bitz import bitz # noqa: F401
from ccxt.async_support.bl3p import bl3p # noqa: F401
from ccxt.async_support.braziliex import braziliex # noqa: F401
from ccxt.async_support.btcalpha import btcalpha # noqa: F401
from ccxt.async_support.btcbox import btcbox # noqa: F401
from ccxt.async_support.btcmarkets import btcmarkets # noqa: F401
from ccxt.async_support.btctradeua import btctradeua # noqa: F401
from ccxt.async_support.btcturk import btcturk # noqa: F401
from ccxt.async_support.buda import buda # noqa: F401
from ccxt.async_support.bw import bw # noqa: F401
from ccxt.async_support.bybit import bybit # noqa: F401
from ccxt.async_support.cdax import cdax # noqa: F401
from ccxt.async_support.cex import cex # noqa: F401
from ccxt.async_support.coinbase import coinbase # noqa: F401
from ccxt.async_support.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.async_support.coinbasepro import coinbasepro # noqa: F401
from ccxt.async_support.coincheck import coincheck # noqa: F401
from ccxt.async_support.coinegg import coinegg # noqa: F401
from ccxt.async_support.coinex import coinex # noqa: F401
from ccxt.async_support.coinfalcon import coinfalcon # noqa: F401
from ccxt.async_support.coinfloor import coinfloor # noqa: F401
from ccxt.async_support.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async_support.coinmate import coinmate # noqa: F401
from ccxt.async_support.coinone import coinone # noqa: F401
from ccxt.async_support.coinspot import coinspot # noqa: F401
from ccxt.async_support.crex24 import crex24 # noqa: F401
from ccxt.async_support.currencycom import currencycom # noqa: F401
from ccxt.async_support.delta import delta # noqa: F401
from ccxt.async_support.deribit import deribit # noqa: F401
from ccxt.async_support.digifinex import digifinex # noqa: F401
from ccxt.async_support.eqonex import eqonex # noqa: F401
from ccxt.async_support.equos import equos # noqa: F401
from ccxt.async_support.exmo import exmo # noqa: F401
from ccxt.async_support.exx import exx # noqa: F401
from ccxt.async_support.flowbtc import flowbtc # noqa: F401
from ccxt.async_support.ftx import ftx # noqa: F401
from ccxt.async_support.gateio import gateio # noqa: F401
from ccxt.async_support.gemini import gemini # noqa: F401
from ccxt.async_support.gopax import gopax # noqa: F401
from ccxt.async_support.hbtc import hbtc # noqa: F401
from ccxt.async_support.hitbtc import hitbtc # noqa: F401
from ccxt.async_support.hollaex import hollaex # noqa: F401
from ccxt.async_support.huobi import huobi # noqa: F401
from ccxt.async_support.huobijp import huobijp # noqa: F401
from ccxt.async_support.huobipro import huobipro # noqa: F401
from ccxt.async_support.idex import idex # noqa: F401
from ccxt.async_support.independentreserve import independentreserve # noqa: F401
from ccxt.async_support.indodax import indodax # noqa: F401
from ccxt.async_support.itbit import itbit # noqa: F401
from ccxt.async_support.kraken import kraken # noqa: F401
from ccxt.async_support.kucoin import kucoin # noqa: F401
from ccxt.async_support.kuna import kuna # noqa: F401
from ccxt.async_support.latoken import latoken # noqa: F401
from ccxt.async_support.lbank import lbank # noqa: F401
from ccxt.async_support.liquid import liquid # noqa: F401
from ccxt.async_support.luno import luno # noqa: F401
from ccxt.async_support.lykke import lykke # noqa: F401
from ccxt.async_support.mercado import mercado # noqa: F401
from ccxt.async_support.mixcoins import mixcoins # noqa: F401
from ccxt.async_support.ndax import ndax # noqa: F401
from ccxt.async_support.novadax import novadax # noqa: F401
from ccxt.async_support.oceanex import oceanex # noqa: F401
from ccxt.async_support.okcoin import okcoin # noqa: F401
from ccxt.async_support.okex import okex # noqa: F401
from ccxt.async_support.okex3 import okex3 # noqa: F401
from ccxt.async_support.okex5 import okex5 # noqa: F401
from ccxt.async_support.paymium import paymium # noqa: F401
from ccxt.async_support.phemex import phemex # noqa: F401
from ccxt.async_support.poloniex import poloniex # noqa: F401
from ccxt.async_support.probit import probit # noqa: F401
from ccxt.async_support.qtrade import qtrade # noqa: F401
from ccxt.async_support.ripio import ripio # noqa: F401
from ccxt.async_support.stex import stex # noqa: F401
from ccxt.async_support.therock import therock # noqa: F401
from ccxt.async_support.tidebit import tidebit # noqa: F401
from ccxt.async_support.tidex import tidex # noqa: F401
from ccxt.async_support.timex import timex # noqa: F401
from ccxt.async_support.upbit import upbit # noqa: F401
from ccxt.async_support.vcc import vcc # noqa: F401
from ccxt.async_support.wavesexchange import wavesexchange # noqa: F401
from ccxt.async_support.whitebit import whitebit # noqa: F401
from ccxt.async_support.xena import xena # noqa: F401
from ccxt.async_support.yobit import yobit # noqa: F401
from ccxt.async_support.zaif import zaif # noqa: F401
from ccxt.async_support.zb import zb # noqa: F401
# Identifiers of every exchange class imported above, in the same
# (alphabetical) order as the import list.
exchanges = (
    'aax aofex ascendex bequant bibox bigone binance binancecoinm'
    ' binanceus binanceusdm bit2c bitbank bitbay bitbns bitcoincom'
    ' bitfinex bitfinex2 bitflyer bitforex bitget bithumb bitmart bitmex'
    ' bitpanda bitso bitstamp bitstamp1 bittrex bitvavo bitz bl3p'
    ' braziliex btcalpha btcbox btcmarkets btctradeua btcturk buda bw'
    ' bybit cdax cex coinbase coinbaseprime coinbasepro coincheck coinegg'
    ' coinex coinfalcon coinfloor coinmarketcap coinmate coinone coinspot'
    ' crex24 currencycom delta deribit digifinex eqonex equos exmo exx'
    ' flowbtc ftx gateio gemini gopax hbtc hitbtc hollaex huobi huobijp'
    ' huobipro idex independentreserve indodax itbit kraken kucoin kuna'
    ' latoken lbank liquid luno lykke mercado mixcoins ndax novadax'
    ' oceanex okcoin okex okex3 okex5 paymium phemex poloniex probit'
    ' qtrade ripio stex therock tidebit tidex timex upbit vcc'
    ' wavesexchange whitebit xena yobit zaif zb'
).split()
# Names exported besides the exchange classes themselves; together with
# the error hierarchy and the exchange ids they form the public API.
base = ['Exchange', 'exchanges', 'decimal_to_precision']
__all__ = base + errors.__all__ + exchanges
| 50.83959 | 86 | 0.557398 |
1c02437a279fb55755d05e34b5fdb1f38e9d9979 | 5,379 | py | Python | scraper/combine.py | rohanthomas1202/HypeInvesting-HackUTD | 8e4234ddcc7f207d87223e0986d759d3df0ccdd9 | [
"MIT"
] | null | null | null | scraper/combine.py | rohanthomas1202/HypeInvesting-HackUTD | 8e4234ddcc7f207d87223e0986d759d3df0ccdd9 | [
"MIT"
] | null | null | null | scraper/combine.py | rohanthomas1202/HypeInvesting-HackUTD | 8e4234ddcc7f207d87223e0986d759d3df0ccdd9 | [
"MIT"
] | null | null | null |
from flair.models import TextClassifier
from flair.data import Sentence
from newsapi import NewsApiClient
import tweepy
import praw
class Message:
    """A single scraped post/article: its sentiment, reach and origin."""

    def __init__(self, perception, popularity, platform):
        # perception: -1 negative, 0 neutral/uncertain, +1 positive.
        # popularity: raw engagement count (upvotes, likes, result count).
        # platform:   source label, e.g. "Reddit", "Twitter" or "News".
        self.perception, self.popularity, self.platform = (
            perception, popularity, platform)
class Stock:
    """Aggregated hype metrics for one topic/ticker."""

    def __init__(self, perception, popularity, rating):
        # perception: averaged sentiment, popularity: normalized reach,
        # rating: combined score computed by the scoring script.
        self.perception, self.popularity, self.rating = (
            perception, popularity, rating)
# Shared flair sentiment model, loaded once at import time so each scraper
# class can reuse it (loading the model is slow and memory-heavy).
classifier = TextClassifier.load('en-sentiment')
class Reddit:
    """Scrape Reddit submissions about a topic and score their sentiment."""

    def __init__(self):
        # NOTE(review): these look like placeholder credentials — they must
        # be replaced with real API credentials for the scraper to work.
        username = 'HackUTD'
        password = 'Password!'
        app_id = 'API'
        app_secret = 'Secret'
        self.reddit = praw.Reddit(
            user_agent="Comment Extraction by HypeInvestingHackUTD",
            client_id=app_id,
            client_secret=app_secret,
            username=username,
            password=password,
        )
        # Search across all subreddits rather than a specific one.
        self.subreddit = self.reddit.subreddit("all")

    def getMessages(self, topic):
        """Return a list of Message objects for last week's submissions
        matching *topic*, with sentiment derived from the shared flair
        classifier applied to each submission title."""
        messages = list()
        for submission in self.subreddit.search(query=topic, sort='relevance', time_filter = 'week'):
            sentence = Sentence(submission.title)
            classifier.predict(sentence)
            # Low-confidence predictions are treated as neutral (0).
            if sentence.labels[0].score < 0.8:
                perception = 0
            elif sentence.labels[0].value == 'POSITIVE':
                perception = 1
            else:
                perception = -1
            # popularity = submission score (net upvotes).
            message = Message(perception, submission.score, "Reddit")
            messages.append(message)
        return messages
class Twitter:
    """Scrape recent tweets about a topic and score their sentiment."""

    def __init__(self):
        # NOTE(review): placeholder credentials — replace with real tokens.
        consumer_key = 'Twitter'
        consumer_secret = 'Twitter'
        access_token = 'Twitter'
        access_token_secret = 'Twitter'
        self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        self.auth.set_access_token(access_token, access_token_secret)
        self.api = tweepy.API(self.auth, wait_on_rate_limit=True)

    def getMessages(self, topic):
        """Return Message objects for up to 50 recent English tweets about
        *topic*; popularity is the tweet's favorite count."""
        messages = list()
        # since_id pins the search window to tweets newer than a fixed tweet.
        for pages in tweepy.Cursor(self.api.search_tweets, q=topic, result_type = "mixed", lang = 'en', tweet_mode='extended', since_id='1456942670778191874').items(50):
            sentence = Sentence(pages.full_text)
            classifier.predict(sentence)
            # Low-confidence predictions are treated as neutral (0).
            if sentence.labels[0].score < 0.8:
                perception = 0
            elif sentence.labels[0].value == 'POSITIVE':
                perception = 1
            else:
                perception = -1
            message = Message(perception, pages.favorite_count, "Twitter")
            messages.append(message)
        return messages
class News:
    """Scrape news headlines about a topic via NewsAPI and score sentiment."""

    def __init__(self):
        # NOTE(review): placeholder API key — replace with a real key.
        key = 'News'
        self.api = NewsApiClient(api_key=key)

    def getMessages(self, topic):
        """Return Message objects for up to 100 headlines mentioning *topic*
        in a fixed date window.  Unlike the other scrapers, every message's
        popularity is the *total* result count, not a per-article metric."""
        messages = list()
        # NOTE(review): the date window is hard-coded — presumably this was
        # only meant for the hackathon demo; confirm before reuse.
        all_articles = self.api.get_everything(qintitle=topic,
                                               from_param='2021-11-01',
                                               to='2021-11-13',
                                               language='en',
                                               page_size=100)
        for article in all_articles.get('articles'):
            sentence = Sentence(article.get('title'))
            classifier.predict(sentence)
            # Low-confidence predictions are treated as neutral (0).
            if sentence.labels[0].score < 0.8:
                perception = 0
            elif sentence.labels[0].value == 'POSITIVE':
                perception = 1
            else:
                perception = -1
            message = Message(perception, all_articles.get('totalResults'), "News")
            messages.append(message)
        return messages
# ---------------------------------------------------------------------------
# Driver script: scrape all three sources for one topic, compute a weighted
# popularity score and sentiment average per source, then combine them into
# a single hype rating.
# ---------------------------------------------------------------------------
topic = "Bitcoin"
twitter = Twitter()
reddit = Reddit()
news = News()
# Twitter
twitterMessages = twitter.getMessages(topic)
twitter_sum = 0.0
twitter_perception = 0.0
# Popularity-weighted sentiment; the +1 keeps zero-engagement posts counted.
for twitterMessage in twitterMessages:
    twitter_perception += ((twitterMessage.popularity + 1) * twitterMessage.perception)
    twitter_sum += (twitterMessage.popularity + 1)
# Popularity is capped at 10000 engagements and normalized to [0, 1].
if twitter_sum > 10000:
    twitter_val = 1
else:
    twitter_val = twitter_sum / 10000
# NOTE(review): raises ZeroDivisionError if the search returned no tweets.
twitter_perception = twitter_perception / twitter_sum
# Reddit
redditMessages = reddit.getMessages(topic)
reddit_sum = 0.0
reddit_perception = 0.0
for redditMessage in redditMessages:
    reddit_perception += ((redditMessage.popularity + 1) * redditMessage.perception)
    reddit_sum += (redditMessage.popularity + 1)
if reddit_sum > 10000:
    reddit_val = 1
else:
    reddit_val = reddit_sum / 10000
# NOTE(review): same ZeroDivisionError risk as above when no results.
reddit_perception = reddit_perception / reddit_sum
# News
newsMessages = news.getMessages(topic)
# Every news Message carries the same totalResults count, so index 0 is used.
if newsMessages[0].popularity > 5000:
    news_val = 1
else:
    news_val = newsMessages[0].popularity / 5000
news_perception = 0.0
for newsMessage in newsMessages:
    news_perception += newsMessage.perception
news_perception = news_perception / len(newsMessages)
# Social media (Twitter+Reddit averaged) and news each weigh 50%.
total_score = (((twitter_val + reddit_val) / 2) + news_val) / 2
total_perception = (((twitter_perception + reddit_perception) / 2) + news_perception) / 2
import math
# NOTE(review): abs(x)/x raises ZeroDivisionError when total_perception == 0.
overall_rating = abs(total_perception) / total_perception * pow(math.tanh(8 * total_score * total_perception), 2) * 100 #Overall Rating = tanh^2(constant * popularity * perception) * 100 * -1 if perception is negative, this gives a range from [-100, 100]
print(total_score, total_perception, overall_rating)
stock = Stock(total_perception, total_score, overall_rating)
| 34.261146 | 254 | 0.63655 |
f6d9bcc2b6a291d813e0e0b4729a00c1ed770c93 | 1,605 | py | Python | fooof/tests/core/test_modutils.py | voytekresearch/fooof | 674c495d19588cfa6c43ef046d566b4f29948d84 | [
"Apache-2.0"
] | 55 | 2017-10-21T08:56:14.000Z | 2018-11-02T17:39:22.000Z | fooof/tests/core/test_modutils.py | voytekresearch/fooof | 674c495d19588cfa6c43ef046d566b4f29948d84 | [
"Apache-2.0"
] | 78 | 2017-10-22T22:20:25.000Z | 2018-11-02T18:22:16.000Z | fooof/tests/core/test_modutils.py | voytekresearch/fooof | 674c495d19588cfa6c43ef046d566b4f29948d84 | [
"Apache-2.0"
] | 21 | 2017-11-27T19:28:26.000Z | 2018-10-23T23:16:29.000Z | """Tests for fooof.core.modutils.
Note: the decorators for copying documentation are not currently tested.
"""
from pytest import raises
from fooof.core.modutils import *
###################################################################################################
###################################################################################################
def test_safe_import():
    """safe_import returns a truthy module for real names, falsey otherwise."""
    assert safe_import('numpy')
    assert not safe_import('bad')
def test_check_dependency():
    """check_dependency lets decorated functions run only when the module
    object is available, raising ImportError otherwise."""
    import numpy as np

    @check_dependency(np, 'numpy')
    def with_dep():
        pass
    with_dep()

    missing = None

    @check_dependency(missing, 'bad')
    def without_dep():
        pass
    with raises(ImportError):
        without_dep()
def test_docs_drop_param():
    """docs_drop_param removes the first parameter entry from a docstring
    while leaving the remaining parameters intact."""
    ds = """STUFF
    Parameters
    ----------
    first : thing
        Words, words, words.
    second : stuff
        Words, words, words.
    Returns
    -------
    out : yay
        Words, words, words.
    """
    out = docs_drop_param(ds)
    assert 'first' not in out
    assert 'second' in out
def test_docs_append_to_section():
    """docs_append_to_section inserts extra text at the end of the named
    docstring section."""
    ds = """STUFF
    Parameters
    ----------
    first : thing
        Words, words, words.
    second : stuff
        Words, words, words.
    Returns
    -------
    out : yay
        Words, words, words.
    """
    section = 'Parameters'
    add = \
    """
    third : other_stuff
        Added description.
    """
    new_ds = docs_append_to_section(ds, section, add)
    assert 'third' in new_ds
    assert 'Added description' in new_ds
| 18.882353 | 99 | 0.512773 |
c685e675ba4c3dedf2ebbcf843caa757a342db06 | 484 | py | Python | regexlib/2021-5-15/python_re2_test_file/regexlib_7003.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | 1 | 2022-01-24T14:43:23.000Z | 2022-01-24T14:43:23.000Z | regexlib/2021-5-15/python_re2_test_file/regexlib_7003.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | regexlib/2021-5-15/python_re2_test_file/regexlib_7003.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | # 7003
# .*[Pp]en[Ii1][\$s].*
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"1"*10000+"!1 _SLQ_1"
import re2 as re
from time import perf_counter
# ReDoS benchmark: times a polynomial-backtracking regex against attack
# strings of increasing length.
# NOTE(review): `\$` in a non-raw string is a deprecated escape sequence;
# presumably a raw string r"..." was intended — behavior is identical today.
regex = """.*[Pp]en[Ii1][\$s].*"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack string grows by 10000 chars per iteration, followed by a
    # suffix that forces the match to fail after heavy backtracking.
    ATTACK = "" + "1" * i * 10000 + "!1 _SLQ_1"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *10000}: took {DURATION} seconds!")
85c0b04575654e050f97ef8da4b8651ded6813c4 | 68,198 | py | Python | spyder/app/tests/test_mainwindow.py | neophnx/spyder | f76836ce1b924bcc4efd3f74f2960d26a4e528e0 | [
"MIT"
] | 2 | 2019-04-25T08:25:37.000Z | 2019-04-25T08:25:43.000Z | spyder/app/tests/test_mainwindow.py | neophnx/spyder | f76836ce1b924bcc4efd3f74f2960d26a4e528e0 | [
"MIT"
] | null | null | null | spyder/app/tests/test_mainwindow.py | neophnx/spyder | f76836ce1b924bcc4efd3f74f2960d26a4e528e0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the main window.
"""
# Standard library imports
import os
import os.path as osp
import shutil
import tempfile
try:
from unittest.mock import Mock, MagicMock
except ImportError:
from mock import Mock, MagicMock # Python 2
import re
import sys
import uuid
# Third party imports
from flaky import flaky
from jupyter_client.manager import KernelManager
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from qtpy import PYQT5, PYQT_VERSION
from qtpy.QtCore import Qt, QTimer, QEvent, QUrl
from qtpy.QtTest import QTest
from qtpy.QtGui import QImage
from qtpy.QtWidgets import (QApplication, QFileDialog, QLineEdit, QTabBar,
QToolTip, QWidget)
from qtpy.QtWebEngineWidgets import WEBENGINE
from matplotlib.testing.compare import compare_images
import nbconvert
# Local imports
from spyder import __trouble_url__, __project_url__
from spyder.app import start
from spyder.app.mainwindow import MainWindow # Tests fail without this import
from spyder.config.base import get_home_dir, get_module_path
from spyder.config.main import CONF
from spyder.widgets.dock import TabFilter
from spyder.preferences.runconfig import RunConfiguration
from spyder.plugins.base import PluginWindow
from spyder.plugins.help.widgets import ObjectComboBox
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.ipythonconsole.utils.kernelspec import SpyderKernelSpec
from spyder.py3compat import PY2, to_text_string
from spyder.utils.programs import is_module_installed
from spyder.widgets.dock import DockTitleBar
# For testing various Spyder urls
if not PY2:
from urllib.request import urlopen
from urllib.error import URLError
else:
from urllib2 import urlopen, URLError
# =============================================================================
# ---- Constants
# =============================================================================
# Location of this file
LOCATION = osp.realpath(osp.join(os.getcwd(), osp.dirname(__file__)))
# Time to wait until the IPython console is ready to receive input
# (in milliseconds)
SHELL_TIMEOUT = 20000
# Need longer EVAL_TIMEOUT, because need to cythonize and C compile ".pyx" file
# before import and eval it
COMPILE_AND_EVAL_TIMEOUT = 30000
# Time to wait for the IPython console to evaluate something (in
# milliseconds)
EVAL_TIMEOUT = 3000
# =============================================================================
# ---- Utility functions
# =============================================================================
def open_file_in_editor(main_window, fname, directory=None):
    """Open a file using the Editor and its open file dialog.

    Types *fname* into the first line edit of any top-level QFileDialog
    and confirms with Enter; *directory* optionally repoints the dialog.
    """
    for dialog in QApplication.topLevelWidgets():
        if not isinstance(dialog, QFileDialog):
            continue
        if directory is not None:
            dialog.setDirectory(directory)
        dialog.findChildren(QLineEdit)[0].setText(fname)
        QTest.keyClick(dialog, Qt.Key_Enter)
def get_thirdparty_plugin(main_window, plugin_title):
    """Get a reference to the thirdparty plugin with the title given.

    Returns the first matching plugin, or None when no plugin matches
    (mirroring the implicit None of a plain search loop).
    """
    return next(
        (plugin for plugin in main_window.thirdparty_plugins
         if plugin.get_plugin_title() == plugin_title),
        None,
    )
def reset_run_code(qtbot, shell, code_editor, nsb):
    """Reset state after a run code test.

    Clears the IPython namespace with ``%reset -f``, waits until the
    Variable Explorer (*nsb*) shows no rows, then returns keyboard focus
    and the cursor to the top of the editor for the next run.
    """
    with qtbot.waitSignal(shell.executed):
        shell.execute('%reset -f')
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 0, timeout=EVAL_TIMEOUT)
    code_editor.setFocus()
    # Ctrl+Home moves the cursor back to the beginning of the file.
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
def start_new_kernel(startup_timeout=60, kernel_name='python', spykernel=False,
                     **kwargs):
    """Start a new kernel, and return its Manager and Client.

    When *spykernel* is True the kernel uses a SpyderKernelSpec, i.e. it
    behaves like the kernels Spyder launches itself.  Extra *kwargs* are
    forwarded to ``KernelManager.start_kernel``.  If the kernel is not
    ready within *startup_timeout* seconds it is shut down and the
    RuntimeError is re-raised.
    """
    km = KernelManager(kernel_name=kernel_name)
    if spykernel:
        km._kernel_spec = SpyderKernelSpec()
    km.start_kernel(**kwargs)
    kc = km.client()
    kc.start_channels()
    try:
        kc.wait_for_ready(timeout=startup_timeout)
    except RuntimeError:
        # Clean up the half-started kernel before propagating the failure.
        kc.stop_channels()
        km.shutdown_kernel()
        raise
    return km, kc
def find_desired_tab_in_window(tab_name, window):
    """Return the (tabbar, index) pair showing *tab_name*, else (None, None)."""
    wanted = str(tab_name)
    for tabbar in window.findChildren(QTabBar):
        for index in range(tabbar.count()):
            if tabbar.tabText(index) == wanted:
                return tabbar, index
    return None, None
# =============================================================================
# ---- Fixtures
# =============================================================================
@pytest.fixture
def main_window(request):
    """Main Window fixture.

    Applies per-test configuration (introspection, single-instance mode,
    and any ``spy_config`` tuple passed through ``parametrize``), starts a
    full Spyder main window, and registers a finalizer that closes it.
    """
    # Tests assume inline backend
    CONF.set('ipython_console', 'pylab/backend', 0)
    # Test assume the plots are rendered in the console as png
    CONF.set('plots', 'mute_inline_plotting', False)
    CONF.set('ipython_console', 'pylab/inline/figure_format', 0)
    # Check if we need to use introspection in a given test
    # (it's faster and less memory consuming not to use it!)
    try:
        use_introspection = request.node.get_marker('use_introspection')
    except AttributeError:
        use_introspection = False
    if use_introspection:
        os.environ['SPY_TEST_USE_INTROSPECTION'] = 'True'
    else:
        try:
            os.environ.pop('SPY_TEST_USE_INTROSPECTION')
        except KeyError:
            pass
    # Only use single_instance mode for tests that require it
    try:
        single_instance = request.node.get_marker('single_instance')
    except AttributeError:
        single_instance = False
    if single_instance:
        CONF.set('main', 'single_instance', True)
    else:
        CONF.set('main', 'single_instance', False)
    # Get config values passed in parametrize and apply them
    try:
        param = request.param
        if isinstance(param, dict) and 'spy_config' in param:
            CONF.set(*param['spy_config'])
    except AttributeError:
        # Not a parametrized run: request has no .param attribute.
        pass
    # Start the window
    window = start.main()
    # Teardown
    def close_window():
        window.close()
    request.addfinalizer(close_window)
    return window
# =============================================================================
# ---- Tests
# =============================================================================
# IMPORTANT NOTE: Please leave this test to be the first one here to
# avoid possible timeouts in Appveyor
@pytest.mark.slow
@pytest.mark.use_introspection
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or not PY2,
                    reason="Times out on AppVeyor and fails on PY3/PyQt 5.6")
def test_calltip(main_window, qtbot):
    """Test that the calltip in editor is hidden when matching ')' is found.

    Opens a buffer whose second line ends in an open call, types '(' to
    trigger the signature calltip, then checks the calltip disappears once
    the closing parenthesis is typed.
    """
    # Load test file
    text = 'a = [1,2,3]\n(max'
    lsp_client = main_window.lspmanager.clients['python']['instance']
    with qtbot.waitSignal(lsp_client.sig_initialize, timeout=30000):
        main_window.editor.new(fname="test.py", text=text)
    code_editor = main_window.editor.get_focus_widget()
    # Set text to start
    # with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
    #     code_editor.set_text(text)
    #     code_editor.document_did_change()
    code_editor.set_text(text)
    code_editor.go_to_line(2)
    code_editor.move_cursor(4)
    calltip = code_editor.calltip_widget
    assert not calltip.isVisible()
    # Typing '(' after 'max' requests the signature from the LSP server.
    with qtbot.waitSignal(code_editor.sig_signature_invoked, timeout=30000):
        qtbot.keyPress(code_editor, Qt.Key_ParenLeft, delay=3000)
    # qtbot.keyPress(code_editor, Qt.Key_A, delay=1000)
    # qtbot.wait(1000)
    # print(calltip.isVisible())
    qtbot.waitUntil(lambda: calltip.isVisible(), timeout=3000)
    qtbot.keyPress(code_editor, Qt.Key_ParenRight, delay=1000)
    qtbot.keyPress(code_editor, Qt.Key_Space)
    qtbot.waitUntil(lambda: not calltip.isVisible(), timeout=3000)
    assert not QToolTip.isVisible()
    qtbot.keyPress(code_editor, Qt.Key_ParenRight, delay=1000)
    qtbot.keyPress(code_editor, Qt.Key_Enter, delay=1000)
    main_window.editor.close_file()
@pytest.mark.slow
def test_lock_action(main_window):
    """Test the lock interface action.

    Locked (checked, the default): dock title bars are plain empty
    QWidgets.  Unlocked: every plugin shows the custom DockTitleBar.
    """
    action = main_window.lock_interface_action
    plugins = main_window.widgetlist
    # By default the action is checked
    assert action.isChecked()
    # In this state the title bar is an empty QWidget
    for plugin in plugins:
        title_bar = plugin.dockwidget.titleBarWidget()
        assert not isinstance(title_bar, DockTitleBar)
        assert isinstance(title_bar, QWidget)
    # Test that our custom title bar is shown when the action
    # is unchecked
    action.setChecked(False)
    for plugin in plugins:
        title_bar = plugin.dockwidget.titleBarWidget()
        assert isinstance(title_bar, DockTitleBar)
    # Restore default state
    action.setChecked(True)
@pytest.mark.slow
def test_default_plugin_actions(main_window, qtbot):
    """Test the effect of dock, undock, close and toggle view actions.

    Uses the File Explorer plugin as a representative plugin and checks
    the dockwidget/undocked-window state after each action.
    """
    # Use a particular plugin
    file_explorer = main_window.explorer
    # Undock action
    file_explorer.undock_action.triggered.emit(True)
    qtbot.wait(500)
    assert not file_explorer.dockwidget.isVisible()
    assert file_explorer.undocked_window is not None
    assert isinstance(file_explorer.undocked_window, PluginWindow)
    assert file_explorer.undocked_window.centralWidget() == file_explorer
    # Dock action
    file_explorer.dock_action.triggered.emit(True)
    qtbot.wait(500)
    assert file_explorer.dockwidget.isVisible()
    assert file_explorer.undocked_window is None
    # Close action
    file_explorer.close_plugin_action.triggered.emit(True)
    qtbot.wait(500)
    assert not file_explorer.dockwidget.isVisible()
    assert not file_explorer.toggle_view_action.isChecked()
    # Toggle view action
    file_explorer.toggle_view_action.setChecked(True)
    assert file_explorer.dockwidget.isVisible()
@pytest.mark.slow
@pytest.mark.parametrize('main_window', [{'spy_config': ('main', 'opengl', 'software')}], indirect=True)
def test_opengl_implementation(main_window, qtbot):
    """
    Test that we are setting the selected OpenGL implementation.

    The parametrized fixture starts the window with the 'software'
    OpenGL setting, which the window must report back.
    """
    assert main_window._test_setting_opengl('software')
    # Restore default config value
    CONF.set('main', 'opengl', 'automatic')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(np.__version__ < '1.14.0', reason="This only happens in Numpy 1.14+")
@pytest.mark.parametrize('main_window', [{'spy_config': ('variable_explorer', 'minmax', True)}], indirect=True)
def test_filter_numpy_warning(main_window, qtbot):
    """
    Test that we filter a warning shown when an array contains nan
    values and the Variable Explorer option 'Show arrays min/max'
    is on.

    For issue 7063
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Create an array with a nan value
    with qtbot.waitSignal(shell.executed):
        shell.execute('import numpy as np; A=np.full(16, np.nan)')
    qtbot.wait(1000)
    # Assert that no warnings are shown in the console
    assert "warning" not in control.toPlainText()
    assert "Warning" not in control.toPlainText()
    # Restore default config value
    CONF.set('variable_explorer', 'minmax', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="Times out in PY2")
def test_get_help_combo(main_window, qtbot):
    """
    Test that Help can display docstrings for names typed in its combobox.

    Covers full module names ('numpy'), dotted attributes
    ('numpy.arange') and console aliases ('np', 'np.arange').
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    help_plugin = main_window.help
    webview = help_plugin.rich_text.webview._webview
    if WEBENGINE:
        webpage = webview.page()
    else:
        webpage = webview.page().mainFrame()
    # --- From the console ---
    # Write some object in the console
    with qtbot.waitSignal(shell.executed):
        shell.execute('import numpy as np')
    # Get help - numpy
    help_plugin.combo.setFocus()
    qtbot.keyClicks(help_plugin.combo, 'numpy', delay=100)
    # Check that a expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
    # Get help - numpy.arange
    qtbot.keyClick(help_plugin.combo, Qt.Key_Right)
    qtbot.keyClicks(help_plugin.combo, '.arange', delay=100)
    # Check that a expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
    # Get help - np
    qtbot.keyClicks(help_plugin.combo, 'np', delay=100)
    # Check that a expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
    # Get help - np.arange
    qtbot.keyClick(help_plugin.combo, Qt.Key_Right)
    qtbot.keyClicks(help_plugin.combo, '.arange', delay=100)
    # Check that a expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' and os.environ.get('CI') is not None,
                    reason="Times out on AppVeyor")
def test_get_help_ipython_console(main_window, qtbot):
    """Test that Help works when called from the IPython console.

    Types 'runfile' at the prompt, asks for inspection, and checks the
    Help pane renders its docstring.
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    help_plugin = main_window.help
    webview = help_plugin.rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
    # Write some object in the console
    qtbot.keyClicks(control, 'runfile')
    # Get help
    control.inspect_current_object()
    # Check that a expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "namespace"), timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' and os.environ.get('CI') is not None,
                    reason="Times out on AppVeyor")
@pytest.mark.use_introspection
def test_get_help_editor(main_window, qtbot):
    """Test that Help works when called from the Editor.

    Writes 'range' in a new buffer and asks the editorstack to inspect
    it; the Help pane must render the LSP-provided documentation.
    """
    help_plugin = main_window.help
    webview = help_plugin.rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
    # config_status = main_window.lspmanager.clients['python']['status']
    # if config_status == main_window.lspmanager.RUNNING:
    #     main_window.lspmanager.close_client('python')
    # with qtbot.waitSignal(main_window.editor.sig_lsp_notification,
    #                       timeout=30000):
    main_window.editor.new(fname="test.py", text="")
    code_editor = main_window.editor.get_focus_widget()
    editorstack = main_window.editor.get_current_editorstack()
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_open()
    # Write some object in the editor
    code_editor.set_text('range')
    code_editor.move_cursor(len('range'))
    with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
        code_editor.document_did_change()
    # Get help
    with qtbot.waitSignal(code_editor.sig_display_signature, timeout=30000):
        editorstack.inspect_current_object()
    # Check that a expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "range"), timeout=30000)
@pytest.mark.slow
def test_window_title(main_window, tmpdir):
    """Test window title with non-ascii characters.

    Opens a project whose path and a window title both contain non-ascii
    text and checks the computed title includes all expected strings.
    """
    projects = main_window.projects
    # Create a project in non-ascii path
    path = to_text_string(tmpdir.mkdir(u'測試'))
    projects.open_project(path=path)
    # Set non-ascii window title
    main_window.window_title = u'اختبار'
    # Assert window title is computed without errors
    # and has the expected strings
    main_window.set_window_title()
    title = main_window.base_title
    assert u'Spyder' in title
    assert u'Python' in title
    assert u'اختبار' in title
    assert u'測試' in title
    projects.close_project()
@pytest.mark.slow
@pytest.mark.single_instance
@pytest.mark.skipif(PY2 and os.environ.get('CI', None) is None,
                    reason="It's not meant to be run outside of CIs in Python 2")
def test_single_instance_and_edit_magic(main_window, qtbot, tmpdir):
    """Test single instance mode and for %edit magic.

    Single instance: trying to acquire Spyder's lock file from the
    kernel must fail because this window already holds it.  %edit: the
    magic must open the given file in a new Editor tab.
    """
    editorstack = main_window.editor.get_current_editorstack()
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    spy_dir = osp.dirname(get_module_path('spyder'))
    lock_code = ("import sys\n"
                 "sys.path.append(r'{spy_dir_str}')\n"
                 "from spyder.config.base import get_conf_path\n"
                 "from spyder.utils.external import lockfile\n"
                 "lock_file = get_conf_path('spyder.lock')\n"
                 "lock = lockfile.FilesystemLock(lock_file)\n"
                 "lock_created = lock.lock()".format(spy_dir_str=spy_dir))
    # Test single instance
    with qtbot.waitSignal(shell.executed):
        shell.execute(lock_code)
    assert not shell.get_value('lock_created')
    # Test %edit magic
    n_editors = editorstack.get_stack_count()
    p = tmpdir.mkdir("foo").join("bar.py")
    p.write(lock_code)
    with qtbot.waitSignal(shell.executed):
        shell.execute('%edit {}'.format(to_text_string(p)))
    qtbot.wait(3000)
    assert editorstack.get_stack_count() == n_editors + 1
    assert editorstack.get_current_editor().toPlainText() == lock_code
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It fails sometimes")
def test_move_to_first_breakpoint(main_window, qtbot):
    """Test that we move to the first breakpoint if there's one present.

    Sets a breakpoint on line 10 of script.py, starts debugging, and
    checks pdb stops there; then repeats with a breakpoint on the first
    executable line.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    # Main variables
    control = shell._control
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()
    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # Set breakpoint
    code_editor.debugger.toogle_breakpoint(line_number=10)
    qtbot.wait(500)
    # Click the debug button
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)
    # Verify that we are at first breakpoint
    shell.clear_console()
    qtbot.wait(500)
    shell.kernel_client.input("list")
    qtbot.wait(500)
    assert "1--> 10 arr = np.array(li)" in control.toPlainText()
    # Exit debugging
    shell.kernel_client.input("exit")
    qtbot.wait(500)
    # Set breakpoint on first line with code
    code_editor.debugger.toogle_breakpoint(line_number=2)
    qtbot.wait(500)
    # Click the debug button
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)
    # Verify that we are still on debugging
    assert shell._reading
    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.environ.get('CI', None) is None or sys.platform == 'darwin',
                    reason="It's not meant to be run locally and fails in macOS")
def test_runconfig_workdir(main_window, qtbot, tmpdir):
    """Test runconfig workdir options.

    Runs script.py once with the 'current working directory' option and
    once with a fixed directory, checking the kernel's cwd each time.
    """
    CONF.set('run', 'configurations', [])
    # ---- Load test file ----
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # --- Use cwd for this file ---
    rc = RunConfiguration().get()
    rc['file_dir'] = False
    rc['cw_dir'] = True
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])
    # --- Run test file ---
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    # --- Assert we're in cwd after execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('import os; current_dir = os.getcwd()')
    assert shell.get_value('current_dir') == get_home_dir()
    # --- Use fixed execution dir for test file ---
    temp_dir = str(tmpdir.mkdir("test_dir"))
    rc['file_dir'] = False
    rc['cw_dir'] = False
    rc['fixed_dir'] = True
    rc['dir'] = temp_dir
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])
    # --- Run test file ---
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    # --- Assert we're in fixed dir after execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('import os; current_dir = os.getcwd()')
    assert shell.get_value('current_dir') == temp_dir
    # ---- Closing test file and resetting config ----
    main_window.editor.close_file()
    CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif((os.name == 'nt' and PY2) or sys.platform == 'darwin',
                    reason="It's failing there")
def test_dedicated_consoles(main_window, qtbot):
    """Test running code in dedicated consoles.

    With 'current' and 'systerm' run options off, running a file must
    create a dedicated console tab named after the file, populate the
    Variable Explorer, and clear its namespace on every re-run.
    """
    # ---- Load test file ----
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # --- Set run options for this file ---
    rc = RunConfiguration().get()
    # A dedicated console is used when these two options are False
    rc['current'] = rc['systerm'] = False
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])
    # --- Run test file and assert that we get a dedicated console ---
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    nsb = main_window.variableexplorer.get_focus_widget()
    assert len(main_window.ipyconsole.get_clients()) == 2
    assert main_window.ipyconsole.filenames == ['', test_file]
    assert main_window.ipyconsole.tabwidget.tabText(1) == 'script.py/A'
    qtbot.wait(500)
    assert nsb.editor.model.rowCount() == 4
    # --- Assert only runfile text is present and there's no banner text ---
    # See PR #5301
    text = control.toPlainText()
    assert ('runfile' in text) and not ('Python' in text or 'IPython' in text)
    # --- Clean namespace after re-execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('zz = -1')
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    assert not shell.is_defined('zz')
    # --- Assert runfile text is present after reruns ---
    assert 'runfile' in control.toPlainText()
    # ---- Closing test file and resetting config ----
    main_window.editor.close_file()
    CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
def test_connection_to_external_kernel(main_window, qtbot):
    """Test that only Spyder kernels are connected to the Variable Explorer.

    A generic external kernel must show no variables, while an external
    kernel started with the Spyder kernel spec must show them.
    """
    # Test with a generic kernel
    km, kc = start_new_kernel()
    main_window.ipyconsole._create_client_for_kernel(kc.connection_file, None,
                                                     None, None)
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')
    # Assert that there are no variables in the variable explorer
    main_window.variableexplorer.visibility_changed(True)
    nsb = main_window.variableexplorer.get_focus_widget()
    qtbot.wait(500)
    assert nsb.editor.model.rowCount() == 0
    # Test with a kernel from Spyder
    spykm, spykc = start_new_kernel(spykernel=True)
    main_window.ipyconsole._create_client_for_kernel(spykc.connection_file, None,
                                                     None, None)
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')
    # Assert that a variable is visible in the variable explorer
    main_window.variableexplorer.visibility_changed(True)
    nsb = main_window.variableexplorer.get_focus_widget()
    qtbot.wait(500)
    assert nsb.editor.model.rowCount() == 1
    # Shutdown the kernels
    spykm.shutdown_kernel(now=True)
    km.shutdown_kernel(now=True)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_change_types_in_varexp(main_window, qtbot):
    """Test that variable types can't be changed in the Variable Explorer."""
    # Create object
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Edit object
    main_window.variableexplorer.visibility_changed(True)
    nsb = main_window.variableexplorer.get_focus_widget()
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() > 0, timeout=EVAL_TIMEOUT)
    nsb.editor.setFocus()
    nsb.editor.edit_item()

    # Try to change the int's type by typing a string into the inline editor
    qtbot.keyClicks(QApplication.focusWidget(), "'s'")
    qtbot.keyClick(QApplication.focusWidget(), Qt.Key_Enter)
    qtbot.wait(1000)

    # Assert object remains the same (the type change was rejected)
    assert shell.get_value('a') == 10
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_ipython_console(main_window, qtbot, tmpdir, test_directory):
    """
    Test synchronization with working directory and File Explorer when
    changing cwd in the IPython console.
    """
    wdir = main_window.workingdirectory
    treewidget = main_window.explorer.fileexplorer.treewidget
    shell = main_window.ipyconsole.get_current_shellwidget()

    # Wait until the window is fully up
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Create temp dir (parametrized name exercises non-ASCII paths too)
    temp_dir = to_text_string(tmpdir.mkdir(test_directory))

    # Change directory in IPython console using %cd
    with qtbot.waitSignal(shell.executed):
        shell.execute(u"%cd {}".format(temp_dir))
    qtbot.wait(1000)

    # Assert that cwd changed in workingdirectory
    assert osp.normpath(wdir.history[-1]) == osp.normpath(temp_dir)

    # Assert that cwd changed in explorer
    assert osp.normpath(treewidget.get_current_folder()) == osp.normpath(temp_dir)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_explorer(main_window, qtbot, tmpdir, test_directory):
    """
    Test synchronization with working directory and IPython console when
    changing directories in the File Explorer.
    """
    # References to the plugins involved in the synchronization.
    working_dir_plugin = main_window.workingdirectory
    file_explorer = main_window.explorer
    console_shell = main_window.ipyconsole.get_current_shellwidget()

    # Wait until the window is fully up.
    qtbot.waitUntil(lambda: console_shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Create a temporary directory (parametrized to cover non-ASCII names)
    # and switch to it through the File Explorer.
    new_cwd = to_text_string(tmpdir.mkdir(test_directory))
    file_explorer.chdir(new_cwd)
    qtbot.wait(1000)

    # The working-directory plugin must have recorded the change...
    assert osp.normpath(working_dir_plugin.history[-1]) == osp.normpath(new_cwd)

    # ...and the IPython console must have followed along.
    assert osp.normpath(new_cwd) == osp.normpath(console_shell._cwd)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif((os.name == 'nt' or not is_module_installed('Cython') or
                     sys.platform == 'darwin'),
                    reason="Hard to test on Windows and macOS and Cython is needed")
def test_run_cython_code(main_window, qtbot):
    """Test all the different ways we have to run Cython code"""
    # ---- Setup ----
    # Get a reference to the code editor widget
    code_editor = main_window.editor.get_focus_widget()

    # ---- Run pyx file ----
    # Load test file
    main_window.editor.load(osp.join(LOCATION, 'pyx_script.pyx'))

    # Run file (F5); Cython compilation happens behind the scenes
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.get_focus_widget()

    # Wait until an object appears (longer timeout: compilation is slow)
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 1,
                    timeout=COMPILE_AND_EVAL_TIMEOUT)

    # Verify result: the script computes 10! == 3628800
    shell = main_window.ipyconsole.get_current_shellwidget()
    assert shell.get_value('a') == 3628800

    # Reset and close file
    reset_run_code(qtbot, shell, code_editor, nsb)
    main_window.editor.close_file()

    # ---- Import pyx file ----
    # Load test file (a .py that imports the Cython module)
    main_window.editor.load(osp.join(LOCATION, 'pyx_lib_import.py'))

    # Run file
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 1,
                    timeout=COMPILE_AND_EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('b') == 3628800

    # Close file
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows.")
def test_open_notebooks_from_project_explorer(main_window, qtbot, tmpdir):
    """Test that notebooks are open from the Project explorer."""
    projects = main_window.projects
    editorstack = main_window.editor.get_current_editorstack()

    # Create a temp project directory
    project_dir = to_text_string(tmpdir.mkdir('test'))

    # Copy the fixture notebook into the project dir
    nb = osp.join(LOCATION, 'notebook.ipynb')
    shutil.copy(nb, osp.join(project_dir, 'notebook.ipynb'))

    # Create project
    with qtbot.waitSignal(projects.sig_project_loaded):
        projects._create_project(project_dir)

    # Select notebook in the project explorer
    idx = projects.explorer.treewidget.get_index('notebook.ipynb')
    projects.explorer.treewidget.setCurrentIndex(idx)

    # Press Enter there
    qtbot.keyClick(projects.explorer.treewidget, Qt.Key_Enter)

    # Assert that notebook was open
    assert 'notebook.ipynb' in editorstack.get_current_filename()

    # Convert notebook to a Python file
    projects.explorer.treewidget.convert_notebook(osp.join(project_dir, 'notebook.ipynb'))

    # Assert the converted script was opened in the editor
    assert 'untitled0.py' in editorstack.get_current_filename()

    # Assert its contents are the expected ones
    # nbconvert 5.4.0 changed the header it emits for converted notebooks
    file_text = editorstack.get_current_editor().toPlainText()
    if nbconvert.__version__ >= '5.4.0':
        expected_text = ('#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:'
                         '\n\n\n1 + 1\n\n\n# In[ ]:\n\n\n\n\n\n')
    else:
        expected_text = '\n# coding: utf-8\n\n# In[1]:\n\n\n1 + 1\n\n\n'
    assert file_text == expected_text

    # Close project
    projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_set_new_breakpoints(main_window, qtbot):
    """Test that new breakpoints are set in the IPython console."""
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button to start a Pdb session
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Set a breakpoint *while debugging*; it must propagate to the kernel
    # ('toogle_breakpoint' [sic] is the debugger API's spelling)
    code_editor = main_window.editor.get_focus_widget()
    code_editor.debugger.toogle_breakpoint(line_number=6)
    qtbot.wait(500)

    # Verify via Pdb's 'b' command that the breakpoint was registered
    shell.kernel_client.input("b")
    qtbot.wait(500)
    assert "1 breakpoint keep yes at {}:6".format(test_file) in control.toPlainText()

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_run_code(main_window, qtbot, tmpdir):
    """Test all the different ways we have to run code"""
    # ---- Setup ----
    # Copy the script into a path with quotes and non-ASCII characters to
    # exercise path handling in the run machinery.
    p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
         .join(u"runtest's file èáïü Øαôå 字分误.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.get_focus_widget()

    # ---- Run file ---- (F5)
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run lines ---- (F9, line by line through the whole file)
    for _ in range(code_editor.blockCount()):
        qtbot.keyClick(code_editor, Qt.Key_F9)
        qtbot.wait(200)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run cell and advance ---- (Shift+Enter)
    # Run the three cells present in file
    for _ in range(4):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
        qtbot.wait(500)

    # Check for errors and the runcell function
    assert 'runcell' in shell._control.toPlainText()
    assert 'Error:' not in shell._control.toPlainText()

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert ']: 10\n' in shell._control.toPlainText()
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run cell ---- (Ctrl+Enter, no advance)
    # Run the first cell in file
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ControlModifier)

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10

    # Press Ctrl+Enter a second time to verify that we're *not* advancing
    # to the next cell
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ControlModifier)
    assert nsb.editor.model.rowCount() == 1

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Re-run last cell ---- (Alt+Enter)
    # Run the first two cells in file
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(500)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)

    # Wait until objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 2,
                    timeout=EVAL_TIMEOUT)

    # Clean namespace
    with qtbot.waitSignal(shell.executed):
        shell.execute('%reset -f')

    # Wait until there are no objects in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 0,
                    timeout=EVAL_TIMEOUT)

    # Re-run last cell: only the second cell's variable should reappear
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.AltModifier)

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)

    assert shell.get_value('li') == [1, 2, 3]

    # ---- Closing test file ----
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
@pytest.mark.parametrize('main_window',
                         [{'spy_config': ('editor', 'run_cell_copy', True)}],
                         indirect=True)
def test_run_cell_copy(main_window, qtbot, tmpdir):
    """Test all the different ways we have to run code"""
    # With run_cell_copy enabled (via the parametrized config above), cell
    # contents are pasted into the console instead of using runcell.
    # ---- Setup ----
    p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
         .join(u"runtest's file èáïü Øαôå 字分误.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.get_focus_widget()

    # ---- Run cell and advance ----
    # Run the three cells present in file
    for _ in range(4):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
        qtbot.wait(500)

    # Check for errors and the copied code: the raw source must appear in
    # the console and runcell must NOT be used.
    assert 'runcell' not in shell._control.toPlainText()
    assert 'a = 10' in shell._control.toPlainText()
    assert 'Error:' not in shell._control.toPlainText()

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert ']: 10\n' in shell._control.toPlainText()
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    # ---- Closing test file and reset config ----
    main_window.editor.close_file()
    CONF.set('editor', 'run_cell_copy', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or os.environ.get('CI', None) is None or PYQT5,
                    reason="It times out sometimes on Windows, it's not "
                           "meant to be run outside of a CI and it segfaults "
                           "too frequently in PyQt5")
def test_open_files_in_new_editor_window(main_window, qtbot):
    """
    This tests that opening files in a new editor window
    is working as expected.

    Test for issue 4085
    """
    # Set a timer to manipulate the open dialog while it's running,
    # because editor.load() below blocks on a modal file dialog.
    QTimer.singleShot(2000, lambda: open_file_in_editor(main_window,
                                                        'script.py',
                                                        directory=LOCATION))

    # Create a new editor window
    # Note: editor.load() uses the current editorstack by default
    main_window.editor.create_new_window()
    main_window.editor.load()

    # Perform the test
    # Note: There's always one file open in the Editor, so opening one
    # more must leave exactly two in the stack.
    editorstack = main_window.editor.get_current_editorstack()
    assert editorstack.get_stack_count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
def test_close_when_file_is_changed(main_window, qtbot):
    """Test closing spyder when there is a file with modifications open."""
    # Make sure the console is fully up before touching the editor.
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Open the test script and mark its document as modified.
    script_path = osp.join(LOCATION, 'script.py')
    main_window.editor.load(script_path)
    stack = main_window.editor.get_current_editorstack()
    stack.get_current_editor().document().setModified(True)

    # Give the application time to crash if the bug regresses.
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_maximize_minimize_plugins(main_window, qtbot):
    """Test that the maximize button is working correctly."""
    # Give focus to the Editor so it becomes the plugin to be maximized.
    main_window.editor.get_focus_widget().setFocus()

    # Locate the maximize button on the main toolbar.
    action = main_window.maximize_action
    button = main_window.main_toolbar.widgetForAction(action)

    # A first click maximizes the Editor...
    qtbot.mouseClick(button, Qt.LeftButton)
    assert main_window.editor.ismaximized

    # ...and a second click restores (minimizes) it again.
    qtbot.mouseClick(button, Qt.LeftButton)
    assert not main_window.editor.ismaximized
@flaky(max_runs=3)
@pytest.mark.skipif((os.name == 'nt' or
                     os.environ.get('CI', None) is not None and PYQT_VERSION >= '5.9'),
                    reason="It times out on Windows and segfaults in our CIs with PyQt >= 5.9")
def test_issue_4066(main_window, qtbot):
    """
    Test for a segfault when these steps are followed:

    1. Open an object present in the Variable Explorer (e.g. a list).
    2. Delete that object in its corresponding console while its
       editor is still open.
    3. Closing that editor by pressing its *Ok* button.
    """
    # Create the object
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('myobj = [1, 2, 3]')

    # Open editor associated with that object and get a reference to it
    nsb = main_window.variableexplorer.get_focus_widget()
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() > 0, timeout=EVAL_TIMEOUT)
    nsb.editor.setFocus()
    nsb.editor.edit_item()
    obj_editor_id = list(nsb.editor.delegate._editors.keys())[0]
    obj_editor = nsb.editor.delegate._editors[obj_editor_id]['editor']

    # Move to the IPython console and delete that object
    main_window.ipyconsole.get_focus_widget().setFocus()
    with qtbot.waitSignal(shell.executed):
        shell.execute('del myobj')
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 0, timeout=EVAL_TIMEOUT)

    # Close the (now orphaned) editor; this used to segfault
    ok_widget = obj_editor.btn_close
    qtbot.mouseClick(ok_widget, Qt.LeftButton)

    # Wait for the segfault
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_varexp_edit_inline(main_window, qtbot):
    """
    Test for errors when editing inline values in the Variable Explorer
    and then moving to another plugin.

    Note: Errors for this test don't appear related to it but instead they
    are shown down the road. That's because they are generated by an
    async C++ RuntimeError.
    """
    # Create object
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Start an inline edit of the object in the Variable Explorer
    main_window.variableexplorer.visibility_changed(True)
    nsb = main_window.variableexplorer.get_focus_widget()
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() > 0, timeout=EVAL_TIMEOUT)
    nsb.editor.setFocus()
    nsb.editor.edit_item()

    # Change focus to IPython console while the inline editor is open
    main_window.ipyconsole.get_focus_widget().setFocus()

    # Wait for the error
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_c_and_n_pdb_commands(main_window, qtbot):
    """Test that c and n Pdb commands update the Variable Explorer."""
    nsb = main_window.variableexplorer.get_focus_widget()

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Set a breakpoint ('toogle_breakpoint' [sic] is the API's spelling)
    code_editor = main_window.editor.get_focus_widget()
    code_editor.debugger.toogle_breakpoint(line_number=6)
    qtbot.wait(500)

    # Verify that c works: continuing to the breakpoint defines one variable
    qtbot.keyClicks(control, 'c')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    assert nsb.editor.model.rowCount() == 1

    # Verify that n works: stepping one line defines the next variable
    qtbot.keyClicks(control, 'n')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    assert nsb.editor.model.rowCount() == 2

    # Verify that doesn't go to sitecustomize.py with next and stops
    # the debugging session.
    qtbot.keyClicks(control, 'n')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)

    qtbot.keyClicks(control, 'n')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)
    assert nsb.editor.model.rowCount() == 3

    qtbot.keyClicks(control, 'n')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)

    qtbot.keyClicks(control, 'n')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)

    qtbot.keyClicks(control, 'n')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(500)

    # Assert that the regular prompt appears again, i.e. Pdb exited cleanly
    shell.clear_console()
    assert 'In [2]:' in control.toPlainText()

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_stop_dbg(main_window, qtbot):
    """Test that we correctly stop a debugging session."""
    nsb = main_window.variableexplorer.get_focus_widget()

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button to start a Pdb session
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Move to the next line
    shell.kernel_client.input("n")
    qtbot.wait(1000)

    # Stop debugging via the toolbar's stop button
    stop_debug_action = main_window.debug_toolbar_actions[5]
    stop_debug_button = main_window.debug_toolbar.widgetForAction(stop_debug_action)
    qtbot.mouseClick(stop_debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Assert that there is only one entry in the Variable Explorer
    # (only the variable defined by the single stepped line)
    assert nsb.editor.model.rowCount() == 1

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_change_cwd_dbg(main_window, qtbot):
    """
    Test that using the Working directory toolbar is working while debugging.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_focus_widget()
    control.setFocus()

    # Import os to get cwd
    with qtbot.waitSignal(shell.executed):
        shell.execute('import os')

    # Click the debug button to start a Pdb session
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Set the temp directory as cwd through the Working directory plugin
    main_window.workingdirectory.chdir(tempfile.gettempdir(),
                                       browsing_history=False,
                                       refresh_explorer=True)
    qtbot.wait(1000)

    # Get cwd in console (typed at the Pdb prompt)
    qtbot.keyClicks(control, 'os.getcwd()')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(1000)

    # Assert cwd is the right one
    assert tempfile.gettempdir() in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It times out sometimes")
def test_varexp_magic_dbg(main_window, qtbot):
    """Test that %varexp is working while debugging."""
    nsb = main_window.variableexplorer.get_focus_widget()

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_focus_widget()
    control.setFocus()

    # Create an object that can be plotted
    with qtbot.waitSignal(shell.executed):
        shell.execute('li = [1, 2, 3]')

    # Click the debug button to enter a Pdb session
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Generate the plot from the Variable Explorer (issues %varexp)
    nsb.editor.plot('li', 'plot')
    qtbot.wait(1000)

    # Assert that there's exactly one plot rendered in the console
    assert shell._control.toHtml().count('img src') == 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
@pytest.mark.parametrize(
    'main_window',
    [{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 0)},
     {'spy_config': ('ipython_console', 'pylab/inline/figure_format', 1)}],
    indirect=True)
def test_plots_plugin(main_window, qtbot, tmpdir, mocker):
    """
    Test that plots generated in the IPython console are properly displayed
    in the plots plugin.
    """
    assert CONF.get('plots', 'mute_inline_plotting') is False
    shell = main_window.ipyconsole.get_current_shellwidget()
    figbrowser = main_window.plots.current_widget()

    # Wait until the window is fully up.
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Generate a plot inline.
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig = plt.plot([1, 2, 3, 4], '.')\n"))

    # figure_format 0 -> PNG, otherwise SVG (parametrized above)
    if CONF.get('ipython_console', 'pylab/inline/figure_format') == 0:
        assert figbrowser.figviewer.figcanvas.fmt == 'image/png'
    else:
        assert figbrowser.figviewer.figcanvas.fmt == 'image/svg+xml'

    # Get the image name from the html, fetch the image from the shell, and
    # save it as a png.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)

    ipython_figname = osp.join(to_text_string(tmpdir), 'ipython_img.png')
    ipython_qimg = shell._get_image(img_name)
    ipython_qimg.save(ipython_figname)

    # Save the image with the Plots plugin as a png (mock the save-file
    # dialog so no GUI interaction is needed).
    plots_figname = osp.join(to_text_string(tmpdir), 'plots_img.png')
    mocker.patch('spyder.plugins.plots.widgets.figurebrowser.getsavefilename',
                 return_value=(plots_figname, '.png'))
    figbrowser.save_figure()

    # Both plugins must have rendered the same figure.
    assert compare_images(ipython_figname, plots_figname, 0.1) is None
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
def test_tight_layout_option_for_inline_plot(main_window, qtbot, tmpdir):
    """
    Test that the option to set bbox_inches to 'tight' or 'None' is
    working when plotting inline in the IPython console. By default, figures
    are plotted inline with bbox_inches='tight'.
    """
    tmpdir = to_text_string(tmpdir)

    # Assert that the default is True.
    assert CONF.get('ipython_console', 'pylab/inline/bbox_inches') is True

    fig_dpi = float(CONF.get('ipython_console', 'pylab/inline/resolution'))
    fig_width = float(CONF.get('ipython_console', 'pylab/inline/width'))
    fig_height = float(CONF.get('ipython_console', 'pylab/inline/height'))

    # Wait until the window is fully up.
    shell = main_window.ipyconsole.get_current_shellwidget()
    client = main_window.ipyconsole.get_current_client()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_focus_widget()
    control.setFocus()

    # Generate a plot inline with bbox_inches=tight (since it is default) and
    # save the figure with savefig.
    savefig_figname = osp.join(
        tmpdir, 'savefig_bbox_inches_tight.png').replace('\\', '/')
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig, ax = plt.subplots()\n"
                       "fig.set_size_inches(%f, %f)\n"
                       "ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
                       "ax.set_xticks(range(10))\n"
                       "ax.xaxis.set_ticklabels([])\n"
                       "ax.set_yticks(range(10))\n"
                       "ax.yaxis.set_ticklabels([])\n"
                       "ax.tick_params(axis='both', length=0)\n"
                       "for loc in ax.spines:\n"
                       "    ax.spines[loc].set_color('#000000')\n"
                       "    ax.spines[loc].set_linewidth(2)\n"
                       "ax.axis([0, 9, 0, 9])\n"
                       "ax.plot(range(10), color='#000000', lw=2)\n"
                       "fig.savefig('%s',\n"
                       "            bbox_inches='tight',\n"
                       "            dpi=%f)"
                       ) % (fig_width, fig_height, savefig_figname, fig_dpi))

    # Get the image name from the html, fetch the image from the shell, and
    # then save it to a file.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
    qimg = shell._get_image(img_name)
    assert isinstance(qimg, QImage)

    # Save the inline figure and assert it is similar to the one generated
    # with savefig.
    inline_figname = osp.join(tmpdir, 'inline_bbox_inches_tight.png')
    qimg.save(inline_figname)
    assert compare_images(savefig_figname, inline_figname, 0.1) is None

    # Change the option so that bbox_inches=None.
    CONF.set('ipython_console', 'pylab/inline/bbox_inches', False)

    # Restart the kernel and wait until it's up again so the new
    # bbox_inches setting takes effect.
    shell._prompt_html = None
    client.restart_kernel()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Generate the same plot inline with bbox_inches=None and save the
    # figure with savefig.
    savefig_figname = osp.join(
        tmpdir, 'savefig_bbox_inches_None.png').replace('\\', '/')
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig, ax = plt.subplots()\n"
                       "fig.set_size_inches(%f, %f)\n"
                       "ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
                       "ax.set_xticks(range(10))\n"
                       "ax.xaxis.set_ticklabels([])\n"
                       "ax.set_yticks(range(10))\n"
                       "ax.yaxis.set_ticklabels([])\n"
                       "ax.tick_params(axis='both', length=0)\n"
                       "for loc in ax.spines:\n"
                       "    ax.spines[loc].set_color('#000000')\n"
                       "    ax.spines[loc].set_linewidth(2)\n"
                       "ax.axis([0, 9, 0, 9])\n"
                       "ax.plot(range(10), color='#000000', lw=2)\n"
                       "fig.savefig('%s',\n"
                       "            bbox_inches=None,\n"
                       "            dpi=%f)"
                       ) % (fig_width, fig_height, savefig_figname, fig_dpi))

    # Get the image name from the html, fetch the image from the shell, and
    # then save it to a file.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
    qimg = shell._get_image(img_name)
    assert isinstance(qimg, QImage)

    # Save the inline figure and assert it is similar to the one generated
    # with savefig.
    inline_figname = osp.join(tmpdir, 'inline_bbox_inches_None.png')
    qimg.save(inline_figname)
    assert compare_images(savefig_figname, inline_figname, 0.1) is None
@pytest.mark.slow
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="Doesn't run correctly on Windows and macOS")
def test_fileswitcher(main_window, qtbot, tmpdir):
    """Test the use of shorten paths when necessary in the fileswitcher."""
    # Assert that the full path of a file is shown in the fileswitcher
    file_a = tmpdir.join("a.py")
    file_a.write('foo')
    main_window.editor.load(str(file_a))

    main_window.open_fileswitcher()
    item_text = main_window.fileswitcher.list.currentItem().text()
    assert str(file_a) in item_text

    # Assert that long paths are shortened in the fileswitcher.
    # Nest three uuid-named directories to make the path long enough.
    dir_b = tmpdir
    for _ in range(3):
        dir_b = dir_b.mkdir(str(uuid.uuid4()))
    file_b = dir_b.join("b.py")
    file_b.write('bar')
    main_window.editor.load(str(file_b))

    main_window.fileswitcher.close()
    main_window.open_fileswitcher()
    item_text = main_window.fileswitcher.list.currentItem().text()
    assert '...' in item_text
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_static_code_analysis(main_window, qtbot):
    """This tests that the Pylint plugin is working as expected."""
    # Select the third-party plugin
    pylint = get_thirdparty_plugin(main_window, "Static code analysis")

    # Do an analysis (F8 triggers static code analysis on the loaded file)
    test_file = osp.join(LOCATION, 'script_pylint.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    qtbot.keyClick(code_editor, Qt.Key_F8)
    qtbot.wait(3000)

    # Perform the test
    # Check output of the analysis: the fixture script is expected to
    # produce exactly five convention ('C:') messages.
    treewidget = pylint.get_focus_widget()
    qtbot.waitUntil(lambda: treewidget.results is not None,
                    timeout=SHELL_TIMEOUT)
    result_content = treewidget.results
    assert result_content['C:']
    assert len(result_content['C:']) == 5

    # Close the file
    main_window.editor.close_file()
@flaky(max_runs=3)
def test_troubleshooting_menu_item_and_url(monkeypatch):
    """Test that the troubleshooting menu item calls the valid URL."""
    # Fake MainWindow instance that passes isinstance checks without
    # running the real (GUI-heavy) constructor.
    MockMainWindow = MagicMock(spec=MainWindow)
    mockMainWindow_instance = MockMainWindow()
    mockMainWindow_instance.__class__ = MainWindow
    # Patch QDesktopServices in the mainwindow module so no browser opens.
    MockQDesktopServices = Mock()
    mockQDesktopServices_instance = MockQDesktopServices()
    attr_to_patch = ('spyder.app.mainwindow.QDesktopServices')
    monkeypatch.setattr(attr_to_patch, MockQDesktopServices)

    # Unit test of help menu item: Make sure the correct URL is called.
    MainWindow.trouble_guide(mockMainWindow_instance)
    assert MockQDesktopServices.openUrl.call_count == 1
    # NOTE(review): `called_once_with` is not a Mock assertion method --
    # this line only auto-creates a child mock and can never fail. The
    # intended check is presumably
    # `MockQDesktopServices.openUrl.assert_called_once_with(...)` on the
    # mock that actually received the call; confirm the exact argument
    # (likely a QUrl, not the plain string) before fixing.
    mockQDesktopServices_instance.openUrl.called_once_with(__trouble_url__)

    # Check that the URL resolves correctly. Ignored if no internet connection.
    try:
        urlopen("https://www.github.com", timeout=1)
    except Exception:
        # No network available: silently skip the URL-resolution check.
        pass
    else:
        try:
            urlopen(__trouble_url__, timeout=1)
        except URLError:
            raise
@flaky(max_runs=3)
@pytest.mark.slow
def test_help_opens_when_show_tutorial_full(main_window, qtbot):
    """Test fix for #6317 : 'Show tutorial' opens the help plugin if closed."""
    HELP_STR = "Help"

    def help_tab():
        # Locate the Help tab as a (tabbar, index) pair in the main window.
        return find_desired_tab_in_window(HELP_STR, main_window)

    help_pane_menuitem = None
    for menu_action in main_window.plugins_menu.actions():
        if menu_action.text() == HELP_STR:
            help_pane_menuitem = menu_action
            break

    # Case 1: the Help plugin is closed; 'Show tutorial' must open it.
    main_window.help.toggle_view_action.setChecked(False)
    qtbot.wait(500)
    help_tabbar, help_index = help_tab()
    assert help_tabbar is None and help_index is None
    assert not isinstance(main_window.focusWidget(), ObjectComboBox)
    assert not help_pane_menuitem.isChecked()

    main_window.help.show_tutorial()
    qtbot.wait(500)
    help_tabbar, help_index = help_tab()
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()

    # Case 2: the Help plugin is open but another tab is selected;
    # 'Show tutorial' must bring the Help tab back to the front.
    help_tabbar.setCurrentIndex((help_tabbar.currentIndex() + 1)
                                % help_tabbar.count())
    qtbot.wait(500)
    help_tabbar, help_index = help_tab()
    assert None not in (help_tabbar, help_index)
    assert help_index != help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()

    main_window.help.show_tutorial()
    qtbot.wait(500)
    help_tabbar, help_index = help_tab()
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()

    # Case 3: the Help plugin is open and already the active tab;
    # 'Show tutorial' must leave it active.
    qtbot.wait(500)
    main_window.help.show_tutorial()
    help_tabbar, help_index = help_tab()
    qtbot.wait(500)
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()
def test_report_issue_url(monkeypatch):
    """Test that report_issue sends the data, and to correct url."""
    body = 'This is an example error report body text.'
    title = 'Uncreative issue title here'
    body_autogenerated = 'Auto-generated text.'
    target_url_base = __project_url__ + '/issues/new'
    # Fake MainWindow whose render_issue() returns canned text, so only
    # the URL construction inside report_issue() is exercised.
    MockMainWindow = MagicMock(spec=MainWindow)
    mockMainWindow_instance = MockMainWindow()
    mockMainWindow_instance.__class__ = MainWindow
    mockMainWindow_instance.render_issue.return_value = body_autogenerated
    # Patch QDesktopServices so no real browser window is opened.
    MockQDesktopServices = MagicMock()
    mockQDesktopServices_instance = MockQDesktopServices()
    attr_to_patch = ('spyder.app.mainwindow.QDesktopServices')
    monkeypatch.setattr(attr_to_patch, MockQDesktopServices)

    # Test when body != None, i.e. when auto-submitting error to Github
    target_url = QUrl(target_url_base + '?body=' + body)
    MainWindow.report_issue(mockMainWindow_instance, body=body, title=None,
                            open_webpage=True)
    assert MockQDesktopServices.openUrl.call_count == 1
    # NOTE(review): `called_with` is not a Mock assertion method; this line
    # silently creates a child mock and can never fail. The intended check
    # is presumably
    # `MockQDesktopServices.openUrl.assert_called_with(target_url)` --
    # confirm report_issue()'s URL encoding (percent-escaping of the body)
    # before turning it into a real assertion.
    mockQDesktopServices_instance.openUrl.called_with(target_url)

    # Test when body != None and title != None
    target_url = QUrl(target_url_base + '?body=' + body
                      + "&title=" + title)
    MainWindow.report_issue(mockMainWindow_instance, body=body, title=title,
                            open_webpage=True)
    assert MockQDesktopServices.openUrl.call_count == 2
    # NOTE(review): same no-op mock "assertion" as above.
    mockQDesktopServices_instance.openUrl.called_with(target_url)
def test_render_issue():
    """Test that render issue works without errors and returns text."""
    sample_description = "This is a test description"
    sample_traceback = "An error occured. Oh no!"
    # Fake MainWindow instance that passes isinstance checks without
    # running the real constructor.
    window_mock = MagicMock(spec=MainWindow)()
    window_mock.__class__ = MainWindow

    # Without a description/traceback the report is still a sizeable string.
    issue_text = MainWindow.render_issue(window_mock)
    assert type(issue_text) == str
    assert len(issue_text) > 100

    # With a description and traceback, both must appear in the report.
    issue_text = MainWindow.render_issue(window_mock, sample_description,
                                         sample_traceback)
    assert type(issue_text) == str
    assert len(issue_text) > 100
    assert sample_description in issue_text
    assert sample_traceback in issue_text
@pytest.mark.slow
@flaky(max_runs=3)
def test_custom_layouts(main_window, qtbot):
    """Test that layout are showing the expected widgets visible."""
    mw = main_window
    mw.first_spyder_run = False
    prefix = 'window' + '/'
    settings = mw.load_window_settings(prefix=prefix, default=True)

    # Test layout changes: the 'default' layout plus the four predefined ones.
    for layout_idx in ['default'] + list(range(4)):
        with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
            layout = mw.setup_default_layouts(layout_idx, settings=settings)
        with qtbot.waitSignal(None, timeout=500, raising=False):
            # Add a wait to see changes (no signal is given, so this block
            # simply times out after 500 ms without raising).
            pass

        widgets_layout = layout['widgets']
        hidden_widgets = layout['hidden widgets']
        for column in widgets_layout:
            for row in column:
                for idx, widget in enumerate(row):
                    if idx == 0:
                        # Only the first widget of each group is checked --
                        # presumably the others are stacked behind it as
                        # tabs; confirm against setup_default_layouts.
                        if widget not in hidden_widgets:
                            print(widget) # spyder: test-skip
                            assert widget.isVisible()
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main()
| 36.685315 | 111 | 0.683085 |
9949b24c85e3dd4c7fd19b3a662c4355550c19c8 | 1,388 | py | Python | app/products/views.py | erdem/flask-couchdb-example | c2e4dd86ff9ba4bb2327151a7375785b14e240c8 | [
"MIT"
] | 1 | 2021-04-28T00:42:10.000Z | 2021-04-28T00:42:10.000Z | app/products/views.py | erdem/flask-couchdb-example | c2e4dd86ff9ba4bb2327151a7375785b14e240c8 | [
"MIT"
] | null | null | null | app/products/views.py | erdem/flask-couchdb-example | c2e4dd86ff9ba4bb2327151a7375785b14e240c8 | [
"MIT"
] | null | null | null | from http import HTTPStatus
from flask import Blueprint, jsonify, request
from app import Products
products_api = Blueprint('products_api', __name__)
@products_api.route('/', methods=['GET'])
def get_products():
    """Return every stored product as a JSON list of plain dicts."""
    response_data = [
        {
            'id': product.id,
            'prodname': product.name,
            'category': product.category,
            'quantity': product.quantity,
        }
        for product in Products.all()
    ]
    return jsonify(response_data), HTTPStatus.OK
@products_api.route('/', methods=['POST'])
def create_product():
    """Create a product from the JSON request body.

    Expects a JSON object with ``prodname``, ``category`` and ``quantity``
    keys; answers with an empty body and a 201 (Created) status.
    """
    request_data = request.get_json()
    product = Products(
        name=request_data.get('prodname'),
        category=request_data.get('category'),
        quantity=request_data.get('quantity'),
    )
    product.store()
    # Bug fix: returning a bare HTTPStatus (an int) is not a valid Flask
    # view return value and raised a TypeError at request time; a
    # (body, status) tuple is required.
    return '', HTTPStatus.CREATED
@products_api.route('/<string:product_id>/', methods=["GET"])
def retrieve_product(product_id):
    """Return a single product by id, or a 400 error when it is unknown."""
    product = Products.load(id=product_id)
    if product:
        # Flask serializes a (dict, status) tuple as a JSON response.
        return {
            'id': product.id,
            'prodname': product.name,
            'category': product.category,
            'quantity': product.quantity,
        }, HTTPStatus.OK
    return jsonify({
        'error': 'Product not found.'
    }), HTTPStatus.BAD_REQUEST
| 24.785714 | 61 | 0.634726 |
cdbaa9618bc4fc6eb776c402fa6f5eb178102323 | 1,456 | py | Python | AC_CDQ_code/utils/step.py | Jiang-HB/AC_CDQ | 4b4ec2d611c4481ad0b99cf7ea79eb23014a0325 | [
"MIT"
] | 7 | 2021-05-03T05:50:14.000Z | 2022-03-24T15:35:59.000Z | AC_CDQ_code/utils/step.py | Jiang-HB/AC_CDQ | 4b4ec2d611c4481ad0b99cf7ea79eb23014a0325 | [
"MIT"
] | null | null | null | AC_CDQ_code/utils/step.py | Jiang-HB/AC_CDQ | 4b4ec2d611c4481ad0b99cf7ea79eb23014a0325 | [
"MIT"
] | 1 | 2022-03-25T02:24:53.000Z | 2022-03-25T02:24:53.000Z | import numpy as np
def step(opts, Q, current_state, n_eps, reward_array, step_n, probs=None):
if probs is not None:
cums = np.cumsum(probs)
probs = cums / np.max(cums)
action = np.random.choice(np.where(np.random.rand() <= probs)[0])
elif opts.policy == "eps":
if np.random.rand() > 1 / np.sqrt(n_eps[current_state]):
if len(Q.shape) == 3:
action = np.argmax(np.mean(Q, 2)[current_state])
if len(Q.shape) == 2:
action = np.argmax(Q[current_state])
else:
action = np.random.randint(0, 4, 1)[0]
if current_state != opts.goal:
if action == 0:
next_state = current_state - opts.n_col
if next_state < 0:
next_state = current_state
elif action == 1:
next_state = current_state + opts.n_col
if next_state >= opts.n_state:
next_state = current_state
elif action == 2:
next_state = current_state - 1
if (next_state + 1) % opts.n_col == 0:
next_state = current_state
elif action == 3:
next_state = current_state + 1
if next_state % opts.n_col == 0:
next_state = current_state
reward = reward_array[step_n]
else:
reward = np.random.choice([-30, 40])
next_state = opts.start
return action, reward, next_state
| 28 | 74 | 0.541896 |
599497d60b227c27f097167dfdc03acc5054059c | 2,088 | py | Python | torchattacks/attacks/multiattack.py | georgeguo-cn/adversarial-attacks-pytorch | d740ee1944cf45185f7010519e04a4447a251fec | [
"MIT"
] | 2 | 2020-09-21T08:20:06.000Z | 2021-02-18T10:13:53.000Z | torchattacks/attacks/multiattack.py | georgeguo-cn/adversarial-attacks-pytorch | d740ee1944cf45185f7010519e04a4447a251fec | [
"MIT"
] | null | null | null | torchattacks/attacks/multiattack.py | georgeguo-cn/adversarial-attacks-pytorch | d740ee1944cf45185f7010519e04a4447a251fec | [
"MIT"
] | null | null | null | import warnings
import torch
from ..attack import Attack
class MultiAttack(Attack):
r"""
MultiAttack is a class to attack a model with various attacks agains same images and labels.
Arguments:
model (nn.Module): model to attack.
attacks (list): list of attacks.
Examples::
>>> attack1 = torchattacks.PGD(model, eps = 4/255, alpha = 8/255, iters=40, random_start=False)
>>> attack2 = torchattacks.PGD(model, eps = 4/255, alpha = 8/255, iters=40, random_start=False)
>>> attack = torchattacks.MultiAttack(model, [attack1, attack2])
>>> adv_images = attack(images, labels)
"""
def __init__(self, model, attacks):
super(MultiAttack, self).__init__("MultiAttack", model)
self.attacks = attacks
# Check validity
ids = []
for attack in attacks:
ids.append(id(attack.model))
if len(set(ids)) != 1:
warnings.warn("At least one of attacks have different model.")
def forward(self, images, labels):
r"""
Overridden.
"""
fails = torch.arange(images.shape[0]).to(self.device)
final_images = images.clone().to(self.device)
labels = labels.to(self.device)
for i, attack in enumerate(self.attacks):
# print('- Multi Attack Progress [%d / %d] ' % ((i+1), len(self.attacks)), end='\r')
adv_images = attack(images[fails], labels[fails])
outputs = self.model(adv_images)
_, pre = torch.max(outputs.data, 1)
succeeds = torch.masked_select(fails, pre != labels[fails])
succeeds_of_fails = torch.masked_select(torch.arange(fails.shape[0]).to(self.device), pre != labels[fails])
final_images[succeeds] = adv_images[succeeds_of_fails]
fails = torch.masked_select(fails, pre == labels[fails])
if len(fails) == 0:
warnings.warn("Ealry Stopped cause all images are successfully perturbed.")
break
return final_images
| 33.677419 | 119 | 0.595785 |
aa4bf69b473c2903505db14506f5fe98c01c0859 | 5,113 | py | Python | osmclient/sol005/pdud.py | OSMadmin/osmclient | 0990fda8a0c43c4cad0e08dda164a2c5496e79e6 | [
"Apache-2.0"
] | 1 | 2020-12-06T17:40:43.000Z | 2020-12-06T17:40:43.000Z | osmclient/sol005/pdud.py | OSMadmin/osmclient | 0990fda8a0c43c4cad0e08dda164a2c5496e79e6 | [
"Apache-2.0"
] | null | null | null | osmclient/sol005/pdud.py | OSMadmin/osmclient | 0990fda8a0c43c4cad0e08dda164a2c5496e79e6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Telefonica
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OSM pdud API handling
"""
from osmclient.common.exceptions import NotFound
from osmclient.common.exceptions import ClientException
from osmclient.common import utils
import json
import logging
class Pdu(object):
    """Client for the OSM NBI Physical Deployment Unit (PDU) descriptors.

    Wraps the ``/pdu/v1/pdu_descriptors`` SOL005 endpoint with list / get /
    create / update / delete operations.
    """

    def __init__(self, http=None, client=None):
        # http: low-level HTTP helper; client: OSM client (token provider).
        self._http = http
        self._client = client
        self._logger = logging.getLogger('osmclient')
        self._apiName = '/pdu'
        self._apiVersion = '/v1'
        self._apiResource = '/pdu_descriptors'
        self._apiBase = '{}{}{}'.format(self._apiName,
                                        self._apiVersion, self._apiResource)

    def list(self, filter=None):
        """Return all PDU descriptors, optionally restricted by ``filter``
        (an already URL-encoded query string)."""
        self._logger.debug("")
        self._client.get_token()
        filter_string = ''
        if filter:
            filter_string = '?{}'.format(filter)
        _, resp = self._http.get2_cmd('{}{}'.format(self._apiBase, filter_string))
        if resp:
            return json.loads(resp)
        return list()

    def get(self, name):
        """Return the descriptor whose UUID or name matches ``name``.

        Raises:
            NotFound: when no descriptor matches.
        """
        self._logger.debug("")
        self._client.get_token()
        if utils.validate_uuid4(name):
            for pdud in self.list():
                if name == pdud['_id']:
                    return pdud
        else:
            for pdud in self.list():
                if 'name' in pdud and name == pdud['name']:
                    return pdud
        raise NotFound("pdud {} not found".format(name))

    def get_individual(self, name):
        """Fetch a single descriptor through the per-resource endpoint."""
        self._logger.debug("")
        pdud = self.get(name)
        # It is redundant, since the previous one already gets the whole pdudInfo
        # The only difference is that a different primitive is exercised
        try:
            _, resp = self._http.get2_cmd('{}/{}'.format(self._apiBase, pdud['_id']))
        except NotFound:
            raise NotFound("pdu '{}' not found".format(name))
        if resp:
            return json.loads(resp)
        raise NotFound("pdu '{}' not found".format(name))

    def delete(self, name, force=False):
        """Delete the descriptor matching ``name``.

        With ``force`` the server is asked to delete even when in use.

        Raises:
            ClientException: when the server rejects the deletion.
        """
        self._logger.debug("")
        pdud = self.get(name)
        querystring = ''
        if force:
            querystring = '?FORCE=True'
        http_code, resp = self._http.delete_cmd('{}/{}{}'.format(self._apiBase,
                                                                 pdud['_id'], querystring))
        if http_code == 202:
            print('Deletion in progress')
        elif http_code == 204:
            print('Deleted')
        else:
            msg = resp or ""
            raise ClientException("failed to delete pdu {} - {}".format(name, msg))

    def create(self, pdu, update_endpoint=None):
        """Create a PDU descriptor, or update one in place.

        ``pdu`` is the descriptor content (sent as the request payload).
        When ``update_endpoint`` is given the payload is PUT to that
        endpoint instead of POSTed to the collection.

        NOTE(review): a successful PUT may return an empty body, which
        this method treats as an error -- verify against NBI behavior.
        """
        self._logger.debug("")
        self._client.get_token()
        headers = self._client._headers
        headers['Content-Type'] = 'application/yaml'
        http_header = ['{}: {}'.format(key, val)
                       for (key, val) in list(headers.items())]
        self._http.set_http_header(http_header)
        if update_endpoint:
            http_code, resp = self._http.put_cmd(endpoint=update_endpoint, postfields_dict=pdu)
        else:
            endpoint = self._apiBase
            http_code, resp = self._http.post_cmd(endpoint=endpoint, postfields_dict=pdu)
        if resp:
            resp = json.loads(resp)
        if not resp or 'id' not in resp:
            raise ClientException('unexpected response from server: {}'.format(
                                  resp))
        print(resp['id'])

    def update(self, name, filename):
        """Update the descriptor matching ``name`` with new content."""
        self._logger.debug("")
        pdud = self.get(name)
        endpoint = '{}/{}'.format(self._apiBase, pdud['_id'])
        # Bug fix: create() takes its payload as positional parameter
        # ``pdu`` -- the old call used a non-existent ``filename=`` keyword
        # and always raised TypeError before reaching the server.
        self.create(filename, update_endpoint=endpoint)
| 36.784173 | 95 | 0.564052 |
d3b8bf968f8d03df07dc388110bb75e295ccac48 | 4,471 | py | Python | unitorch/cli/models/vit/__init__.py | fuliucansheng/UniTorch | 47038321593ce4e7eabda555bd58c0cf89482146 | [
"MIT"
] | 2 | 2022-02-05T08:52:00.000Z | 2022-03-27T07:01:34.000Z | unitorch/cli/models/vit/__init__.py | Lixin-Qian/unitorch | 47038321593ce4e7eabda555bd58c0cf89482146 | [
"MIT"
] | null | null | null | unitorch/cli/models/vit/__init__.py | Lixin-Qian/unitorch | 47038321593ce4e7eabda555bd58c0cf89482146 | [
"MIT"
] | 1 | 2022-03-27T07:01:13.000Z | 2022-03-27T07:01:13.000Z | # Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
# pretrained infos
def _vit_pretrained_info(repo, with_weight=True):
    # Build the config / image-processor / weight URL triple for a
    # google/<repo> checkpoint hosted on the Hugging Face hub.
    base = "https://huggingface.co/google/{}/resolve/main/".format(repo)
    info = {
        "config": base + "config.json",
        "vision_config": base + "preprocessor_config.json",
    }
    if with_weight:
        info["weight"] = base + "pytorch_model.bin"
    return info


# pretrained infos: checkpoint name -> URLs of its config, image-processor
# config and (except for the "default-vit" alias) weight file.
pretrained_vit_infos = {
    "default-vit": _vit_pretrained_info("vit-base-patch16-224", with_weight=False),
}
for _vit_repo in (
    "vit-base-patch16-224-in21k",
    "vit-base-patch32-224-in21k",
    "vit-large-patch16-224-in21k",
    "vit-large-patch32-224-in21k",
    "vit-huge-patch14-224-in21k",
    "vit-base-patch16-224",
    "vit-base-patch16-384",
    "vit-base-patch32-384",
    "vit-large-patch16-224",
    "vit-large-patch16-384",
    "vit-large-patch32-384",
):
    pretrained_vit_infos[_vit_repo] = _vit_pretrained_info(_vit_repo)
import unitorch.cli.models.vit.modeling
import unitorch.cli.models.vit.processing
| 64.797101 | 123 | 0.706106 |
3084686ebb0a847ba096f1ba156f97fb515999fa | 12,722 | py | Python | examples/BERT/ns_task.py | Hirni-Meshram3/text | 84e6c7bd99c7fb3c229ff289aa722149e3136094 | [
"BSD-3-Clause"
] | 2 | 2021-05-05T23:47:00.000Z | 2021-09-22T02:16:20.000Z | examples/BERT/ns_task.py | Hirni-Meshram3/text | 84e6c7bd99c7fb3c229ff289aa722149e3136094 | [
"BSD-3-Clause"
] | 3 | 2021-02-24T22:51:20.000Z | 2021-03-05T02:38:15.000Z | examples/BERT/ns_task.py | Hirni-Meshram3/text | 84e6c7bd99c7fb3c229ff289aa722149e3136094 | [
"BSD-3-Clause"
] | 4 | 2021-04-26T23:29:16.000Z | 2021-06-11T19:11:05.000Z | import argparse
import time
import math
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from model import NextSentenceTask, BertModel, BertEmbedding
from utils import run_demo, run_ddp, wrap_up
def process_raw_data(whole_data, args):
    """Turn raw token sequences into (sentence1, sentence2, label) triples.

    Every sequence of length > 1 is split at a random point into two
    "sentences" with label 1 (true next sentence). Afterwards a fraction
    ``args.frac_ns`` of the triples gets its second sentence replaced by
    the first sentence of another randomly chosen triple and is
    relabelled 0.
    """
    processed_data = []
    for raw_item in whole_data:
        item = torch.tensor(raw_item) if isinstance(raw_item, list) else raw_item
        if len(item) <= 1:
            continue
        # Random split point; both halves stay non-empty.
        split_idx = torch.randint(1, len(item), size=(1, 1)).item()
        # Label 1 == "second part really follows the first".
        processed_data.append([item[:split_idx], item[split_idx:], 1])

    # Pair random triples and overwrite args.frac_ns of them with a
    # mismatched second sentence (label 0).
    shuffle_idx1 = torch.randperm(len(processed_data))
    shuffle_idx2 = torch.randperm(len(processed_data))
    num_shuffle = int(len(processed_data) * args.frac_ns)
    for dst, src in list(zip(shuffle_idx1, shuffle_idx2))[:num_shuffle]:
        processed_data[dst][1] = processed_data[src][0]
        processed_data[dst][2] = int(0)
    return processed_data
def collate_batch(batch, args, cls_id, sep_id, pad_id):
    """Collate (sentence1, sentence2, label) triples into model inputs.

    Each pair is joined as ``s1 <sep> s2 <sep>``, padded/trimmed to
    ``args.bptt`` tokens, and prefixed with a ``<cls>`` token. Returns the
    token ids of shape (bptt + 1, batch), token-type ids of the same shape
    (0 over the first sentence, 1 over the second) and the label vector.
    """
    sep = torch.tensor([sep_id]).long()
    seq_list = []
    tok_type = []
    same_sentence_labels = []
    for first, second, label in batch:
        joined = torch.cat([first, sep, second, sep])
        # Fix the sequence length to args.bptt by trimming or padding.
        if joined.size(0) > args.bptt:
            joined = joined[:args.bptt]
        elif joined.size(0) < args.bptt:
            padding = torch.tensor([pad_id] * (args.bptt - joined.size(0)))
            joined = torch.cat((joined, padding))
        seq_list.append(joined)
        # Token types: 0 over the first sentence (and its <sep>), 1 after.
        types_row = torch.ones((joined.size(0)))
        types_row[:min(len(first) + 1, args.bptt)] = 0.0
        tok_type.append(types_row)
        same_sentence_labels.append(label)
    seq_input = torch.stack(seq_list).long().t().contiguous()
    seq_input = torch.cat((torch.tensor([[cls_id] * seq_input.size(1)]).long(), seq_input))
    tok_type = torch.stack(tok_type).long().t().contiguous()
    tok_type = torch.cat((torch.tensor([[0] * tok_type.size(1)]).long(), tok_type))
    return seq_input, tok_type, torch.tensor(same_sentence_labels).long().contiguous()
def evaluate(data_source, model, device, criterion, cls_id, sep_id, pad_id, args):
    """Return the average next-sentence loss of ``model`` over ``data_source``."""
    model.eval()
    batch_size = args.batch_size
    dataloader = DataLoader(data_source, batch_size=batch_size, shuffle=True,
                            collate_fn=lambda b: collate_batch(b, args, cls_id, sep_id, pad_id))
    total_loss = 0.
    with torch.no_grad():
        for seq_input, tok_type, target_ns_labels in dataloader:
            # Under DDP each rank owns a device list; use its first entry.
            target_device = device[0] if args.parallel == 'DDP' else device
            seq_input = seq_input.to(target_device)
            tok_type = tok_type.to(target_device)
            target_ns_labels = target_ns_labels.to(target_device)
            # (seq, batch) -> (batch, seq) for the DDP/DataParallel wrapper.
            ns_labels = model(seq_input.transpose(0, 1), token_type_input=tok_type)
            total_loss += criterion(ns_labels, target_ns_labels).item()
    return total_loss / (len(data_source) // batch_size)
def train(train_dataset, model, train_loss_log, device, optimizer, criterion,
          epoch, scheduler, cls_id, sep_id, pad_id, args, rank=None):
    """Run one training epoch of the next-sentence task.

    Mutates ``train_loss_log`` in place (appends a slot and overwrites it
    with the latest logged average loss). Only rank 0 -- or a
    non-distributed run (``rank is None``) -- prints progress.
    """
    model.train()
    total_loss = 0.
    start_time = time.time()
    batch_size = args.batch_size
    dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                            collate_fn=lambda b: collate_batch(b, args, cls_id, sep_id, pad_id))
    train_loss_log.append(0.0)
    for idx, (seq_input, tok_type, target_ns_labels) in enumerate(dataloader):
        # Under DDP each rank owns a device list; use its first entry.
        if args.parallel == 'DDP':
            seq_input = seq_input.to(device[0])
            tok_type = tok_type.to(device[0])
            target_ns_labels = target_ns_labels.to(device[0])
        else:
            seq_input = seq_input.to(device)
            tok_type = tok_type.to(device)
            target_ns_labels = target_ns_labels.to(device)
        optimizer.zero_grad()
        # (seq, batch) -> (batch, seq) for the DDP/DataParallel wrapper.
        seq_input = seq_input.transpose(0, 1)
        ns_labels = model(seq_input, token_type_input=tok_type)
        loss = criterion(ns_labels, target_ns_labels)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        total_loss += loss.item()
        # Periodic progress logging (skipped at idx == 0).
        if idx % args.log_interval == 0 and idx > 0:
            cur_loss = total_loss / args.log_interval
            elapsed = time.time() - start_time
            if (rank is None) or rank == 0:
                train_loss_log[-1] = cur_loss
                print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | '
                      'ms/batch {:5.2f} | '
                      'loss {:8.5f} | ppl {:5.2f}'.format(epoch, idx,
                                                          len(train_dataset) // batch_size,
                                                          scheduler.get_last_lr()[0],
                                                          elapsed * 1000 / args.log_interval,
                                                          cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
def run_main(args, rank=None):
    """Train, validate and test the next-sentence model.

    ``rank`` is None for a single-process run; under DDP each spawned
    process calls this with its own rank and a slice of the devices.
    Saves the best model (by validation loss) to ``args.save``.
    """
    # Set the random seed manually for reproducibility.
    torch.manual_seed(args.seed)
    if args.parallel == 'DDP':
        # Partition the visible GPUs evenly across the world's ranks.
        n = torch.cuda.device_count() // args.world_size
        device = list(range(rank * n, (rank + 1) * n))
    else:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vocab = torch.load(args.save_vocab)
    cls_id = vocab.stoi['<cls>']
    pad_id = vocab.stoi['<pad>']
    sep_id = vocab.stoi['<sep>']
    if args.dataset == 'WikiText103':
        from torchtext.experimental.datasets import WikiText103
        train_dataset, valid_dataset, test_dataset = WikiText103(vocab=vocab)
    elif args.dataset == 'BookCorpus':
        from data import BookCorpus
        train_dataset, valid_dataset, test_dataset = BookCorpus(vocab, min_sentence_len=60)
    if rank is not None:
        # Each DDP rank trains on its own contiguous shard of the data.
        chunk_len = len(train_dataset.data) // args.world_size
        train_dataset.data = train_dataset.data[(rank * chunk_len):((rank + 1) * chunk_len)]
    if args.checkpoint != 'None':
        model = torch.load(args.checkpoint)
    else:
        # Build the task head on top of the pretrained (MLM) BERT weights.
        embed_layer = BertEmbedding(len(vocab), args.emsize)
        pretrained_bert = BertModel(len(vocab), args.emsize, args.nhead, args.nhid, args.nlayers, embed_layer, args.dropout)
        pretrained_bert.load_state_dict(torch.load(args.bert_model))
        model = NextSentenceTask(pretrained_bert)
    if args.parallel == 'DDP':
        model = model.to(device[0])
        model = DDP(model, device_ids=device)
    else:
        model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
    best_val_loss = None
    train_loss_log, val_loss_log = [], []
    for epoch in range(1, args.epochs + 1):
        epoch_start_time = time.time()
        train(process_raw_data(train_dataset, args), model, train_loss_log, device, optimizer,
              criterion, epoch, scheduler, cls_id, sep_id, pad_id, args, rank)
        val_loss = evaluate(process_raw_data(valid_dataset, args), model, device, criterion,
                            cls_id, sep_id, pad_id, args)
        val_loss_log.append(val_loss)
        if (rank is None) or (rank == 0):
            print('-' * 89)
            print('| end of epoch {:3d} | time: {:5.2f}s '
                  '| valid loss {:8.5f} | '.format(epoch,
                                                   (time.time() - epoch_start_time),
                                                   val_loss))
            print('-' * 89)
        # Keep the best checkpoint; only decay the LR on non-improving epochs.
        if not best_val_loss or val_loss < best_val_loss:
            if rank is None:
                # Single process: save the whole model object.
                with open(args.save, 'wb') as f:
                    torch.save(model, f)
            elif rank == 0:
                # DDP: only rank 0 saves, and saves the state dict.
                with open(args.save, 'wb') as f:
                    torch.save(model.state_dict(), f)
            best_val_loss = val_loss
        else:
            scheduler.step()
    if args.parallel == 'DDP':
        # Remap rank-0 device ids onto this rank's devices when reloading.
        rank0_devices = [x - rank * len(device) for x in device]
        device_pairs = zip(rank0_devices, device)
        map_location = {'cuda:%d' % x: 'cuda:%d' % y for x, y in device_pairs}
        model.load_state_dict(torch.load(args.save, map_location=map_location))
        test_loss = evaluate(process_raw_data(test_dataset, args), model, device, criterion,
                             cls_id, sep_id, pad_id, args)
        if rank == 0:
            wrap_up(train_loss_log, val_loss_log, test_loss, args, model.module, 'ns_loss.txt', 'ns_model.pt')
    else:
        with open(args.save, 'rb') as f:
            model = torch.load(f)
        test_loss = evaluate(process_raw_data(test_dataset, args), model, device, criterion,
                             cls_id, sep_id, pad_id, args)
        wrap_up(train_loss_log, val_loss_log, test_loss, args, model, 'ns_loss.txt', 'ns_model.pt')
if __name__ == "__main__":
    # Command-line entry point: parse hyper-parameters, then dispatch
    # either a single-process run or a DDP multi-process run.
    parser = argparse.ArgumentParser(description='Question-Answer fine-tuning task')
    parser.add_argument('--dataset', type=str, default='WikiText103',
                        help='dataset used for next sentence task')
    parser.add_argument('--lr', type=float, default=0.25,
                        help='initial learning rate')
    parser.add_argument('--clip', type=float, default=0.1,
                        help='gradient clipping')
    parser.add_argument('--epochs', type=int, default=5,
                        help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=24, metavar='N',
                        help='batch size')
    parser.add_argument('--bptt', type=int, default=128,
                        help='max. sequence length for the next-sentence pair')
    parser.add_argument('--min_sentence_len', type=int, default=60,
                        help='min. sequence length for the raw text tokens')
    parser.add_argument('--seed', type=int, default=312216194,
                        help='random seed')
    parser.add_argument('--cuda', action='store_true',
                        help='use CUDA')
    parser.add_argument('--log-interval', type=int, default=600, metavar='N',
                        help='report interval')
    parser.add_argument('--checkpoint', type=str, default='None',
                        help='path to load the checkpoint')
    parser.add_argument('--save', type=str, default='ns_bert.pt',
                        help='path to save the bert model')
    parser.add_argument('--save-vocab', type=str, default='torchtext_bert_vocab.pt',
                        help='path to save the vocab')
    parser.add_argument('--bert-model', type=str, default='mlm_bert.pt',
                        help='path to save the pretrained bert')
    parser.add_argument('--frac_ns', type=float, default=0.5,
                        help='fraction of not next sentence')
    parser.add_argument('--parallel', type=str, default='None',
                        help='Use DataParallel/DDP to train model')
    parser.add_argument('--world_size', type=int, default=8,
                        help='the world size to initiate DPP')
    parser.add_argument('--emsize', type=int, default=768,
                        help='size of word embeddings')
    parser.add_argument('--nhid', type=int, default=3072,
                        help='number of hidden units per layer')
    parser.add_argument('--nlayers', type=int, default=12,
                        help='number of layers')
    parser.add_argument('--nhead', type=int, default=12,
                        help='the number of heads in the encoder/decoder of the transformer model')
    parser.add_argument('--dropout', type=float, default=0.2,
                        help='dropout applied to layers (0 = no dropout)')
    args = parser.parse_args()

    if args.parallel == 'DDP':
        # Spawn one process per rank; each calls run_main(args, rank).
        run_demo(run_ddp, run_main, args)
    else:
        run_main(args)
| 48.372624 | 124 | 0.596447 |
839ef74e06c1519180b457c25a3987e77fa3af99 | 6,196 | py | Python | desktop/core/ext-py/south/south/management/commands/migrate.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 19 | 2015-05-01T19:59:03.000Z | 2021-12-09T08:03:16.000Z | external_apps/south/management/commands/migrate.py | spreeker/democracygame | 525139955cb739c295051f317ab670049511bcf8 | [
"BSD-3-Clause"
] | 1 | 2018-01-03T15:26:49.000Z | 2018-01-03T15:26:49.000Z | external_apps/south/management/commands/migrate.py | spreeker/democracygame | 525139955cb739c295051f317ab670049511bcf8 | [
"BSD-3-Clause"
] | 30 | 2015-03-25T19:40:07.000Z | 2021-05-28T22:59:26.000Z | """
Migrate management command.
"""
import sys
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.conf import settings
from django.db import models
from south import migration
from south.migration import Migration, Migrations
from south.migration.utils import get_app_label
from south.exceptions import NoMigrations
from south.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
    """South's ``migrate`` management command.

    Applies pending schema migrations for a single app (optionally up/down
    to a named target migration) or for every app that has migrations,
    delegating the actual work to ``south.migration.migrate_app``.
    ``--list`` only reports which migrations have been applied.
    """
    option_list = BaseCommand.option_list + (
        make_option('--all', action='store_true', dest='all_apps', default=False,
            help='Run the specified migration for all apps.'),
        make_option('--list', action='store_true', dest='show_list', default=False,
            help='List migrations noting those that have been applied'),
        make_option('--skip', action='store_true', dest='skip', default=False,
            help='Will skip over out-of-order missing migrations'),
        make_option('--merge', action='store_true', dest='merge', default=False,
            help='Will run out-of-order missing migrations as they are - no rollbacks.'),
        make_option('--no-initial-data', action='store_true', dest='no_initial_data', default=False,
            help='Skips loading initial data if specified.'),
        make_option('--fake', action='store_true', dest='fake', default=False,
            help="Pretends to do the migrations, but doesn't actually execute them."),
        make_option('--db-dry-run', action='store_true', dest='db_dry_run', default=False,
            help="Doesn't execute the SQL generated by the db methods, and doesn't store a record that the migration(s) occurred. Useful to test migrations before applying them."),
        make_option('--delete-ghost-migrations', action='store_true', dest='delete_ghosts', default=False,
            help="Tells South to delete any 'ghost' migrations (ones in the database but not on disk)."),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
                'Defaults to the "default" database.'),
    )
    # Some Django versions already define --verbosity on BaseCommand; only
    # add our own definition when it is absent, to avoid an optparse clash.
    if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]:
        option_list += (
            make_option('--verbosity', action='store', dest='verbosity', default='1',
            type='choice', choices=['0', '1', '2'],
            help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
        )
    help = "Runs migrations for all apps."
    args = "[appname] [migrationname|zero] [--all] [--list] [--skip] [--merge] [--no-initial-data] [--fake] [--db-dry-run] [--database=dbalias]"

    def handle(self, app=None, target=None, skip=False, merge=False, backwards=False, fake=False, db_dry_run=False, show_list=False, database=DEFAULT_DB_ALIAS, delete_ghosts=False, **options):
        """Entry point: migrate one app (or all apps) to ``target``.

        NOTE(review): ``backwards`` is accepted but never used in this body;
        confirm whether any caller still relies on it.
        """
        # Work out what the resolve mode is
        # (py2-era and/or chaining: "merge" wins over "skip", else None).
        # NOTE(review): resolve_mode is computed but the matching
        # migrate_app keyword argument is commented out further down.
        resolve_mode = merge and "merge" or (skip and "skip" or None)
        # NOTE: THIS IS DUPLICATED FROM django.core.management.commands.syncdb
        # This code imports any module named 'management' in INSTALLED_APPS.
        # The 'management' module is the preferred way of listening to post_syncdb
        # signals, and since we're sending those out with create_table migrations,
        # we need apps to behave correctly.
        for app_name in settings.INSTALLED_APPS:
            try:
                __import__(app_name + '.management', {}, {}, [''])
            except ImportError, exc:
                msg = exc.args[0]
                # Only swallow "no management module" errors; any other
                # import failure inside the app must propagate.
                if not msg.startswith('No module named') or 'management' not in msg:
                    raise
        # END DJANGO DUPE CODE
        # if all_apps flag is set, shift app over to target
        if options.get('all_apps', False):
            target = app
            app = None
        # Migrate each app
        if app:
            try:
                apps = [Migrations(app)]
            except NoMigrations:
                print "The app '%s' does not appear to use migrations." % app
                print "./manage.py migrate " + self.args
                return
        else:
            # No app named on the command line: operate on every app that
            # has migrations.
            apps = list(migration.all_migrations())
        # Do we need to show the list of migrations?
        if show_list and apps:
            list_migrations(apps, database)
        if not show_list:
            for app in apps:
                result = migration.migrate_app(
                    app,
                    #resolve_mode = resolve_mode,
                    target_name = target,
                    fake = fake,
                    db_dry_run = db_dry_run,
                    verbosity = int(options.get('verbosity', 0)),
                    load_initial_data = not options.get('no_initial_data', False),
                    skip = skip,
                    database = database,
                    delete_ghosts = delete_ghosts,
                )
                if result is False:
                    sys.exit(1) # Migration failed, so the command fails.
def list_migrations(apps, database = DEFAULT_DB_ALIAS):
    """
    Prints a list of all available migrations, and which ones are currently applied.
    Accepts a list of Migrations instances.
    """
    # Imported lazily (presumably so south.models is only loaded once
    # Django's app machinery is ready — confirm before hoisting).
    from south.models import MigrationHistory
    applied_migrations = MigrationHistory.objects.filter(app_name__in=[app.app_label() for app in apps])
    if database != DEFAULT_DB_ALIAS:
        # Query the history table on the database actually being migrated.
        applied_migrations = applied_migrations.using(database)
    # Flatten to "app.migration" strings for the membership tests below.
    applied_migrations = ['%s.%s' % (mi.app_name,mi.migration) for mi in applied_migrations]
    print
    for app in apps:
        print " " + app.app_label()
        # Get the migrations object
        # NOTE: the loop variable shadows the module-level `migration`
        # import inside this function only.
        for migration in app:
            if migration.app_label() + "." + migration.name() in applied_migrations:
                print format_migration_list_item(migration.name())
            else:
                print format_migration_list_item(migration.name(), applied=False)
        print
def format_migration_list_item(name, applied=True):
    """Render one line of the --list output.

    Applied migrations are marked with ``(*)``, pending ones with ``( )``.
    """
    if applied:
        marker = '*'
    else:
        marker = ' '
    return ' (%s) %s' % (marker, name)
| 45.896296 | 192 | 0.619755 |
30aa97be0c2e5a559db8922e293d76789d027319 | 11,298 | py | Python | lib/installed_clients/KBaseReportClient.py | Tianhao-Gu/kb_gtdbtk | 41ea2c98f2553e6ef795ea703ffa34b703058720 | [
"MIT"
] | 3 | 2020-03-27T09:55:53.000Z | 2021-12-08T07:44:57.000Z | lib/installed_clients/KBaseReportClient.py | Tianhao-Gu/kb_gtdbtk | 41ea2c98f2553e6ef795ea703ffa34b703058720 | [
"MIT"
] | 32 | 2020-02-06T01:10:08.000Z | 2021-10-15T05:44:07.000Z | lib/installed_clients/KBaseReportClient.py | Tianhao-Gu/kb_gtdbtk | 41ea2c98f2553e6ef795ea703ffa34b703058720 | [
"MIT"
] | 5 | 2020-02-04T22:22:35.000Z | 2020-10-30T19:07:54.000Z | # -*- coding: utf-8 -*-
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
from __future__ import print_function
from .baseclient import BaseClient as _BaseClient
class KBaseReport(object):
    """Client for the KBaseReport service.

    Thin wrapper around the generated BaseClient: every public method simply
    forwards its parameters to ``BaseClient.run_job`` under the matching
    ``KBaseReport.<method>`` job name, using the service version chosen at
    construction time.
    """

    def __init__(
            self, url=None, timeout=30 * 60, user_id=None,
            password=None, token=None, ignore_authrc=False,
            trust_all_ssl_certificates=False,
            auth_svc='https://ci.kbase.us/services/auth/api/legacy/KBase/Sessions/Login',
            service_ver='release',
            async_job_check_time_ms=100, async_job_check_time_scale_percent=150,
            async_job_check_max_time_ms=300000):
        # The endpoint URL is the only mandatory argument.
        if url is None:
            raise ValueError('A url is required')
        self._service_ver = service_ver
        # Connection, auth and async-job polling are all delegated to the
        # shared BaseClient implementation.
        self._client = _BaseClient(
            url, timeout=timeout, user_id=user_id, password=password,
            token=token, ignore_authrc=ignore_authrc,
            trust_all_ssl_certificates=trust_all_ssl_certificates,
            auth_svc=auth_svc,
            async_job_check_time_ms=async_job_check_time_ms,
            async_job_check_time_scale_percent=async_job_check_time_scale_percent,
            async_job_check_max_time_ms=async_job_check_max_time_ms)

    def _run(self, job_name, arguments, context):
        # Single forwarding point for every service call.
        return self._client.run_job(job_name, arguments,
                                    self._service_ver, context)

    def create(self, params, context=None):
        """Create a simple, text-based report for an app run.

        :param params: mapping with a ``report`` (SimpleReport) structure
            plus either ``workspace_name`` or ``workspace_id`` (the id is
            preferred because it is immutable).
        :returns: ReportInfo mapping with ``ref`` (workspace reference in
            ``workspace_id/object_id/version`` form) and ``name`` of the
            saved report object.

        @deprecated KBaseReport.create_extended_report
        """
        return self._run('KBaseReport.create', [params], context)

    def create_extended_report(self, params, context=None):
        """Create a report with file and HTML links for an app run.

        The service side handles file and HTML zipping, uploading and
        linking, as well as HTML rendering.

        :param params: CreateExtendedReportParams mapping; requires either
            ``workspace_name`` or ``workspace_id`` and accepts the optional
            keys ``message``, ``objects_created``, ``warnings``,
            ``html_links``, ``direct_html`` / ``direct_html_link_index``
            (set one, not both), ``file_links``, ``report_object_name``,
            ``html_window_height`` and ``summary_window_height``.
        :returns: ReportInfo mapping with ``ref`` and ``name``.
        """
        return self._run('KBaseReport.create_extended_report',
                         [params], context)

    def status(self, context=None):
        """Forward a no-argument status() call to the service."""
        return self._run('KBaseReport.status', [], context)
| 63.117318 | 89 | 0.631351 |
583f96528fbe6cf61d545e583aac77937ab5871c | 2,249 | py | Python | source/compute_plane/python/lambda/scaling_metrics/scaling_metrics.py | kirillsc/aws-htc-grid | d1dd8068c3aebc3c04904b3daefc142a4b96872b | [
"Apache-2.0"
] | 24 | 2021-04-14T11:57:42.000Z | 2022-03-23T17:09:12.000Z | source/compute_plane/python/lambda/scaling_metrics/scaling_metrics.py | kirillsc/aws-htc-grid | d1dd8068c3aebc3c04904b3daefc142a4b96872b | [
"Apache-2.0"
] | 9 | 2021-04-23T08:44:13.000Z | 2021-09-15T13:37:42.000Z | source/compute_plane/python/lambda/scaling_metrics/scaling_metrics.py | kirillsc/aws-htc-grid | d1dd8068c3aebc3c04904b3daefc142a4b96872b | [
"Apache-2.0"
] | 15 | 2021-04-14T11:53:58.000Z | 2022-02-28T16:45:47.000Z | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 https://aws.amazon.com/apache-2-0/
import logging
import boto3
import time
import os
from api.queue_manager import queue_manager
# TODO - retrieve the endpoint url from Terraform
# Read once at import time; raises KeyError immediately if REGION is unset.
region = os.environ["REGION"]
def lambda_handler(event, context):
    """Publish the current task-queue backlog as a CloudWatch metric.

    Reads the pending-task count from the configured task queue and emits it
    as a single Count datapoint so the compute plane can auto-scale on it.

    Configuration comes entirely from environment variables:
    TASK_QUEUE_SERVICE, TASK_QUEUE_CONFIG, TASKS_QUEUE_NAME and REGION select
    the queue; NAMESPACE, METRICS_NAME, DIMENSION_NAME, DIMENSION_VALUE and
    PERIOD shape the CloudWatch datapoint.

    The Lambda ``event`` and ``context`` arguments are unused.
    """
    # For every x minute
    # count all items with "task_status" PENDING in the dynamoDB table "tasks_state_table"
    # put metric in CloudWatch with:
    # - namespace: given in the environment variable NAMESPACE
    # - DimensionName: given in the environment variable DIMENSION_NAME
    # TODO - retrieve the endpoint url from Terraform
    task_queue = queue_manager(
        task_queue_service=os.environ['TASK_QUEUE_SERVICE'],
        task_queue_config=os.environ['TASK_QUEUE_CONFIG'],
        tasks_queue_name=os.environ['TASKS_QUEUE_NAME'],
        region=region)
    task_pending = task_queue.get_queue_length()
    logging.info("Scaling Metrics: pending task in DDB = {}".format(task_pending))
    # Create CloudWatch client
    cloudwatch = boto3.client('cloudwatch')
    period = int(os.environ["PERIOD"])
    # NOTE(review): PutMetricData only accepts StorageResolution values of
    # 1 or 60, so PERIOD must be one of those — confirm deployment config.
    cloudwatch.put_metric_data(
        MetricData=[
            {
                'MetricName': os.environ['METRICS_NAME'],
                'Timestamp': time.time(),
                'Dimensions': [
                    {
                        'Name': os.environ['DIMENSION_NAME'],
                        'Value': os.environ['DIMENSION_VALUE']
                    },
                ],
                'Unit': 'Count',
                'StorageResolution': period,
                'Value': task_pending,
            },
        ],
        Namespace=os.environ['NAMESPACE']
    )
    return
def main():
    # Local entry point: run the handler once with an empty Lambda event.
    lambda_handler(event={}, context=None)
if __name__ == "__main__":
    # execute only if run as a script
    # Mirror part of the Lambda environment so the handler can run locally.
    # NOTE(review): the module also reads REGION at import time and the
    # handler reads TASK_QUEUE_SERVICE / TASK_QUEUE_CONFIG /
    # TASKS_QUEUE_NAME, none of which are set here — a bare local run still
    # needs them exported in the shell.
    os.environ["STATE_TABLE_CONFIG"] = "tasks_state_table"
    os.environ["NAMESPACE"] = "CloudGrid/HTC/Scaling/"
    os.environ["DIMENSION_NAME"] = "cluster_name"
    os.environ["DIMENSION_VALUE"] = "aws"
    os.environ["PERIOD"] = "1"
    os.environ["METRICS_NAME"] = "pending_tasks_ddb"
    main()
| 32.128571 | 90 | 0.631836 |
19332ad1c4e6a9716062c85cb80d6660d182901e | 2,690 | py | Python | scripts/BRC_microarray/USA/rank_clusters_cross_feature_selection_Netherlands.py | omarmaddouri/GCNCC_cross_validated | 89576ad2c8459f065604656fd38a786d042f09e0 | [
"MIT"
] | 1 | 2022-03-12T13:34:34.000Z | 2022-03-12T13:34:34.000Z | scripts/BRC_microarray/USA/rank_clusters_cross_feature_selection_Netherlands.py | omarmaddouri/GCNCC_cross_validated | 89576ad2c8459f065604656fd38a786d042f09e0 | [
"MIT"
] | 3 | 2022-02-09T23:28:07.000Z | 2022-02-11T19:08:53.000Z | scripts/BRC_microarray/USA/rank_clusters_cross_feature_selection_Netherlands.py | omarmaddouri/GCNCC_cross_validated | 89576ad2c8459f065604656fd38a786d042f09e0 | [
"MIT"
] | null | null | null | import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
from BRC_microarray.USA.utils import *
# Sequential forward selection (SFS) over gene clusters: walk the ranked
# candidate list and keep a cluster only when adding its activity-score
# feature row improves logistic-regression accuracy by at least `eps`.
path="../../data/BRC_microarray/output/USA/"
dataset="PPI"
# Expression features come from the Netherlands cohort while cluster
# definitions come from the USA output directory (cross-cohort selection).
features = np.genfromtxt("../../data/BRC_microarray/output/Netherlands/{}.GE_Features.txt".format(dataset), dtype=np.dtype(np.float32))
labels = get_clinical_status_Netherlands()
clusters = open("{}{}.clusters.txt".format(path, dataset), encoding="utf-8")
total_clusters = get_top_clusters(path, dataset, features, labels, clusters)
print("The complete set of clusters that passed the minimal threshold is \n {}".format(total_clusters))
# Persist the full candidate list before selection starts.
with open("{}{}.top_features_Netherlands_FS.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
    w_top_clusters = csv.writer(f, delimiter ='\t')
    w_top_clusters.writerow(total_clusters)
clust = []
nb_columns = len(labels)
baseline_accuracy = 0
eps = 0.01 #minimum accuracy improvement to consider new cluster (1%)
tmp_Data = object
for i in range(len(total_clusters)):
    # Tentatively add candidate i, then rebuild the (clusters x samples)
    # matrix: previously accepted rows are copied from tmp_Data so only the
    # new cluster's activity scores need computing.
    clust.append(total_clusters[i])
    nb_rows = len(clust)
    Data = np.zeros((nb_rows, nb_columns), dtype=object)
    if(i>0):#if temporary Data vector exist, copy all lines except last
        for j in range(nb_rows-1):
            Data[j, :] = tmp_Data[j, :]
    #Just compute score of newly added cluster
    Data[-1, :] = prepare_activity_score_feature_vector(features, labels, clust[nb_rows-1], clusters)
    accuracy = logistic_regression_classification_aggregate_activity_scores(np.transpose(Data), labels)
    if( accuracy < baseline_accuracy + eps ):
        # Reject: drop the candidate again and restore the matrix without it.
        clust = clust[:-1]
        tmp_Data = Data
        tmp_Data = np.delete(tmp_Data, tmp_Data.shape[0]-1, axis=0)
        print("SFS: feature {}/{} checked and rejected".format(i, len(total_clusters)-1))
    else:
        # Accept: the candidate becomes part of the baseline feature set.
        baseline_accuracy = accuracy
        tmp_Data = Data
        print("SFS: feature {}/{} checked and retained".format(i, len(total_clusters)-1))
print("The set of clusters to be used in classification is \n {}".format(clust))
with open("{}{}.final_features_Netherlands_FS.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
    w_final_clusters = csv.writer(f, delimiter ='\t')
    w_final_clusters.writerow(clust)
# NOTE(review): `accuracy` here is from the LAST candidate evaluated, which
# may have been rejected; if the accuracy of the final selected set is
# intended, `baseline_accuracy` should be reported instead — confirm.
print("Logistic regression accuracy: {}".format(accuracy))
#accuracy = LDA_classification_aggregate_activity_scores(np.transpose(Data), labels)
#print("LDA accuracy: {}".format(accuracy))
#accuracy = SVM_classification_aggregate_activity_scores(np.transpose(Data), labels)
#print("SVM(Linear Kernel) accuracy: {}".format(accuracy))
clusters.close() | 42.03125 | 135 | 0.697026 |
79a88f926bf89043a91f9e78901da349e7bb731d | 11,691 | py | Python | configs/example/spec06_benchmarks.py | GinoAC/gem5_garnet2_CHIPS | b0e8a765ef9d2f6a1bf8513d9f9853432084d122 | [
"BSD-3-Clause"
] | 1 | 2021-05-24T04:49:47.000Z | 2021-05-24T04:49:47.000Z | configs/example/spec06_benchmarks.py | GinoAC/gem5_garnet2_CHIPS | b0e8a765ef9d2f6a1bf8513d9f9853432084d122 | [
"BSD-3-Clause"
] | null | null | null | configs/example/spec06_benchmarks.py | GinoAC/gem5_garnet2_CHIPS | b0e8a765ef9d2f6a1bf8513d9f9853432084d122 | [
"BSD-3-Clause"
] | 2 | 2021-06-04T19:46:55.000Z | 2021-11-11T11:49:16.000Z | import m5
from m5.objects import *
# ---------------------------------------------------------------------------
# SPEC CPU2006 workload definitions for gem5 runs.
# Every benchmark follows the same stanza pattern:
#   <name> = Process()           # gem5 process object (was LiveProcess pre-2017)
#   <name>.executable = '<bin>'  # benchmark binary name
#   <name>.cmd / <name>.input    # the active "ref" workload; "test" workloads
#                                # and alternate ref inputs stay commented out
#   #<name>.output = ...         # optional stdout redirection (disabled)
# ---------------------------------------------------------------------------
# These three directory paths are not currently used.
#gem5_dir = '<FULL_PATH_TO_YOUR_GEM5_INSTALL>'
#spec_dir = '<FULL_PATH_TO_YOUR_SPEC_CPU2006_INSTALL>'
#out_dir = '<FULL_PATH_TO_DESIRED_OUTPUT_DIRECTORY>'
#temp
#binary_dir = spec_dir
#data_dir = spec_dir
#400.perlbench
perlbench = Process() # Update June 7, 2017: This used to be LiveProcess()
perlbench.executable = 'perlbench'
# TEST CMDS
#perlbench.cmd = [perlbench.executable] + ['-I.', '-I./lib', 'attrs.pl']
# REF CMDS
perlbench.cmd = [perlbench.executable] + ['-I./lib', 'checkspam.pl', '2500', '5', '25', '11', '150', '1', '1', '1', '1']
#perlbench.cmd = [perlbench.executable] + ['-I./lib', 'diffmail.pl', '4', '800', '10', '17', '19', '300']
#perlbench.cmd = [perlbench.executable] + ['-I./lib', 'splitmail.pl', '1600', '12', '26', '16', '4500']
#perlbench.output = out_dir+'perlbench.out'
#401.bzip2
bzip2 = Process() # Update June 7, 2017: This used to be LiveProcess()
bzip2.executable = 'bzip2'
# TEST CMDS
#bzip2.cmd = [bzip2.executable] + ['input.program', '5']
# REF CMDS
bzip2.cmd = [bzip2.executable] + ['input.source', '280']
#bzip2.cmd = [bzip2.executable] + ['chicken.jpg', '30']
#bzip2.cmd = [bzip2.executable] + ['liberty.jpg', '30']
#bzip2.cmd = [bzip2.executable] + ['input.program', '280']
#bzip2.cmd = [bzip2.executable] + ['text.html', '280']
#bzip2.cmd = [bzip2.executable] + ['input.combined', '200']
#bzip2.output = out_dir + 'bzip2.out'
#403.gcc
gcc = Process() # Update June 7, 2017: This used to be LiveProcess()
gcc.executable = 'gcc'
# TEST CMDS
#gcc.cmd = [gcc.executable] + ['cccp.i', '-o', 'cccp.s']
# REF CMDS
gcc.cmd = [gcc.executable] + ['166.i', '-o', '166.s']
#gcc.cmd = [gcc.executable] + ['200.i', '-o', '200.s']
#gcc.cmd = [gcc.executable] + ['c-typeck.i', '-o', 'c-typeck.s']
#gcc.cmd = [gcc.executable] + ['cp-decl.i', '-o', 'cp-decl.s']
#gcc.cmd = [gcc.executable] + ['expr.i', '-o', 'expr.s']
#gcc.cmd = [gcc.executable] + ['expr2.i', '-o', 'expr2.s']
#gcc.cmd = [gcc.executable] + ['g23.i', '-o', 'g23.s']
#gcc.cmd = [gcc.executable] + ['s04.i', '-o', 's04.s']
#gcc.cmd = [gcc.executable] + ['scilab.i', '-o', 'scilab.s']
#gcc.output = out_dir + 'gcc.out'
#410.bwaves
bwaves = Process() # Update June 7, 2017: This used to be LiveProcess()
bwaves.executable = 'bwaves'
# TEST CMDS
#bwaves.cmd = [bwaves.executable]
# REF CMDS
bwaves.cmd = [bwaves.executable]
#bwaves.output = out_dir + 'bwaves.out'
#416.gamess
gamess = Process() # Update June 7, 2017: This used to be LiveProcess()
gamess.executable = 'gamess'
# TEST CMDS
#gamess.cmd = [gamess.executable]
#gamess.input = 'exam29.config'
# REF CMDS
gamess.cmd = [gamess.executable]
gamess.input = 'cytosine.2.config'
#gamess.cmd = [gamess.executable]
#gamess.input = 'h2ocu2+.gradient.config'
#gamess.cmd = [gamess.executable]
#gamess.input = 'triazolium.config'
#gamess.output = out_dir + 'gamess.out'
#429.mcf
mcf = Process() # Update June 7, 2017: This used to be LiveProcess()
mcf.executable = 'mcf'
# TEST CMDS
#mcf.cmd = [mcf.executable] + ['inp.in']
# REF CMDS
mcf.cmd = [mcf.executable] + ['inp.in']
#mcf.output = out_dir + 'mcf.out'
#433.milc
milc = Process() # Update June 7, 2017: This used to be LiveProcess()
milc.executable = 'milc'
# TEST CMDS
#milc.cmd = [milc.executable]
#milc.input = 'su3imp.in'
# REF CMDS
milc.cmd = [milc.executable]
milc.input = 'su3imp.in'
#milc.output = out_dir + 'milc.out'
#434.zeusmp
zeusmp = Process() # Update June 7, 2017: This used to be LiveProcess()
zeusmp.executable = 'zeusmp'
# TEST CMDS
#zeusmp.cmd = [zeusmp.executable]
# REF CMDS
zeusmp.cmd = [zeusmp.executable]
#zeusmp.output = out_dir + 'zeusmp.out'
#435.gromacs
gromacs = Process() # Update June 7, 2017: This used to be LiveProcess()
gromacs.executable = 'gromacs'
# TEST CMDS
#gromacs.cmd = [gromacs.executable] + ['-silent','-deffnm', 'gromacs', '-nice','0']
# REF CMDS
gromacs.cmd = [gromacs.executable] + ['-silent','-deffnm', 'gromacs', '-nice','0']
#gromacs.output = out_dir + 'gromacs.out'
#436.cactusADM
cactusADM = Process() # Update June 7, 2017: This used to be LiveProcess()
cactusADM.executable = 'cactusADM'
# TEST CMDS
#cactusADM.cmd = [cactusADM.executable] + ['benchADM.par']
# REF CMDS
cactusADM.cmd = [cactusADM.executable] + ['benchADM.par']
#cactusADM.output = out_dir + 'cactusADM.out'
#437.leslie3d
leslie3d = Process() # Update June 7, 2017: This used to be LiveProcess()
leslie3d.executable = 'leslie3d'
# TEST CMDS
#leslie3d.cmd = [leslie3d.executable]
#leslie3d.input = 'leslie3d.in'
# REF CMDS
leslie3d.cmd = [leslie3d.executable]
leslie3d.input = 'leslie3d.in'
#leslie3d.output = out_dir + 'leslie3d.out'
#444.namd
namd = Process() # Update June 7, 2017: This used to be LiveProcess()
namd.executable = 'namd'
# TEST CMDS
#namd.cmd = [namd.executable] + ['--input', 'namd.input', '--output', 'namd.out', '--iterations', '1']
# REF CMDS
namd.cmd = [namd.executable] + ['--input', 'namd.input', '--output', 'namd.out', '--iterations', '38']
#namd.output = out_dir + 'namd.out'
#445.gobmk
gobmk = Process() # Update June 7, 2017: This used to be LiveProcess()
gobmk.executable = 'gobmk'
# TEST CMDS
#gobmk.cmd = [gobmk.executable] + ['--quiet','--mode', 'gtp']
#gobmk.input = 'dniwog.tst'
# REF CMDS
gobmk.cmd = [gobmk.executable] + ['--quiet','--mode', 'gtp']
gobmk.input = '13x13.tst'
#gobmk.cmd = [gobmk.executable] + ['--quiet','--mode', 'gtp']
#gobmk.input = 'nngs.tst'
#gobmk.cmd = [gobmk.executable] + ['--quiet','--mode', 'gtp']
#gobmk.input = 'score2.tst'
#gobmk.cmd = [gobmk.executable] + ['--quiet','--mode', 'gtp']
#gobmk.input = 'trevorc.tst'
#gobmk.cmd = [gobmk.executable] + ['--quiet','--mode', 'gtp']
#gobmk.input = 'trevord.tst'
#gobmk.output = out_dir + 'gobmk.out'
#447.dealII
# NOTE(review): dealII is declared but never assigned a cmd, and the
# commented-out line below references gobmk.executable rather than
# dealII.executable — selecting this benchmark will fail until fixed.
####### NOT WORKING #########
dealII = Process() # Update June 7, 2017: This used to be LiveProcess()
dealII.executable = 'dealII'
# TEST CMDS
####### NOT WORKING #########
#dealII.cmd = [gobmk.executable]+['8']
# REF CMDS
####### NOT WORKING #########
#dealII.output = out_dir + 'dealII.out'
#450.soplex
soplex = Process() # Update June 7, 2017: This used to be LiveProcess()
soplex.executable = 'soplex'
# TEST CMDS
#soplex.cmd = [soplex.executable] + ['-m10000', 'test.mps']
# REF CMDS
soplex.cmd = [soplex.executable] + ['-m45000', 'pds-50.mps']
#soplex.cmd = [soplex.executable] + ['-m3500', 'ref.mps']
#soplex.output = out_dir + 'soplex.out'
#453.povray
povray = Process() # Update June 7, 2017: This used to be LiveProcess()
povray.executable = 'povray'
# TEST CMDS
#povray.cmd = [povray.executable] + ['SPEC-benchmark-test.ini']
# REF CMDS
povray.cmd = [povray.executable] + ['SPEC-benchmark-ref.ini']
#povray.output = out_dir + 'povray.out'
#454.calculix
calculix = Process() # Update June 7, 2017: This used to be LiveProcess()
calculix.executable = 'calculix'
# TEST CMDS
#calculix.cmd = [calculix.executable] + ['-i', 'beampic']
# REF CMDS
calculix.cmd = [calculix.executable] + ['-i', 'hyperviscoplastic']
#calculix.output = out_dir + 'calculix.out'
#456.hmmer
hmmer = Process() # Update June 7, 2017: This used to be LiveProcess()
hmmer.executable = 'hmmer'
# TEST CMDS
#hmmer.cmd = [hmmer.executable] + ['--fixed', '0', '--mean', '325', '--num', '45000', '--sd', '200', '--seed', '0', 'bombesin.hmm']
# REF CMDS
hmmer.cmd = [hmmer.executable] + ['nph3.hmm', 'swiss41']
#hmmer.cmd = [hmmer.executable] + ['--fixed', '0', '--mean', '500', '--num', '500000', '--sd', '350', '--seed', '0', 'retro.hmm']
#hmmer.output = out_dir + 'hmmer.out'
#458.sjeng
sjeng = Process() # Update June 7, 2017: This used to be LiveProcess()
sjeng.executable = 'sjeng'
# TEST CMDS
#sjeng.cmd = [sjeng.executable] + ['test.txt']
# REF CMDS
sjeng.cmd = [sjeng.executable] + ['ref.txt']
#sjeng.output = out_dir + 'sjeng.out'
#459.GemsFDTD
GemsFDTD = Process() # Update June 7, 2017: This used to be LiveProcess()
GemsFDTD.executable = 'GemsFDTD'
# TEST CMDS
#GemsFDTD.cmd = [GemsFDTD.executable]
# REF CMDS
GemsFDTD.cmd = [GemsFDTD.executable]
#GemsFDTD.output = out_dir + 'GemsFDTD.out'
#462.libquantum
libquantum = Process() # Update June 7, 2017: This used to be LiveProcess()
libquantum.executable = 'libquantum'
# TEST CMDS
#libquantum.cmd = [libquantum.executable] + ['33','5']
# REF CMDS [UPDATE 10/2/2015]: Sparsh Mittal has pointed out the correct input for libquantum should be 1397 and 8, not 1297 and 8. Thanks!
libquantum.cmd = [libquantum.executable] + ['1397','8']
#libquantum.output = out_dir + 'libquantum.out'
#464.h264ref
h264ref = Process() # Update June 7, 2017: This used to be LiveProcess()
h264ref.executable = 'h264ref'
# TEST CMDS
#h264ref.cmd = [h264ref.executable] + ['-d', 'foreman_test_encoder_baseline.cfg']
# REF CMDS
h264ref.cmd = [h264ref.executable] + ['-d', 'foreman_ref_encoder_baseline.cfg']
#h264ref.cmd = [h264ref.executable] + ['-d', 'foreman_ref_encoder_main.cfg']
#h264ref.cmd = [h264ref.executable] + ['-d', 'sss_encoder_main.cfg']
#h264ref.output = out_dir + 'h264ref.out'
#465.tonto
tonto = Process() # Update June 7, 2017: This used to be LiveProcess()
tonto.executable = 'tonto'
# TEST CMDS
#tonto.cmd = [tonto.executable]
# REF CMDS
tonto.cmd = [tonto.executable]
#tonto.output = out_dir + 'tonto.out'
#470.lbm
lbm = Process() # Update June 7, 2017: This used to be LiveProcess()
lbm.executable = 'lbm'
# TEST CMDS
#lbm.cmd = [lbm.executable] + ['20', 'reference.dat', '0', '1', '100_100_130_cf_a.of']
# REF CMDS
lbm.cmd = [lbm.executable] + ['300', 'reference.dat', '0', '0', '100_100_130_ldc.of']
#lbm.output = out_dir + 'lbm.out'
#471.omnetpp
omnetpp = Process() # Update June 7, 2017: This used to be LiveProcess()
omnetpp.executable = 'omnetpp'
# TEST CMDS
#omnetpp.cmd = [omnetpp.executable] + ['omnetpp.ini']
# REF CMDS
omnetpp.cmd = [omnetpp.executable] + ['omnetpp.ini']
#omnetpp.output = out_dir + 'omnetpp.out'
#473.astar
astar = Process() # Update June 7, 2017: This used to be LiveProcess()
astar.executable = 'astar'
# TEST CMDS
#astar.cmd = [astar.executable] + ['lake.cfg']
# REF CMDS
astar.cmd = [astar.executable] + ['rivers.cfg']
#astar.output = out_dir + 'astar.out'
#481.wrf
wrf = Process() # Update June 7, 2017: This used to be LiveProcess()
wrf.executable = 'wrf'
# TEST CMDS
#wrf.cmd = [wrf.executable]
# REF CMDS
wrf.cmd = [wrf.executable]
#wrf.output = out_dir + 'wrf.out'
#482.sphinx3
sphinx3 = Process() # Update June 7, 2017: This used to be LiveProcess()
sphinx3.executable = 'sphinx_livepretend'
# TEST CMDS
#sphinx3.cmd = [sphinx3.executable] + ['ctlfile', '.', 'args.an4']
# REF CMDS
sphinx3.cmd = [sphinx3.executable] + ['ctlfile', '.', 'args.an4']
#sphinx3.output = out_dir + 'sphinx3.out'
#483.xalancbmk
# NOTE(review): xalancbmk is declared but never assigned a cmd (both the
# test and ref command lines are commented out) — selecting this benchmark
# will fail until fixed.
######## NOT WORKING ###########
xalancbmk = Process() # Update June 7, 2017: This used to be LiveProcess()
xalancbmk.executable = 'xalancbmk'
# TEST CMDS
######## NOT WORKING ###########
#xalancbmk.cmd = [xalancbmk.executable] + ['-v','test.xml','xalanc.xsl']
# REF CMDS
######## NOT WORKING ###########
#xalancbmk.output = out_dir + 'xalancbmk.out'
#998.specrand
specrand_i = Process() # Update June 7, 2017: This used to be LiveProcess()
specrand_i.executable = 'specrand'
# TEST CMDS
#specrand_i.cmd = [specrand_i.executable] + ['324342', '24239']
# REF CMDS
specrand_i.cmd = [specrand_i.executable] + ['1255432124', '234923']
#specrand_i.output = out_dir + 'specrand_i.out'
#999.specrand
specrand_f = Process() # Update June 7, 2017: This used to be LiveProcess()
specrand_f.executable = 'specrand'
# TEST CMDS
#specrand_f.cmd = [specrand_f.executable] + ['324342', '24239']
# REF CMDS
specrand_f.cmd = [specrand_f.executable] + ['1255432124', '234923']
#specrand_f.output = out_dir + 'specrand_f.out'
| 35.002994 | 139 | 0.667608 |
8cc4ed5ab07612d3c0ad3a23fe757c084e67efc4 | 2,977 | py | Python | back-end/migrations/env.py | Ay1c/flask-vuejs-madblog | 657bf10650e323f85bf1e0f914ab811f4f69acb3 | [
"MIT"
] | null | null | null | back-end/migrations/env.py | Ay1c/flask-vuejs-madblog | 657bf10650e323f85bf1e0f914ab811f4f69acb3 | [
"MIT"
] | 1 | 2021-01-23T07:21:28.000Z | 2021-01-23T07:21:28.000Z | back-end/migrations/env.py | Ay1c/flask-vuejs-madblog | 657bf10650e323f85bf1e0f914ab811f4f69acb3 | [
"MIT"
] | null | null | null | from __future__ import with_statement
import logging
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# Imported here because the Flask application context is needed to reach
# the Flask-Migrate extension on current_app.
from flask import current_app
# Point Alembic at the Flask-Migrate database URL. '%' is doubled because
# configparser (which backs the Alembic config) would otherwise treat a
# single '%' as an interpolation marker.
config.set_main_option(
    'sqlalchemy.url',
    str(current_app.extensions['migrate'].db.engine.url).replace('%', '%%'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Alembic is configured with just the database URL — no Engine (and
    hence no DBAPI) is needed. context.execute() calls emit the given
    SQL to the script output instead of hitting a database.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Creates an Engine from the Alembic config and binds a live
    connection to the migration context.
    """

    def process_revision_directives(context, revision, directives):
        # Callback preventing an empty auto-migration from being written
        # when the schema has not changed (Alembic cookbook recipe).
        if not getattr(config.cmd_opts, 'autogenerate', False):
            return
        migration_script = directives[0]
        if migration_script.upgrade_ops.is_empty():
            directives[:] = []
            logger.info('No changes in schema detected.')

    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
    )
    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            process_revision_directives=process_revision_directives,
            render_as_batch=True,  # batch_alter_table: needed to drop columns
            **current_app.extensions['migrate'].configure_args
        )
        with context.begin_transaction():
            context.run_migrations()
# Entry point: pick the migration mode the Alembic context was set up for.
if not context.is_offline_mode():
    run_migrations_online()
else:
    run_migrations_offline()
| 30.377551 | 77 | 0.71179 |
8f4b329d970f400dc9687c0756b15931044a71d1 | 412 | py | Python | setup.py | shreyasnbhat/education-engineering | 91db0f016fd106846ddaa866da2c8bf1e6779509 | [
"MIT"
] | null | null | null | setup.py | shreyasnbhat/education-engineering | 91db0f016fd106846ddaa866da2c8bf1e6779509 | [
"MIT"
] | 38 | 2017-08-09T10:16:41.000Z | 2017-11-01T16:37:22.000Z | setup.py | shreyasnbhat/education-engineering | 91db0f016fd106846ddaa866da2c8bf1e6779509 | [
"MIT"
] | 7 | 2017-08-10T16:48:13.000Z | 2018-10-22T15:07:44.000Z | from setuptools import setup
# TODO: replace the placeholder author e-mail and review packaging
# metadata before publishing.
_METADATA = dict(
    name='education-engineering',
    version='0.1',
    description='A tool to help students predict grades',
    url='http://github.com/shreyasnbhat/education-engineering',
    author='Shreyas and Gautam',
    author_email='none@none.none',
    packages=['app', 'tests'],
    install_requires=['Flask>=0.2',
                      'SQLAlchemy>=0.6'],
    zip_safe=False,
)
setup(**_METADATA)
| 31.692308 | 65 | 0.650485 |
b9fcd5ed368a528ab0d7b8b8cade22fd6e8cdb34 | 17,283 | py | Python | zinnia/management/commands/wp2zinnia.py | zapier/django-blog-zinnia | 2631cbe05fa7b95aecd172fe34b7081ca4beda47 | [
"BSD-3-Clause"
] | null | null | null | zinnia/management/commands/wp2zinnia.py | zapier/django-blog-zinnia | 2631cbe05fa7b95aecd172fe34b7081ca4beda47 | [
"BSD-3-Clause"
] | 1 | 2021-09-08T12:32:28.000Z | 2021-09-08T12:32:28.000Z | zinnia/management/commands/wp2zinnia.py | isabella232/django-blog-zinnia | 2631cbe05fa7b95aecd172fe34b7081ca4beda47 | [
"BSD-3-Clause"
] | 1 | 2021-09-08T10:28:36.000Z | 2021-09-08T10:28:36.000Z | """WordPress to Zinnia command module"""
import os
import sys
from urllib2 import urlopen
from datetime import datetime
from optparse import make_option
from xml.etree import ElementTree as ET
from django.conf import settings
from django.utils import timezone
from django.core.files import File
from django.utils.text import Truncator
from django.utils.html import strip_tags
from django.db.utils import IntegrityError
from django.utils.encoding import smart_str
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.contrib import comments
from django.core.management.base import CommandError
from django.core.management.base import LabelCommand
from django.core.files.temp import NamedTemporaryFile
from tagging.models import Tag
from zinnia import __version__
from zinnia.models.entry import Entry
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.flags import get_user_flagger
from zinnia.flags import PINGBACK, TRACKBACK, SPAM
from zinnia.signals import disconnect_zinnia_signals
from zinnia.managers import DRAFT, HIDDEN, PUBLISHED
WP_NS = 'http://wordpress.org/export/%s/'
class Command(LabelCommand):
    """Command object for importing a WordPress blog
    into Zinnia via a WordPress eXtended RSS (WXR) file."""
    help = 'Import a Wordpress blog into Zinnia.'
    label = 'WXR file'
    args = 'wordpress.xml'
    # Extra CLI options on top of the inherited LabelCommand ones.
    option_list = LabelCommand.option_list + (
        make_option('--noautoexcerpt', action='store_false',
                    dest='auto_excerpt', default=True,
                    help='Do NOT generate an excerpt if not present.'),
        make_option('--author', dest='author', default='',
                    help='All imported entries belong to specified author'),
    )
    # Every imported entry is attached to the current Django site.
    SITE = Site.objects.get_current()
    # Map Wordpress post statuses onto Zinnia publication statuses.
    REVERSE_STATUS = {'pending': DRAFT,
                      'draft': DRAFT,
                      'auto-draft': DRAFT,
                      'inherit': DRAFT,
                      'publish': PUBLISHED,
                      'future': PUBLISHED,
                      'trash': HIDDEN,
                      'private': PUBLISHED}
    def __init__(self):
        """Init the Command and add custom styles"""
        super(Command, self).__init__()
        self.style.TITLE = self.style.SQL_FIELD
        self.style.STEP = self.style.SQL_COLTYPE
        self.style.ITEM = self.style.HTTP_INFO
        # Disconnect Zinnia's signal handlers so they do not fire for
        # every object created during the mass import.
        disconnect_zinnia_signals()
    def write_out(self, message, verbosity_level=1):
        """Convenient method for outputing"""
        if self.verbosity and self.verbosity >= verbosity_level:
            sys.stdout.write(smart_str(message))
            sys.stdout.flush()
    def handle_label(self, wxr_file, **options):
        """Entry point: parse the WXR file and run the import steps
        (authors, categories, tags, then entries)."""
        global WP_NS
        self.verbosity = int(options.get('verbosity', 1))
        self.auto_excerpt = options.get('auto_excerpt', True)
        self.default_author = options.get('author')
        if self.default_author:
            try:
                self.default_author = Author.objects.get(
                    username=self.default_author)
            except Author.DoesNotExist:
                raise CommandError('Invalid username for default author')
        self.write_out(self.style.TITLE(
            'Starting migration from Wordpress to Zinnia %s:\n' % __version__))
        tree = ET.parse(wxr_file)
        # Resolve the versioned Wordpress namespace before touching any node.
        WP_NS = WP_NS % self.guess_wxr_version(tree)
        self.authors = self.import_authors(tree)
        self.categories = self.import_categories(
            tree.findall('channel/{%s}category' % WP_NS))
        self.import_tags(tree.findall('channel/{%s}tag' % WP_NS))
        self.import_entries(tree.findall('channel/item'))
    def guess_wxr_version(self, tree):
        """We will try to guess the wxr version used
        to complete the wordpress xml namespace name"""
        for v in ('1.2', '1.1', '1.0'):
            try:
                # find() returns None on a miss, so .text raises
                # AttributeError for a wrong namespace version.
                tree.find('channel/{%s}wxr_version' % (WP_NS % v)).text
                return v
            except AttributeError:
                pass
        raise CommandError('Cannot resolve the wordpress namespace')
    def import_authors(self, tree):
        """Retrieve all the authors used in posts
        and convert it to new or existing author and
        return the conversion"""
        self.write_out(self.style.STEP('- Importing authors\n'))
        post_authors = set()
        for item in tree.findall('channel/item'):
            post_type = item.find('{%s}post_type' % WP_NS).text
            if post_type == 'post':
                post_authors.add(item.find(
                    '{http://purl.org/dc/elements/1.1/}creator').text)
        self.write_out('> %i authors found.\n' % len(post_authors))
        authors = {}
        for post_author in post_authors:
            if self.default_author:
                authors[post_author] = self.default_author
            else:
                authors[post_author] = self.migrate_author(
                    post_author.replace(' ', '-'))
        return authors
    def migrate_author(self, author_name):
        """Handle actions for migrating the authors.
        Interactive: prompts on stdin (Python 2-era raw_input) to either
        map the Wordpress author to an existing user or create one."""
        action_text = "The author '%s' needs to be migrated to an User:\n"\
                      "1. Use an existing user ?\n"\
                      "2. Create a new user ?\n"\
                      "Please select a choice: " % self.style.ITEM(author_name)
        # Loop until a valid menu choice is entered ('while 42' == forever).
        while 42:
            selection = raw_input(smart_str(action_text))
            # NOTE(review): substring test — the full string '12' would
            # also pass here; harmless in practice but worth confirming.
            if selection and selection in '12':
                break
        if selection == '1':
            users = Author.objects.all()
            if users.count() == 1:
                username = users[0].username
                preselected_user = username
                usernames = [username]
                usernames_display = ['[%s]' % username]
            else:
                usernames = []
                usernames_display = []
                preselected_user = None
                for user in users:
                    username = user.username
                    # The username matching the Wordpress author is shown
                    # bracketed and becomes the default (empty input).
                    if username == author_name:
                        usernames_display.append('[%s]' % username)
                        preselected_user = username
                    else:
                        usernames_display.append(username)
                    usernames.append(username)
            while 42:
                user_text = "1. Select your user, by typing " \
                            "one of theses usernames:\n"\
                            "%s or 'back'\n"\
                            "Please select a choice: " % \
                            ', '.join(usernames_display)
                user_selected = raw_input(user_text)
                if user_selected in usernames:
                    break
                if user_selected == '' and preselected_user:
                    user_selected = preselected_user
                    break
                if user_selected.strip() == 'back':
                    return self.migrate_author(author_name)
            return users.get(username=user_selected)
        else:
            create_text = "2. Please type the email of " \
                          "the '%s' user or 'back': " % author_name
            author_mail = raw_input(create_text)
            if author_mail.strip() == 'back':
                return self.migrate_author(author_name)
            try:
                return Author.objects.create_user(author_name, author_mail)
            except IntegrityError:
                # The user already exists: reuse it.
                return Author.objects.get(username=author_name)
    def import_categories(self, category_nodes):
        """Import all the categories from 'wp:category' nodes,
        because categories in 'item' nodes are not necessarily
        all the categories and returning it in a dict for
        database optimizations."""
        self.write_out(self.style.STEP('- Importing categories\n'))
        categories = {}
        for category_node in category_nodes:
            title = category_node.find('{%s}cat_name' % WP_NS).text[:255]
            slug = category_node.find(
                '{%s}category_nicename' % WP_NS).text[:255]
            try:
                parent = category_node.find(
                    '{%s}category_parent' % WP_NS).text[:255]
            except TypeError:
                # .text is None for an empty <category_parent>;
                # None[:255] raises TypeError.
                parent = None
            self.write_out('> %s... ' % title)
            category, created = Category.objects.get_or_create(
                title=title, slug=slug, parent=categories.get(parent))
            categories[title] = category
            self.write_out(self.style.ITEM('OK\n'))
        return categories
    def import_tags(self, tag_nodes):
        """Import all the tags form 'wp:tag' nodes,
        because tags in 'item' nodes are not necessarily
        all the tags, then use only the nicename, because it's like
        a slug and the true tag name may be not valid for url usage."""
        self.write_out(self.style.STEP('- Importing tags\n'))
        for tag_node in tag_nodes:
            tag_name = tag_node.find(
                '{%s}tag_slug' % WP_NS).text[:50]
            self.write_out('> %s... ' % tag_name)
            Tag.objects.get_or_create(name=tag_name)
            self.write_out(self.style.ITEM('OK\n'))
    def get_entry_tags(self, categories):
        """Return a list of entry's tags,
        by using the nicename for url compatibility"""
        tags = []
        for category in categories:
            domain = category.attrib.get('domain', 'category')
            if domain == 'tag' and category.attrib.get('nicename'):
                tags.append(category.attrib.get('nicename'))
        return tags
    def get_entry_categories(self, category_nodes):
        """Return a list of entry's categories
        based of imported categories"""
        categories = []
        for category_node in category_nodes:
            domain = category_node.attrib.get('domain')
            if domain == 'category':
                categories.append(self.categories[category_node.text])
        return categories
    def import_entry(self, title, content, item_node):
        """Importing an entry but some data are missing like
        related entries, start_publication and end_publication.
        start_publication and creation_date will use the same value,
        wich is always in Wordpress $post->post_date"""
        creation_date = datetime.strptime(
            item_node.find('{%s}post_date' % WP_NS).text, '%Y-%m-%d %H:%M:%S')
        if settings.USE_TZ:
            creation_date = timezone.make_aware(creation_date, timezone.utc)
        # The excerpt lives under the WP_NS + 'excerpt/' sub-namespace.
        excerpt = item_node.find('{%sexcerpt/}encoded' % WP_NS).text
        if not excerpt:
            if self.auto_excerpt:
                excerpt = Truncator(strip_tags(content)).words(50)
            else:
                excerpt = ''
        entry_dict = {
            'content': content,
            'excerpt': excerpt,
            # Prefer use this function than
            # item_node.find('{%s}post_name' % WP_NS).text
            # Because slug can be not well formated
            'slug': slugify(title)[:255] or 'post-%s' % item_node.find(
                '{%s}post_id' % WP_NS).text,
            'tags': ', '.join(self.get_entry_tags(item_node.findall(
                'category'))),
            'status': self.REVERSE_STATUS[item_node.find(
                '{%s}status' % WP_NS).text],
            'comment_enabled': item_node.find(
                '{%s}comment_status' % WP_NS).text == 'open',
            'pingback_enabled': item_node.find(
                '{%s}ping_status' % WP_NS).text == 'open',
            'featured': item_node.find('{%s}is_sticky' % WP_NS).text == '1',
            'password': item_node.find('{%s}post_password' % WP_NS).text or '',
            'login_required': item_node.find(
                '{%s}status' % WP_NS).text == 'private',
            'creation_date': creation_date,
            'last_update': timezone.now()}
        entry, created = Entry.objects.get_or_create(
            title=title, defaults=entry_dict)
        entry.categories.add(*self.get_entry_categories(
            item_node.findall('category')))
        entry.authors.add(self.authors[item_node.find(
            '{http://purl.org/dc/elements/1.1/}creator').text])
        entry.sites.add(self.SITE)
        #current_id = item_node.find('{%s}post_id' % WP_NS).text
        #parent_id = item_node.find('%s}post_parent' % WP_NS).text
        return entry
    def find_image_id(self, metadatas):
        """Return the '_thumbnail_id' meta value of an entry (the
        attachment id of its image), or None when absent."""
        for meta in metadatas:
            if meta.find('{%s}meta_key' % WP_NS).text == '_thumbnail_id':
                return meta.find('{%s}meta_value/' % WP_NS).text
    def import_entries(self, items):
        """Loops over items and find entry to import,
        an entry need to have 'post_type' set to 'post' and
        have content."""
        self.write_out(self.style.STEP('- Importing entries\n'))
        for item_node in items:
            title = (item_node.find('title').text or '')[:255]
            post_type = item_node.find('{%s}post_type' % WP_NS).text
            content = item_node.find(
                '{http://purl.org/rss/1.0/modules/content/}encoded').text
            if post_type == 'post' and content and title:
                self.write_out('> %s... ' % title)
                entry = self.import_entry(title, content, item_node)
                self.write_out(self.style.ITEM('OK\n'))
                image_id = self.find_image_id(
                    item_node.findall('{%s}postmeta' % WP_NS))
                if image_id:
                    self.import_image(entry, items, image_id)
                self.import_comments(entry, item_node.findall(
                    '{%s}comment/' % WP_NS))
            else:
                self.write_out('> %s... ' % title, 2)
                self.write_out(self.style.NOTICE('SKIPPED (not a post)\n'), 2)
    def import_image(self, entry, items, image_id):
        """Download the attachment item matching image_id and save
        it as the entry's image file."""
        for item in items:
            post_type = item.find('{%s}post_type' % WP_NS).text
            if post_type == 'attachment' and \
                item.find('{%s}post_id' % WP_NS).text == image_id:
                title = 'Attachment %s' % item.find('title').text
                self.write_out(' > %s... ' % title)
                image_url = item.find('{%s}attachment_url' % WP_NS).text
                img_tmp = NamedTemporaryFile(delete=True)
                img_tmp.write(urlopen(image_url).read())
                img_tmp.flush()
                entry.image.save(os.path.basename(image_url),
                                 File(img_tmp))
                self.write_out(self.style.ITEM('OK\n'))
    def import_comments(self, entry, comment_nodes):
        """Loops over comments nodes and import then
        in django.contrib.comments"""
        for comment_node in comment_nodes:
            is_pingback = comment_node.find(
                '{%s}comment_type' % WP_NS).text == PINGBACK
            is_trackback = comment_node.find(
                '{%s}comment_type' % WP_NS).text == TRACKBACK
            title = 'Comment #%s' % (comment_node.find(
                '{%s}comment_id/' % WP_NS).text)
            self.write_out(' > %s... ' % title)
            content = comment_node.find(
                '{%s}comment_content/' % WP_NS).text
            if not content:
                self.write_out(self.style.NOTICE('SKIPPED (unfilled)\n'))
                # NOTE(review): 'return' (not 'continue') aborts the whole
                # comment loop on the first empty comment — looks
                # unintended; confirm before relying on it.
                return
            submit_date = datetime.strptime(
                comment_node.find('{%s}comment_date' % WP_NS).text,
                '%Y-%m-%d %H:%M:%S')
            if settings.USE_TZ:
                submit_date = timezone.make_aware(submit_date, timezone.utc)
            # comment_approved is '1', '0' or 'spam' in WXR exports;
            # anything but '1' is imported as removed.
            approvation = comment_node.find(
                '{%s}comment_approved' % WP_NS).text
            is_public = True
            is_removed = False
            if approvation != '1':
                is_removed = True
            if approvation == 'spam':
                is_public = False
            comment_dict = {
                'content_object': entry,
                'site': self.SITE,
                'user_name': comment_node.find(
                    '{%s}comment_author/' % WP_NS).text[:50],
                'user_email': comment_node.find(
                    '{%s}comment_author_email/' % WP_NS).text or '',
                'user_url': comment_node.find(
                    '{%s}comment_author_url/' % WP_NS).text or '',
                'comment': content,
                'submit_date': submit_date,
                'ip_address': comment_node.find(
                    '{%s}comment_author_IP/' % WP_NS).text or '',
                'is_public': is_public,
                'is_removed': is_removed, }
            comment = comments.get_model()(**comment_dict)
            comment.save()
            if approvation == 'spam':
                comment.flags.create(
                    user=get_user_flagger(), flag=SPAM)
            if is_pingback:
                comment.flags.create(
                    user=get_user_flagger(), flag=PINGBACK)
            if is_trackback:
                comment.flags.create(
                    user=get_user_flagger(), flag=TRACKBACK)
            self.write_out(self.style.ITEM('OK\n'))
| 41.949029 | 79 | 0.567031 |
905465ddfbeba8d5fee3bebdd5f294159831e0be | 30,660 | py | Python | pattern.py | adivar99/Capstone-Website3.0 | d45af1d9dccf07e41a74fd0f97b1da9496abc884 | [
"CC-BY-3.0"
] | null | null | null | pattern.py | adivar99/Capstone-Website3.0 | d45af1d9dccf07e41a74fd0f97b1da9496abc884 | [
"CC-BY-3.0"
] | null | null | null | pattern.py | adivar99/Capstone-Website3.0 | d45af1d9dccf07e41a74fd0f97b1da9496abc884 | [
"CC-BY-3.0"
] | null | null | null | import tensorflow as tf
import numpy as np
import scipy.io
import argparse
import struct
import errno
import time
import cv2
import os
'''
parsing and configuration
'''
def parse_args():
desc = "TensorFlow implementation of 'A Neural Algorithm for Artistic Style'"
parser = argparse.ArgumentParser(description=desc)
# options for single image
parser.add_argument('--verbose', action='store_true',
help='Boolean flag indicating if statements should be printed to the console.')
parser.add_argument('--img_name', type=str,
default='result',
help='Filename of the output image.')
parser.add_argument('--style_imgs', nargs='+', type=str,
help='Filenames of the style images (example: starry-night.jpg)',
required=True)
parser.add_argument('--style_imgs_weights', nargs='+', type=float,
default=[1.0],
help='Interpolation weights of each of the style images. (example: 0.5 0.5)')
parser.add_argument('--content_img', type=str,
help='Filename of the content image (example: lion.jpg)')
parser.add_argument('--style_imgs_dir', type=str,
default='./styles',
help='Directory path to the style images. (default: %(default)s)')
parser.add_argument('--content_img_dir', type=str,
default='./image_input',
help='Directory path to the content image. (default: %(default)s)')
parser.add_argument('--init_img_type', type=str,
default='content',
choices=['random', 'content', 'style'],
help='Image used to initialize the network. (default: %(default)s)')
parser.add_argument('--max_size', type=int,
default=512,
help='Maximum width or height of the input images. (default: %(default)s)')
parser.add_argument('--content_weight', type=float,
default=5e0,
help='Weight for the content loss function. (default: %(default)s)')
parser.add_argument('--style_weight', type=float,
default=1e4,
help='Weight for the style loss function. (default: %(default)s)')
parser.add_argument('--tv_weight', type=float,
default=1e-3,
help='Weight for the total variational loss function. Set small (e.g. 1e-3). (default: %(default)s)')
parser.add_argument('--temporal_weight', type=float,
default=2e2,
help='Weight for the temporal loss function. (default: %(default)s)')
parser.add_argument('--content_loss_function', type=int,
default=1,
choices=[1, 2, 3],
help='Different constants for the content layer loss function. (default: %(default)s)')
parser.add_argument('--content_layers', nargs='+', type=str,
default=['conv4_2'],
help='VGG19 layers used for the content image. (default: %(default)s)')
parser.add_argument('--style_layers', nargs='+', type=str,
default=['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1'],
help='VGG19 layers used for the style image. (default: %(default)s)')
parser.add_argument('--content_layer_weights', nargs='+', type=float,
default=[1.0],
help='Contributions (weights) of each content layer to loss. (default: %(default)s)')
parser.add_argument('--style_layer_weights', nargs='+', type=float,
default=[0.2, 0.2, 0.2, 0.2, 0.2],
help='Contributions (weights) of each style layer to loss. (default: %(default)s)')
parser.add_argument('--original_colors', action='store_true',
help='Transfer the style but not the colors.')
parser.add_argument('--color_convert_type', type=str,
default='yuv',
choices=['yuv', 'ycrcb', 'luv', 'lab'],
help='Color space for conversion to original colors (default: %(default)s)')
parser.add_argument('--color_convert_time', type=str,
default='after',
choices=['after', 'before'],
help='Time (before or after) to convert to original colors (default: %(default)s)')
parser.add_argument('--style_mask', action='store_true',
help='Transfer the style to masked regions.')
parser.add_argument('--style_mask_imgs', nargs='+', type=str,
default=None,
help='Filenames of the style mask images (example: face_mask.png) (default: %(default)s)')
parser.add_argument('--noise_ratio', type=float,
default=1.0,
help="Interpolation value between the content image and noise image if the network is initialized with 'random'.")
parser.add_argument('--seed', type=int,
default=0,
help='Seed for the random number generator. (default: %(default)s)')
parser.add_argument('--model_weights', type=str,
default='imagenet-vgg-verydeep-19.mat',
help='Weights and biases of the VGG-19 network.')
parser.add_argument('--pooling_type', type=str,
default='avg',
choices=['avg', 'max'],
help='Type of pooling in convolutional neural network. (default: %(default)s)')
parser.add_argument('--device', type=str,
default='/gpu:0',
choices=['/gpu:0', '/cpu:0'],
help='GPU or CPU mode. GPU mode requires NVIDIA CUDA. (default|recommended: %(default)s)')
parser.add_argument('--img_output_dir', type=str,
default='./image_output',
help='Relative or absolute directory path to output image and data.')
# optimizations
parser.add_argument('--optimizer', type=str,
default='lbfgs',
choices=['lbfgs', 'adam'],
help='Loss minimization optimizer. L-BFGS gives better results. Adam uses less memory. (default|recommended: %(default)s)')
parser.add_argument('--learning_rate', type=float,
default=1e0,
help='Learning rate parameter for the Adam optimizer. (default: %(default)s)')
parser.add_argument('--max_iterations', type=int,
default=1000,
help='Max number of iterations for the Adam or L-BFGS optimizer. (default: %(default)s)')
parser.add_argument('--print_iterations', type=int,
default=20,
help='Number of iterations between optimizer print statements. (default: %(default)s)')
# options for video frames
parser.add_argument('--video', action='store_true',
help='Boolean flag indicating if the user is generating a video.')
parser.add_argument('--start_frame', type=int,
default=1,
help='First frame number.')
parser.add_argument('--end_frame', type=int,
default=1,
help='Last frame number.')
parser.add_argument('--first_frame_type', type=str,
choices=['random', 'content', 'style'],
default='content',
help='Image used to initialize the network during the rendering of the first frame.')
parser.add_argument('--init_frame_type', type=str,
choices=['prev_warped', 'prev', 'random', 'content', 'style'],
default='prev_warped',
help='Image used to initialize the network during the every rendering after the first frame.')
parser.add_argument('--video_input_dir', type=str,
default='./video_input',
help='Relative or absolute directory path to input frames.')
parser.add_argument('--video_output_dir', type=str,
default='./video_output',
help='Relative or absolute directory path to output frames.')
parser.add_argument('--content_frame_frmt', type=str,
default='frame_{}.ppm',
help='Filename format of the input content frames.')
parser.add_argument('--backward_optical_flow_frmt', type=str,
default='backward_{}_{}.flo',
help='Filename format of the backward optical flow files.')
parser.add_argument('--forward_optical_flow_frmt', type=str,
default='forward_{}_{}.flo',
help='Filename format of the forward optical flow files')
parser.add_argument('--content_weights_frmt', type=str,
default='reliable_{}_{}.txt',
help='Filename format of the optical flow consistency files.')
parser.add_argument('--prev_frame_indices', nargs='+', type=int,
default=[1],
help='Previous frames to consider for longterm temporal consistency.')
parser.add_argument('--first_frame_iterations', type=int,
default=2000,
help='Maximum number of optimizer iterations of the first frame. (default: %(default)s)')
parser.add_argument('--frame_iterations', type=int,
default=800,
help='Maximum number of optimizer iterations for each frame after the first frame. (default: %(default)s)')
args = parser.parse_args()
# normalize weights
args.style_layer_weights = normalize(args.style_layer_weights)
args.content_layer_weights = normalize(args.content_layer_weights)
args.style_imgs_weights = normalize(args.style_imgs_weights)
# create directories for output
if args.video:
maybe_make_directory(args.video_output_dir)
else:
maybe_make_directory(args.img_output_dir)
return args
'''
pre-trained vgg19 convolutional neural network
remark: layers are manually initialized for clarity.
'''
def build_model(input_img):
  """Construct the VGG-19 graph as a dict keyed by layer name.

  The returned dict maps 'input', 'convX_Y', 'reluX_Y' and 'poolX' to
  their tensors. Convolution kernels and biases are frozen constants
  loaded from the pre-trained .mat file named by args.model_weights.
  """
  if args.verbose: print('\nBUILDING VGG-19 NETWORK')
  net = {}
  _, h, w, d = input_img.shape
  if args.verbose: print('loading model weights...')
  vgg_rawnet = scipy.io.loadmat(args.model_weights)
  vgg_layers = vgg_rawnet['layers'][0]
  if args.verbose: print('constructing layers...')
  net['input'] = tf.Variable(np.zeros((1, h, w, d), dtype=np.float32))
  # Layer plan: (group number, ((layer suffix, index into vgg_layers), ...)).
  # Each conv/relu pair shares one weight index; a pool closes each group.
  plan = (
    (1, (('1_1', 0), ('1_2', 2))),
    (2, (('2_1', 5), ('2_2', 7))),
    (3, (('3_1', 10), ('3_2', 12), ('3_3', 14), ('3_4', 16))),
    (4, (('4_1', 19), ('4_2', 21), ('4_3', 23), ('4_4', 25))),
    (5, (('5_1', 28), ('5_2', 30), ('5_3', 32), ('5_4', 34))),
  )
  prev = 'input'
  for group, convs in plan:
    if args.verbose: print('LAYER GROUP %d' % group)
    for suffix, idx in convs:
      conv_name, relu_name = 'conv' + suffix, 'relu' + suffix
      net[conv_name] = conv_layer(conv_name, net[prev],
                                  W=get_weights(vgg_layers, idx))
      net[relu_name] = relu_layer(relu_name, net[conv_name],
                                  b=get_bias(vgg_layers, idx))
      prev = relu_name
    pool_name = 'pool%d' % group
    net[pool_name] = pool_layer(pool_name, net[prev])
    prev = pool_name
  return net
def conv_layer(layer_name, layer_input, W):
  """Stride-1, SAME-padded 2-D convolution of layer_input with kernel W."""
  out = tf.nn.conv2d(layer_input, W, strides=[1, 1, 1, 1], padding='SAME')
  if args.verbose:
    print('--{} | shape={} | weights_shape={}'.format(
      layer_name, out.get_shape(), W.get_shape()))
  return out
def relu_layer(layer_name, layer_input, b):
  """ReLU activation of layer_input shifted by the bias vector b."""
  out = tf.nn.relu(layer_input + b)
  if args.verbose:
    print('--{} | shape={} | bias_shape={}'.format(
      layer_name, out.get_shape(), b.get_shape()))
  return out
def pool_layer(layer_name, layer_input):
  """2x2, stride-2, SAME-padded pooling; avg or max per args.pooling_type."""
  pool_kwargs = dict(ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
  if args.pooling_type == 'avg':
    pool = tf.nn.avg_pool(layer_input, **pool_kwargs)
  elif args.pooling_type == 'max':
    pool = tf.nn.max_pool(layer_input, **pool_kwargs)
  if args.verbose:
    print('--{} | shape={}'.format(layer_name, pool.get_shape()))
  return pool
def get_weights(vgg_layers, i):
  """Convolution kernel of layer i in the .mat structure, as a tf constant."""
  return tf.constant(vgg_layers[i][0][0][2][0][0])
def get_bias(vgg_layers, i):
  """Bias of layer i, flattened to 1-D and wrapped in a tf constant."""
  raw = vgg_layers[i][0][0][2][0][1]
  return tf.constant(np.reshape(raw, (raw.size)))
'''
'a neural algorithm for artistic style' loss functions
'''
def content_layer_loss(p, x):
  """Squared-error content loss between feature maps p (content) and x.

  The normalization constant depends on args.content_loss_function:
  1 -> 1/(2*sqrt(N)*sqrt(M)), 2 -> 1/(N*M), 3 -> 1/2,
  where M is the spatial area and N the channel depth.
  """
  _, h, w, d = p.get_shape()
  area = h.value * w.value
  depth = d.value
  if args.content_loss_function == 1:
    scale = 1. / (2. * depth**0.5 * area**0.5)
  elif args.content_loss_function == 2:
    scale = 1. / (depth * area)
  elif args.content_loss_function == 3:
    scale = 1. / 2.
  return scale * tf.reduce_sum(tf.pow((x - p), 2))
def style_layer_loss(a, x):
  """Gram-matrix style loss between style activations a and generated x."""
  _, h, w, d = a.get_shape()
  area = h.value * w.value
  depth = d.value
  A = gram_matrix(a, area, depth)
  G = gram_matrix(x, area, depth)
  scale = 1. / (4 * depth**2 * area**2)
  return scale * tf.reduce_sum(tf.pow((G - A), 2))
def gram_matrix(x, area, depth):
  """Gram matrix F^T F of activations flattened to (area, depth)."""
  feats = tf.reshape(x, (area, depth))
  return tf.matmul(tf.transpose(feats), feats)
def mask_style_layer(a, x, mask_img):
  """Restrict style activations to a spatial mask.

  a: style-image activations (already a constant tensor); x: generated
  activations from the same layer; both have static shape (1, h, w, d).
  mask_img is a mask filename resolved by get_mask_image.  Returns the
  element-wise masked (a, x) pair.
  """
  _, h, w, d = a.get_shape()
  # Load the mask at this layer's spatial resolution; values in [0, 1].
  mask = get_mask_image(mask_img, w.value, h.value)
  mask = tf.convert_to_tensor(mask)
  tensors = []
  # Replicate the single-channel mask across all d feature channels.
  for _ in range(d.value):
    tensors.append(mask)
  mask = tf.stack(tensors, axis=2)
  # NOTE(review): stacking an existing (h, w, d) tensor along axis 0
  # re-assembles it from its rows and looks like a no-op kept from the
  # original implementation — confirm before simplifying.
  mask = tf.stack(mask, axis=0)
  # Add the batch axis so the mask broadcasts against (1, h, w, d).
  mask = tf.expand_dims(mask, 0)
  a = tf.multiply(a, mask)
  x = tf.multiply(x, mask)
  return a, x
def sum_masked_style_losses(sess, net, style_imgs):
  """Average per-layer style losses over all style images, restricting each
  style image's influence to its spatial mask (args.style_mask_imgs).

  Each style image is assigned to the network input so its activations can
  be captured (via sess.run) as constants.
  """
  total = 0.
  triples = zip(style_imgs, args.style_imgs_weights, args.style_mask_imgs)
  for img, img_weight, img_mask in triples:
    sess.run(net['input'].assign(img))
    per_image = 0.
    for layer, layer_weight in zip(args.style_layers, args.style_layer_weights):
      target = tf.convert_to_tensor(sess.run(net[layer]))
      masked_a, masked_x = mask_style_layer(target, net[layer], img_mask)
      per_image += style_layer_loss(masked_a, masked_x) * layer_weight
    total += (per_image / float(len(args.style_layers))) * img_weight
  return total / float(len(style_imgs))
def sum_style_losses(sess, net, style_imgs):
  """Average per-layer style losses over all style images.

  Each style image is assigned to the network input so its activations can
  be captured (via sess.run) as constants.
  """
  total = 0.
  for img, img_weight in zip(style_imgs, args.style_imgs_weights):
    sess.run(net['input'].assign(img))
    per_image = 0.
    for layer, layer_weight in zip(args.style_layers, args.style_layer_weights):
      target = tf.convert_to_tensor(sess.run(net[layer]))
      per_image += style_layer_loss(target, net[layer]) * layer_weight
    total += (per_image / float(len(args.style_layers))) * img_weight
  return total / float(len(style_imgs))
def sum_content_losses(sess, net, content_img):
  """Weighted sum of content-layer losses, averaged over the content layers."""
  sess.run(net['input'].assign(content_img))
  total = 0.
  for layer, layer_weight in zip(args.content_layers, args.content_layer_weights):
    target = tf.convert_to_tensor(sess.run(net[layer]))
    total += content_layer_loss(target, net[layer]) * layer_weight
  return total / float(len(args.content_layers))
'''
'artistic style transfer for videos' loss functions
'''
def temporal_loss(x, w, c):
  """Weighted temporal-consistency loss between frames.

  x: current stylized frame as a numpy array (batched, leading axis 1);
  w: previous stylized frame warped into the current frame;
  c: per-pixel consistency weights (h, w, 3) — a batch axis is added below.
  Returns a float32 scalar tensor.
  """
  c = c[np.newaxis,:,:,:]
  D = float(x.size)
  # NOTE(review): tf.nn.l2_loss already reduces to a scalar, so this is
  # effectively sum(c) * l2_loss(x - w) / D rather than a per-pixel
  # weighted sum — confirm this matches the intended formulation.
  loss = (1. / D) * tf.reduce_sum(c * tf.nn.l2_loss(x - w))
  loss = tf.cast(loss, tf.float32)
  return loss
def get_longterm_weights(i, j):
  """Weights for the frame pair (i, i-j), minus the accumulated weights of
  all nearer previous frames, clipped below at zero."""
  nearer_sum = 0.
  for k in range(args.prev_frame_indices):
    # k < j is equivalent to the original test i - k > i - j.
    if k < j:
      nearer_sum += get_content_weights(i, i - k)
  c = get_content_weights(i, i - j)
  return tf.maximum(c - nearer_sum, 0.)
def sum_longterm_temporal_losses(sess, net, frame, input_img):
  """Sum temporal losses of the current frame against several previous
  frames (long-term consistency over args.prev_frame_indices frames)."""
  x = sess.run(net['input'].assign(input_img))
  loss = 0.
  for j in range(args.prev_frame_indices):
    prev_frame = frame - j
    # NOTE(review): get_prev_warped_frame takes the *current* frame index,
    # so every iteration warps the same frame-1 image while only the
    # weights vary with j — confirm whether prev_frame was intended here.
    w = get_prev_warped_frame(frame)
    c = get_longterm_weights(frame, prev_frame)
    loss += temporal_loss(x, w, c)
  return loss
def sum_shortterm_temporal_losses(sess, net, frame, input_img):
  """Temporal loss of the current input against the warped previous frame."""
  x = sess.run(net['input'].assign(input_img))
  warped_prev = get_prev_warped_frame(frame)
  weights = get_content_weights(frame, frame - 1)
  return temporal_loss(x, warped_prev, weights)
'''
utilities and i/o
'''
def read_image(path):
  """Load a BGR image from disk and convert it to network input format."""
  # bgr image
  img = cv2.imread(path, cv2.IMREAD_COLOR)
  check_image(img, path)
  return preprocess(img.astype(np.float32))
def write_image(path, img):
  """Convert a network-format image back to BGR uint8 and save it."""
  cv2.imwrite(path, postprocess(img))
def preprocess(img):
  """Convert a BGR (h, w, 3) float image to a mean-subtracted RGB batch.

  Returns a (1, h, w, 3) array; the caller's array is left untouched.
  The per-channel means are the standard VGG ImageNet means (RGB order).
  """
  rgb = np.copy(img)[..., ::-1]        # bgr to rgb
  batched = rgb[np.newaxis, :, :, :]   # (h, w, d) -> (1, h, w, d)
  batched -= np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))
  return batched
def postprocess(img):
  """Inverse of preprocess: add the channel means back, drop the batch
  axis, clip to [0, 255] uint8, and convert RGB back to BGR."""
  restored = np.copy(img)
  restored += np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))
  # shape (1, h, w, d) to (h, w, d), then clip into displayable range
  restored = np.clip(restored[0], 0, 255).astype('uint8')
  # rgb to bgr
  return restored[..., ::-1]
def read_flow_file(path):
  """Read an optical-flow file.

  Layout: 4-byte magic tag, int32 width, int32 height, then h*w interleaved
  (u, v) float32 pairs in row-major order.

  Returns a float32 array of shape (2, h, w): flow[0] = u, flow[1] = v.
  """
  with open(path, 'rb') as f:
    # 4 bytes header (magic tag, unused)
    header = struct.unpack('4s', f.read(4))[0]
    # 4 bytes width, height
    w = struct.unpack('i', f.read(4))[0]
    h = struct.unpack('i', f.read(4))[0]
    # Bulk-read all interleaved (u, v) pairs at once instead of the
    # original 2*h*w per-value struct.unpack calls.
    data = np.frombuffer(f.read(h * w * 2 * 4), dtype=np.float32)
    flow = np.ascontiguousarray(data.reshape(h, w, 2).transpose(2, 0, 1))
  return flow
def read_weights_file(path):
  """Read a per-pixel temporal-consistency weights file.

  Format: first line is 'w h'; each of the following h lines holds w
  space-separated values.  Values are binarized (< 255 -> 0., else -> 1.)
  and replicated to 3 channels.  Returns a float32 (h, w, 3) array.

  Fixes: the file handle was previously leaked (open(...).readlines()).
  """
  with open(path) as f:
    lines = f.readlines()
  header = list(map(int, lines[0].split(' ')))
  w = header[0]
  h = header[1]
  vals = np.zeros((h, w), dtype=np.float32)
  for i in range(1, len(lines)):
    row = np.array(list(map(np.float32, lines[i].rstrip().split(' '))))
    # Binarize: anything below 255 is treated as "inconsistent" (weight 0).
    vals[i-1] = np.where(row < 255., 0., 1.)
  # expand to 3 channels
  return np.dstack([vals.astype(np.float32)] * 3)
def normalize(weights):
  """Scale weights so they sum to 1; a non-positive sum yields all zeros."""
  total = sum(weights)
  if total > 0.:
    return [float(w) / total for w in weights]
  return [0.] * len(weights)
def maybe_make_directory(dir_path):
  """Create dir_path (including parents) unless the path already exists."""
  already_there = os.path.exists(dir_path)
  if not already_there:
    os.makedirs(dir_path)
def check_image(img, path):
  """Raise a file-not-found OSError when cv2.imread returned None."""
  if img is not None:
    return
  raise OSError(errno.ENOENT, "No such file", path)
'''
rendering -- where the magic happens
'''
def stylize(content_img, style_imgs, init_img, frame=None):
  """Build the VGG graph, assemble the total loss, optimize, write output.

  content_img / style_imgs / init_img are preprocessed batched arrays
  (see preprocess); frame is the 1-based video frame index, or None in
  single-image mode.  Reads many knobs from the module-global `args`.
  Side effect: writes the stylized result to disk via write_video_output
  or write_image_output.
  """
  with tf.device(args.device), tf.compat.v1.Session() as sess:
    # setup network
    net = build_model(content_img)
    # style loss
    if args.style_mask:
      L_style = sum_masked_style_losses(sess, net, style_imgs)
    else:
      L_style = sum_style_losses(sess, net, style_imgs)
    # content loss
    L_content = sum_content_losses(sess, net, content_img)
    # denoising loss
    L_tv = tf.image.total_variation(net['input'])
    # loss weights
    alpha = args.content_weight
    beta = args.style_weight
    theta = args.tv_weight
    # total loss
    L_total = alpha * L_content
    L_total += beta * L_style
    L_total += theta * L_tv
    # video temporal loss (frame 1 has no predecessor to compare against)
    if args.video and frame > 1:
      gamma = args.temporal_weight
      L_temporal = sum_shortterm_temporal_losses(sess, net, frame, init_img)
      L_total += gamma * L_temporal
    # optimization algorithm
    optimizer = get_optimizer(L_total)
    if args.optimizer == 'adam':
      minimize_with_adam(sess, net, optimizer, init_img, L_total)
    elif args.optimizer == 'lbfgs':
      minimize_with_lbfgs(sess, net, optimizer, init_img)
    # The optimized image lives in the network's input variable.
    output_img = sess.run(net['input'])
    if args.original_colors:
      output_img = convert_to_original_colors(np.copy(content_img), output_img)
    if args.video:
      write_video_output(frame, output_img)
    else:
      write_image_output(output_img, content_img, style_imgs, init_img)
def minimize_with_lbfgs(sess, net, optimizer, init_img):
  """Initialize variables, seed the input image, and run the SciPy
  L-BFGS optimizer interface to convergence."""
  if args.verbose:
    print('\nMINIMIZING LOSS USING: L-BFGS OPTIMIZER')
  sess.run(tf.global_variables_initializer())
  sess.run(net['input'].assign(init_img))
  optimizer.minimize(sess)
def minimize_with_adam(sess, net, optimizer, init_img, loss):
  """Run Adam for args.max_iterations steps, optionally logging the loss
  every args.print_iterations steps."""
  if args.verbose:
    print('\nMINIMIZING LOSS USING: ADAM OPTIMIZER')
  # Create the train op before the initializer so Adam's slot variables
  # are covered by global_variables_initializer().
  train_op = optimizer.minimize(loss)
  sess.run(tf.global_variables_initializer())
  sess.run(net['input'].assign(init_img))
  for step in range(args.max_iterations):
    sess.run(train_op)
    if step % args.print_iterations == 0 and args.verbose:
      print("At iterate {}\tf= {}".format(step, loss.eval()))
def get_optimizer(loss):
  """Build the optimizer selected by args.optimizer for the given loss."""
  verbose_every = args.print_iterations if args.verbose else 0
  if args.optimizer == 'lbfgs':
    chosen = tf.contrib.opt.ScipyOptimizerInterface(
      loss, method='L-BFGS-B',
      options={'maxiter': args.max_iterations,
               'disp': verbose_every})
  elif args.optimizer == 'adam':
    chosen = tf.train.AdamOptimizer(args.learning_rate)
  return chosen
def write_video_output(frame, output_img):
  """Write a stylized frame to the video output directory, named per
  args.content_frame_frmt with a zero-padded frame index."""
  filename = args.content_frame_frmt.format(str(frame).zfill(4))
  write_image(os.path.join(args.video_output_dir, filename), output_img)
def write_image_output(output_img, content_img, style_imgs, init_img):
  """Write the stylized result plus its inputs and a meta_data.txt summary
  of the run settings into args.img_output_dir/args.img_name/.

  Fixes: the metadata file is now written inside a `with` block so the
  handle is closed even if a write fails; manual index counters are
  replaced with enumerate.
  """
  out_dir = os.path.join(args.img_output_dir, args.img_name)
  maybe_make_directory(out_dir)
  img_path = os.path.join(out_dir, args.img_name+'.png')
  content_path = os.path.join(out_dir, 'content.png')
  init_path = os.path.join(out_dir, 'init.png')
  write_image(img_path, output_img)
  write_image(content_path, content_img)
  write_image(init_path, init_img)
  for index, style_img in enumerate(style_imgs):
    path = os.path.join(out_dir, 'style_'+str(index)+'.png')
    write_image(path, style_img)
  # save the configuration settings
  out_file = os.path.join(out_dir, 'meta_data.txt')
  with open(out_file, 'w') as f:
    f.write('image_name: {}\n'.format(args.img_name))
    f.write('content: {}\n'.format(args.content_img))
    for index, (style_img, weight) in enumerate(
        zip(args.style_imgs, args.style_imgs_weights)):
      f.write('styles['+str(index)+']: {} * {}\n'.format(weight, style_img))
    if args.style_mask_imgs is not None:
      for index, mask in enumerate(args.style_mask_imgs):
        f.write('style_masks['+str(index)+']: {}\n'.format(mask))
    f.write('init_type: {}\n'.format(args.init_img_type))
    f.write('content_weight: {}\n'.format(args.content_weight))
    f.write('style_weight: {}\n'.format(args.style_weight))
    f.write('tv_weight: {}\n'.format(args.tv_weight))
    f.write('content_layers: {}\n'.format(args.content_layers))
    f.write('style_layers: {}\n'.format(args.style_layers))
    f.write('optimizer_type: {}\n'.format(args.optimizer))
    f.write('max_iterations: {}\n'.format(args.max_iterations))
    f.write('max_image_size: {}\n'.format(args.max_size))
'''
image loading and processing
'''
def get_init_image(init_type, content_img, style_imgs, frame=None):
  """Pick the optimization starting image for the given init_type.

  'content' -> the content image; 'style' -> the first style image;
  'random' -> seeded noise blended with the content image; 'prev' /
  'prev_warped' (video only) -> the previous (optionally flow-warped)
  stylized frame.  Unknown types fall through and yield None.
  """
  if init_type == 'content':
    return content_img
  if init_type == 'style':
    return style_imgs[0]
  if init_type == 'random':
    return get_noise_image(args.noise_ratio, content_img)
  # only for video frames
  if init_type == 'prev':
    return get_prev_frame(frame)
  if init_type == 'prev_warped':
    return get_prev_warped_frame(frame)
def get_content_frame(frame):
  """Load video frame `frame` from the input dir as a preprocessed image."""
  filename = args.content_frame_frmt.format(str(frame).zfill(4))
  return read_image(os.path.join(args.video_input_dir, filename))
def get_content_image(content_img):
  """Load the content image (BGR), downscale so its longer side does not
  exceed args.max_size (aspect ratio preserved), and preprocess it."""
  path = os.path.join(args.content_img_dir, content_img)
  # bgr image
  raw = cv2.imread(path, cv2.IMREAD_COLOR)
  check_image(raw, path)
  raw = raw.astype(np.float32)
  h, w, d = raw.shape
  mx = args.max_size
  # resize if > max size: first cap the height, then the (possibly scaled) width
  if h > w and h > mx:
    w = (float(mx) / float(h)) * w
    raw = cv2.resize(raw, dsize=(int(w), mx), interpolation=cv2.INTER_AREA)
  if w > mx:
    h = (float(mx) / float(w)) * h
    raw = cv2.resize(raw, dsize=(mx, int(h)), interpolation=cv2.INTER_AREA)
  return preprocess(raw)
def get_style_images(content_img):
  """Load every style image (BGR), resize each to the content image's
  spatial size, and preprocess them.  Returns a list of batched arrays."""
  _, ch, cw, cd = content_img.shape
  loaded = []
  for style_fn in args.style_imgs:
    path = os.path.join(args.style_imgs_dir, style_fn)
    # bgr image
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    check_image(img, path)
    resized = cv2.resize(img.astype(np.float32), dsize=(cw, ch),
                         interpolation=cv2.INTER_AREA)
    loaded.append(preprocess(resized))
  return loaded
def get_noise_image(noise_ratio, content_img):
  """Blend uniform noise in [-20, 20) with the content image; seeded via
  args.seed so runs are reproducible."""
  np.random.seed(args.seed)
  noise = np.random.uniform(-20., 20., content_img.shape).astype(np.float32)
  return noise_ratio * noise + (1.-noise_ratio) * content_img
def get_mask_image(mask_img, width, height):
  """Load a grayscale mask, resize it to (width, height), and normalize it
  to [0, 1] by its maximum value."""
  path = os.path.join(args.content_img_dir, mask_img)
  mask = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
  check_image(mask, path)
  mask = cv2.resize(mask, dsize=(width, height), interpolation=cv2.INTER_AREA)
  mask = mask.astype(np.float32)
  return mask / np.amax(mask)
def get_prev_frame(frame):
  """Load the previously stylized frame (frame - 1) from the video output
  directory as a raw BGR image (no preprocessing)."""
  fn = args.content_frame_frmt.format(str(frame - 1).zfill(4))
  path = os.path.join(args.video_output_dir, fn)
  img = cv2.imread(path, cv2.IMREAD_COLOR)
  check_image(img, path)
  return img
def get_prev_warped_frame(frame):
  """Warp the previously stylized frame into the current frame using the
  backward optical flow, then preprocess the result."""
  prev_img = get_prev_frame(frame)
  # backwards flow: current frame -> previous frame
  fn = args.backward_optical_flow_frmt.format(str(frame), str(frame - 1))
  flow = read_flow_file(os.path.join(args.video_input_dir, fn))
  warped = warp_image(prev_img, flow).astype(np.float32)
  return preprocess(warped)
def get_content_weights(frame, prev_frame):
  """Read per-pixel temporal-consistency weight maps for a frame pair.

  Both the forward and backward files are read (so a missing file raises),
  but only the forward weights are currently used.
  """
  forward_fn = args.content_weights_frmt.format(str(prev_frame), str(frame))
  backward_fn = args.content_weights_frmt.format(str(frame), str(prev_frame))
  forward_weights = read_weights_file(
    os.path.join(args.video_input_dir, forward_fn))
  backward_weights = read_weights_file(
    os.path.join(args.video_input_dir, backward_fn))
  return forward_weights #, backward_weights
def warp_image(src, flow):
  """Warp src with a per-pixel flow field via cv2.remap.

  flow has shape (2, h, w): flow[0] = x displacements, flow[1] = y
  displacements.  The absolute sampling coordinates are built with one
  vectorized broadcast instead of the original per-row / per-column
  Python loops (same values, much faster on large frames).
  """
  _, h, w = flow.shape
  flow_map = np.empty(flow.shape, dtype=np.float32)
  flow_map[0] = flow[0] + np.arange(w, dtype=np.float32)[np.newaxis, :]
  flow_map[1] = flow[1] + np.arange(h, dtype=np.float32)[:, np.newaxis]
  # remap pixels to optical flow
  dst = cv2.remap(
    src, flow_map[0], flow_map[1],
    interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_TRANSPARENT)
  return dst
def convert_to_original_colors(content_img, stylized_img):
  """Keep the stylized image's luminance but restore the content image's
  chrominance, in the color space named by args.color_convert_type."""
  content_img = postprocess(content_img)
  stylized_img = postprocess(stylized_img)
  if args.color_convert_type == 'yuv':
    cvt_type, inv_cvt_type = cv2.COLOR_BGR2YUV, cv2.COLOR_YUV2BGR
  elif args.color_convert_type == 'ycrcb':
    cvt_type, inv_cvt_type = cv2.COLOR_BGR2YCR_CB, cv2.COLOR_YCR_CB2BGR
  elif args.color_convert_type == 'luv':
    cvt_type, inv_cvt_type = cv2.COLOR_BGR2LUV, cv2.COLOR_LUV2BGR
  elif args.color_convert_type == 'lab':
    cvt_type, inv_cvt_type = cv2.COLOR_BGR2LAB, cv2.COLOR_LAB2BGR
  content_cvt = cv2.cvtColor(content_img, cvt_type)
  stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
  # Luminance channel from the stylized image...
  c1, _, _ = cv2.split(stylized_cvt)
  # ...chrominance channels from the content image.
  _, c2, c3 = cv2.split(content_cvt)
  merged = cv2.merge((c1, c2, c3))
  dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
  return preprocess(dst)
def render_single_image():
  """Stylize a single content image and report the elapsed wall time."""
  content_img = get_content_image(args.content_img)
  style_imgs = get_style_images(content_img)
  with tf.Graph().as_default():
    print('\n---- RENDERING SINGLE IMAGE ----\n')
    init_img = get_init_image(args.init_img_type, content_img, style_imgs)
    started = time.time()
    stylize(content_img, style_imgs, init_img)
    finished = time.time()
    print('Single image elapsed time: {}'.format(finished - started))
def render_video():
  """Stylize every frame from args.start_frame to args.end_frame.

  Frame 1 uses the first-frame init type and iteration budget; later
  frames use the per-frame settings.  The two previously duplicated
  branches are merged — only the init type and iteration count differ.
  """
  for frame in range(args.start_frame, args.end_frame+1):
    with tf.Graph().as_default():
      print('\n---- RENDERING VIDEO FRAME: {}/{} ----\n'.format(frame, args.end_frame))
      content_frame = get_content_frame(frame)
      style_imgs = get_style_images(content_frame)
      if frame == 1:
        init_type = args.first_frame_type
        args.max_iterations = args.first_frame_iterations
      else:
        init_type = args.init_frame_type
        args.max_iterations = args.frame_iterations
      init_img = get_init_image(init_type, content_frame, style_imgs, frame)
      tick = time.time()
      stylize(content_frame, style_imgs, init_img, frame)
      tock = time.time()
      print('Frame {} elapsed time: {}'.format(frame, tock - tick))
def main():
  """Parse CLI arguments into the module-global `args` and dispatch to
  video or single-image rendering."""
  global args
  args = parse_args()
  if args.video:
    render_video()
  else:
    render_single_image()
if __name__ == '__main__':
  main()
a7c7c66ce8d669fa897fb2c805e2f438895c5f4f | 1,106 | py | Python | cvxpy/reductions/eliminate_pwl/atom_canonicalizers/cummax_canon.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/reductions/eliminate_pwl/atom_canonicalizers/cummax_canon.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/reductions/eliminate_pwl/atom_canonicalizers/cummax_canon.py | QiuWJX/cvxpy | fd1c225b0cdf541618e292cae1a4c7ea25ddc934 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.expressions.variable import Variable
def cummax_canon(expr, args):
    """Canonicalize a cumulative-max atom.

    Introduces an epigraph variable ``Y`` with the implicit O(n) recurrence
    ``Y_k = maximum(Y_{k-1}, X_k)``, encoded as ``X <= Y`` plus a
    monotonicity constraint along the cumulative axis.

    Returns the canonical expression and its constraint list; when the
    cumulative axis has length one, the atom is the identity and ``args[0]``
    is returned with no constraints.
    """
    X = args[0]
    Y = Variable(expr.shape)
    constraints = [X <= Y]
    if expr.axis == 0:
        if expr.shape[0] == 1:
            # A single row: cummax over axis 0 is a no-op.
            return X, []
        # Force Y to be nondecreasing down each column.
        constraints.append(Y[:-1] <= Y[1:])
    else:
        if expr.shape[1] == 1:
            # A single column: cummax over axis 1 is a no-op.
            return X, []
        # Force Y to be nondecreasing along each row.
        constraints.append(Y[:, :-1] <= Y[:, 1:])
    return Y, constraints
| 27.65 | 72 | 0.626582 |
5efbaaabf00e8a7c895503d91145ab8dc620124b | 261,826 | py | Python | ec2_compare/internal/hibernation/true.py | frolovv/aws.ec2.compare | 582805823492f833d65c0441c4a14dce697c12aa | [
"Apache-2.0"
] | null | null | null | ec2_compare/internal/hibernation/true.py | frolovv/aws.ec2.compare | 582805823492f833d65c0441c4a14dce697c12aa | [
"Apache-2.0"
] | null | null | null | ec2_compare/internal/hibernation/true.py | frolovv/aws.ec2.compare | 582805823492f833d65c0441c4a14dce697c12aa | [
"Apache-2.0"
] | null | null | null |
# Automatically generated
# pylint: disable=all
get = [{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 3840, 'TotalSizeInGB': 4, 'Disks': [{'SizeInGB': 4, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm3.medium', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 3840}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 4, 'Disks': [{'SizeInGB': 4, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 
'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 1024, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.micro', 'CurrentGeneration': True, 'FreeTierEligible': True, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 1024}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 
'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 512, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.nano', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 512}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 
'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 2048, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.small', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 
'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 3840, 'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 16, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.large', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 3840}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 16, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 
'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 3840, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 
'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 3840}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 
2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'TotalSizeInGB': 50, 'Disks': [{'SizeInGB': 50, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 50, 'Disks': [{'SizeInGB': 50, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15616, 'TotalSizeInGB': 475, 'Disks': [{'SizeInGB': 475, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 425, 'BaselineThroughputInMBps': 53.125, 'BaselineIops': 3000, 'MaximumBandwidthInMbps': 425, 'MaximumThroughputInMBps': 53.125, 'MaximumIops': 3000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'i3.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15616}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 475, 'Disks': [{'SizeInGB': 475, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 425, 'BaselineThroughputInMBps': 53.125, 'BaselineIops': 3000, 'MaximumBandwidthInMbps': 425, 'MaximumThroughputInMBps': 53.125, 'MaximumIops': 3000}, 'NvmeSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1], 'SizeInMiB': 7680, 'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 32, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm3.large', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 7680}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 32, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 
'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 450, 'BaselineThroughputInMBps': 56.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 450, 'MaximumThroughputInMBps': 56.25, 'MaximumIops': 3600}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm4.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 450, 'BaselineThroughputInMBps': 56.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 450, 'MaximumThroughputInMBps': 56.25, 'MaximumIops': 3600}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 
'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': 
{'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 
'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 
'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 
'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 
'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15360, 'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 32, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r3.large', 'CurrentGeneration': False, 'FreeTierEligible': False, 
'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 32, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15616, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 425, 'BaselineThroughputInMBps': 53.125, 'BaselineIops': 3000, 'MaximumBandwidthInMbps': 425, 'MaximumThroughputInMBps': 53.125, 'MaximumIops': 3000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r4.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15616}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 425, 'BaselineThroughputInMBps': 53.125, 'BaselineIops': 3000, 'MaximumBandwidthInMbps': 425, 'MaximumThroughputInMBps': 53.125, 'MaximumIops': 3000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': 
[{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5a.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 
'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 
'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5ad.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 
2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5d.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': 
{'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 
'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 347, 'BaselineThroughputInMBps': 43.375, 'BaselineIops': 2000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 
'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 347, 'BaselineThroughputInMBps': 43.375, 'BaselineIops': 2000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 1024, 'EbsOptimizedSupport': 
'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 87, 'BaselineThroughputInMBps': 10.875, 'BaselineIops': 500, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.micro', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 1024}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 87, 'BaselineThroughputInMBps': 10.875, 'BaselineIops': 500, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 
'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 512, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 43, 'BaselineThroughputInMBps': 5.375, 'BaselineIops': 250, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.nano', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 512}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 43, 'BaselineThroughputInMBps': 5.375, 'BaselineIops': 250, 
'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 2048, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 174, 'BaselineThroughputInMBps': 21.75, 'BaselineIops': 1000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.small', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': 
False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 174, 'BaselineThroughputInMBps': 21.75, 'BaselineIops': 1000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 
'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 350, 'BaselineThroughputInMBps': 43.75, 'BaselineIops': 2000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 350, 'BaselineThroughputInMBps': 43.75, 'BaselineIops': 2000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 
'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 1024, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 90, 'BaselineThroughputInMBps': 11.25, 'BaselineIops': 500, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.micro', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 1024}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': 
{'BaselineBandwidthInMbps': 90, 'BaselineThroughputInMBps': 11.25, 'BaselineIops': 500, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 512, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 45, 'BaselineThroughputInMBps': 5.625, 'BaselineIops': 250, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.nano', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 
'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 512}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 45, 'BaselineThroughputInMBps': 5.625, 'BaselineIops': 250, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 2048, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 175, 'BaselineThroughputInMBps': 21.875, 'BaselineIops': 1000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.small', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 175, 'BaselineThroughputInMBps': 21.875, 'BaselineIops': 1000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 7680, 'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 7680}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 
'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 7680, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 750, 'BaselineThroughputInMBps': 93.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 750, 'MaximumThroughputInMBps': 93.75, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 
'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 7680}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 750, 'BaselineThroughputInMBps': 93.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 750, 'MaximumThroughputInMBps': 93.75, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 
'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'TotalSizeInGB': 100, 'Disks': [{'SizeInGB': 100, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 100, 'Disks': [{'SizeInGB': 100, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 31232, 'TotalSizeInGB': 950, 'Disks': [{'SizeInGB': 950, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 850, 'BaselineThroughputInMBps': 106.25, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 850, 'MaximumThroughputInMBps': 106.25, 'MaximumIops': 6000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'i3.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 31232}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 950, 'Disks': [{'SizeInGB': 950, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 850, 'BaselineThroughputInMBps': 106.25, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 850, 'MaximumThroughputInMBps': 106.25, 'MaximumIops': 6000}, 'NvmeSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1], 'SizeInMiB': 15360, 'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 
'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm3.xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': 
{'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 750, 'BaselineThroughputInMBps': 93.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 750, 'MaximumThroughputInMBps': 93.75, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm4.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 750, 'BaselineThroughputInMBps': 93.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 750, 'MaximumThroughputInMBps': 93.75, 
'MaximumIops': 6000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 
Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 
'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 31232, 'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 80, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 
'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r3.xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 31232}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 80, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 
'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 31232, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 850, 'BaselineThroughputInMBps': 106.25, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 850, 'MaximumThroughputInMBps': 106.25, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r4.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 31232}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 850, 'BaselineThroughputInMBps': 106.25, 'BaselineIops': 6000, 
'MaximumBandwidthInMbps': 850, 'MaximumThroughputInMBps': 106.25, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 
'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 
'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5a.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5ad.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 
'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5d.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 
'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 
'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 
347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 
'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15360, 'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.2xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15360, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 
'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 8, 
'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 10000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 10000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 200, 'Disks': [{'SizeInGB': 200, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 10000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 
'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 200, 'Disks': [{'SizeInGB': 200, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 10000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 62464, 'TotalSizeInGB': 1900, 'Disks': [{'SizeInGB': 1900, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1700, 'BaselineThroughputInMBps': 212.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 1700, 'MaximumThroughputInMBps': 212.5, 'MaximumIops': 12000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 
'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'i3.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 62464}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1900, 'Disks': [{'SizeInGB': 1900, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1700, 'BaselineThroughputInMBps': 212.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 1700, 'MaximumThroughputInMBps': 212.5, 'MaximumIops': 12000}, 'NvmeSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': 
False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 30720, 'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm3.2xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 30720}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 
'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm4.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 
'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': 
['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 
0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 
'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 
'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 62464, 'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 160, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r3.2xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 62464}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 160, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 
'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 62464, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1700, 'BaselineThroughputInMBps': 212.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 1700, 'MaximumThroughputInMBps': 212.5, 'MaximumIops': 12000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r4.2xlarge', 'CurrentGeneration': True, 
'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 62464}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1700, 'BaselineThroughputInMBps': 212.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 1700, 'MaximumThroughputInMBps': 212.5, 'MaximumIops': 12000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 
'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': 
True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5a.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5ad.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 
18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5d.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': 
['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': 
{'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 
347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 30720, 'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 160, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 
0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.4xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 30720}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 160, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 
'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 30720, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 30720}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': 
{'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 
'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 400, 'Disks': [{'SizeInGB': 400, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 400, 'Disks': [{'SizeInGB': 400, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 
'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 124928, 'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 3500, 'BaselineThroughputInMBps': 437.5, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 3500, 'MaximumThroughputInMBps': 437.5, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'i3.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 124928}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 
3500, 'BaselineThroughputInMBps': 437.5, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 3500, 'MaximumThroughputInMBps': 437.5, 'MaximumIops': 16000}, 'NvmeSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm4.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 
'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 
'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 
'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 
'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': 
{'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 124928, 'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 320, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r3.4xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 124928}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 320, 'Count': 1, 'Type': 
'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 124928, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 3500, 'BaselineThroughputInMBps': 437.5, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 3500, 'MaximumThroughputInMBps': 437.5, 'MaximumIops': 18750}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 
'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r4.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 124928}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 3500, 'BaselineThroughputInMBps': 437.5, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 3500, 'MaximumThroughputInMBps': 437.5, 'MaximumIops': 18750}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': 
{'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 
'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5a.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 
'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5ad.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': 
False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 
'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5d.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 
Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 61440, 'TotalSizeInGB': 640, 'Disks': [{'SizeInGB': 320, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.8xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 61440}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': 
{'TotalSizeInGB': 640, 'Disks': [{'SizeInGB': 320, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 6800, 'BaselineThroughputInMBps': 850.0, 'BaselineIops': 30000, 'MaximumBandwidthInMbps': 6800, 'MaximumThroughputInMBps': 850.0, 'MaximumIops': 30000}, 'NvmeSupport': 'required', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.8xlarge', 
'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 6800, 'BaselineThroughputInMBps': 850.0, 'BaselineIops': 30000, 'MaximumBandwidthInMbps': 6800, 'MaximumThroughputInMBps': 850.0, 'MaximumIops': 30000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 
'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 
'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': 
{'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 6800, 'BaselineThroughputInMBps': 850.0, 'BaselineIops': 30000, 'MaximumBandwidthInMbps': 6800, 'MaximumThroughputInMBps': 850.0, 'MaximumIops': 30000}, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 
'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 6800, 'BaselineThroughputInMBps': 850.0, 'BaselineIops': 30000, 'MaximumBandwidthInMbps': 6800, 'MaximumThroughputInMBps': 850.0, 'MaximumIops': 30000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 
6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 61440, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4000, 'BaselineThroughputInMBps': 500.0, 'BaselineIops': 32000, 'MaximumBandwidthInMbps': 4000, 'MaximumThroughputInMBps': 500.0, 'MaximumIops': 32000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 61440}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4000, 'BaselineThroughputInMBps': 500.0, 'BaselineIops': 32000, 'MaximumBandwidthInMbps': 4000, 'MaximumThroughputInMBps': 500.0, 'MaximumIops': 32000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 
'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 73728, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.9xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 73728}, 
'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 73728, 'TotalSizeInGB': 900, 'Disks': [{'SizeInGB': 900, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 
'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.9xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 73728}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 900, 'Disks': [{'SizeInGB': 900, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 48, 'DefaultCores': 24, 
'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required', 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.12xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.12xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 48, 
'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 147456, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required', 'NetworkPerformance': '25 Gigabit', 
'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.18xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 147456}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': 
True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 147456, 'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.18xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 147456}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 
'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}] # noqa: E501
def get_instances_list() -> list:
    '''Return the module-level EC2 instance-type description list.

    NOTE(review): the original docstring said this returns "EC2 instances
    with HibernationSupported = True", but no filtering happens here --
    the data is returned as-is; confirm the intended contract.
    '''
    # pylint: disable=all
    # 'get' is presumably the huge module-level list literal defined above
    # (its assignment is outside this view) -- TODO confirm the name.
    return get
| 21,818.833333 | 261,626 | 0.742707 |
be7bfbc1b7685ebe2d5ddca7578ca1e74140d960 | 1,056 | py | Python | setup.py | futzu/SCTE35-threefive | 102ee9326d24f93130dc6fd3abf8faa90651d3df | [
"MIT"
] | 30 | 2019-12-24T01:14:40.000Z | 2021-03-17T09:45:44.000Z | setup.py | futzu/SCTE35-threefive | 102ee9326d24f93130dc6fd3abf8faa90651d3df | [
"MIT"
] | 11 | 2020-02-02T06:02:24.000Z | 2021-03-16T05:59:57.000Z | setup.py | futzu/SCTE35-threefive | 102ee9326d24f93130dc6fd3abf8faa90651d3df | [
"MIT"
] | 11 | 2020-01-30T00:54:21.000Z | 2021-01-11T06:36:06.000Z | #!/usr/bin/env python3
import setuptools

import threefive

# Long description for PyPI is taken verbatim from the README.
# The encoding is pinned so the build does not depend on the platform's
# default locale (without it, reading UTF-8 markdown can fail on systems
# whose default encoding is not UTF-8, e.g. some Windows setups).
with open("README.md", "r", encoding="utf-8") as fh:
    readme = fh.read()

setuptools.setup(
    name="threefive",
    # Single-source the version from the package itself.
    version=threefive.version(),
    author="Adrian Thiele, Vlad Doster, James Fining, Richard Van Dijk",
    author_email="spam@so.slo.me",
    description="Pythonic SCTE-35.",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/futzu/threefive",
    install_requires=[
        "crcmod",
        "pyaes",
    ],
    packages=setuptools.find_packages(),
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    python_requires=">=3.6",
)
| 30.171429 | 72 | 0.629735 |
ddf314fbf487c95c697c011827ba62e3e4215e0e | 8,044 | py | Python | tournament.py | OliviaSalonga/P2resubmit | 1f891a1caab21770c8ae48068c52508bb1325026 | [
"FSFAP"
] | null | null | null | tournament.py | OliviaSalonga/P2resubmit | 1f891a1caab21770c8ae48068c52508bb1325026 | [
"FSFAP"
] | null | null | null | tournament.py | OliviaSalonga/P2resubmit | 1f891a1caab21770c8ae48068c52508bb1325026 | [
"FSFAP"
] | null | null | null | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
# Module-wide connection and cursor shared by every function in this file.
# NOTE(review): the connection is opened at import time (a side effect) and
# is only closed by an explicit closeConnect() call; there is no reconnect
# or error handling if the connect fails -- confirm callers expect this.
conn = psycopg2.connect("dbname='tournament'")
cur = conn.cursor()
def commit():
    """Make all changes since the last commit permanent in the database."""
    conn.commit()
def closeConnect():
    """Release database resources: close the shared cursor, then the connection."""
    cur.close()
    conn.close()
def deleteMatches(tournament_id):
    """Delete match records from the database.

    Args:
      tournament_id: ID of the tournament whose matches are removed.
        Passing None wipes every row in matchResults instead.
    """
    if tournament_id is not None:
        # Scope the delete to the one tournament being processed.
        cur.execute("DELETE FROM matchResults WHERE tournament_id = %s",
                    (tournament_id,))
    else:
        cur.execute("DELETE FROM matchResults")
def deletePlayersInTournament(tournament_id):
    """Delete player-enrollment rows from the database.

    Args:
      tournament_id: ID of the tournament whose enrollments are removed.
        Passing None wipes every row in playersInTournament instead.
    """
    if tournament_id is not None:
        # Scope the delete to the one tournament being processed.
        cur.execute("DELETE FROM playersInTournament WHERE tournament_id = %s",
                    (tournament_id,))
    else:
        cur.execute("DELETE FROM playersInTournament")
def deleteAllPlayers():
    """Wipe every row from the players table."""
    cur.execute("DELETE FROM players")
def deleteAllTournaments():
    """Wipe every row from the tournaments table."""
    cur.execute("DELETE FROM tournaments")
def countPlayersInTournament(tournament_id):
    """Return how many players are currently registered in the tournament.

    Args:
      tournament_id: the tournament's ID currently being processed.
    """
    cur.execute(
        "SELECT count(*) FROM playersInTournament WHERE tournament_id = %s",
        (tournament_id,))
    # count(*) always yields exactly one row; its first column is the count.
    return cur.fetchone()[0]
def registerPlayer(tournament_id, playerName):
    """Add a player to the database (if new) and enroll them in a tournament.

    The database assigns each player a unique serial id (handled by the SQL
    schema, not by this code).  A player may take part in several tournaments
    but is enrolled in a given tournament at most once: a brand-new player
    cannot already be enrolled, and an existing player is only enrolled when
    no playersInTournament row exists yet.  The playersInTournament table
    also enforces this with a unique constraint.

    Args:
      tournament_id: the tournament's ID currently being processed.
      playerName: the player's full name (need not be unique).
    """
    player_id = getPlayerId(playerName)
    if player_id is None:
        # New player: create the row, then re-read the generated id.
        # A just-created player cannot already be enrolled, so no
        # duplicate check is needed on this path.
        cur.execute("INSERT INTO players (name) VALUES (%s)", (playerName,))
        player_id = getPlayerId(playerName)
    elif countPlayersInTournmentForPlayer(tournament_id, player_id) != 0:
        # Existing player already enrolled in this tournament: nothing to do.
        return
    # Single enrollment INSERT shared by both remaining paths (previously
    # the same statement was duplicated in two branches).
    cur.execute("INSERT INTO playersInTournament (tournament_id, player_id) VALUES (%s,%s)",
                (tournament_id, player_id,))
def countPlayersInTournmentForPlayer(tournament_id, player_id):
    """Return how many registration rows exist for this (tournament, player)
    pair (0 or 1 given the table's unique constraint).

    NOTE: the function name keeps its historical "Tournment" spelling because
    callers reference it by this name.
    """
    cur.execute("SELECT count(*) FROM playersInTournament WHERE tournament_id = %s AND player_id = %s",
                (tournament_id, player_id,))
    row = cur.fetchone()
    return row[0]
def getTournamentsOfPlayer(player_id):
    """Return all playersInTournament rows for the given player."""
    cur.execute("SELECT * FROM playersInTournament WHERE player_id = %s",
                (player_id,))
    return cur.fetchall()
def getPlayerId(name):
    """Finds the player ID given the name.

    Args:
        name: the player's name.

    Returns:
        The row returned by fetchone() (a one-element tuple containing the
        player_id), or None when no player with that name exists.
    """
    # The previous version also ran an unused "SELECT * FROM players" query
    # and initialized player_id to a dead value; both removed.
    cur.execute("SELECT player_id FROM players WHERE name = %s", (name,))
    return cur.fetchone()
def playerStandings(tournament_id):
    """Returns a list of the players in the tournament and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a player
    tied for first place if there is currently a tie.

    Args:
        tournament_id: the tournament's ID currently being processed

    Returns:
        A list of tuples, each of which contains (id, name, wins, matches):
          id: the player's unique id (assigned by the database)
          name: the player's full name (as registered)
          wins: the number of matches the player has won
          matches: the number of matches the player has played
    """
    # Implicit string concatenation builds the exact same SQL text the old
    # tuple + ''.join() version produced.
    query = ('SELECT a.player_id, name, count(b.*) AS wins, count(c.*) AS matches '
             'FROM players x, playersInTournament a '
             'LEFT JOIN matchResults b '
             'ON a.player_id = b.winner '
             'AND a.tournament_id = b.tournament_id '
             'LEFT JOIN matchResults c '
             'ON (a.player_id = c.playerOne OR a.player_id = c.playerTwo) '
             'AND a.tournament_id = c.tournament_id '
             'WHERE a.tournament_id = %s '
             'AND x.player_id = a.player_id '
             'GROUP BY a.player_id, name '
             'ORDER BY wins DESC')
    cur.execute(query, (tournament_id,))
    return cur.fetchall()
def reportMatch(tournament_id, winner, loser):
    """Records the outcome of a single match between two players in the tournament.

    Args:
        tournament_id: the tournament's ID currently being processed
        winner: the id number of the player who won
        loser: the id number of the player who lost
    """
    row = (tournament_id, winner, loser, winner,)
    cur.execute("INSERT INTO matchResults (tournament_id, playerOne, playerTwo, winner) VALUES (%s,%s,%s,%s) RETURNING match_id",
                row)
def swissPairings(tournament_id):
    """Returns a list of pairs of players for the next round of a match in the tournament.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings. Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Args:
        tournament_id: the tournament's ID currently being processed

    Returns:
        A list of lists, each of which contains [id1, name1, id2, name2]
          id1: the first player's unique id
          name1: the first player's name
          id2: the second player's unique id
          name2: the second player's name
    """
    standings = playerStandings(tournament_id)
    # Floor division: ``/ 2`` produced a float under Python 3, which made
    # range() raise TypeError; ``// 2`` is correct on both Python 2 and 3.
    nbrOfPairs = len(standings) // 2
    pairings = [[] for _ in range(nbrOfPairs)]
    # Consecutive standings rows form a pair: rows 0,1 -> pair 0; 2,3 -> pair 1...
    for idx, standing in enumerate(standings):
        pairings[idx // 2].append(standing[0])
        pairings[idx // 2].append(standing[1])
    return pairings
def registerTournament(name):
    """Adds a tournament to the tournament database.

    The database assigns a unique serial id number for the tournament. (This
    should be handled by your SQL database schema, not in your Python code.)
    Tournament names must be unique; this method and the tournaments table's
    unique constraint both enforce that.

    Args:
        name: the tournament name

    Returns:
        True when the tournament was created, False when the name exists.
    """
    cur.execute("SELECT count(*) FROM tournaments WHERE tournamentName = %s", (name,))
    existing = cur.fetchone()[0]
    if existing > 0:
        return False
    cur.execute("INSERT INTO tournaments (tournamentName) VALUES (%s)", (name,))
    return True
def getTournamentID(name):
    """Finds the tournament ID given the tournament name.

    Args:
        name: the tournament name

    Returns:
        The row returned by fetchone() (or None when no tournament matches).
    """
    cur.execute("SELECT tournament_id FROM tournaments WHERE tournamentName = %s",
                (name,))
    return cur.fetchone()
| 34.229787 | 169 | 0.70189 |
5bdfaee05979027e8cfee9c4eeb9c2e68e3db07b | 4,789 | py | Python | fastai/torch_imports.py | LukeMathWalker/fastai | a7b9dc3afd39c31a415d475905e14abd90b5b895 | [
"Apache-2.0"
] | 7 | 2018-10-23T23:43:15.000Z | 2021-12-25T01:08:09.000Z | fastai/torch_imports.py | LukeMathWalker/fastai | a7b9dc3afd39c31a415d475905e14abd90b5b895 | [
"Apache-2.0"
] | 8 | 2021-03-18T20:46:24.000Z | 2022-03-11T23:26:30.000Z | fastai/torch_imports.py | LukeMathWalker/fastai | a7b9dc3afd39c31a415d475905e14abd90b5b895 | [
"Apache-2.0"
] | 2 | 2019-01-13T16:32:55.000Z | 2020-07-02T17:42:05.000Z | import os
from distutils.version import LooseVersion
import torch, torchvision, torchtext
from torch import nn, cuda, backends, FloatTensor, LongTensor, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, TensorDataset
from torch.nn.init import kaiming_uniform, kaiming_normal
from torchvision.transforms import Compose
from torchvision.models import resnet18, resnet34, resnet50, resnet101, resnet152
from torchvision.models import vgg16_bn, vgg19_bn
from torchvision.models import densenet121, densenet161, densenet169, densenet201
from .models.resnext_50_32x4d import resnext_50_32x4d
from .models.resnext_101_32x4d import resnext_101_32x4d
from .models.resnext_101_64x4d import resnext_101_64x4d
from .models.wrn_50_2f import wrn_50_2f
from .models.inceptionresnetv2 import InceptionResnetV2
from .models.inceptionv4 import inceptionv4
from .models.nasnet import nasnetalarge
from .models.fa_resnet import *
import warnings
# Silence PyTorch's "Implicit dimension choice" softmax/log_softmax
# deprecation warning, which would otherwise flood the console.
warnings.filterwarnings('ignore', message='Implicit dimension choice', category=UserWarning)

# True when running under PyTorch 0.4 or newer, where the in-place init
# functions were renamed with a trailing underscore.
IS_TORCH_04 = LooseVersion(torch.__version__) >= LooseVersion('0.4')
if IS_TORCH_04:
    # Re-export the 0.4+ names under the pre-0.4 names used elsewhere.
    from torch.nn.init import kaiming_uniform_ as kaiming_uniform
    from torch.nn.init import kaiming_normal_ as kaiming_normal
def children(m):
    """Return ``m`` unchanged if it is already a list/tuple, else the list of its child modules."""
    if isinstance(m, (list, tuple)):
        return m
    return list(m.children())
def save_model(m, p):
    """Serialize the model's state dict to path ``p``."""
    torch.save(m.state_dict(), p)
def load_model(m, p):
    """Load the state dict at path ``p`` into model ``m``.

    Keys present in the checkpoint but expected by the model under a
    ``<key>_raw`` name are renamed on the fly (presumably for weight-norm
    compatibility with older checkpoints).
    """
    state = torch.load(p, map_location=lambda storage, loc: storage)
    expected = set(m.state_dict().keys())
    # list() snapshots the keys because the dict is mutated while iterating.
    for key in list(state):
        raw_key = key + '_raw'
        if key not in expected and raw_key in expected:
            if raw_key not in state:
                state[raw_key] = state[key]
            del state[key]
    m.load_state_dict(state)
def load_pre(pre, f, fn):
    """Build a model via factory ``f``; if ``pre``, load the bundled weights ``fn``."""
    module_dir = os.path.dirname(__file__)
    model = f()
    if pre:
        load_model(model, f'{module_dir}/weights/{fn}.pth')
    return model
def _fastai_model(name, paper_title, paper_href):
def add_docs_wrapper(f):
f.__doc__ = f"""{name} model from
`"{paper_title}" <{paper_href}>`_
Args:
pre (bool): If True, returns a model pre-trained on ImageNet
"""
return f
return add_docs_wrapper
@_fastai_model('Inception 4', 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning',
               'https://arxiv.org/pdf/1602.07261.pdf')
def inception_4(pre):
    return children(inceptionv4(pretrained=pre))[0]


@_fastai_model('Inception 4', 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning',
               'https://arxiv.org/pdf/1602.07261.pdf')
def inceptionresnet_2(pre):
    return load_pre(pre, InceptionResnetV2, 'inceptionresnetv2-d579a627')


@_fastai_model('ResNeXt 50', 'Aggregated Residual Transformations for Deep Neural Networks',
               'https://arxiv.org/abs/1611.05431')
def resnext50(pre):
    return load_pre(pre, resnext_50_32x4d, 'resnext_50_32x4d')


@_fastai_model('ResNeXt 101_32', 'Aggregated Residual Transformations for Deep Neural Networks',
               'https://arxiv.org/abs/1611.05431')
def resnext101(pre):
    return load_pre(pre, resnext_101_32x4d, 'resnext_101_32x4d')


@_fastai_model('ResNeXt 101_64', 'Aggregated Residual Transformations for Deep Neural Networks',
               'https://arxiv.org/abs/1611.05431')
def resnext101_64(pre):
    return load_pre(pre, resnext_101_64x4d, 'resnext_101_64x4d')


@_fastai_model('Wide Residual Networks', 'Wide Residual Networks',
               'https://arxiv.org/pdf/1605.07146.pdf')
def wrn(pre):
    return load_pre(pre, wrn_50_2f, 'wrn_50_2f')
@_fastai_model('Densenet-121', 'Densely Connected Convolutional Networks',
               'https://arxiv.org/pdf/1608.06993.pdf')
def dn121(pre): return children(densenet121(pre))[0]

# NOTE: the display names for dn161/dn169 were previously swapped
# ('Densenet-169' on dn161 and 'Densenet-161' on dn169); fixed so the
# generated docstrings match the wrapped torchvision models.
@_fastai_model('Densenet-161', 'Densely Connected Convolutional Networks',
               'https://arxiv.org/pdf/1608.06993.pdf')
def dn161(pre): return children(densenet161(pre))[0]

@_fastai_model('Densenet-169', 'Densely Connected Convolutional Networks',
               'https://arxiv.org/pdf/1608.06993.pdf')
def dn169(pre): return children(densenet169(pre))[0]

@_fastai_model('Densenet-201', 'Densely Connected Convolutional Networks',
               'https://arxiv.org/pdf/1608.06993.pdf')
def dn201(pre): return children(densenet201(pre))[0]
@_fastai_model('Vgg-16 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',
               'https://arxiv.org/pdf/1409.1556.pdf')
def vgg16(pre):
    return children(vgg16_bn(pre))[0]


@_fastai_model('Vgg-19 with batch norm added', 'Very Deep Convolutional Networks for Large-Scale Image Recognition',
               'https://arxiv.org/pdf/1409.1556.pdf')
def vgg19(pre):
    return children(vgg19_bn(pre))[0]
| 44.757009 | 116 | 0.732094 |
490da6206559c3ef9f8e13a59ee314ca6c7a7de1 | 4,485 | py | Python | poloniex_apis/trading_api.py | xJuggl3r/anapolo | 5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b | [
"MIT"
] | 93 | 2016-08-21T01:17:35.000Z | 2020-11-22T07:11:00.000Z | poloniex_apis/trading_api.py | jestevez/PoloniexAnalyzer | 13fa16eb45b10cc1d052aad03cd4dccc3c4f8b3e | [
"MIT"
] | 37 | 2016-08-20T20:26:28.000Z | 2021-01-22T18:36:28.000Z | poloniex_apis/trading_api.py | jestevez/PoloniexAnalyzer | 13fa16eb45b10cc1d052aad03cd4dccc3c4f8b3e | [
"MIT"
] | 30 | 2017-02-10T01:43:13.000Z | 2018-11-05T06:31:11.000Z | """
Poloniex's Trading API. Not all trading API methods are implemented here, and
more will probably not be added unless they are actually needed. For these API
methods to work, an API key and secret must be configured. Not all methods need
the "Trading Enabled" option on their API key.
"""
import hashlib
import hmac
import json
import time
from urllib.error import HTTPError
from urllib.request import urlopen
from urllib.request import Request
import sys
import dev_utils
import settings
from .api_key_secret_util import get_api_key, get_api_secret
# Endpoint for all signed (private) Poloniex trading API calls.
api_url = "https://poloniex.com/tradingApi"
class InvalidKeySecretError(Exception):
    """Raised when Poloniex rejects the configured API key/secret pair."""
class TradingApiError(Exception):
    """Raised for any other error reported by the Trading API."""
def return_complete_balances():
    """Return all balances for the account (returnCompleteBalances)."""
    return _call_trading_api(_build_body(command="returnCompleteBalances"))
def return_deposits_withdrawals():
    """Return the full deposit/withdrawal history (from epoch 0 to now)."""
    body = _build_body(
        command="returnDepositsWithdrawals",
        parameters={'start': '0', 'end': time.time()},
    )
    return _call_trading_api(body)
def return_trade_history():
    """Return the full trade history for all currency pairs.

    When ``settings.MOCK_API_RESPONSE`` is set, a canned response is read
    from disk instead of hitting the live API.
    """
    if settings.MOCK_API_RESPONSE:
        return dev_utils.file_to_dict("return_trade_history.txt")
    body = _build_body(
        command="returnTradeHistory",
        parameters={'currencyPair': 'all', 'start': '0', 'end': time.time()},
    )
    return _call_trading_api(body)
def return_lending_history(start, end):
    """Return lending history between the two timestamps.

    Args:
        start: start time
        end: end time

    Returns: json of lending history between designated times
    """
    body = _build_body(
        command="returnLendingHistory",
        parameters={'start': start, 'end': end},
    )
    return _call_trading_api(body)
def _sign_header(post_body):
    """Return the hex HMAC-SHA512 signature of the POST body, keyed by the API secret."""
    mac = hmac.new(get_api_secret(), bytes(post_body, encoding='utf-8'), hashlib.sha512)
    return mac.hexdigest()
def _call_trading_api(post_body):
    """
    Calls the Poloniex Trading API.

    The Poloniex trading API requires two headers with the api key, and a
    POST body signed with the secret.

    :param post_body: (str) POST parameters
    :return: (dict) Response
    :raises: InvalidKeySecretError
    :raises: TradingApiError
    :raises: HTTPError for HTTP failures other than 422/403
    """
    request = Request(api_url)
    request.add_header("Key", get_api_key())
    request.add_header("Sign", _sign_header(post_body))
    request.data = bytes(post_body, encoding='utf-8')
    try:
        response = urlopen(request)
    except HTTPError as err:
        if err.code == 422:
            print("HTTP Error 422. Use a new API key/secret. From the Poloniex API doc:\n"
                  "          Additionally, all queries must include a 'nonce' POST parameter.\n"
                  "          The nonce parameter is an integer which must always be greater \n"
                  "          than the previous nonce used.\n\n"
                  "If you have used another script or the api directly, the nonce value\n"
                  "is persistent may be greater than what this script is setting. This \n"
                  "script uses the Epoch time to determine the nonce.")
            sys.exit(0)
        if err.code == 403:
            print("HTTP Error 403. Are your api keys correct?")
            sys.exit(0)
        # Bug fix: any other HTTP error used to be swallowed here, after which
        # the code below hit an UnboundLocalError on ``response``. Re-raise so
        # the caller sees the real failure.
        raise
    decoded_response = response.read().decode('utf8')
    response_dict = json.loads(decoded_response)
    if "error" in response_dict:
        if response_dict["error"] == "Invalid API key/secret pair.":
            raise InvalidKeySecretError
        else:
            raise TradingApiError(response_dict["error"])
    return response_dict
def _build_body(command, parameters=None):
"""
Builds the body for the trading api. Api methods are specified by the
'command' POST parameter. Additionally, each query must have the 'nonce'
POST parameter which requires a greater int than the previous call.
:type parameters: (dict) Extra parameters
:param command: (str) API method
:return: (str) POST body
"""
body = "command={}".format(command)
nonce_int = int(time.time() * 100)
body += "&nonce={}".format(nonce_int)
if parameters is not None:
for key, value in parameters.items():
body += "&{}={}".format(key, value)
return body
| 28.935484 | 92 | 0.65641 |
130699c75acd9867f7957a1a3003bdb3a670bc44 | 2,422 | py | Python | Smith-Waterman/linear_graph.py | ivanpmartell/bioinfo_algos | 60b8e1187bf3129c96a9f832df7b0bbb3871a370 | [
"MIT"
] | null | null | null | Smith-Waterman/linear_graph.py | ivanpmartell/bioinfo_algos | 60b8e1187bf3129c96a9f832df7b0bbb3871a370 | [
"MIT"
] | null | null | null | Smith-Waterman/linear_graph.py | ivanpmartell/bioinfo_algos | 60b8e1187bf3129c96a9f832df7b0bbb3871a370 | [
"MIT"
] | null | null | null | from operator import itemgetter
import numpy as np #ONLY FOR 2D LIST PRETTY PRINT
from helper import maxes, backtrack, Cell
d = 2  # linear gap penalty subtracted for an insertion/deletion step
seq_a = 'ATCAGAGTC'  # first input sequence (matrix rows)
seq_b = 'TTCAGTC'  # second input sequence (matrix columns)
def main():
    """Build the scoring matrix, print it, then print every best alignment."""
    score_matrix, best_cells = make_matrix()
    print(np.array(score_matrix))
    best_alignments = backtrack(score_matrix, backtrack_formulas, best_cells)
    print('Best Alignments')
    for alignment in best_alignments:
        print(alignment)
    return 0
def make_matrix():
    """Fill the Smith-Waterman scoring matrix and collect the best-scoring cells."""
    # Pad both sequences with a leading space so row/column 0 represent the
    # empty prefix.
    x = list(' ' + seq_a)
    y = list(' ' + seq_b)
    rows, cols = (len(x), len(y))
    matrix = [[Cell(0) for j in range(cols)] for i in range(rows)]
    matrix[0][0].position = (0, 0)
    # First row/column score 0 and point back to the origin cell.
    # NOTE(review): Cell appears to be callable, recording (value, position)
    # and returning itself — defined in the helper module; confirm there.
    for i in range(1, rows):
        matrix[i][0](0, (i, 0)).parents = [matrix[0][0]]
    for j in range(1, cols):
        matrix[0][j](0, (0, j)).parents = [matrix[0][0]]
    max_cells = [matrix[0][0]]
    for i in range(1, rows):
        for j in range(1, cols):
            # Candidate predecessors: diagonal (match/mismatch), up and left
            # (gap of penalty d), plus the fixed 0 of local alignment.
            cells = {0: matrix[i-1][j-1],
                     1: matrix[i-1][j],
                     2: matrix[i][j-1],
                     3: None}
            values = [cells[0] + s(x[i],y[j]),
                      cells[1] - d,
                      cells[2] - d,
                      0]
            # maxes (helper module) returns the max value and ALL indices
            # achieving it, so ties keep every parent.
            result, indices = maxes(range(len(values)), key=values.__getitem__)
            parents = itemgetter(*indices)(cells)
            # itemgetter with a single index returns a scalar; normalize to a tuple.
            if(not type(parents) == tuple):
                parents = tuple([parents])
            matrix[i][j](result, (i, j)).parents = parents
            # Track every cell sharing the global maximum score.
            if(matrix[i][j].value > max_cells[0].value):
                max_cells = [matrix[i][j]]
            elif(matrix[i][j].value == max_cells[0].value):
                max_cells.append(matrix[i][j])
    return matrix, max_cells
def backtrack_formulas(alignment, cell, parent):
    """Extend the partial alignment strings by one step, from ``cell`` back to ``parent``."""
    top, bottom = alignment.strings[0], alignment.strings[1]
    cell_row, cell_col = cell.position
    par_row, par_col = parent.position
    if par_row < cell_row and par_col < cell_col:
        # Diagonal move: consume one character from each sequence.
        top = seq_a[par_row] + top
        bottom = seq_b[par_col] + bottom
    elif par_row < cell_row:
        # Vertical move: gap in seq_b.
        top = seq_a[par_row] + top
        bottom = '-' + bottom
    elif par_col < cell_col:
        # Horizontal move: gap in seq_a.
        top = '-' + top
        bottom = seq_b[par_col] + bottom
    else:
        raise Exception
    return top, bottom
def s(x_i, y_i):
    """Match/mismatch score: +2 for identical characters, -1 otherwise."""
    return 2 if x_i == y_i else -1
# Run the demo alignment only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 29.901235 | 88 | 0.53014 |
1e0a7ee8c4a8a671f087a7b49e26952717af49e5 | 41 | py | Python | pydiablo/error.py | youbetterdont/pydiablo | 406994e62c4f48fb3d788d458a7b8efb5a465b65 | [
"MIT"
] | 5 | 2019-03-18T06:15:51.000Z | 2021-02-26T22:58:08.000Z | pydiablo/error.py | youbetterdont/pydiablo | 406994e62c4f48fb3d788d458a7b8efb5a465b65 | [
"MIT"
] | null | null | null | pydiablo/error.py | youbetterdont/pydiablo | 406994e62c4f48fb3d788d458a7b8efb5a465b65 | [
"MIT"
] | 1 | 2020-06-16T06:47:09.000Z | 2020-06-16T06:47:09.000Z | class PydiabloError(Exception):
pass
| 13.666667 | 31 | 0.756098 |
758f9024af02d593c80ab1889b9464de2e3f6543 | 1,819 | py | Python | digitalocean/komand_digitalocean/actions/convert_image_to_snapshot/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2020-03-18T09:14:55.000Z | 2020-03-18T09:14:55.000Z | digitalocean/komand_digitalocean/actions/convert_image_to_snapshot/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | digitalocean/komand_digitalocean/actions/convert_image_to_snapshot/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | import komand
import json
import requests
from .schema import ConvertImageToSnapshotInput, ConvertImageToSnapshotOutput
class ConvertImageToSnapshot(komand.Action):
    """Komand action that converts a DigitalOcean image to a snapshot."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name='convert_image_to_snapshot',
            description='Converts an image to a snapshot',
            input=ConvertImageToSnapshotInput(),
            output=ConvertImageToSnapshotOutput())

    def run(self, params={}):
        """Trigger the conversion; returns {'success': True} on HTTP 201.

        :raises: Exception for non-201 responses or request failures.
        """
        url = "https://api.digitalocean.com/v2/images/{image_id}/actions"
        payload = {"type": "convert"}
        try:
            response = requests.post(headers=self.connection.headers,
                                     url=url.format(image_id=params['image_id']),
                                     data=json.dumps(payload))
            if response.status_code == 201:
                return {'success': True}
            else:
                self.logger.error("Status code: %s, message: %s", response.status_code, response.json()["message"])
                # Bug fix: the Exception was previously constructed but never
                # raised, so failures were silently ignored.
                raise Exception('Non-201 status code received')
        except requests.exceptions.RequestException:
            self.logger.error("An unexpected error occurred during the API request")
            raise

    def test(self):
        """Connectivity test: verify the account endpoint returns HTTP 200."""
        url = "https://api.digitalocean.com/v2/account"
        try:
            response = requests.get(headers=self.connection.headers, url=url)
            if response.status_code == 200:
                return {}
            else:
                self.logger.error("Status code: %s, message: %s", response.status_code, response.json()["message"])
                # Bug fix: the Exception was previously constructed but never raised.
                raise Exception("Non-200 status code received")
        except requests.exceptions.RequestException:
            self.logger.error("An unexpected error occurred during the API request")
            # Bug fix: previously swallowed, making the connection test pass
            # (return None) even when the request failed.
            raise
25c1eea64bcef16a80dd2c94c9697d5b2f07f10d | 2,836 | py | Python | bokeh/application/handlers/document_lifecycle.py | RiccardoGiro/bokeh | 7e1bfeea4f3d7a6296aabfeec96e79e1f5a28467 | [
"BSD-3-Clause"
] | 1 | 2021-10-30T00:32:00.000Z | 2021-10-30T00:32:00.000Z | bokeh/application/handlers/document_lifecycle.py | Deng-Fankang/bokeh | 894731860c53b7c9ddd0057dee85cf064278dc0e | [
"BSD-3-Clause"
] | 12 | 2020-08-26T20:19:29.000Z | 2020-08-26T20:19:52.000Z | bokeh/application/handlers/document_lifecycle.py | Deng-Fankang/bokeh | 894731860c53b7c9ddd0057dee85cf064278dc0e | [
"BSD-3-Clause"
] | 2 | 2021-01-12T18:22:24.000Z | 2021-10-30T00:32:02.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Bokeh Application Handler to execute on_session_destroyed callbacks defined
on the Document.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .lifecycle import LifecycleHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'DocumentLifecycleHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class DocumentLifecycleHandler(LifecycleHandler):
    ''' Calls on_session_destroyed callbacks defined on the Document.

    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Install the module-level hook so the lifecycle machinery runs the
        # Document's on_session_destroyed callbacks when a session ends.
        self._on_session_destroyed = _on_session_destroyed
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _on_session_destroyed(session_context):
'''
Calls any on_session_destroyed callbacks defined on the Document
'''
callbacks = session_context._document.session_destroyed_callbacks
session_context._document.session_destroyed_callbacks = set()
for callback in callbacks:
try:
callback(session_context)
except Exception as e:
log.warning('DocumentLifeCycleHandler on_session_destroyed '
'callback %s failed with following error: %s'
% (callback, e))
if callbacks:
# If any session callbacks were defined garbage collect after deleting all references
del callback
del callbacks
import gc
gc.collect()
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 36.358974 | 93 | 0.385049 |
b61d31a3772c0e1e7a9d2d84249633ece3562c92 | 4,224 | py | Python | graphgallery/gallery/nodeclas/tensorflow/appnp.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | graphgallery/gallery/nodeclas/tensorflow/appnp.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | graphgallery/gallery/nodeclas/tensorflow/appnp.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | [
"MIT"
] | null | null | null | from graphgallery.sequence import FullBatchSequence
from graphgallery import functional as gf
from graphgallery.gallery.nodeclas import TensorFlow
from graphgallery.gallery import Trainer
from graphgallery.nn.models import get_model
@TensorFlow.register()
class APPNP(Trainer):
    """Approximated personalized propagation of neural predictions (APPNP).

    `Predict then Propagate: Graph Neural Networks meet Personalized
    PageRank" <https://arxiv.org/abs/1810.05997>`
    Tensorflow 1.x implementation: <https://github.com/klicperajo/ppnp>
    Pytorch implementation: <https://github.com/klicperajo/ppnp>
    """

    def data_step(self,
                  adj_transform="normalize_adj",
                  attr_transform=None):
        """Transform the graph and cache the feature/adjacency tensors."""
        graph = self.graph
        adj = gf.get(adj_transform)(graph.adj_matrix)
        attrs = gf.get(attr_transform)(graph.node_attr)
        X, A = gf.astensors(attrs, adj, device=self.data_device)
        # Cache ``X`` and ``A`` so model_step/train_loader can reuse them.
        self.register_cache(X=X, A=A)

    def model_step(self,
                   hids=[64],
                   acts=['relu'],
                   alpha=0.1,
                   K=10,
                   ppr_dropout=0.2,
                   dropout=0.5,
                   weight_decay=5e-4,
                   lr=0.01,
                   bias=True,
                   use_tfn=True):
        """Build the APPNP model with approximated propagation enabled."""
        model_cls = get_model("APPNP", self.backend)
        return model_cls(self.graph.num_node_attrs,
                         self.graph.num_node_classes,
                         hids=hids,
                         acts=acts,
                         alpha=alpha,
                         K=K,
                         ppr_dropout=ppr_dropout,
                         dropout=dropout,
                         weight_decay=weight_decay,
                         lr=lr,
                         bias=bias,
                         approximated=True)

    def train_loader(self, index):
        """Full-batch sequence over the cached tensors for ``index`` nodes."""
        y = self.graph.node_label[index]
        return FullBatchSequence(x=[self.cache.X, self.cache.A],
                                 y=y,
                                 out_index=index,
                                 device=self.data_device)
@TensorFlow.register()
class PPNP(Trainer):
    """Exact personalized propagation of neural predictions (PPNP).

    `Predict then Propagate: Graph Neural Networks meet Personalized
    PageRank" <https://arxiv.org/abs/1810.05997>`
    Tensorflow 1.x implementation: <https://github.com/klicperajo/ppnp>
    Pytorch implementation: <https://github.com/klicperajo/ppnp>
    """

    def data_step(self,
                  adj_transform="PPR",
                  attr_transform=None):
        """Transform the graph (dense PPR adjacency) and cache the tensors."""
        graph = self.graph
        adj = gf.get(adj_transform)(graph.adj_matrix)
        attrs = gf.get(attr_transform)(graph.node_attr)
        X, A = gf.astensors(attrs, adj, device=self.data_device)
        # Cache ``X`` and ``A`` so model_step/train_loader can reuse them.
        self.register_cache(X=X, A=A)

    def model_step(self,
                   hids=[64],
                   acts=['relu'],
                   ppr_dropout=0.,
                   dropout=0.5,
                   weight_decay=5e-4,
                   lr=0.01,
                   bias=True):
        """Build the same APPNP model class, but with exact propagation."""
        model_cls = get_model("APPNP", self.backend)
        return model_cls(self.graph.num_node_attrs,
                         self.graph.num_node_classes,
                         hids=hids,
                         acts=acts,
                         ppr_dropout=ppr_dropout,
                         dropout=dropout,
                         weight_decay=weight_decay,
                         lr=lr,
                         bias=bias,
                         approximated=False)

    def train_loader(self, index):
        """Full-batch sequence over the cached tensors for ``index`` nodes."""
        y = self.graph.node_label[index]
        return FullBatchSequence(x=[self.cache.X, self.cache.A],
                                 y=y,
                                 out_index=index,
                                 device=self.data_device)
| 34.341463 | 75 | 0.525331 |
b79536f04ff8a37eed5a075c3f8749cb92bd51e2 | 1,142 | py | Python | cairis/gui/ModelMenuFactory.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | cairis/gui/ModelMenuFactory.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | cairis/gui/ModelMenuFactory.py | RachelLar/cairis_update | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gtk
def build(modelType):
if (modelType == 'goal'):
return ['And Goal','Or Goal','Goal','Sub Goal','And Requirement','Sub Requirement','Assign Responsibility']
elif (modelType == 'obstacle'):
return ['And Obstacle','Or Obstacle','','','','','']
elif (modelType == 'class'):
return ['Associate']
else:
return []
| 38.066667 | 111 | 0.715412 |
356968eb67a8e449c5973fcaae53479afed6e293 | 12,736 | py | Python | addons/io_scene_gltf2/blender/imp/gltf2_blender_node.py | xissburg/glTF-Blender-IO | b35e27dcbc0e01fd1cca59255dbf1754eec55eb4 | [
"Apache-2.0"
] | 25 | 2018-10-12T08:45:45.000Z | 2021-02-27T15:29:56.000Z | addons/io_scene_gltf2/blender/imp/gltf2_blender_node.py | xissburg/glTF-Blender-IO | b35e27dcbc0e01fd1cca59255dbf1754eec55eb4 | [
"Apache-2.0"
] | 1 | 2019-06-03T20:16:21.000Z | 2019-06-03T20:44:41.000Z | addons/io_scene_gltf2/blender/imp/gltf2_blender_node.py | Svrf/glTF-Blender-IO | c539fcedabdc5c5b77d7919a39f352ac330f6985 | [
"Apache-2.0"
] | 4 | 2019-02-03T12:33:55.000Z | 2021-01-04T11:19:25.000Z | # Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from .gltf2_blender_mesh import BlenderMesh
from .gltf2_blender_camera import BlenderCamera
from .gltf2_blender_skin import BlenderSkin
from .gltf2_blender_light import BlenderLight
from ..com.gltf2_blender_conversion import scale_to_matrix, matrix_gltf_to_blender, correction_rotation
class BlenderNode():
"""Blender Node."""
    def __new__(cls, *args, **kwargs):
        # This class is a namespace of static helpers; forbid instantiation.
        raise RuntimeError("%s should not be instantiated" % cls)
    @staticmethod
    def create(gltf, node_idx, parent):
        """Create the Blender object/bone/camera/light for glTF node ``node_idx``.

        Dispatches on the node's content (mesh, camera, joint, light
        extension, or plain empty), links the result into the scene, applies
        transforms, parents it, and recurses into child nodes.
        """
        pynode = gltf.data.nodes[node_idx]

        # Blender attributes initialization
        pynode.blender_object = ""
        pynode.parent = parent

        gltf.display_current_node += 1
        # debug_value 101 enables verbose per-node progress logging.
        if bpy.app.debug_value == 101:
            gltf.log.critical("Node " + str(gltf.display_current_node) + " of " + str(gltf.display_total_nodes) + " (idx " + str(node_idx) + ")")

        if pynode.mesh is not None:

            instance = False
            if gltf.data.meshes[pynode.mesh].blender_name is not None:
                # Mesh is already created, only create instance
                # Except if current node is animated with path weight
                # or if a previous instance is animated at node level
                if pynode.weight_animation is True:
                    instance = False
                else:
                    if gltf.data.meshes[pynode.mesh].is_weight_animated is True:
                        instance = False
                    else:
                        instance = True
                        mesh = bpy.data.meshes[gltf.data.meshes[pynode.mesh].blender_name]

            if instance is False:
                if pynode.name:
                    gltf.log.info("Blender create Mesh node " + pynode.name)
                else:
                    gltf.log.info("Blender create Mesh node")

                mesh = BlenderMesh.create(gltf, pynode.mesh, node_idx, parent)

            if pynode.weight_animation is True:
                # flag this mesh instance as created only for this node, because of weight animation
                gltf.data.meshes[pynode.mesh].is_weight_animated = True

            # Object name: node name, else mesh name, else a generated fallback.
            if pynode.name:
                name = pynode.name
            else:
                # Take mesh name if it exists
                if gltf.data.meshes[pynode.mesh].name:
                    name = gltf.data.meshes[pynode.mesh].name
                else:
                    name = "Object_" + str(node_idx)

            obj = bpy.data.objects.new(name, mesh)
            obj.rotation_mode = 'QUATERNION'
            # Scene linking differs between Blender 2.7x and 2.80+ (collections).
            if bpy.app.version < (2, 80, 0):
                bpy.data.scenes[gltf.blender_scene].objects.link(obj)
            else:
                if gltf.blender_active_collection is not None:
                    bpy.data.collections[gltf.blender_active_collection].objects.link(obj)
                else:
                    bpy.data.scenes[gltf.blender_scene].collection.objects.link(obj)

            # Transforms apply only if this mesh is not skinned
            # See implementation note of gltf2 specification
            if not (pynode.mesh is not None and pynode.skin is not None):
                BlenderNode.set_transforms(gltf, node_idx, pynode, obj, parent)
            pynode.blender_object = obj.name
            BlenderNode.set_parent(gltf, obj, parent)

            if instance == False:
                BlenderMesh.set_mesh(gltf, gltf.data.meshes[pynode.mesh], mesh, obj)

            if pynode.children:
                for child_idx in pynode.children:
                    BlenderNode.create(gltf, child_idx, node_idx)

            return

        if pynode.camera is not None:
            if pynode.name:
                gltf.log.info("Blender create Camera node " + pynode.name)
            else:
                gltf.log.info("Blender create Camera node")
            obj = BlenderCamera.create(gltf, pynode.camera)
            BlenderNode.set_transforms(gltf, node_idx, pynode, obj, parent)  # TODO default rotation of cameras ?
            pynode.blender_object = obj.name
            BlenderNode.set_parent(gltf, obj, parent)

            if pynode.children:
                for child_idx in pynode.children:
                    BlenderNode.create(gltf, child_idx, node_idx)

            return

        if pynode.is_joint:
            if pynode.name:
                gltf.log.info("Blender create Bone node " + pynode.name)
            else:
                gltf.log.info("Blender create Bone node")
            # Check if corresponding armature is already created, create it if needed
            if gltf.data.skins[pynode.skin_id].blender_armature_name is None:
                BlenderSkin.create_armature(gltf, pynode.skin_id, parent)

            BlenderSkin.create_bone(gltf, pynode.skin_id, node_idx, parent)

            if pynode.children:
                for child_idx in pynode.children:
                    BlenderNode.create(gltf, child_idx, node_idx)

            return

        if pynode.extensions is not None:
            if 'KHR_lights_punctual' in pynode.extensions.keys():
                obj = BlenderLight.create(gltf, pynode.extensions['KHR_lights_punctual']['light'])
                obj.rotation_mode = 'QUATERNION'
                # Lights need an axis-correction rotation applied at export/animation time.
                BlenderNode.set_transforms(gltf, node_idx, pynode, obj, parent, correction=True)
                pynode.blender_object = obj.name
                pynode.correction_needed = True
                BlenderNode.set_parent(gltf, obj, parent)

                if pynode.children:
                    for child_idx in pynode.children:
                        BlenderNode.create(gltf, child_idx, node_idx)

                return

        # No mesh, no camera, no light. For now, create empty #TODO
        if pynode.name:
            gltf.log.info("Blender create Empty node " + pynode.name)
            obj = bpy.data.objects.new(pynode.name, None)
        else:
            gltf.log.info("Blender create Empty node")
            obj = bpy.data.objects.new("Node", None)
        obj.rotation_mode = 'QUATERNION'
        # Same version-dependent scene linking as for mesh objects above.
        if bpy.app.version < (2, 80, 0):
            bpy.data.scenes[gltf.blender_scene].objects.link(obj)
        else:
            if gltf.blender_active_collection is not None:
                bpy.data.collections[gltf.blender_active_collection].objects.link(obj)
            else:
                bpy.data.scenes[gltf.blender_scene].collection.objects.link(obj)
        BlenderNode.set_transforms(gltf, node_idx, pynode, obj, parent)
        pynode.blender_object = obj.name
        BlenderNode.set_parent(gltf, obj, parent)

        if pynode.children:
            for child_idx in pynode.children:
                BlenderNode.create(gltf, child_idx, node_idx)
    @staticmethod
    def set_parent(gltf, obj, parent):
        """Parent *obj* to the node with index *parent*.

        Two cases are handled:
          * the parent node is a joint -> bone-relative parenting to the
            bone inside the corresponding armature, followed by a
            world-to-local transform fixup;
          * the parent node is a plain object -> simple object parenting.

        Logs an error if *parent* does not match any node index.
        """
        if parent is None:
            return
        for node_idx, node in enumerate(gltf.data.nodes):
            if node_idx == parent:
                if node.is_joint is True:
                    # --- bone parenting: select armature, activate the
                    # target bone in EDIT mode, then parent BONE_RELATIVE.
                    # Selection/activation APIs differ before/after 2.80.
                    bpy.ops.object.select_all(action='DESELECT')
                    if bpy.app.version < (2, 80, 0):
                        bpy.data.objects[node.blender_armature_name].select = True
                        bpy.context.scene.objects.active = bpy.data.objects[node.blender_armature_name]
                    else:
                        bpy.data.objects[node.blender_armature_name].select_set(True)
                        bpy.context.view_layer.objects.active = bpy.data.objects[node.blender_armature_name]
                    # Make the target bone the active edit bone so that
                    # parent_set(BONE_RELATIVE) below attaches to it.
                    bpy.ops.object.mode_set(mode='EDIT')
                    bpy.data.objects[node.blender_armature_name].data.edit_bones.active = \
                        bpy.data.objects[node.blender_armature_name].data.edit_bones[node.blender_bone_name]
                    bpy.ops.object.mode_set(mode='OBJECT')
                    # Re-select both child object and armature; the active
                    # object (armature) becomes the parent.
                    bpy.ops.object.select_all(action='DESELECT')
                    if bpy.app.version < (2, 80, 0):
                        obj.select = True
                        bpy.data.objects[node.blender_armature_name].select = True
                        bpy.context.scene.objects.active = bpy.data.objects[node.blender_armature_name]
                        bpy.context.scene.update()
                    else:
                        obj.select_set(True)
                        bpy.data.objects[node.blender_armature_name].select_set(True)
                        bpy.context.view_layer.objects.active = bpy.data.objects[node.blender_armature_name]
                        bpy.context.view_layer.update()
                    bpy.ops.object.parent_set(type='BONE_RELATIVE', keep_transform=True)
                    # From world transform to local (-armature transform -bone transform)
                    bone_trans = bpy.data.objects[node.blender_armature_name] \
                        .pose.bones[node.blender_bone_name].matrix.to_translation().copy()
                    bone_rot = bpy.data.objects[node.blender_armature_name] \
                        .pose.bones[node.blender_bone_name].matrix.to_quaternion().copy()
                    bone_scale_mat = scale_to_matrix(node.blender_bone_matrix.to_scale())
                    # Matrix multiplication operator changed from '*' to '@'
                    # in Blender 2.80; the math is identical in both paths.
                    if bpy.app.version < (2, 80, 0):
                        obj.location = bone_scale_mat * obj.location
                        obj.location = bone_rot * obj.location
                        obj.location += bone_trans
                        obj.location = bpy.data.objects[node.blender_armature_name].matrix_world.to_quaternion() \
                            * obj.location
                        obj.rotation_quaternion = obj.rotation_quaternion \
                            * bpy.data.objects[node.blender_armature_name].matrix_world.to_quaternion()
                        obj.scale = bone_scale_mat * obj.scale
                    else:
                        obj.location = bone_scale_mat @ obj.location
                        obj.location = bone_rot @ obj.location
                        obj.location += bone_trans
                        obj.location = bpy.data.objects[node.blender_armature_name].matrix_world.to_quaternion() \
                            @ obj.location
                        obj.rotation_quaternion = obj.rotation_quaternion \
                            @ bpy.data.objects[node.blender_armature_name].matrix_world.to_quaternion()
                        obj.scale = bone_scale_mat @ obj.scale
                    return
                if node.blender_object:
                    # Plain object parent.
                    obj.parent = bpy.data.objects[node.blender_object]
                    return
        gltf.log.error("ERROR, parent not found")
@staticmethod
def set_transforms(gltf, node_idx, pynode, obj, parent, correction=False):
"""Set transforms."""
if parent is None:
obj.matrix_world = matrix_gltf_to_blender(pynode.transform)
if correction is True:
if bpy.app.version < (2, 80, 0):
obj.matrix_world = obj.matrix_world * correction_rotation()
else:
obj.matrix_world = obj.matrix_world @ correction_rotation()
return
for idx, node in enumerate(gltf.data.nodes):
if idx == parent:
if node.is_joint is True:
obj.matrix_world = matrix_gltf_to_blender(pynode.transform)
if correction is True:
if bpy.app.version < (2, 80, 0):
obj.matrix_world = obj.matrix_world * correction_rotation()
else:
obj.matrix_world = obj.matrix_world @ correction_rotation()
return
else:
if correction is True:
if bpy.app.version < (2, 80, 0):
obj.matrix_world = obj.matrix_world * correction_rotation()
else:
obj.matrix_world = obj.matrix_world @ correction_rotation()
obj.matrix_world = matrix_gltf_to_blender(pynode.transform)
return
| 46.823529 | 145 | 0.575298 |
01ab8114e6649e81b5f9a5fd9010cf3adf62d08d | 389 | py | Python | codice/wsgi.py | lnds/codice | b2edad6bd5f1fdc42a8335b265e131af33081934 | [
"MIT"
] | 1 | 2021-04-22T15:25:15.000Z | 2021-04-22T15:25:15.000Z | codice/wsgi.py | lnds/codice | b2edad6bd5f1fdc42a8335b265e131af33081934 | [
"MIT"
] | 3 | 2021-04-22T14:22:43.000Z | 2021-07-20T14:11:22.000Z | codice/wsgi.py | lnds/codice | b2edad6bd5f1fdc42a8335b265e131af33081934 | [
"MIT"
] | null | null | null | """
WSGI config for codice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project's settings module unless the environment
# already provides DJANGO_SETTINGS_MODULE.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'codice.settings')
# Module-level WSGI callable that WSGI servers discover by name.
application = get_wsgi_application()
| 22.882353 | 78 | 0.784062 |
33c077392c75bc22020c013b506e7a7bdc05e8bd | 1,137 | py | Python | src/events/view.py | KenyC/Shajara | ec5ab94e92cf154aef63ac2278fddff901f6328c | [
"MIT"
] | null | null | null | src/events/view.py | KenyC/Shajara | ec5ab94e92cf154aef63ac2278fddff901f6328c | [
"MIT"
] | null | null | null | src/events/view.py | KenyC/Shajara | ec5ab94e92cf154aef63ac2278fddff901f6328c | [
"MIT"
] | null | null | null | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
import utils
import cst
class ViewMouseManager:
    """Routes mouse events on the tree graphics view to tree edits.

    A click close enough to an existing node either selects it, sprouts
    a child under it, or deletes it, depending on the mouse buttons and
    keyboard modifiers (policies live in ``cst.behavior["view"]``).
    """

    # Maximum distance at which a click is attributed to a node.
    sprout_dist = cst.style["nodes"]["sensitivity"]

    def __init__(self, view, synchronizer):
        self.view = view
        self.synchronizer = synchronizer
        self.tree = synchronizer.tree
        # Install ourselves as the view's mouse-event handler.
        self.view.treat_event = self.effect

    def effect(self, event):
        """Translate a raw Qt event into scene coordinates and dispatch."""
        scene_point = self.view.mapToScene(event.pos())
        self.add_remove_node(scene_point, event.buttons())

    def add_remove_node(self, scene_pos, buttons):
        """Select, sprout under, or delete the first node near the click."""
        click = (scene_pos.x(), scene_pos.y())
        actions = cst.behavior["view"]
        for index, node_pos in enumerate(self.tree.positions):
            if utils.dist(click, node_pos) >= ViewMouseManager.sprout_dist:
                continue
            modifiers = QApplication.keyboardModifiers()
            if actions["select_node"](buttons, modifiers):
                self.synchronizer.select_node(index)
            elif actions["add_node"](buttons, modifiers):
                self.tree.sprout(index)
                self.synchronizer.update_tree()
            elif actions["remove_node"](buttons, modifiers):
                self.tree.delete(index)
                self.synchronizer.update_tree()
            # Only the first node within reach is considered.
            return
| 26.44186 | 83 | 0.702726 |
09a2dbf3c576a99471716c153aa7921cd24330cc | 879 | py | Python | portalsite/portal/models/scoring.py | AntonSamojlow/authenticity-portal | d3a38096c9c6aaf688ed9783464396be3ebbe673 | [
"MIT"
] | null | null | null | portalsite/portal/models/scoring.py | AntonSamojlow/authenticity-portal | d3a38096c9c6aaf688ed9783464396be3ebbe673 | [
"MIT"
] | 2 | 2021-11-06T14:31:24.000Z | 2021-11-06T17:10:58.000Z | portalsite/portal/models/scoring.py | AntonSamojlow/authenticity-portal | d3a38096c9c6aaf688ed9783464396be3ebbe673 | [
"MIT"
] | null | null | null | """Data base model: Scoring"""
# region imports
# standard
from typing import TYPE_CHECKING
from django.db import models
from django.urls import reverse
# 3rd party
# local
# type hints
if TYPE_CHECKING:
from .measurement import Measurement
from .model import Model
# endregion
class Scoring(models.Model):
    """Performance evaluation of a model's prediction against a labelled measurement."""
    # Numeric score; defaults to 0. Scale/direction is decided by the code
    # that writes it -- TODO confirm against the scoring routine.
    value = models.FloatField(default=0)
    # Deleting the referenced model or measurement cascades to this row.
    model = models.ForeignKey('Model', on_delete=models.CASCADE)
    measurement = models.ForeignKey('Measurement', on_delete=models.CASCADE)
    # Stamped once at row creation (auto_now_add).
    time = models.DateTimeField(auto_now_add=True, help_text="time it was generated")

    def __str__(self):
        return str(self.id)

    def get_absolute_url(self):
        """Returns the url to display the object."""
        return reverse('scoring-detail', args=[str(self.id)])
| 25.114286 | 86 | 0.721274 |
3594deeaed9d6b5e6231c16c1124f80e113ece86 | 48,266 | py | Python | openstack_dashboard/api/ceilometer.py | AlexOugh/horizon | bda2a59aad7637f45211db37235ab18323e20b25 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/api/ceilometer.py | AlexOugh/horizon | bda2a59aad7637f45211db37235ab18323e20b25 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/api/ceilometer.py | AlexOugh/horizon | bda2a59aad7637f45211db37235ab18323e20b25 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils import datastructures
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
    """Return the names of all Nova flavors, with a static fallback.

    TODO(lsmola) The flavors can be set per project,
    so it should show only valid ones.
    """
    try:
        return [flavor.name for flavor in nova.flavor_list(request, None)]
    except Exception:
        # Nova unavailable: fall back to the classic default flavor set.
        return ['m1.tiny', 'm1.small', 'm1.medium',
                'm1.large', 'm1.xlarge']
def is_iterable(var):
    """Return True if *var* is a list or a tuple (or a subclass of either).

    Note: despite the name, this does not test general iterability --
    strings, sets and generators all return False.
    """
    # isinstance() already matches subclasses, so the previous extra
    # issubclass(var.__class__, ...) clause was redundant.
    return isinstance(var, (list, tuple))
def make_query(user_id=None, tenant_id=None, resource_id=None,
               user_ids=None, tenant_ids=None, resource_ids=None):
    """Build a Ceilometer filter expression from user/tenant/resource ids.

    The result is a list of ``{"field": ..., "op": "eq", "value": ...}``
    dicts usable for querying resources, meters and statistics.

    Each singular id parameter takes priority over its corresponding
    list parameter (e.g. ``user_id`` wins over ``user_ids``).
    """
    query = []
    field_specs = (("user_id", user_id, user_ids),
                   ("project_id", tenant_id, tenant_ids),
                   ("resource_id", resource_id, resource_ids))
    for field, single_id, id_list in field_specs:
        values = [single_id] if single_id else (id_list or [])
        query.extend({"field": field, "op": "eq", "value": value}
                     for value in values)
    return query
class Meter(base.APIResourceWrapper):
    """Represents one Ceilometer meter."""

    _attrs = ['name', 'type', 'unit', 'resource_id', 'user_id',
              'project_id']

    def __init__(self, apiresource):
        super(Meter, self).__init__(apiresource)
        # Display label defaults to the raw meter name until augment()
        # provides a nicer one.
        self._label = self.name
        self._description = ""

    def augment(self, label=None, description=None):
        """Attach a display label and/or description (falsy values ignored)."""
        self._label = label or self._label
        self._description = description or self._description

    @property
    def description(self):
        return self._description

    @property
    def label(self):
        return self._label
class Resource(base.APIResourceWrapper):
    """Represents one Ceilometer resource.

    Normalizes the resource's ids to empty strings, precomputes the
    query identifying this resource, and holds per-meter statistics
    attached later via set_meter().
    """
    _attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
              'links']

    def __init__(self, apiresource, ceilometer_usage=None):
        """Wrap *apiresource*; optionally resolve its tenant and user.

        :param ceilometer_usage: CeilometerUsage cache used to look up
            the tenant and user objects; when None they stay None.
        """
        super(Resource, self).__init__(apiresource)
        # Save empty strings to IDs rather than None, so it gets
        # serialized correctly. We don't want 'None' strings.
        self.project_id = self.project_id or ""
        self.user_id = self.user_id or ""
        self.resource_id = self.resource_id or ""
        # Composite identifier; parsed back by
        # CeilometerUsage.query_from_object_id.
        self._id = "%s__%s__%s" % (self.project_id,
                                   self.user_id,
                                   self.resource_id)
        # Meters with statistics data
        self._meters = {}
        # TODO(lsmola) make parallel obtaining of tenant and user
        # make the threading here, thread join into resource_list
        if ceilometer_usage and self.project_id:
            self._tenant = ceilometer_usage.get_tenant(self.project_id)
        else:
            self._tenant = None
        if ceilometer_usage and self.user_id:
            self._user = ceilometer_usage.get_user(self.user_id)
        else:
            self._user = None
        self._query = make_query(tenant_id=self.project_id,
                                 user_id=self.user_id,
                                 resource_id=self.resource_id)

    @property
    def name(self):
        # Prefer explicit 'name' metadata, then 'display_name', else "".
        name = self.metadata.get("name", None)
        display_name = self.metadata.get("display_name", None)
        return name or display_name or ""

    @property
    def id(self):
        # Composite "project__user__resource" identifier.
        return self._id

    @property
    def tenant(self):
        # Keystone tenant object, or None when not resolved in __init__.
        return self._tenant

    @property
    def user(self):
        # Keystone user object, or None when not resolved in __init__.
        return self._user

    @property
    def resource(self):
        return self.resource_id

    @property
    def query(self):
        # Ceilometer query matching exactly this resource.
        return self._query

    @property
    def meters(self):
        return self._meters

    def get_meter(self, meter_name):
        """Return statistics stored for *meter_name*, or None."""
        return self._meters.get(meter_name, None)

    def set_meter(self, meter_name, value):
        """Store statistics (or a single stat value) under *meter_name*."""
        self._meters[meter_name] = value
class ResourceAggregate(Resource):
    """Represents an aggregate of several resources.

    An aggregate can be described either by an explicit *query*, or by
    id parameters: passing multiple ids in one parameter (or omitting a
    parameter entirely) widens the match. E.g. multiple resource_ids, or
    only a tenant_id to cover every resource of that tenant.
    """

    def __init__(self, tenant_id=None, user_id=None, resource_id=None,
                 tenant_ids=None, user_ids=None, resource_ids=None,
                 ceilometer_usage=None, query=None, identifier=None):
        # NOTE: Resource.__init__ is deliberately not called -- an
        # aggregate wraps no single API resource, only a query.
        self._id = identifier

        self.tenant_id = None
        self.user_id = None
        self.resource_id = None
        # BUG FIX: initialize these unconditionally. Previously they were
        # only assigned in the non-query branch, so accessing the
        # inherited 'tenant'/'user' properties after constructing an
        # aggregate with an explicit query raised AttributeError.
        self._tenant = None
        self._user = None

        # Meters with statistics data
        self._meters = {}

        if query:
            self._query = query
        else:
            # TODO(lsmola) make parallel obtaining of tenant and user
            # make the threading here, thread join into resource_list
            if ceilometer_usage and tenant_id:
                self.tenant_id = tenant_id
                self._tenant = ceilometer_usage.get_tenant(tenant_id)
            if ceilometer_usage and user_id:
                self.user_id = user_id
                self._user = ceilometer_usage.get_user(user_id)
            if resource_id:
                self.resource_id = resource_id

            self._query = make_query(tenant_id=tenant_id, user_id=user_id,
                                     resource_id=resource_id,
                                     tenant_ids=tenant_ids,
                                     user_ids=user_ids,
                                     resource_ids=resource_ids)

    @property
    def id(self):
        return self._id
class Sample(base.APIResourceWrapper):
    """Represents one Ceilometer sample."""

    # NOTE(review): 'resource_metadata' appears twice -- harmless, but
    # could be deduplicated.
    _attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
              'resource_metadata', 'source', 'counter_unit', 'counter_volume',
              'project_id', 'counter_type', 'resource_metadata']

    @property
    def instance(self):
        """Instance display name, falling back to the instance id."""
        metadata = self.resource_metadata
        return metadata.get('display_name') or metadata.get('instance_id')

    @property
    def name(self):
        """Resource name from metadata; empty string when unavailable."""
        metadata = self.resource_metadata
        return metadata.get('name') or metadata.get('display_name') or ""
class Statistic(base.APIResourceWrapper):
    """Represents one Ceilometer statistic."""
    # Aggregation fields exposed by the Ceilometer statistics API:
    # count/min/max/sum/avg per period, plus the period and duration
    # boundaries of each bucket.
    _attrs = ['period', 'period_start', 'period_end',
              'count', 'min', 'max', 'sum', 'avg',
              'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
    """Return a (memoized) Ceilometer v2 client for this request."""
    endpoint = base.url_for(request, 'metering')
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    # The token is passed as a callable so that a fresh token is read on
    # every call instead of being frozen into the memoized client.
    return ceilometer_client.Client('2', endpoint,
                                    token=(lambda: request.user.token.id),
                                    insecure=insecure,
                                    cacert=cacert)
def resource_list(request, query=None, ceilometer_usage_object=None):
    """Return Ceilometer resources matching *query*, wrapped in Resource."""
    api_resources = ceilometerclient(request).resources.list(q=query)
    return [Resource(item, ceilometer_usage_object) for item in api_resources]
def sample_list(request, meter_name, query=None):
    """Return samples of *meter_name* matching *query*, wrapped in Sample."""
    client = ceilometerclient(request)
    raw_samples = client.samples.list(meter_name=meter_name, q=query)
    return [Sample(sample) for sample in raw_samples]
def meter_list(request, query=None):
    """List the user's meters.

    :param query: optional Ceilometer filter expression.
    """
    # Pass the query by keyword (q=) for consistency with the sibling
    # helpers (resource_list, sample_list, statistic_list), instead of
    # relying on its positional slot.
    meters = ceilometerclient(request).meters.list(q=query)
    return [Meter(m) for m in meters]
def statistic_list(request, meter_name, query=None, period=None):
    """Return Statistic wrappers for *meter_name*, faceted by *period*."""
    client = ceilometerclient(request)
    stats = client.statistics.list(meter_name=meter_name, q=query,
                                   period=period)
    return [Statistic(stat) for stat in stats]
class ThreadedUpdateResourceWithStatistics(threading.Thread):
    """Multithread wrapper for update_with_statistics method of
    resource_usage.

    A join logic is placed in process_list class method. All resources
    will have their statistics attribute filled in separate threads.

    The resource_usage object is shared between threads. Each thread is
    updating one Resource.

    :Parameters:
      - `resource`: Resource or ResourceAggregate object, that will
                    be filled by statistic data.
      - `resources`: List of Resource or ResourceAggregate objects,
                     that will be filled by statistic data.
      - `resource_usage`: Wrapping resource usage object, that holds
                          all statistics data.
      - `meter_names`: List of meter names of the statistics we want.
      - `period`: In seconds. If no period is given, only one aggregate
                  statistic is returned. If given, a faceted result will be
                  returned, divided into given periods. Periods with no
                  data are ignored.
      - `stats_attr`: String representing the attribute name of the stats.
                      E.g. (avg, max, min...) If None is given, whole
                      statistic object is returned.
      - `additional_query`: Additional query for the statistics.
                            E.g. timespan, etc.
    """
    # TODO(lsmola) Can be removed once Ceilometer supports sample-api
    # and group-by, so all of this optimization will not be necessary.
    # It is planned somewhere to I.
    def __init__(self, resource_usage, resource, meter_names=None,
                 period=None, filter_func=None, stats_attr=None,
                 additional_query=None):
        # NOTE(review): filter_func is accepted but never stored or used,
        # here and in process_list below.
        super(ThreadedUpdateResourceWithStatistics, self).__init__()
        self.resource_usage = resource_usage
        self.resource = resource
        self.meter_names = meter_names
        self.period = period
        self.stats_attr = stats_attr
        self.additional_query = additional_query

    def run(self):
        # Run the job: fill self.resource with statistics in this thread.
        self.resource_usage.update_with_statistics(
            self.resource,
            meter_names=self.meter_names, period=self.period,
            stats_attr=self.stats_attr, additional_query=self.additional_query)

    @classmethod
    def process_list(cls, resource_usage, resources, meter_names=None,
                     period=None, filter_func=None, stats_attr=None,
                     additional_query=None):
        """Fill every resource with statistics, one thread per resource."""
        threads = []
        for resource in resources:
            # add statistics data into resource
            thread = cls(resource_usage, resource, meter_names=meter_names,
                         period=period, stats_attr=stats_attr,
                         additional_query=additional_query)
            thread.start()
            threads.append(thread)
        # Join: block until every resource has been updated.
        for thread in threads:
            thread.join()
class CeilometerUsage(object):
    """Represents wrapper of any Ceilometer queries.

    One instance of this class should be shared between resources
    as this class provides a place where users and tenants are
    cached. So there are no duplicate queries to API.

    This class also wraps Ceilometer API calls and provides parallel
    HTTP calls to API.

    This class should also serve as reasonable abstraction, that will
    cover huge amount of optimization due to optimization of Ceilometer
    service, without changing of the interface.
    """

    def __init__(self, request):
        self._request = request

        # Cached users and tenants, keyed by id.
        self._users = {}
        self._tenants = {}

    def get_user(self, user_id):
        """Returns user fetched from API.

        Caching the result, so it doesn't contact API twice with the
        same query.
        """
        user = self._users.get(user_id, None)
        if not user:
            user = keystone.user_get(self._request, user_id)
            # caching the user, for later use
            self._users[user_id] = user
        return user

    def preload_all_users(self):
        """Preloads all users into dictionary.

        It's more effective to preload all users, rather than fetching
        many users by separate API get calls.
        """
        users = keystone.user_list(self._request)

        # Cache all users on right indexes, this is more effective than to
        # obtain large number of users one by one by keystone.user_get
        for u in users:
            self._users[u.id] = u

    def get_tenant(self, tenant_id):
        """Returns tenant fetched from API.

        Caching the result, so it doesn't contact API twice with the
        same query.
        """
        tenant = self._tenants.get(tenant_id, None)
        if not tenant:
            tenant = keystone.tenant_get(self._request, tenant_id)
            # caching the tenant for later use
            self._tenants[tenant_id] = tenant
        return tenant

    def preload_all_tenants(self):
        """Preloads all tenants into dictionary.

        It's more effective to preload all tenants, rather than fetching
        many tenants by separate API get calls.
        """
        tenants, more = keystone.tenant_list(self._request)

        # Cache all tenants on right indexes, this is more effective than to
        # obtain large number of tenants one by one by keystone.tenant_get
        for t in tenants:
            self._tenants[t.id] = t

    def global_data_get(self, used_cls=None, query=None,
                        with_statistics=False, additional_query=None,
                        with_users_and_tenants=True):
        """Obtaining resources for table view.

        It obtains resources with statistics data according to declaration
        in used_cls class.

        :Parameters:
          - `used_cls`: Class wrapper for usage data. It acts as wrapper for
                        settings needed. See the call of this method for
                        details.
          - `query`: Explicit query definition for fetching the resources. If
                     no query is provided, it takes a default_query from
                     used_cls. If no default query is provided, it fetches
                     all the resources and filters them by meters defined
                     in used_cls.
          - `with_statistics`: Define whether statistics data from the meters
                               defined in used_cls should be fetched.
                               Can be used to first obtain only the pure
                               resources, then with the statistics data by
                               AJAX.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        default_query = used_cls.default_query
        query = query or default_query
        filter_func = None

        def filter_resources(resource):
            """Method for filtering resources by their links.rel attr.

            The links.rel attributes contain all meters the resource has.
            """
            for link in resource.links:
                if link['rel'] in used_cls.meters:
                    return True
            return False

        if not query:
            # Not all resource types can be obtained by query; if there is
            # no query, we are filtering all resources by this function.
            filter_func = filter_resources

        if with_statistics:
            # Will add statistic data into resources.
            resources = self.resources_with_statistics(
                query,
                used_cls.meters,
                filter_func=filter_func,
                stats_attr=used_cls.stats_attr,
                additional_query=additional_query,
                with_users_and_tenants=with_users_and_tenants)
        else:
            # Will load only resources without statistical data.
            resources = self.resources(
                query, filter_func=filter_func,
                with_users_and_tenants=with_users_and_tenants)

        return [used_cls(resource) for resource in resources]

    def query_from_object_id(self, object_id):
        """Obtaining a query from resource id.

        Query can be then used to identify a resource in resources or meters
        API calls. ID is being built in the Resource initializer, or returned
        by Datatable into UpdateRow functionality.
        """
        try:
            tenant_id, user_id, resource_id = object_id.split("__")
        except ValueError:
            return []

        return make_query(tenant_id=tenant_id, user_id=user_id,
                          resource_id=resource_id)

    def update_with_statistics(self, resource, meter_names=None, period=None,
                               stats_attr=None, additional_query=None):
        """Adding statistical data into one Resource or ResourceAggregate.

        It adds each statistic of each meter_names into the resource
        attributes. Attribute name is the meter name with replaced '.' to '_'.

        :Parameters:
          - `resource`: Resource or ResourceAggregate object, that will
                        be filled by statistic data.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given a faceted result will be
                      returned, divided into given periods. Periods with no
                      data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
        """
        if not meter_names:
            # BUG FIX: the implicit string concatenation was missing a
            # space and produced "...defined to beable to obtain...".
            raise ValueError("meter_names and resource must be defined to be "
                             "able to obtain the statistics.")

        # query for identifying one resource in meters
        query = resource.query
        if additional_query:
            if not is_iterable(additional_query):
                raise ValueError("Additional query must be list of"
                                 " conditions. See the docs for format.")
            query = query + additional_query

        # TODO(lsmola) thread for each meter will be probably overkill
        # but I should test lets say thread pool with 100 of threads
        # and apply it only to this code.
        # Though I do expect Ceilometer will support bulk requests,
        # so all of this optimization will not be necessary.
        for meter in meter_names:
            statistics = statistic_list(self._request, meter,
                                        query=query, period=period)
            meter = meter.replace(".", "_")
            if statistics:
                if stats_attr:
                    # I want to load only a specific attribute
                    resource.set_meter(
                        meter,
                        getattr(statistics[0], stats_attr, None))
                else:
                    # I want a dictionary of all statistics
                    resource.set_meter(meter, statistics)
            else:
                resource.set_meter(meter, None)

        return resource

    def resources(self, query=None, filter_func=None,
                  with_users_and_tenants=False):
        """Obtaining resources with the query or filter_func.

        Obtains resources and also fetches tenants and users associated
        with those resources if with_users_and_tenants flag is true.

        :Parameters:
          - `query`: Query for fetching the Ceilometer Resources.
          - `filter_func`: Callable for filtering of the obtained
                           resources.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        if with_users_and_tenants:
            ceilometer_usage_object = self
        else:
            ceilometer_usage_object = None
        resources = resource_list(
            self._request,
            query=query, ceilometer_usage_object=ceilometer_usage_object)
        if filter_func:
            resources = [resource for resource in resources if
                         filter_func(resource)]

        return resources

    def resources_with_statistics(self, query=None, meter_names=None,
                                  period=None, filter_func=None,
                                  stats_attr=None, additional_query=None,
                                  with_users_and_tenants=False):
        """Obtaining resources with statistics data inside.

        :Parameters:
          - `query`: Query for fetching the Ceilometer Resources.
          - `filter_func`: Callable for filtering of the obtained
                           resources.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given, a faceted result will
                      be returned, divided into given periods. Periods with
                      no data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        resources = self.resources(
            query, filter_func=filter_func,
            with_users_and_tenants=with_users_and_tenants)

        ThreadedUpdateResourceWithStatistics.process_list(
            self, resources,
            meter_names=meter_names, period=period, stats_attr=stats_attr,
            additional_query=additional_query)

        return resources

    def resource_aggregates(self, queries=None):
        """Obtaining resource aggregates with queries.

        Representing a resource aggregate by query is the most general way
        how to obtain resource aggregates.

        :Parameters:
          - `queries`: Dictionary of named queries that defines a bulk of
                       resource aggregates.
        """
        resource_aggregates = []
        for identifier, query in queries.items():
            resource_aggregates.append(ResourceAggregate(query=query,
                                                         ceilometer_usage=None,
                                                         identifier=identifier))
        return resource_aggregates

    def resource_aggregates_with_statistics(self, queries=None,
                                            meter_names=None, period=None,
                                            filter_func=None, stats_attr=None,
                                            additional_query=None):
        """Obtaining resource aggregates with statistics data inside.

        :Parameters:
          - `queries`: Dictionary of named queries that defines a bulk of
                       resource aggregates.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given, a faceted result will
                      be returned, divided into given periods. Periods with
                      no data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
        """
        resource_aggregates = self.resource_aggregates(queries)

        ThreadedUpdateResourceWithStatistics.process_list(
            self,
            resource_aggregates, meter_names=meter_names, period=period,
            stats_attr=stats_attr, additional_query=additional_query)

        return resource_aggregates
def diff_lists(a, b):
    """Return the elements of *a* that are not in *b*, preserving order.

    Previously implemented as ``list(set(a) - set(b))``, which returned
    the remaining meter names in nondeterministic order (and silently
    dropped duplicates); an order-preserving filter keeps meter listings
    stable across calls.

    :param a: base list (may be None or empty)
    :param b: elements to exclude (may be None or empty)
    """
    if not a:
        return []
    if not b:
        return a
    excluded = set(b)
    return [item for item in a if item not in excluded]
class Meters(object):
"""Class for listing of available meters
It is listing meters defined in this class that are available
in Ceilometer meter_list.
It is storing information that is not available in Ceilometer, i.e.
label, description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter'
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
self._ipmi_meters_info = self._get_ipmi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info,
self._glance_meters_info,
self._cinder_meters_info,
self._swift_meters_info,
self._kwapi_meters_info,
self._ipmi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name, meter_info
in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
    def list_all(self, only_meters=None, except_meters=None):
        """Returns a list of meters based on the meters names.

        :Parameters:
          - `only_meters`: The list of meter names we want to show;
                           defaults to every known meter.
          - `except_meters`: The list of meter names we don't want to show.
        """
        return self._list(only_meters=only_meters,
                          except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def list_ipmi(self, except_meters=None):
"""Returns a list of meters tied to ipmi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._ipmi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names
:Parameters:
- `only_meters`: The list of meter_names we want to show
- `except_meters`: The list of meter names we don't want to show
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
    def _get_nova_meters_info(self):
        """Return static label/description metadata for nova meters.

        Keyed by meter name; used to augment the matching Ceilometer
        meters for display. Labels are intentionally left blank.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        meters_info = datastructures.SortedDict([
            ("instance", {
                'label': '',
                'description': _("Duration of instance"),
            }),
            # Template entry; concrete per-flavor entries are cloned from
            # it in the loop below.
            ("instance:<type>", {
                'label': '',
                'description': _("Duration of instance <type> "
                                 "(openstack types)"),
            }),
            ("memory", {
                'label': '',
                'description': _("Volume of RAM in MB"),
            }),
            ("memory.usage", {
                'label': '',
                'description': _("Volume of RAM used in MB"),
            }),
            ("cpu", {
                'label': '',
                'description': _("CPU time used"),
            }),
            ("cpu_util", {
                'label': '',
                'description': _("Average CPU utilization"),
            }),
            ("vcpus", {
                'label': '',
                'description': _("Number of VCPUs"),
            }),
            ("disk.read.requests", {
                'label': '',
                'description': _("Number of read requests"),
            }),
            ("disk.write.requests", {
                'label': '',
                'description': _("Number of write requests"),
            }),
            ("disk.read.bytes", {
                'label': '',
                'description': _("Volume of reads in B"),
            }),
            ("disk.write.bytes", {
                'label': '',
                'description': _("Volume of writes in B"),
            }),
            ("disk.read.requests.rate", {
                'label': '',
                'description': _("Average rate of read requests per second"),
            }),
            ("disk.write.requests.rate", {
                'label': '',
                'description': _("Average rate of write requests per second"),
            }),
            ("disk.read.bytes.rate", {
                'label': '',
                'description': _("Average rate of reads in B per second"),
            }),
            ("disk.write.bytes.rate", {
                'label': '',
                'description': _("Average volume of writes in B per second"),
            }),
            ("disk.root.size", {
                'label': '',
                'description': _("Size of root disk in GB"),
            }),
            ("disk.ephemeral.size", {
                'label': '',
                'description': _("Size of ephemeral disk "
                                 "in GB"),
            }),
            ("network.incoming.bytes", {
                'label': '',
                'description': _("Number of incoming bytes "
                                 "on the network for a VM interface"),
            }),
            ("network.outgoing.bytes", {
                'label': '',
                'description': _("Number of outgoing bytes "
                                 "on the network for a VM interface"),
            }),
            ("network.incoming.packets", {
                'label': '',
                'description': _("Number of incoming "
                                 "packets for a VM interface"),
            }),
            ("network.outgoing.packets", {
                'label': '',
                'description': _("Number of outgoing "
                                 "packets for a VM interface"),
            }),
            ("network.incoming.bytes.rate", {
                'label': '',
                'description': _("Average rate per sec of incoming "
                                 "bytes on a VM network interface"),
            }),
            ("network.outgoing.bytes.rate", {
                'label': '',
                'description': _("Average rate per sec of outgoing "
                                 "bytes on a VM network interface"),
            }),
            ("network.incoming.packets.rate", {
                'label': '',
                'description': _("Average rate per sec of incoming "
                                 "packets on a VM network interface"),
            }),
            ("network.outgoing.packets.rate", {
                'label': '',
                'description': _("Average rate per sec of outgoing "
                                 "packets on a VM network interface"),
            }),
        ])
        # Adding flavor based meters into meters_info dict
        # TODO(lsmola) this kind of meter will be probably deprecated
        # https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
        for flavor in get_flavor_names(self._request):
            name = 'instance:%s' % flavor
            # Clone the template so each flavor meter owns its own dict.
            meters_info[name] = dict(meters_info["instance:<type>"])
            meters_info[name]['description'] = (
                _('Duration of instance type %s (openstack flavor)') %
                flavor)
        # TODO(lsmola) allow to set specific in local_settings. For all meters
        # because users can have their own agents and meters.
        return meters_info
    def _get_neutron_meters_info(self):
        """Return static label/description metadata for neutron meters.

        Keyed by meter name; used to augment the matching Ceilometer
        meters for display. Labels are intentionally left blank.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        return datastructures.SortedDict([
            ('network', {
                'label': '',
                'description': _("Duration of network"),
            }),
            ('network.create', {
                'label': '',
                'description': _("Creation requests for this network"),
            }),
            ('network.update', {
                'label': '',
                'description': _("Update requests for this network"),
            }),
            ('subnet', {
                'label': '',
                'description': _("Duration of subnet"),
            }),
            ('subnet.create', {
                'label': '',
                'description': _("Creation requests for this subnet"),
            }),
            ('subnet.update', {
                'label': '',
                'description': _("Update requests for this subnet"),
            }),
            ('port', {
                'label': '',
                'description': _("Duration of port"),
            }),
            ('port.create', {
                'label': '',
                'description': _("Creation requests for this port"),
            }),
            ('port.update', {
                'label': '',
                'description': _("Update requests for this port"),
            }),
            ('router', {
                'label': '',
                'description': _("Duration of router"),
            }),
            ('router.create', {
                'label': '',
                'description': _("Creation requests for this router"),
            }),
            ('router.update', {
                'label': '',
                'description': _("Update requests for this router"),
            }),
            ('ip.floating', {
                'label': '',
                'description': _("Duration of floating ip"),
            }),
            ('ip.floating.create', {
                'label': '',
                'description': _("Creation requests for this floating ip"),
            }),
            ('ip.floating.update', {
                'label': '',
                'description': _("Update requests for this floating ip"),
            }),
        ])
    def _get_glance_meters_info(self):
        """Return static label/description metadata for glance meters.

        Keyed by meter name; used to augment the matching Ceilometer
        meters for display. Labels are intentionally left blank.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        return datastructures.SortedDict([
            ('image', {
                'label': '',
                'description': _("Image existence check"),
            }),
            ('image.size', {
                'label': '',
                'description': _("Uploaded image size"),
            }),
            ('image.update', {
                'label': '',
                'description': _("Number of update on the image"),
            }),
            ('image.upload', {
                'label': '',
                'description': _("Number of upload of the image"),
            }),
            ('image.delete', {
                'label': '',
                'description': _("Number of delete on the image"),
            }),
            ('image.download', {
                'label': '',
                'description': _("Image is downloaded"),
            }),
            ('image.serve', {
                'label': '',
                'description': _("Image is served out"),
            }),
        ])
    def _get_cinder_meters_info(self):
        """Return static label/description metadata for cinder meters.

        Keyed by meter name; used to augment the matching Ceilometer
        meters for display. Labels are intentionally left blank.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        return datastructures.SortedDict([
            ('volume', {
                'label': '',
                'description': _("Duration of volume"),
            }),
            ('volume.size', {
                'label': '',
                'description': _("Size of volume"),
            }),
        ])
    def _get_swift_meters_info(self):
        """Return static label/description metadata for swift meters.

        Keyed by meter name; used to augment the matching Ceilometer
        meters for display. Labels are intentionally left blank.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        return datastructures.SortedDict([
            ('storage.objects', {
                'label': '',
                'description': _("Number of objects"),
            }),
            ('storage.objects.size', {
                'label': '',
                'description': _("Total size of stored objects"),
            }),
            ('storage.objects.containers', {
                'label': '',
                'description': _("Number of containers"),
            }),
            ('storage.objects.incoming.bytes', {
                'label': '',
                'description': _("Number of incoming bytes"),
            }),
            ('storage.objects.outgoing.bytes', {
                'label': '',
                'description': _("Number of outgoing bytes"),
            }),
            ('storage.api.request', {
                'label': '',
                'description': _("Number of API requests against swift"),
            }),
        ])
    def _get_kwapi_meters_info(self):
        """Return static label/description metadata for kwapi meters.

        Keyed by meter name; used to augment the matching Ceilometer
        meters for display. Labels are intentionally left blank.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        return datastructures.SortedDict([
            ('energy', {
                'label': '',
                'description': _("Amount of energy"),
            }),
            ('power', {
                'label': '',
                'description': _("Power consumption"),
            }),
        ])
    def _get_ipmi_meters_info(self):
        """Return static label/description metadata for IPMI meters.

        Keyed by meter name; used to augment the matching Ceilometer
        meters for display. Labels are intentionally left blank.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        return datastructures.SortedDict([
            ('hardware.ipmi.node.power', {
                'label': '',
                'description': _("System Current Power"),
            }),
            ('hardware.ipmi.node.temperature', {
                'label': '',
                'description': _("System Current Temperature"),
            }),
            ('hardware.ipmi.fan', {
                'label': '',
                'description': _("Fan RPM"),
            }),
            ('hardware.ipmi.temperature', {
                'label': '',
                'description': _("Sensor Temperature Reading"),
            }),
            ('hardware.ipmi.current', {
                'label': '',
                'description': _("Sensor Current Reading"),
            }),
            ('hardware.ipmi.voltage', {
                'label': '',
                'description': _("Sensor Voltage Reading"),
            }),
        ])
| 37.531882 | 79 | 0.559877 |
49a69e0b57a2cc503a0f5a8f830f6ff065f4fae6 | 361 | py | Python | pyabc/cv/powerlaw.py | Pat-Laub/pyABC | f23f0ff8d430a8ce0a0c8253b45e19add9121992 | [
"BSD-3-Clause"
] | 144 | 2017-10-23T11:20:09.000Z | 2022-03-31T08:55:51.000Z | pyabc/cv/powerlaw.py | Pat-Laub/pyABC | f23f0ff8d430a8ce0a0c8253b45e19add9121992 | [
"BSD-3-Clause"
] | 494 | 2018-02-14T09:49:26.000Z | 2022-03-29T12:09:39.000Z | pyabc/cv/powerlaw.py | Pat-Laub/pyABC | f23f0ff8d430a8ce0a0c8253b45e19add9121992 | [
"BSD-3-Clause"
] | 45 | 2018-08-27T18:01:46.000Z | 2022-03-30T14:05:37.000Z | import numpy as np
from scipy.optimize import curve_fit
def power_law(x, a, b):
    """Evaluate the decaying power law ``a * x**(-b)``."""
    return a * x ** (-b)


def finverse(y, a, b):
    """Invert :func:`power_law`: return the x for which a*x**(-b) == y."""
    return (a / y) ** (1 / b)


def fitpowerlaw(x, y):
    """Fit ``y ~ a * x**(-b)`` and return (params, forward fn, inverse fn)."""
    xs = np.array(x)
    ys = np.array(y)
    # Mild initial guess; curve_fit refines (a, b) by least squares.
    params, _cov = curve_fit(power_law, xs, ys, p0=[.5, 1 / 5])

    def forward(x):
        return power_law(x, *params)

    def inverse(y):
        return finverse(y, *params)

    return params, forward, inverse
| 20.055556 | 76 | 0.581717 |
59a8b9461448335663a23e0a694efeba0c7a060f | 1,106 | py | Python | .github/workflows/email-update-config.py | dbigge/Network-Automation | f7646471fc81e0d293ab94b34eaf842afc54053b | [
"PSF-2.0"
] | null | null | null | .github/workflows/email-update-config.py | dbigge/Network-Automation | f7646471fc81e0d293ab94b34eaf842afc54053b | [
"PSF-2.0"
] | null | null | null | .github/workflows/email-update-config.py | dbigge/Network-Automation | f7646471fc81e0d293ab94b34eaf842afc54053b | [
"PSF-2.0"
] | null | null | null | # $language = "python"
# $interface = "1.0"
# This automatically generated script may need to be
# edited in order to work correctly.
def Main():
    # Drives a SecureCRT terminal session (the global `crt` object) to
    # restart a nodetracker service with an edited config.
    # NOTE(review): the chr() escapes below are terminal control bytes
    # (13=CR, 8=backspace, 27=ESC, 7=BEL, 126='~', 3=Ctrl-C); they must
    # match the remote shell's prompt exactly — do not reformat them.
    crt.Screen.Synchronous = True
    crt.Screen.Send("cd nodetracker" + chr(13))
    # Wait for the xterm title sequence + prompt emitted after cd.
    crt.Screen.WaitForString(chr(27) + "]0;devops@zen-2: " + chr(126) + "/nodetracker" + chr(7) + "devops@zen-2:" + chr(126) + "/nodetracker$ ")
    # Recorded keystrokes of a sed edit; the ESC [D runs are left-arrow
    # key presses replayed verbatim from the original session capture.
    crt.Screen.Send("sed -k " + chr(8) + chr(8) + "i 's/david/d/' ./config/config.json" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D" + chr(27) + "[D." + chr(13))
    crt.Screen.WaitForString(chr(27) + "]0;devops@zen-2: " + chr(126) + "/nodetracker" + chr(7) + "devops@zen-2:" + chr(126) + "/nodetracker$ ")
    # Start the app, then send Ctrl-C (chr(3)).
    crt.Screen.Send("node app.js" + chr(13))
    crt.Screen.Send(chr(3))
# Script entry point: SecureCRT executes the module top level.
Main()
| 65.058824 | 521 | 0.526221 |
2550cd90916b80068d49fa912c6bf5e8ea769593 | 6,324 | py | Python | localstack/services/firehose/firehose_api.py | vaclav-krajicek/localstack | ad556769c12a2409a052535a315e7723949fbaec | [
"Apache-2.0"
] | 1 | 2017-10-15T17:32:57.000Z | 2017-10-15T17:32:57.000Z | localstack/services/firehose/firehose_api.py | Acidburn0zzz/localstack | 67cffcd881bfe8f18f8d0e43400125f1053d2821 | [
"Apache-2.0"
] | 1 | 2019-07-01T12:08:26.000Z | 2019-07-01T12:08:26.000Z | localstack/services/firehose/firehose_api.py | Acidburn0zzz/localstack | 67cffcd881bfe8f18f8d0e43400125f1053d2821 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import json
import uuid
import time
import logging
import base64
import traceback
from flask import Flask, jsonify, request
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services import generic_proxy
from localstack.utils.common import short_uid, to_str
from localstack.utils.aws import aws_responses
from localstack.utils.aws.aws_stack import get_s3_client, firehose_stream_arn
from six import iteritems
APP_NAME = 'firehose_api'
app = Flask(APP_NAME)
# Prefix of the 'x-amz-target' header values dispatched in post_request()
ACTION_HEADER_PREFIX = 'Firehose_20150804'
# module logger
LOG = logging.getLogger(__name__)
# in-memory registry: maps stream names to stream detail dicts
DELIVERY_STREAMS = {}
def get_delivery_stream_names():
    """Return the name of every registered delivery stream.

    The registry key is ignored on purpose: the stored stream document
    carries the authoritative 'DeliveryStreamName'.
    """
    return [stream['DeliveryStreamName']
            for stream in DELIVERY_STREAMS.values()]
def put_record(stream_name, record):
    """Store a single Firehose record by delegating to put_records."""
    batch = [record]
    return put_records(stream_name, batch)
def put_records(stream_name, records):
    """Persist records to every S3 destination of the given stream.

    Each record's base64 'Data' payload is decoded and written to the
    destination bucket under a random object name below the configured
    prefix. Destinations without an S3 description are ignored.

    :raises Exception: re-raises whatever the S3 put fails with.
    """
    stream = get_stream(stream_name)
    for dest in stream['Destinations']:
        if 'S3DestinationDescription' not in dest:
            continue
        s3_dest = dest['S3DestinationDescription']
        bucket = bucket_name(s3_dest['BucketARN'])
        prefix = s3_dest['Prefix']
        s3 = get_s3_client()
        for record in records:
            data = base64.b64decode(record['Data'])
            obj_name = str(uuid.uuid4())
            obj_path = '%s%s' % (prefix, obj_name)
            try:
                s3.Object(bucket, obj_path).put(Body=data)
            except Exception as e:
                # Lazy %-args avoid formatting when the logger is disabled;
                # bare `raise` preserves the original traceback.
                LOG.error('Unable to put record to stream: %s %s',
                          e, traceback.format_exc())
                raise
def get_destination(stream_name, destination_id):
    """Return the stream's destination with the given id, creating an
    empty one (and appending it to the stream) when it does not exist."""
    destinations = get_stream(stream_name)['Destinations']
    for existing in destinations:
        if existing['DestinationId'] == destination_id:
            return existing
    new_dest = {'DestinationId': destination_id}
    destinations.append(new_dest)
    return new_dest
def update_destination(stream_name, destination_id,
                       s3_update=None, elasticsearch_update=None, version_id=None):
    """Merge updates into a stream destination and return it.

    Only S3 updates are applied; Elasticsearch updates are logged as
    unimplemented. `version_id` is accepted for API compatibility only.
    """
    dest = get_destination(stream_name, destination_id)
    if elasticsearch_update:
        LOG.warning('Firehose to Elasticsearch updates not yet implemented!')
    if s3_update:
        s3_desc = dest.setdefault('S3DestinationDescription', {})
        s3_desc.update(s3_update)
    return dest
def create_stream(stream_name, s3_destination=None):
    """Register a new delivery stream and optionally its S3 destination.

    Returns the stream document that was stored in DELIVERY_STREAMS.
    """
    stream = {
        'HasMoreDestinations': False,
        'VersionId': '1',
        'CreateTimestamp': time.time(),
        'DeliveryStreamARN': firehose_stream_arn(stream_name),
        'DeliveryStreamStatus': 'ACTIVE',
        'DeliveryStreamName': stream_name,
        'Destinations': []
    }
    DELIVERY_STREAMS[stream_name] = stream
    if s3_destination:
        # Attach the initial S3 destination under a freshly generated id.
        update_destination(stream_name=stream_name, destination_id=short_uid(), s3_update=s3_destination)
    return stream
def delete_stream(stream_name):
    """Remove a stream; return {} on success or a 404-style error body."""
    removed = DELIVERY_STREAMS.pop(stream_name, None)
    if removed:
        return {}
    return error_not_found(stream_name)
def get_stream(stream_name):
    """Return the stream document for `stream_name`, or None if unknown."""
    return DELIVERY_STREAMS.get(stream_name)
def bucket_name(bucket_arn):
    """Extract the bucket name from an 'arn:aws:s3:::<bucket>' ARN."""
    return bucket_arn.rpartition(':::')[2]
def role_arn(stream_name):
    """Return the IAM role ARN conventionally paired with a stream,
    built with the fixed test account id."""
    return 'arn:aws:iam::%s:role/%s' % (TEST_AWS_ACCOUNT_ID, stream_name)
def error_not_found(stream_name):
    """Build the 400 ResourceNotFoundException payload for a stream."""
    message = 'Firehose %s under account %s not found.' % (stream_name, TEST_AWS_ACCOUNT_ID)
    return error_response(message, code=400,
                          error_type='ResourceNotFoundException')
def error_response(msg, code=500, error_type='InternalFailure'):
    """Build a Flask error response in the AWS error format."""
    return aws_responses.flask_error_response(
        msg, code=code, error_type=error_type)
@app.route('/', methods=['POST'])
def post_request():
    """Dispatch a Firehose API call based on the 'x-amz-target' header.

    The JSON request body is parsed and routed to the matching handler;
    dict results are JSON-ified, other results (error responses) are
    returned as-is.
    """
    action = request.headers.get('x-amz-target')
    data = json.loads(to_str(request.data))
    response = None
    if action == '%s.ListDeliveryStreams' % ACTION_HEADER_PREFIX:
        response = {
            'DeliveryStreamNames': get_delivery_stream_names(),
            'HasMoreDeliveryStreams': False
        }
    elif action == '%s.CreateDeliveryStream' % ACTION_HEADER_PREFIX:
        stream_name = data['DeliveryStreamName']
        response = create_stream(stream_name, s3_destination=data.get('S3DestinationConfiguration'))
    elif action == '%s.DeleteDeliveryStream' % ACTION_HEADER_PREFIX:
        stream_name = data['DeliveryStreamName']
        response = delete_stream(stream_name)
    elif action == '%s.DescribeDeliveryStream' % ACTION_HEADER_PREFIX:
        stream_name = data['DeliveryStreamName']
        response = get_stream(stream_name)
        if not response:
            return error_not_found(stream_name)
        response = {
            'DeliveryStreamDescription': response
        }
    elif action == '%s.PutRecord' % ACTION_HEADER_PREFIX:
        stream_name = data['DeliveryStreamName']
        record = data['Record']
        put_record(stream_name, record)
        # A fresh id is fabricated; the mock does not track record ids.
        response = {
            'RecordId': str(uuid.uuid4())
        }
    elif action == '%s.PutRecordBatch' % ACTION_HEADER_PREFIX:
        stream_name = data['DeliveryStreamName']
        records = data['Records']
        put_records(stream_name, records)
        # The mock always reports full success for batches.
        response = {
            'FailedPutCount': 0,
            'RequestResponses': []
        }
    elif action == '%s.UpdateDestination' % ACTION_HEADER_PREFIX:
        stream_name = data['DeliveryStreamName']
        version_id = data['CurrentDeliveryStreamVersionId']
        destination_id = data['DestinationId']
        s3_update = data['S3DestinationUpdate'] if 'S3DestinationUpdate' in data else None
        update_destination(stream_name=stream_name, destination_id=destination_id,
                           s3_update=s3_update, version_id=version_id)
        response = {}
    else:
        response = error_response('Unknown action "%s"' % action, code=400, error_type='InvalidAction')
    if isinstance(response, dict):
        response = jsonify(response)
    return response
def serve(port, quiet=True):
    """Run the mock Firehose API on `port` via the shared proxy server."""
    generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
| 34 | 105 | 0.680108 |
faae608a4431e2af017b3ec16ef7349099f402f3 | 6,687 | py | Python | nipype/workflows/dmri/dtitk/tensor_registration.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/workflows/dmri/dtitk/tensor_registration.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/workflows/dmri/dtitk/tensor_registration.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # -*- coding: utf-8 -*-
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from ....pipeline import engine as pe
from ....interfaces import utility as niu
from ....interfaces import dtitk
def affine_tensor_pipeline(name='AffTen'):
    """
    Workflow that performs a linear tensor registration
    (Rigid followed by Affine, with the rigid transform used to
    initialize the affine stage).

    Example
    -------
    >>> from nipype.workflows.dmri.dtitk.tensor_registration import affine_tensor_pipeline
    >>> affine = affine_tensor_pipeline()
    >>> affine.inputs.inputnode.fixed_file = 'im1.nii'
    >>> affine.inputs.inputnode.moving_file = 'im2.nii'
    >>> affine.run() # doctest: +SKIP
    """
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['fixed_file', 'moving_file']),
        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_file', 'out_file_xfm']),
        name='outputnode')
    rigid_node = pe.Node(dtitk.Rigid(), name='rigid_node')
    affine_node = pe.Node(dtitk.Affine(), name='affine_node')

    wf = pe.Workflow(name=name)

    # Rigid registration of moving onto fixed
    wf.connect(inputnode, 'fixed_file', rigid_node, 'fixed_file')
    wf.connect(inputnode, 'moving_file', rigid_node, 'moving_file')
    # Affine stage seeded with the rigid transform
    wf.connect(rigid_node, 'out_file_xfm', affine_node, 'initialize_xfm')
    wf.connect(inputnode, 'fixed_file', affine_node, 'fixed_file')
    wf.connect(inputnode, 'moving_file', affine_node, 'moving_file')
    # Expose the affine result and its transform
    wf.connect(affine_node, 'out_file', outputnode, 'out_file')
    wf.connect(affine_node, 'out_file_xfm', outputnode, 'out_file_xfm')
    return wf
def diffeomorphic_tensor_pipeline(name='DiffeoTen', params=None):
    """
    Workflow that performs a diffeomorphic registration
    (Rigid and Affine followed by Diffeomorphic)
    Note: the requirements for a diffeomorphic registration specify that
    the dimension 0 is a power of 2 so images are resliced prior to
    registration. Remember to move origin and reslice prior to applying xfm to
    another file!

    :param name: workflow name
    :param params: optional dict with an 'array_size' (x, y, z) tuple used
        for the power-of-2 reslicing; defaults to (128, 128, 64)

    Example
    -------
    >>> from nipype.workflows.dmri.dtitk.tensor_registration import diffeomorphic_tensor_pipeline
    >>> diffeo = diffeomorphic_tensor_pipeline()
    >>> diffeo.inputs.inputnode.fixed_file = 'im1.nii'
    >>> diffeo.inputs.inputnode.moving_file = 'im2.nii'
    >>> diffeo.run() # doctest: +SKIP
    """
    # Avoid a mutable default argument: build the default dict per call.
    if params is None:
        params = {'array_size': (128, 128, 64)}
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['fixed_file', 'moving_file']),
        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_file', 'out_file_xfm',
                'fixed_resliced', 'moving_resliced']),
        name='outputnode')
    origin_node_fixed = pe.Node(dtitk.TVAdjustVoxSp(origin=(0, 0, 0)),
                                name='origin_node_fixed')
    origin_node_moving = origin_node_fixed.clone(name='origin_node_moving')
    reslice_node_pow2 = pe.Node(dtitk.TVResample(
        origin=(0, 0, 0),
        array_size=params['array_size']),
        name='reslice_node_pow2')
    reslice_node_moving = pe.Node(dtitk.TVResample(),
                                  name='reslice_node_moving')
    mask_node = pe.Node(dtitk.BinThresh(lower_bound=0.01, upper_bound=100,
                                        inside_value=1, outside_value=0),
                        name='mask_node')
    rigid_node = pe.Node(dtitk.Rigid(), name='rigid_node')
    affine_node = pe.Node(dtitk.Affine(), name='affine_node')
    diffeo_node = pe.Node(dtitk.Diffeo(n_iters=6, ftol=0.002),
                          name='diffeo_node')
    compose_xfm_node = pe.Node(dtitk.ComposeXfm(), name='compose_xfm_node')
    apply_xfm_node = pe.Node(dtitk.DiffeoSymTensor3DVol(),
                             name='apply_xfm_node')
    adjust_vs_node_to_input = pe.Node(dtitk.TVAdjustVoxSp(),
                                      name='adjust_vs_node_to_input')
    reslice_node_to_input = pe.Node(dtitk.TVResample(),
                                    name='reslice_node_to_input')
    input_fa = pe.Node(dtitk.TVtool(in_flag='fa'), name='input_fa')

    wf = pe.Workflow(name=name)

    # calculate input FA image for origin reference
    wf.connect(inputnode, 'fixed_file', input_fa, 'in_file')
    # Reslice input images
    wf.connect(inputnode, 'fixed_file', origin_node_fixed, 'in_file')
    wf.connect(origin_node_fixed, 'out_file', reslice_node_pow2, 'in_file')
    wf.connect(reslice_node_pow2, 'out_file',
               reslice_node_moving, 'target_file')
    wf.connect(inputnode, 'moving_file', origin_node_moving, 'in_file')
    wf.connect(origin_node_moving, 'out_file', reslice_node_moving, 'in_file')
    # Rigid registration
    wf.connect(reslice_node_pow2, 'out_file', rigid_node, 'fixed_file')
    wf.connect(reslice_node_moving, 'out_file', rigid_node, 'moving_file')
    # Affine registration
    wf.connect(rigid_node, 'out_file_xfm', affine_node, 'initialize_xfm')
    wf.connect(reslice_node_pow2, 'out_file', affine_node, 'fixed_file')
    wf.connect(reslice_node_moving, 'out_file', affine_node, 'moving_file')
    # Diffeo registration
    wf.connect(reslice_node_pow2, 'out_file', mask_node, 'in_file')
    wf.connect(reslice_node_pow2, 'out_file', diffeo_node, 'fixed_file')
    wf.connect(affine_node, 'out_file', diffeo_node, 'moving_file')
    wf.connect(mask_node, 'out_file', diffeo_node, 'mask_file')
    # Compose transform
    wf.connect(diffeo_node, 'out_file_xfm', compose_xfm_node, 'in_df')
    wf.connect(affine_node, 'out_file_xfm', compose_xfm_node, 'in_aff')
    # Apply transform
    wf.connect(reslice_node_moving, 'out_file', apply_xfm_node, 'in_file')
    wf.connect(compose_xfm_node, 'out_file', apply_xfm_node, 'transform')
    # Move origin and reslice to match original fixed input image
    wf.connect(apply_xfm_node, 'out_file', adjust_vs_node_to_input, 'in_file')
    wf.connect(input_fa, 'out_file', adjust_vs_node_to_input, 'target_file')
    wf.connect(adjust_vs_node_to_input, 'out_file', reslice_node_to_input, 'in_file')
    wf.connect(input_fa, 'out_file', reslice_node_to_input, 'target_file')
    # Send to output
    wf.connect(reslice_node_to_input, 'out_file', outputnode, 'out_file')
    wf.connect(compose_xfm_node, 'out_file', outputnode, 'out_file_xfm')
    wf.connect(reslice_node_pow2, 'out_file', outputnode, 'fixed_resliced')
    wf.connect(reslice_node_moving, 'out_file', outputnode, 'moving_resliced')
    return wf
| 46.117241 | 97 | 0.657993 |
3fa9bc6ce633f6234aa568fd959eb04b513d6f89 | 6,069 | py | Python | sharkpylib/plot/html_plot.py | sharksmhi/sharkpylib | 2a1d3cf3c15729e50525ab8da5920b6f9bb3faf2 | [
"MIT"
] | null | null | null | sharkpylib/plot/html_plot.py | sharksmhi/sharkpylib | 2a1d3cf3c15729e50525ab8da5920b6f9bb3faf2 | [
"MIT"
] | null | null | null | sharkpylib/plot/html_plot.py | sharksmhi/sharkpylib | 2a1d3cf3c15729e50525ab8da5920b6f9bb3faf2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on Wed Aug 8 12:05:15 2018
@author:
"""
# plotly is an optional dependency: keep the module importable without it
# (plot methods will then fail at call time with a NameError).
try:
    import plotly.graph_objs as go
    import plotly.offline as ply
    # import plotly.plotly as ply
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed; import failures are still tolerated.
    pass
class PlotlyPlot():
    """Thin wrapper around plotly: accumulates traces in ``self.data``
    and a layout dict in ``self.layout``, then renders them to a file
    or a notebook.
    """
    def __init__(self,
                 **kwargs):
        # kwargs are forwarded to set_layout (title, xaxis_title, yaxis_title).
        self.data = []
        self.set_layout(**kwargs)

    #==========================================================================
    def _get_plot_name(self, plot_type='', **kwargs):
        # Use the explicit 'name' kwarg when given, else synthesize
        # "<trace-number>_<type>_plot" from the current trace count.
        default_plot_name = '{}_{}_plot'.format(len(self.data)+1, plot_type)
        return kwargs.get('name', default_plot_name)

    #==========================================================================
    def get_figure(self):
        """Return the plotly figure dict (data + layout)."""
        return dict(data=self.data,
                    layout=self.layout)

    #==========================================================================
    def set_layout(self, **kwargs):
        """Reset the layout; accepts title, xaxis_title and yaxis_title."""
        self.layout = dict(title = kwargs.get('title', 'title'),
                           xaxis = dict(title = kwargs.get('xaxis_title', 'xaxis')),
                           yaxis = dict(title = kwargs.get('yaxis_title', 'yaxis')))

    #==========================================================================
    def add_bar_data(self, x, y, **kwargs):
        """Append a bar trace; 'barmode' kwarg ('group'/'stack') applies
        to the whole layout, not just this trace."""
        trace = go.Bar(x=x,
                       y=y,
                       name = self._get_plot_name('bar', **kwargs),
                       )
        self.data.append(trace)
        self.layout['barmode'] = kwargs.get('barmode', 'group') # or 'stack'

    #==========================================================================
    def add_box_data(self, x=None, y=None, **kwargs):
        """Append a box trace; supports 'marker_color' and 'boxpoints'."""
        trace = go.Box(
            x=x,
            y=y,
            name=self._get_plot_name('bar', **kwargs),
            marker=dict(
                color=kwargs.get('marker_color', None)
            ),
            boxpoints=kwargs.get('boxpoints', None),
        )
        self.data.append(trace)

    #==========================================================================
    def get_position(self, pos, space_percent=5):
        """
        pos is a four digit number or str that shows where to place the chart.
        The first two digits are the grid dimensions (nr of columns, nr of
        rows) and the last two are the desired 1-based cell position.
        Returns [[x0, x1], [y0, y1]] domain fractions with `space_percent`
        padding on each side of the cell.
        """
        padding = space_percent/100.
        # Split e.g. 2211 into grid size (2, 2) and cell (1, 1).
        pos = list(str(pos))
        nr_x = int(pos[0])
        nr_y = int(pos[1])
        pos_x = int(pos[2])
        pos_y = int(pos[3])
        dx = 1./nr_x
        dy = 1./nr_y
        x_list = []
        for x in range(nr_x):
            x_list.append([dx*x+padding, dx*(x+1)-padding])
        y_list = []
        for y in range(nr_y):
            y_list.append([dy*y+padding, dy*(y+1)-padding])
        return [x_list[pos_x-1], y_list[pos_y-1]]

    #==========================================================================
    def add_pie_data(self, x, y, pos=1111, **kwargs):
        """Append a pie trace placed in the grid cell encoded by `pos`
        (see get_position)."""
        x_pos, y_pos = self.get_position(pos)
        trace = go.Pie(labels=x,
                       values=y,
                       name = self._get_plot_name('pie', **kwargs),
                       domain = {'x': x_pos,
                                 'y': y_pos}
                       )
        self.data.append(trace)

    #==========================================================================
    def add_scatter_data(self, x, y, **kwargs):
        """Append a scatter trace; supports mode, line_color, line_width
        and dash kwargs."""
        trace = go.Scatter(x = x,
                           y = y,
                           name = self._get_plot_name('scatter', **kwargs),
                           mode = kwargs.get('mode', 'lines'),
                           line = dict(color = kwargs.get('line_color', None),
                                       width = kwargs.get('line_width', 2),
                                       dash = kwargs.get('dash', 'solid'))
                           )
        self.data.append(trace)

    #==========================================================================
    def add_profile_dataframe(self, df, depth_par='DEPH', **kwargs):
        """
        All float parameters in df are plotted with depth_par as y-axis.
        "label_mapping" and "unit_mapping" can be given as kwargs; each is
        called with the column name to build the trace label.
        """
        kw = dict(mode='lines+markers')
        kw.update(kwargs)
        for col in df.columns:
            if col == depth_par:
                continue
            # Skip non-numeric columns (QC flags, strings, ...).
            if df[col].dtype != float:
                continue
            # Set label
            if kwargs.get('label_mapping'):
                name = kwargs['label_mapping'](col)
            else:
                name = col
            # Add unit
            if kwargs.get('unit_mapping'):
                name = name + ' ({})'.format(kwargs['unit_mapping'](col))
            # Add plot
            self.add_scatter_data(df[col], df[depth_par], name=name, **kw)

    #==========================================================================
    def plot_to_file(self, file_path):
        """Render the accumulated figure to an offline HTML file."""
        fig = self.get_figure()
        # Plot to file
        # file_path = file_path.replace('/', '_').replace('\\', '_')
        ply.plot(fig, filename=file_path)

    #==========================================================================
    def plot_in_notebook(self):
        """Render the accumulated figure inline in a Jupyter notebook."""
        ply.init_notebook_mode(connected=True)
        fig = self.get_figure()
        ply.iplot(fig)
| 32.805405 | 85 | 0.394628 |
d22315e2d3b793338e75455844228757626ffaaf | 2,402 | py | Python | tests/test_lightcurve.py | LanzLagman/chronos | 3c7e32e7bd8ed85d442ce3ecbf4c9a5272e8e470 | [
"MIT"
] | null | null | null | tests/test_lightcurve.py | LanzLagman/chronos | 3c7e32e7bd8ed85d442ce3ecbf4c9a5272e8e470 | [
"MIT"
] | null | null | null | tests/test_lightcurve.py | LanzLagman/chronos | 3c7e32e7bd8ed85d442ce3ecbf4c9a5272e8e470 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import lightkurve as lk
from chronos import ShortCadence, LongCadence
TICID = 460205581
SECTOR = 10
CUTOUT_SIZE = (15, 15)
QUALITY_BITMASK = "default"
def test_sc_pipeline():
"""
"""
sc = ShortCadence(
ticid=TICID, sap_mask="pipeline", quality_bitmask=QUALITY_BITMASK
)
_ = sc.get_lc()
assert isinstance(sc.lc_pdcsap, lk.LightCurve)
assert isinstance(sc.lc_sap, lk.LightCurve)
def test_sc_square():
"""
"""
sc = ShortCadence(
ticid=TICID,
sap_mask="square",
aper_radius=1,
threshold_sigma=5,
percentile=95,
quality_bitmask=QUALITY_BITMASK,
)
_ = sc.make_custom_lc()
assert isinstance(sc.lc_custom, lk.LightCurve)
# assert sc.sap_mask == "square"
def test_sc_round():
    """Short cadence with a round custom aperture produces a light curve."""
    sc = ShortCadence(
        ticid=TICID,
        sap_mask="round",
        aper_radius=1,
        quality_bitmask=QUALITY_BITMASK,
    )
    _ = sc.make_custom_lc()
    assert isinstance(sc.lc_custom, lk.LightCurve)
    # assert sc.sap_mask == "round"
def test_sc_threshold():
    """Short cadence with a sigma-threshold aperture produces a light curve."""
    sc = ShortCadence(
        ticid=TICID,
        sap_mask="threshold",
        threshold_sigma=5,
        quality_bitmask=QUALITY_BITMASK,
    )
    _ = sc.make_custom_lc()
    assert isinstance(sc.lc_custom, lk.LightCurve)
    # assert sc.sap_mask == "threshold"
def test_sc_percentile():
    """Short cadence with a flux-percentile aperture produces a light curve."""
    sc = ShortCadence(
        ticid=TICID,
        sap_mask="percentile",
        percentile=90,
        quality_bitmask=QUALITY_BITMASK,
    )
    _ = sc.make_custom_lc()
    assert isinstance(sc.lc_custom, lk.LightCurve)
    # assert sc.sap_mask == "percentile"
def test_sc_triceratops():
    """Requesting FPP on short cadence initializes the triceratops helper."""
    sc = ShortCadence(ticid=TICID, calc_fpp=True)
    # df = sc.get_NEB_depths()
    # df = sc.get_fpp(flat=flat, plot=False)
    assert sc.triceratops is not None
def test_lc():
    """Long cadence with a square aperture over an FFI cutout produces a
    light curve."""
    lc = LongCadence(
        ticid=TICID,
        sap_mask="square",
        aper_radius=1,
        cutout_size=CUTOUT_SIZE,
        quality_bitmask=QUALITY_BITMASK,
    )
    _ = lc.make_custom_lc()
    assert isinstance(lc.lc_custom, lk.LightCurve)
def test_lc_triceratops():
    """Requesting FPP on long cadence initializes the triceratops helper."""
    lc = LongCadence(ticid=TICID, calc_fpp=True)
    # df = sc.get_NEB_depths()
    # df = sc.get_fpp(flat=flat, plot=False)
    assert lc.triceratops is not None
| 21.836364 | 73 | 0.616986 |
cadd8e158698ca8310fefabe455b53b767f1e7de | 25,983 | py | Python | fiftyone/core/odm/sample.py | brimoor/fiftyone | 237d288955415575f885a3e58accf04d38bd1856 | [
"Apache-2.0"
] | 1 | 2020-10-09T05:16:49.000Z | 2020-10-09T05:16:49.000Z | fiftyone/core/odm/sample.py | brimoor/fiftyone | 237d288955415575f885a3e58accf04d38bd1856 | [
"Apache-2.0"
] | null | null | null | fiftyone/core/odm/sample.py | brimoor/fiftyone | 237d288955415575f885a3e58accf04d38bd1856 | [
"Apache-2.0"
] | null | null | null | """
Backing document classes for :class:`fiftyone.core.sample.Sample` instances.
Class hierarchy::
SampleDocument
├── NoDatasetSampleDocument
└── DatasetSampleDocument
├── my_custom_dataset
├── another_dataset
└── ...
Design invariants:
- A :class:`fiftyone.core.sample.Sample` always has a backing
``sample._doc``, which is an instance of a subclass of
:class:`SampleDocument`
- A :class:`fiftyone.core.dataset.Dataset` always has a backing
``dataset._sample_doc_cls`` which is a subclass of
:class:`DatasetSampleDocument``.
**Implementation details**
When a new :class:`fiftyone.core.sample.Sample` is created, its ``_doc``
attribute is an instance of :class:`NoDatasetSampleDocument`::
import fiftyone as fo
sample = fo.Sample()
sample._doc # NoDatasetSampleDocument
When a new :class:`fiftyone.core.dataset.Dataset` is created, its
``_sample_doc_cls`` attribute holds a dynamically created subclass of
:class:`DatasetSampleDocument` whose name is the name of the dataset::
dataset = fo.Dataset(name="my_dataset")
dataset._sample_doc_cls # my_dataset(DatasetSampleDocument)
When a sample is added to a dataset, its ``_doc`` attribute is changed from
type :class:`NoDatasetSampleDocument` to type ``dataset._sample_doc_cls``::
dataset.add_sample(sample)
sample._doc # my_dataset(DatasetSampleDocument)
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from collections import OrderedDict
from functools import wraps
import json
import numbers
import os
import random
from bson import json_util
from bson.binary import Binary
from mongoengine.errors import InvalidQueryError
import numpy as np
import six
import fiftyone as fo
import fiftyone.core.fields as fof
import fiftyone.core.metadata as fom
import fiftyone.core.utils as fou
from .dataset import SampleFieldDocument, DatasetDocument
from .document import (
Document,
BaseEmbeddedDocument,
SerializableDocument,
)
# Use our own Random object to avoid messing with the user's seed
_random = random.Random()
def _generate_rand(filepath=None):
if filepath is not None:
_random.seed(filepath)
return _random.random() * 0.001 + 0.999
def default_sample_fields(include_private=False):
    """The default fields present on all :class:`SampleDocument` objects.

    Args:
        include_private (False): whether to include fields that start with `_`

    Returns:
        a tuple of field names
    """
    fields = DatasetSampleDocument._get_fields_ordered(
        include_private=include_private
    )
    return fields
def no_delete_default_field(func):
    """Wrapper for :func:`SampleDocument.delete_field` that prevents deleting
    default fields of :class:`SampleDocument`.

    This is a decorator because the subclasses implement this as either an
    instance or class method.
    """

    @wraps(func)
    def guarded(cls_or_self, field_name, *args, **kwargs):
        # pylint: disable=no-member
        protected = default_sample_fields()
        if field_name in protected:
            raise ValueError("Cannot delete default field '%s'" % field_name)

        return func(cls_or_self, field_name, *args, **kwargs)

    return guarded
class SampleDocument(SerializableDocument):
    """Interface for sample backing documents.

    Concrete subclasses (:class:`DatasetSampleDocument`,
    :class:`NoDatasetSampleDocument`) must implement the field accessors.
    """

    @property
    def collection_name(self):
        """The name of the MongoDB collection to which this sample belongs, or
        ``None`` if it has not been added to a dataset.
        """
        # Base class: not backed by any collection
        return None

    @property
    def in_db(self):
        """Whether the sample has been added to the database."""
        return False

    @property
    def ingest_time(self):
        """The time the sample was added to the database, or ``None`` if it
        has not been added to the database.
        """
        return None

    def has_field(self, field_name):
        """Determines whether the sample has a field of the given name.

        Args:
            field_name: the field name

        Returns:
            True/False
        """
        raise NotImplementedError("Subclass must implement `has_field()`")

    def get_field(self, field_name):
        """Gets the field of the sample.

        Args:
            field_name: the field name

        Returns:
            the field value

        Raises:
            AttributeError: if the field does not exist
        """
        raise NotImplementedError("Subclass must implement `get_field()`")

    def set_field(self, field_name, value, create=False):
        """Sets the value of a field of the sample.

        Args:
            field_name: the field name
            value: the field value
            create (False): whether to create the field if it does not exist

        Raises:
            ValueError: if ``field_name`` is not an allowed field name or does
                not exist and ``create == False``
        """
        raise NotImplementedError("Subclass must implement `set_field()`")

    def clear_field(self, field_name):
        """Clears the value of a field of the sample.

        Args:
            field_name: the field name

        Raises:
            ValueError: if the field does not exist
        """
        raise NotImplementedError("Subclass must implement `clear_field()`")
class DatasetSampleDocument(Document, SampleDocument):
    """Base class for sample documents backing samples in datasets.

    All ``fiftyone.core.dataset.Dataset._sample_doc_cls`` classes inherit from
    this class.
    """

    meta = {"abstract": True}

    # The path to the data on disk
    filepath = fof.StringField(unique=True)

    # The set of tags associated with the sample
    tags = fof.ListField(fof.StringField())

    # Metadata about the sample media
    metadata = fof.EmbeddedDocumentField(fom.Metadata, null=True)

    # Random float used for random dataset operations (e.g. shuffle)
    _rand = fof.FloatField(default=_generate_rand)

    def __setattr__(self, name, value):
        """Sets an attribute, validating declared fields and rejecting
        undeclared (non-private) field assignments.
        """
        # pylint: disable=no-member
        has_field = self.has_field(name)

        # Private attributes and non-field attributes pass through untouched
        if name.startswith("_") or (hasattr(self, name) and not has_field):
            super().__setattr__(name, value)
            return

        if not has_field:
            raise ValueError(
                "Adding sample fields using the `sample.field = value` syntax "
                "is not allowed; use `sample['field'] = value` instead"
            )

        # Validate against the mongoengine field before committing the value
        if value is not None:
            self._fields[name].validate(value)

        super().__setattr__(name, value)

    @property
    def collection_name(self):
        # By construction, the per-dataset subclass is named after its
        # MongoDB sample collection
        return self.__class__.__name__

    @property
    def field_names(self):
        """An ordered tuple of the public field names of this sample."""
        return tuple(
            f
            for f in self._get_fields_ordered(include_private=False)
            if f != "id"
        )

    @classmethod
    def get_field_schema(
        cls, ftype=None, embedded_doc_type=None, include_private=False
    ):
        """Returns a schema dictionary describing the fields of this sample.

        If the sample belongs to a dataset, the schema will apply to all
        samples in the dataset.

        Args:
            ftype (None): an optional field type to which to restrict the
                returned schema. Must be a subclass of
                :class:`fiftyone.core.fields.Field`
            embedded_doc_type (None): an optional embedded document type to
                which to restrict the returned schema. Must be a subclass of
                :class:`fiftyone.core.odm.BaseEmbeddedDocument`
            include_private (False): whether to include fields that start with
                `_` in the returned schema

        Returns:
            a dictionary mapping field names to field types
        """
        if ftype is None:
            ftype = fof.Field

        if not issubclass(ftype, fof.Field):
            raise ValueError(
                "Field type %s must be subclass of %s" % (ftype, fof.Field)
            )

        if embedded_doc_type and not issubclass(
            ftype, fof.EmbeddedDocumentField
        ):
            raise ValueError(
                "embedded_doc_type should only be specified if ftype is a"
                " subclass of %s" % fof.EmbeddedDocumentField
            )

        d = OrderedDict()
        field_names = cls._get_fields_ordered(include_private=include_private)
        for field_name in field_names:
            # pylint: disable=no-member
            field = cls._fields[field_name]
            if not isinstance(cls._fields[field_name], ftype):
                continue

            if embedded_doc_type and not issubclass(
                field.document_type, embedded_doc_type
            ):
                continue

            d[field_name] = field

        return d

    def has_field(self, field_name):
        """Determines whether the sample has a field of the given name.

        Args:
            field_name: the field name

        Returns:
            True/False
        """
        # pylint: disable=no-member
        return field_name in self._fields

    def get_field(self, field_name):
        """Gets the field of the sample.

        Args:
            field_name: the field name

        Returns:
            the field value

        Raises:
            AttributeError: if the field does not exist
        """
        if not self.has_field(field_name):
            raise AttributeError("Sample has no field '%s'" % field_name)

        return getattr(self, field_name)

    @classmethod
    def add_field(
        cls,
        field_name,
        ftype,
        embedded_doc_type=None,
        subfield=None,
        save=True,
    ):
        """Adds a new field to the sample.

        Args:
            field_name: the field name
            ftype: the field type to create. Must be a subclass of
                :class:`fiftyone.core.fields.Field`
            embedded_doc_type (None): the
                :class:`fiftyone.core.odm.BaseEmbeddedDocument` type of the
                field. Used only when ``ftype`` is an embedded
                :class:`fiftyone.core.fields.EmbeddedDocumentField`
            subfield (None): the type of the contained field. Used only when
                ``ftype`` is a :class:`fiftyone.core.fields.ListField` or
                :class:`fiftyone.core.fields.DictField`
            save (True): whether to persist the field on the dataset document.
                Set to ``False`` only when reloading a dataset from the
                database

        Raises:
            ValueError: if a field of the same name already exists
        """
        # pylint: disable=no-member
        if field_name in cls._fields:
            raise ValueError("Field '%s' already exists" % field_name)

        field = _create_field(
            field_name,
            ftype,
            embedded_doc_type=embedded_doc_type,
            subfield=subfield,
        )

        cls._fields[field_name] = field
        cls._fields_ordered += (field_name,)
        try:
            if issubclass(cls, DatasetSampleDocument):
                # Only set the attribute if it is a class
                setattr(cls, field_name, field)
        except TypeError:
            # Instance, not class, so do not `setattr`
            pass

        if save:
            # Update dataset meta class
            dataset_doc = DatasetDocument.objects.get(
                sample_collection_name=cls.__name__
            )
            field = cls._fields[field_name]
            sample_field = SampleFieldDocument.from_field(field)
            dataset_doc.sample_fields.append(sample_field)
            dataset_doc.save()

    @classmethod
    def add_implied_field(cls, field_name, value):
        """Adds the field to the sample, inferring the field type from the
        provided value.

        Args:
            field_name: the field name
            value: the field value

        Raises:
            ValueError: if a field of the same name already exists
        """
        # pylint: disable=no-member
        if field_name in cls._fields:
            raise ValueError("Field '%s' already exists" % field_name)

        cls.add_field(field_name, **_get_implied_field_kwargs(value))

    def set_field(self, field_name, value, create=False):
        """Sets the value of a field of the sample.

        Args:
            field_name: the field name
            value: the field value
            create (False): whether to create the field if it does not exist

        Raises:
            ValueError: if the field name is invalid or does not exist and
                ``create == False``
        """
        if field_name.startswith("_"):
            raise ValueError(
                "Invalid field name: '%s'. Field names cannot start with '_'"
                % field_name
            )

        if hasattr(self, field_name) and not self.has_field(field_name):
            raise ValueError("Cannot use reserved keyword '%s'" % field_name)

        if not self.has_field(field_name):
            if create:
                self.add_implied_field(field_name, value)
            else:
                msg = "Sample does not have field '%s'." % field_name
                if value is not None:
                    # don't report this when clearing a field.
                    msg += " Use `create=True` to create a new field."
                raise ValueError(msg)

        self.__setattr__(field_name, value)

    def clear_field(self, field_name):
        """Clears the value of a field of the sample.

        Args:
            field_name: the field name
        """
        self.set_field(field_name, None, create=False)

    @classmethod
    @no_delete_default_field
    def delete_field(cls, field_name):
        """Deletes the field from the sample.

        If the sample is in a dataset, the field will be removed from all
        samples in the dataset.

        Args:
            field_name: the field name

        Raises:
            AttributeError: if the field does not exist
        """
        try:
            # Delete from all samples
            # pylint: disable=no-member
            cls.objects.update(**{"unset__%s" % field_name: None})
        except InvalidQueryError:
            raise AttributeError("Sample has no field '%s'" % field_name)

        # Remove from dataset
        # pylint: disable=no-member
        del cls._fields[field_name]
        cls._fields_ordered = tuple(
            fn for fn in cls._fields_ordered if fn != field_name
        )
        delattr(cls, field_name)

        # Update dataset meta class
        dataset_doc = DatasetDocument.objects.get(
            sample_collection_name=cls.__name__
        )
        dataset_doc.sample_fields = [
            sf for sf in dataset_doc.sample_fields if sf.name != field_name
        ]
        dataset_doc.save()

    def _update(self, object_id, update_doc, filtered_fields=None, **kwargs):
        """Updates an existing document.

        Helper method; should only be used inside
        :meth:`DatasetSampleDocument.save`.
        """
        updated_existing = True

        collection = self._get_collection()

        select_dict = {"_id": object_id}

        # Updates that target elements of filtered list fields must be
        # applied by element ID, not positional index
        extra_updates = self._extract_extra_updates(
            update_doc, filtered_fields
        )

        if update_doc:
            result = collection.update_one(
                select_dict, update_doc, upsert=True
            ).raw_result
            if result is not None:
                updated_existing = result.get("updatedExisting")

        for update, element_id in extra_updates:
            result = collection.update_one(
                select_dict,
                update,
                array_filters=[{"element._id": element_id}],
                upsert=True,
            ).raw_result
            if result is not None:
                updated_existing = updated_existing and result.get(
                    "updatedExisting"
                )

        return updated_existing

    def _extract_extra_updates(self, update_doc, filtered_fields):
        """Extracts updates for filtered list fields that need to be updated
        by ID, not relative position (index).

        Args:
            update_doc: the MongoDB update document; ``$set`` entries for
                filtered fields are removed in-place and returned separately
            filtered_fields: an iterable of filtered list field paths, or None

        Returns:
            a list of ``(update_dict, element_id)`` tuples
        """
        extra_updates = []

        #
        # Check for illegal modifications
        # Match the list, or an indexed item in the list, but not a field
        # of an indexed item of the list:
        #   my_detections.detections          <- MATCH
        #   my_detections.detections.1        <- MATCH
        #   my_detections.detections.1.label  <- NO MATCH
        #
        if filtered_fields:
            for d in update_doc.values():
                for k in d.keys():
                    for ff in filtered_fields:
                        # Fix: the original used `k.lstrip(ff)`, which strips
                        # a *character set*, not a prefix, and could misfire
                        # for sibling fields sharing a prefix. Slice off the
                        # prefix and the joining "." explicitly instead
                        suffix = k[len(ff):].lstrip(".")
                        if k.startswith(ff) and not suffix.count("."):
                            raise ValueError(
                                "Modifying root of filtered list field '%s' "
                                "is not allowed" % k
                            )

        if filtered_fields and "$set" in update_doc:
            d = update_doc["$set"]
            del_keys = []

            for k, v in d.items():
                filtered_field = None
                for ff in filtered_fields:
                    if k.startswith(ff):
                        filtered_field = ff
                        break

                if filtered_field:
                    element_id, el_filter = self._parse_id_and_array_filter(
                        k, filtered_field
                    )
                    extra_updates.append(
                        ({"$set": {el_filter: v}}, element_id)
                    )

                    del_keys.append(k)

            for k in del_keys:
                del d[k]

            if not update_doc["$set"]:
                del update_doc["$set"]

        return extra_updates

    def _parse_id_and_array_filter(self, list_element_field, filtered_field):
        """Converts the ``list_element_field`` and ``filtered_field`` to an
        element object ID and array filter.

        Example::

            Input:
                list_element_field = "test_dets.detections.1.label"
                filtered_field = "test_dets.detections"

            Output:
                ObjectID("5f2062bf27c024654f5286a0")
                "test_dets.detections.$[element].label"
        """
        # Walk down to the filtered list itself
        el = self
        for field_name in filtered_field.split("."):
            el = el[field_name]

        # Fix: the original used `lstrip(filtered_field)` here, which strips
        # a character set rather than the prefix. Slice off the prefix and the
        # joining "." to obtain ``[index, *subfields]``
        el_fields = (
            list_element_field[len(filtered_field):].lstrip(".").split(".")
        )
        idx = int(el_fields.pop(0))
        el = el[idx]
        el_filter = ".".join([filtered_field, "$[element]"] + el_fields)

        return el._id, el_filter

    @classmethod
    def _get_fields_ordered(cls, include_private=False):
        """Returns the ordered field names, optionally dropping `_`-fields."""
        if include_private:
            return cls._fields_ordered
        return tuple(f for f in cls._fields_ordered if not f.startswith("_"))
class NoDatasetSampleDocument(SampleDocument):
    """Backing document for samples that have not been added to a dataset.

    Field values live in an in-memory ``OrderedDict`` (``self._data``) rather
    than in MongoDB; the default schema is borrowed from
    :class:`DatasetSampleDocument`.
    """

    # pylint: disable=no-member
    # Borrow the default schema from the dataset-backed document class
    default_fields = DatasetSampleDocument._fields
    default_fields_ordered = default_sample_fields(include_private=True)

    def __init__(self, **kwargs):
        """Initializes the sample with the default fields plus any extra
        ``kwargs`` as custom fields.
        """
        self._data = OrderedDict()
        filepath = kwargs.get("filepath", None)

        for field_name in self.default_fields_ordered:

            value = kwargs.pop(field_name, None)

            # `_rand` is always derived from the filepath (deterministic)
            if field_name == "_rand":
                value = _generate_rand(filepath=filepath)

            if value is None:
                value = self._get_default(self.default_fields[field_name])

            # Normalize the media path to an absolute path
            if field_name == "filepath":
                value = os.path.abspath(os.path.expanduser(value))

            self._data[field_name] = value

        # Any remaining kwargs become custom fields
        self._data.update(kwargs)

    def __getattr__(self, name):
        # Field values take precedence; fall back to normal attribute lookup
        try:
            return self._data[name]
        except Exception:
            pass

        return super().__getattribute__(name)

    def __setattr__(self, name, value):
        # Private attributes bypass the field machinery
        if name.startswith("_"):
            super().__setattr__(name, value)
            return

        has_field = self.has_field(name)

        if hasattr(self, name) and not has_field:
            super().__setattr__(name, value)
            return

        if not has_field:
            raise ValueError(
                "Adding sample fields using the `sample.field = value` syntax "
                "is not allowed; use `sample['field'] = value` instead"
            )

        self._data[name] = value

    @property
    def id(self):
        # No database ID until the sample is added to a dataset
        return None

    def _get_repr_fields(self):
        return ("id",) + self.field_names

    @property
    def field_names(self):
        """An ordered tuple of the public field names of this sample."""
        return tuple(k for k in self._data.keys() if not k.startswith("_"))

    @staticmethod
    def _get_default(field):
        """Returns the default value for the given mongoengine field.

        Raises:
            ValueError: if the field is non-nullable and has no default
        """
        if field.null:
            return None

        if field.default is not None:
            value = field.default

            if callable(value):
                value = value()

            # Copy container defaults so instances do not share state
            if isinstance(value, list) and value.__class__ != list:
                value = list(value)
            elif isinstance(value, tuple) and value.__class__ != tuple:
                value = tuple(value)
            elif isinstance(value, dict) and value.__class__ != dict:
                value = dict(value)
            return value

        raise ValueError("Field '%s' has no default" % field)

    def has_field(self, field_name):
        """Determines whether the sample has a field of the given name."""
        try:
            return field_name in self._data
        except AttributeError:
            # If `_data` is not initialized
            return False

    def get_field(self, field_name):
        """Gets the field of the sample.

        Raises:
            AttributeError: if the field does not exist
        """
        if not self.has_field(field_name):
            raise AttributeError("Sample has no field '%s'" % field_name)

        return getattr(self, field_name)

    def set_field(self, field_name, value, create=False):
        """Sets the value of a field of the sample.

        Raises:
            ValueError: if the field name is invalid or does not exist and
                ``create == False``
        """
        if field_name.startswith("_"):
            raise ValueError(
                "Invalid field name: '%s'. Field names cannot start with '_'"
                % field_name
            )

        if hasattr(self, field_name) and not self.has_field(field_name):
            raise ValueError("Cannot use reserved keyword '%s'" % field_name)

        if not self.has_field(field_name):
            if create:
                # dummy value so that it is identified by __setattr__
                self._data[field_name] = None
            else:
                msg = "Sample does not have field '%s'." % field_name
                if value is not None:
                    # don't report this when clearing a field.
                    msg += " Use `create=True` to create a new field."
                raise ValueError(msg)

        self.__setattr__(field_name, value)

    def clear_field(self, field_name):
        """Clears the field: default fields are reset to their defaults, and
        custom fields are removed entirely.
        """
        if field_name in self.default_fields:
            default_value = self._get_default(self.default_fields[field_name])
            self.set_field(field_name, default_value)
        else:
            self._data.pop(field_name, None)

    def to_dict(self, extended=False):
        """Serializes the sample to a dictionary.

        Args:
            extended (False): whether to use MongoDB extended JSON for binary
                payloads (numpy arrays)

        Returns:
            a dict
        """
        d = {}
        for k, v in self._data.items():
            if hasattr(v, "to_dict"):
                # Embedded document
                d[k] = v.to_dict(extended=extended)
            elif isinstance(v, np.ndarray):
                # Must handle arrays separately, since they are non-primitives

                # @todo cannot support serializing 1D arrays as lists because
                # there is no way for `from_dict` to know that the data should
                # be converted back to a numpy array
                #
                # if v.ndim == 1:
                #     d[k] = v.tolist()
                #
                v_binary = fou.serialize_numpy_array(v)
                if extended:
                    # @todo improve this
                    d[k] = json.loads(json_util.dumps(Binary(v_binary)))
                else:
                    d[k] = v_binary
            else:
                # JSON primitive
                d[k] = v

        return d

    @classmethod
    def from_dict(cls, d, extended=False):
        """Constructs a sample from a dictionary produced by :meth:`to_dict`.

        Args:
            d: the serialized dict
            extended (False): whether binary payloads use MongoDB extended
                JSON format

        Returns:
            a :class:`NoDatasetSampleDocument`
        """
        kwargs = {}
        for k, v in d.items():
            if isinstance(v, dict):
                if "_cls" in v:
                    # Serialized embedded document
                    _cls = getattr(fo, v["_cls"])
                    kwargs[k] = _cls.from_dict(v)
                elif "$binary" in v:
                    # Serialized array in extended format
                    binary = json_util.loads(json.dumps(v))
                    kwargs[k] = fou.deserialize_numpy_array(binary)
                else:
                    kwargs[k] = v
            elif isinstance(v, six.binary_type):
                # Serialized array in non-extended format
                kwargs[k] = fou.deserialize_numpy_array(v)
            else:
                kwargs[k] = v

        return cls(**kwargs)

    def save(self):
        """Saves the sample to the database.

        Because the sample does not belong to a dataset, this method does
        nothing.
        """
        pass

    def reload(self):
        """Reloads the sample from the database.

        Because the sample does not belong to a dataset, this method does
        nothing.
        """
        pass

    def delete(self):
        """Deletes the sample from the database.

        Because the sample does not belong to a dataset, this method does
        nothing.
        """
        pass
def _get_implied_field_kwargs(value):
    """Infers ``add_field`` keyword arguments from a concrete field value.

    The order of the type checks is significant: ``bool`` must be tested
    before integers, and integers before generic numbers.

    Raises:
        TypeError: if the value's type cannot be mapped to a field type
    """
    if isinstance(value, BaseEmbeddedDocument):
        return {
            "ftype": fof.EmbeddedDocumentField,
            "embedded_doc_type": type(value),
        }

    # Scalar/sequence checks, from most to least specific
    checks = (
        (bool, fof.BooleanField),
        (six.integer_types, fof.IntField),
        (numbers.Number, fof.FloatField),
        (six.string_types, fof.StringField),
        ((list, tuple), fof.ListField),
    )
    for types, ftype in checks:
        if isinstance(value, types):
            return {"ftype": ftype}

    if isinstance(value, np.ndarray):
        # 1D arrays are vectors; higher-rank arrays are generic arrays
        if value.ndim == 1:
            return {"ftype": fof.VectorField}

        return {"ftype": fof.ArrayField}

    if isinstance(value, dict):
        return {"ftype": fof.DictField}

    raise TypeError("Unsupported field value '%s'" % type(value))
def _create_field(field_name, ftype, embedded_doc_type=None, subfield=None):
    """Instantiates a mongoengine field named ``field_name`` of type ``ftype``.

    Args:
        field_name: the name (and ``db_field``) of the field
        ftype: a :class:`fiftyone.core.fields.Field` subclass
        embedded_doc_type (None): the document type, for embedded document
            fields
        subfield (None): the contained field type, for list/dict fields

    Raises:
        ValueError: if ``ftype`` is not a ``Field`` subclass
    """
    if not issubclass(ftype, fof.Field):
        raise ValueError(
            "Invalid field type '%s'; must be a subclass of '%s'"
            % (ftype, fof.Field)
        )

    kwargs = {"db_field": field_name}
    if issubclass(ftype, fof.EmbeddedDocumentField):
        kwargs["document_type"] = embedded_doc_type
        kwargs["null"] = True
    elif issubclass(ftype, (fof.ListField, fof.DictField)):
        if subfield is not None:
            kwargs["field"] = subfield
    else:
        kwargs["null"] = True

    field = ftype(**kwargs)
    field.name = field_name
    return field
| 31.192077 | 79 | 0.588808 |
907f3be18883c1b803a297243285970e02649793 | 825 | py | Python | userbot/plugins/spam.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | null | null | null | userbot/plugins/spam.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | null | null | null | userbot/plugins/spam.py | meaall-com/Telebot | a08193ae6c3e5814b309d079e95c4951eafcbc19 | [
"MIT"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.b (the "License");
# you may not use this file except in compliance with the License.
#
from asyncio import wait
from telethon import events
@borg.on(events.NewMessage(pattern=r"\.spam", outgoing=True))
async def spammer(e):
    """Handles outgoing ``.spam <count> <message>`` commands.

    Sends ``<message>`` ``<count>`` times concurrently, deletes the
    triggering command message, and optionally reports to the logger group.
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        message = e.text
        # NOTE(review): the count is parsed from fixed positions [6:8], so
        # only 1-2 digit counts work, and with a 2-digit count the spammed
        # text keeps a leading space — confirm whether this is intended
        counter = int(message[6:8])
        spam_message = str(e.text[8:])
        # Fire all sends concurrently and wait for completion
        await wait(
            [e.respond(spam_message) for i in range(counter)]
        )
        await e.delete()
        if LOGGER:
            # LOGGER / LOGGER_GROUP are presumably userbot config globals —
            # TODO confirm where they are defined
            await e.client.send_message(
                LOGGER_GROUP,
                "#SPAM \n\n"
                "تم تنفيذ البريد العشوائي بنجاح"
            )
a63d09242fddfc66682489a0bf90025b3ef4e9e2 | 7,590 | py | Python | birdseye/pfrnn/pfrnn_utils.py | emmair/BirdsEye | 3ca1814b8f007388cfabf7ec928af2316a395e1a | [
"Apache-2.0"
] | null | null | null | birdseye/pfrnn/pfrnn_utils.py | emmair/BirdsEye | 3ca1814b8f007388cfabf7ec928af2316a395e1a | [
"Apache-2.0"
] | null | null | null | birdseye/pfrnn/pfrnn_utils.py | emmair/BirdsEye | 3ca1814b8f007388cfabf7ec928af2316a395e1a | [
"Apache-2.0"
] | null | null | null | # original source: https://github.com/Yusufma03/pfrnns/blob/master/pfrnns.py
import torch
from torch import nn
import numpy as np
class PFRNNBaseCell(nn.Module):
    r"""
    This is the base class for the PF-RNNs. We implement the shared functions here, including
    1. soft-resampling
    2. reparameterization trick
    3. obs_extractor o_t(x_t)
    4. control_extractor u_t(x_t)

    All particles in PF-RNNs are processed in parallel to benefit from GPU parallelization.
    """

    def __init__(self, num_particles, input_size, hidden_size, ext_obs, ext_act, resamp_alpha):
        r"""
        :param num_particles: number of particles for a PF-RNN
        :param input_size: the size of input x_t
        :param hidden_size: the size of the hidden particle h_t^i
        :param ext_obs: the size for o_t(x_t)
        :param ext_act: the size for u_t(x_t)
        :param resamp_alpha: the control parameter \alpha for soft-resampling.
        We use the importance sampling with a proposal distribution q(i) = \alpha w_t^i + (1 - \alpha) (1 / K)
        """
        super(PFRNNBaseCell, self).__init__()
        self.num_particles = num_particles
        self.input_size = input_size
        self.h_dim = hidden_size
        self.ext_obs = ext_obs
        self.ext_act = ext_act
        self.resamp_alpha = resamp_alpha

        # o_t(x_t): observation feature extractor
        self.obs_extractor = nn.Sequential(
            nn.Linear(self.input_size, self.ext_obs),
            nn.LeakyReLU()
        )

        # u_t(x_t): control/action feature extractor
        self.act_extractor = nn.Sequential(
            nn.Linear(self.input_size, self.ext_act),
            nn.LeakyReLU()
        )

        # Maps (obs features, particle hidden state) -> per-particle log-weight
        self.fc_obs = nn.Linear(self.ext_obs + self.h_dim, 1)
        # Normalizes across the particle dimension
        self.batch_norm = nn.BatchNorm1d(self.num_particles)

    def resampling(self, particles, prob):
        r"""
        The implementation of soft-resampling. We implement soft-resampling in a batch-manner.

        :param particles: \{(h_t^i, c_t^i)\}_{i=1}^K for PF-LSTM and \{h_t^i\}_{i=1}^K for PF-GRU.
        each tensor has a shape: [num_particles * batch_size, h_dim]
        :param prob: weights for particles in the log space. Each tensor has a shape: [num_particles * batch_size, 1]
        :return: resampled particles and weights according to soft-resampling scheme.
        """
        # Proposal q(i) = alpha * w_i + (1 - alpha) / K
        resamp_prob = self.resamp_alpha * torch.exp(prob) + (1 -
                                                             self.resamp_alpha) * 1 / self.num_particles
        resamp_prob = resamp_prob.view(self.num_particles, -1)
        # Sample K particle indices per batch element from the proposal
        indices = torch.multinomial(resamp_prob.transpose(0, 1),
                                    num_samples=self.num_particles, replacement=True)
        batch_size = indices.size(0)
        indices = indices.transpose(1, 0).contiguous()
        # Convert per-batch particle indices into flat row indices of the
        # [num_particles * batch_size, h_dim] particle tensor
        offset = torch.arange(batch_size).type(torch.LongTensor).unsqueeze(0)
        if torch.cuda.is_available():
            offset = offset.cuda()
        indices = offset + indices * batch_size
        flatten_indices = indices.view(-1, 1).squeeze()

        # PFLSTM
        if type(particles) == tuple:
            particles_new = (particles[0][flatten_indices],
                             particles[1][flatten_indices])
        # PFGRU
        else:
            particles_new = particles[flatten_indices]

        # Importance-weight correction: w_new = w / q, then renormalize in
        # log space across the particle dimension
        prob_new = torch.exp(prob.view(-1, 1)[flatten_indices])
        prob_new = prob_new / (self.resamp_alpha * prob_new + (1 -
                                                               self.resamp_alpha) / self.num_particles)
        prob_new = torch.log(prob_new).view(self.num_particles, -1, 1)
        prob_new = prob_new - torch.logsumexp(prob_new, dim=0, keepdim=True)
        prob_new = prob_new.view(-1, 1)

        return particles_new, prob_new

    def reparameterize(self, mu, var):
        """
        Reparameterization trick

        :param mu: mean
        :param var: variance (unconstrained; mapped through softplus)
        :return: new samples from the Gaussian distribution
        """
        std = torch.nn.functional.softplus(var)
        if torch.cuda.is_available():
            eps = torch.cuda.FloatTensor(std.shape).normal_()
        else:
            eps = torch.FloatTensor(std.shape).normal_()
        return mu + eps * std
class PFLSTMCell(PFRNNBaseCell):
    """Particle-filter LSTM cell: an LSTM update per particle followed by
    observation-weighted soft-resampling.
    """

    def __init__(self, num_particles, input_size, hidden_size, ext_obs, ext_act, resamp_alpha):
        super().__init__(num_particles, input_size,
                         hidden_size, ext_obs, ext_act, resamp_alpha)

        # Input-to-hidden and hidden-to-hidden maps producing the 5 gate
        # pre-activations (f, i, o, mu, var)
        self.fc_ih = nn.Linear(self.ext_act, 5 * self.h_dim)
        self.fc_hh = nn.Linear(self.h_dim, 5 * self.h_dim)

    def forward(self, input_, hx):
        """One step: ``hx`` is ``(h0, c0, p0)`` with particles flattened into
        the batch dimension; returns the resampled ``(h1, c1, p1)``.
        """
        h0, c0, p0 = hx
        batch_size = h0.size(0)  # NOTE(review): assigned but unused
        wh_b = self.fc_hh(h0)

        # by default assume input_ = (obs, control)
        obs = self.obs_extractor(input_)
        act = self.act_extractor(input_)

        wi = self.fc_ih(act)
        s = wh_b + wi
        # Split into forget/input/output gates and the stochastic cell
        # candidate parameters (mu, var)
        f, i, o, mu, var = torch.split(s, split_size_or_sections=self.h_dim,
                                       dim=1)

        # Sample the candidate and batch-normalize across particles
        g_ = self.reparameterize(mu, var).view(
            self.num_particles, -1, self.h_dim).transpose(0, 1).contiguous()
        g = self.batch_norm(g_).transpose(
            0, 1).contiguous().view(-1, self.h_dim)

        c1 = torch.sigmoid(f) * c0 + torch.sigmoid(i) * \
            nn.functional.leaky_relu(g)
        h1 = torch.sigmoid(o) * torch.tanh(c1)

        # Observation log-likelihood per particle
        att = torch.cat((obs, h1), dim=1)
        logpdf_obs = self.fc_obs(att)
        # logpdf_obs = nn.functional.relu6(logpdf_obs).view(self.num_particles, -1, 1) - 3 # hack to shape the range obs logpdf_obs into [-3, 3] for numerical stability

        # Bayes update of log-weights, renormalized over particles
        p1 = logpdf_obs.view(self.num_particles, -1, 1) + \
            p0.view(self.num_particles, -1, 1)
        p1 = p1 - torch.logsumexp(p1, dim=0, keepdim=True)

        (h1, c1), p1 = self.resampling((h1, c1), p1)

        return h1, c1, p1
class PFGRUCell(PFRNNBaseCell):
    """Particle-filter GRU cell: a GRU update per particle followed by
    observation-weighted soft-resampling.
    """

    def __init__(self, num_particles, input_size, hidden_size, ext_obs, ext_act, resamp_alpha):
        super().__init__(num_particles, input_size,
                         hidden_size, ext_obs, ext_act, resamp_alpha)

        # Update gate, reset gate, and stochastic candidate (mu, var)
        self.fc_z = nn.Linear(self.h_dim + self.ext_act, self.h_dim)
        self.fc_r = nn.Linear(self.h_dim + self.ext_act, self.h_dim)
        self.fc_n = nn.Linear(self.h_dim + self.ext_act, self.h_dim * 2)

    def forward(self, input_, hx):
        """One step: ``hx`` is ``(h0, p0)`` with particles flattened into the
        batch dimension; returns the resampled ``(h1, p1)``.
        """
        h0, p0 = hx

        # by default assume input = (obs, control)
        obs = self.obs_extractor(input_)
        act = self.act_extractor(input_)

        z = torch.sigmoid(self.fc_z(torch.cat((h0, act), dim=1)))
        r = torch.sigmoid(self.fc_r(torch.cat((h0, act), dim=1)))
        n = self.fc_n(torch.cat((r * h0, act), dim=1))

        # Sample the candidate state via the reparameterization trick
        mu_n, var_n = torch.split(n, split_size_or_sections=self.h_dim, dim=1)
        n = self.reparameterize(mu_n, var_n)

        # Batch-normalize across the particle dimension
        n = n.view(self.num_particles, -1, self.h_dim).transpose(0,
                                                                 1).contiguous()
        n = self.batch_norm(n)
        n = n.transpose(0, 1).contiguous().view(-1, self.h_dim)
        n = nn.functional.leaky_relu(n)

        h1 = (1 - z) * n + z * h0

        # Observation log-likelihood per particle
        att = torch.cat((h1, obs), dim=1)
        logpdf_obs = self.fc_obs(att)
        # logpdf_obs = nn.functional.relu6(logpdf_obs) - 3 # hack to shape the range obs logpdf_obs into [-3, 3] for numerical stability
        p1 = logpdf_obs + p0

        # Renormalize log-weights over particles, then soft-resample
        p1 = p1.view(self.num_particles, -1, 1)
        p1 = p1 - torch.logsumexp(p1, dim=0, keepdim=True)

        h1, p1 = self.resampling(h1, p1)

        return h1, p1
157d74dbd2b5dc40cc18f3be35e2dfc6a94cfd2b | 1,291 | py | Python | 2d Brillouin Zone/hex_crystal.py | dongheig/hedhywl | c3293699e4f279b48dfa62f695144bcb9f0e85dd | [
"MIT"
] | 19 | 2018-06-15T11:49:46.000Z | 2022-02-11T01:11:21.000Z | 2d Brillouin Zone/hex_crystal.py | dongheig/hedhywl | c3293699e4f279b48dfa62f695144bcb9f0e85dd | [
"MIT"
] | null | null | null | 2d Brillouin Zone/hex_crystal.py | dongheig/hedhywl | c3293699e4f279b48dfa62f695144bcb9f0e85dd | [
"MIT"
] | 9 | 2018-07-04T10:52:53.000Z | 2022-01-17T09:21:31.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Model of simple hexagonal lattice """
import math
from crystal import Crystal
from sympy.geometry import Point
class HexCrystal(Crystal):
    """ Model of simple hexagonal lattice """

    def __init__(self, a, size, center):
        # a: lattice constant (hexagon edge spacing, in the same pixel-like
        # units used by _translate)
        self._a = a
        super().__init__(size, center)

    def _translate(self, pos_x, pos_y):
        """Return the six vertices of the hexagon at lattice cell
        ``(pos_x, pos_y)``, with odd rows offset by half a cell width.
        """
        width = self._a
        points = []
        half_width = int(width / 2)
        # Hexagon edge length and the vertical offset of the top/bottom
        # vertices; values are truncated to ints (pixel coordinates)
        side_length = int(width * math.sqrt(3))
        height = int(math.sqrt(side_length ** 2 - width ** 2 / 4))
        # Convert lattice coordinates to geometric coordinates
        pos_y *= (side_length + height)
        pos_x *= width
        # NOTE(review): parity is taken on the *scaled* pos_y, so whether the
        # half-width offset alternates per row depends on
        # (side_length + height) being odd — TODO confirm this is intended
        half_width_periodic = half_width if pos_y % 2 == 0 else 0
        # Left/right pairs of the two horizontal edges
        points.append(Point(pos_x + half_width_periodic,
                            pos_y))
        points.append(Point(pos_x + half_width_periodic + width,
                            pos_y))
        points.append(Point(pos_x + half_width_periodic,
                            pos_y + side_length))
        points.append(Point(pos_x + half_width_periodic + width,
                            pos_y + side_length))
        # Bottom and top vertices
        points.append(Point(pos_x + half_width_periodic + half_width,
                            pos_y - height))
        points.append(Point(pos_x + half_width_periodic + half_width,
                            pos_y + side_length + height))
        return points
765c625b93edde0faf46671652fe39babcb63ddf | 1,230 | py | Python | scripts/prepare_index.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | [
"MIT"
] | 32 | 2020-07-10T04:50:03.000Z | 2021-11-26T16:57:01.000Z | scripts/prepare_index.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | [
"MIT"
] | 5 | 2020-07-10T07:55:34.000Z | 2021-11-24T02:45:32.000Z | scripts/prepare_index.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | [
"MIT"
] | 3 | 2020-08-20T15:10:35.000Z | 2022-02-20T16:31:01.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : prepare_index.py
# Author : Honghua Dong
# Email : dhh19951@gmail.com
# Date : 01/14/2020
#
# Distributed under terms of the MIT license.
'''
To prepare the index file of the data_dir to pkl file, for later usage.
# Usage
python3 prepare_index.py -d $DATASET_DIR -o $OUTPUT_FILE_NAME -filter $FILTER
'''
import argparse
import glob
import os.path as osp
import pickle
# CLI: gather file names under --data-dir that match --filter and pickle
# the list of their base names into --output-file.
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', '-d', type=str, help='the dataset dir')
parser.add_argument('--output-file', '-o', type=str, required=True,
                    help='the output file')
parser.add_argument('--filter', '-filter', type=str, default='*',
                    help='the filter (default: all files)')
parser.add_argument('--verbose', '-v', action='store_true',
                    help='print the result')
# NOTE: parsed at import time, so importing this module requires valid
# command-line arguments on sys.argv.
args = parser.parse_args()
def main():
    """Glob files in ``args.data_dir`` matching ``args.filter`` and pickle
    the list of their base names to ``args.output_file``.

    Reads the module-level ``args`` produced by argparse at import time.
    """
    file_names = glob.glob(osp.join(args.data_dir, args.filter))
    print('{} files listed'.format(len(file_names)))
    # osp.basename is a plain function; no lambda wrapper is needed.
    base_names = [osp.basename(name) for name in file_names]
    if args.verbose:
        print(base_names)
    with open(args.output_file, 'wb') as f:
        pickle.dump(base_names, f)


if __name__ == '__main__':
    main()
| 26.73913 | 77 | 0.680488 |
ec7d3241a2832a9d8879480be400bba02c906a71 | 8,106 | py | Python | mvpa2/tests/test_zscoremapper.py | nno/PyMVPA | a125596bf81b8e9848768852f697bd3cff9674c4 | [
"MIT"
] | 227 | 2015-01-17T20:13:54.000Z | 2022-01-26T21:14:30.000Z | mvpa2/tests/test_zscoremapper.py | nno/PyMVPA | a125596bf81b8e9848768852f697bd3cff9674c4 | [
"MIT"
] | 364 | 2015-01-05T21:55:09.000Z | 2021-09-09T20:37:55.000Z | mvpa2/tests/test_zscoremapper.py | nno/PyMVPA | a125596bf81b8e9848768852f697bd3cff9674c4 | [
"MIT"
] | 111 | 2015-01-06T19:26:41.000Z | 2022-01-26T21:14:31.000Z | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA ZScore mapper"""
from mvpa2.base import externals
from mvpa2.support.copy import deepcopy
import numpy as np
from mvpa2.datasets.base import dataset_wizard
from mvpa2.mappers.zscore import ZScoreMapper, zscore
from mvpa2.testing.tools import assert_array_almost_equal, assert_array_equal, \
assert_equal, assert_raises, ok_, nodebug
from mvpa2.misc.support import idhash
from mvpa2.testing.datasets import datasets
def test_mapper_vs_zscore():
    """Test by comparing to results of elderly z-score function.

    Checks that ZScoreMapper neither mutates its input during train/forward
    (verified via idhash snapshots) and that its output matches the
    in-place zscore() helper.
    """
    # data: 40 sample feature line in 20d space (40x20; samples x features)
    dss = [
        dataset_wizard(np.concatenate(
            [np.arange(40) for i in range(20)]).reshape(20, -1).T,
            targets=1, chunks=1),
    ] + list(datasets.values())   # list(): dict.values() is a view, not a
                                  # list, on Python 3 and cannot be added
                                  # to a list directly

    for ds in dss:
        ds1 = deepcopy(ds)
        ds2 = deepcopy(ds)
        zsm = ZScoreMapper(chunks_attr=None)
        # forwarding before training must fail
        assert_raises(RuntimeError, zsm.forward, ds1.samples)
        idhashes = (idhash(ds1), idhash(ds1.samples))
        zsm.train(ds1)
        idhashes_train = (idhash(ds1), idhash(ds1.samples))
        # training must not modify the dataset
        assert_equal(idhashes, idhashes_train)

        # forward dataset
        ds1z_ds = zsm.forward(ds1)
        idhashes_forwardds = (idhash(ds1), idhash(ds1.samples))
        # must not modify samples in place!
        assert_equal(idhashes, idhashes_forwardds)

        # forward samples explicitly
        ds1z = zsm.forward(ds1.samples)
        idhashes_forward = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_forward)

        # mapper output must agree with the legacy in-place zscore()
        zscore(ds2, chunks_attr=None)
        assert_array_almost_equal(ds1z, ds2.samples)
        assert_array_equal(ds1.samples, ds.samples)
@nodebug(['ID_IN_REPR', 'MODULE_IN_REPR'])
def test_zcore_repr():
    """Smoke-test that repr() of a ZScoreMapper eval()s back into one.

    No value comparison is done -- just a sanity round-trip.
    """
    mappers = (ZScoreMapper(chunks_attr=None),
               ZScoreMapper(params=(3, 1)),
               ZScoreMapper())
    for mapper in mappers:
        rebuilt = eval(repr(mapper))
        ok_(isinstance(rebuilt, ZScoreMapper))
def test_zscore():
    """Test z-scoring transformation.

    Covers: in-place zscore() on datasets and ndarrays, per-label baseline
    estimation (param_est), ZScoreMapper default vs explicit params,
    chunk-wise normalization, and list-valued mean/std parameters.
    """
    # dataset: mean=2, std=1
    samples = np.array((0, 1, 3, 4, 2, 2, 3, 1, 1, 3, 3, 1, 2, 2, 2, 2)).\
        reshape((16, 1))
    data = dataset_wizard(samples.copy(), targets=range(16), chunks=[0] * 16)
    assert_equal(data.samples.mean(), 2.0)
    assert_equal(data.samples.std(), 1.0)
    data_samples = data.samples.copy()
    zscore(data, chunks_attr='chunks')
    # copy should stay intact
    assert_equal(data_samples.mean(), 2.0)
    assert_equal(data_samples.std(), 1.0)
    # we should be able to operate on ndarrays
    # But we can't change type inplace for an array, can't we?
    # (int array cannot hold float z-scores, so this must raise)
    assert_raises(TypeError, zscore, data_samples, chunks_attr=None)
    # so lets do manually
    data_samples = data_samples.astype(float)
    zscore(data_samples, chunks_attr=None)
    assert_array_equal(data.samples, data_samples)
    # check z-scoring against the hand-computed expectation
    check = np.array([-2, -1, 1, 2, 0, 0, 1, -1, -1, 1, 1, -1, 0, 0, 0, 0],
                     dtype='float64').reshape(16, 1)
    assert_array_equal(data.samples, check)
    # chunks_attr=None must give the same (global) result here since all
    # samples are in a single chunk anyway
    data = dataset_wizard(samples.copy(), targets=range(16), chunks=[0] * 16)
    zscore(data, chunks_attr=None)
    assert_array_equal(data.samples, check)
    # check z-scoring taking set of labels as a baseline
    data = dataset_wizard(samples.copy(),
                          targets=[0, 2, 2, 2, 1] + [2] * 11,
                          chunks=[0] * 16)
    zscore(data, param_est=('targets', [0, 1]))
    assert_array_equal(samples, data.samples + 1.0)
    # check that zscore modifies in-place; only guaranteed if no upcasting is
    # necessary
    samples = samples.astype('float')
    data = dataset_wizard(samples,
                          targets=[0, 2, 2, 2, 1] + [2] * 11,
                          chunks=[0] * 16)
    zscore(data, param_est=('targets', [0, 1]))
    assert_array_equal(samples, data.samples)
    # verify that if param_est is set but chunks_attr is None
    # performs zscoring across entire dataset correctly
    data = data.copy()
    data_01 = data.select({'targets': [0, 1]})
    zscore(data_01, chunks_attr=None)
    zscore(data, chunks_attr=None, param_est=('targets', [0, 1]))
    assert_array_equal(data_01.samples, data.select({'targets': [0, 1]}))
    # these might be duplicating code above -- but twice is better than nothing
    # dataset: mean=2, std=1
    raw = np.array((0, 1, 3, 4, 2, 2, 3, 1, 1, 3, 3, 1, 2, 2, 2, 2))
    # dataset: mean=12, std=1
    raw2 = np.array((0, 1, 3, 4, 2, 2, 3, 1, 1, 3, 3, 1, 2, 2, 2, 2)) + 10
    # zscore target
    check = [-2, -1, 1, 2, 0, 0, 1, -1, -1, 1, 1, -1, 0, 0, 0, 0]
    ds = dataset_wizard(raw.copy(), targets=range(16), chunks=[0] * 16)
    pristine = dataset_wizard(raw.copy(), targets=range(16), chunks=[0] * 16)
    zm = ZScoreMapper()
    # should do global zscore by default
    zm.train(ds)                        # train
    assert_array_almost_equal(zm.forward(ds), np.transpose([check]))
    # should not modify the source
    assert_array_equal(pristine, ds)
    # if we tell it a different mean it should obey the order
    zm = ZScoreMapper(params=(3, 1))
    zm.train(ds)
    assert_array_almost_equal(zm.forward(ds), np.transpose([check]) - 1)
    assert_array_equal(pristine, ds)
    # let's look at chunk-wise z-scoring
    ds = dataset_wizard(np.hstack((raw.copy(), raw2.copy())),
                        targets=range(32),
                        chunks=[0] * 16 + [1] * 16)
    # by default chunk-wise
    zm = ZScoreMapper()
    zm.train(ds)                        # train
    assert_array_almost_equal(zm.forward(ds), np.transpose([check + check]))
    # we should be able to do that same manually, giving per-chunk params
    zm = ZScoreMapper(params={0: (2, 1), 1: (12, 1)})
    zm.train(ds)                        # train
    assert_array_almost_equal(zm.forward(ds), np.transpose([check + check]))
    # And just a smoke test for warnings reporting whenever # of
    # samples per chunk is low.
    # on 1 sample per chunk
    zds1 = ZScoreMapper(chunks_attr='chunks', auto_train=True)(
        ds[[0, -1]])
    ok_(np.all(zds1.samples == 0))      # they all should be 0
    # on 2 samples per chunk
    zds2 = ZScoreMapper(chunks_attr='chunks', auto_train=True)(
        ds[[0, 1, -10, -1]])
    assert_array_equal(np.unique(zds2.samples), [-1., 1])  # they all should be -1 or 1
    # on 3 samples per chunk -- different warning
    ZScoreMapper(chunks_attr='chunks', auto_train=True)(
        ds[[0, 1, 2, -3, -2, -1]])
    # test if std provided as a list not as an array is handled
    # properly -- should zscore all features (not just first/none
    # as it was before)
    ds = dataset_wizard(np.arange(32).reshape((8, -1)),
                        targets=range(8), chunks=[0] * 8)
    means = [0, 1, -10, 10]
    std0 = np.std(ds[:, 0])             # std deviation of first one
    stds = [std0, 10, .1, 1]
    zm = ZScoreMapper(params=(means, stds),
                      auto_train=True)
    dsz = zm(ds)
    assert_array_almost_equal((np.mean(ds, axis=0) - np.asanyarray(means)) / np.array(stds),
                              np.mean(dsz, axis=0))
    assert_array_almost_equal(np.std(ds, axis=0) / np.array(stds),
                              np.std(dsz, axis=0))
def test_zscore_withoutchunks():
    """Smoke test that zscore() works on a dataset without a chunks attribute.

    Regression check for https://github.com/PyMVPA/PyMVPA/issues/26.
    """
    from mvpa2.datasets import Dataset
    raw = np.arange(32).reshape((8, -1))
    ds = Dataset(raw.copy(), sa=dict(targets=range(8)))
    zscore(ds, chunks_attr=None)
    # samples must actually have been transformed
    assert np.any(ds.samples != raw)
    # and a summary must still be producible
    assert ds.summary() is not None
306ca6df19a47081e707c80678549827a4f022ea | 10,712 | py | Python | scripts/run_neural_coin_dice.py | SnowflyLXF/FedDICE | a63a3233037e37ae27d6c130f37ffc4b92190d5e | [
"Apache-2.0"
] | null | null | null | scripts/run_neural_coin_dice.py | SnowflyLXF/FedDICE | a63a3233037e37ae27d6c130f37ffc4b92190d5e | [
"Apache-2.0"
] | null | null | null | scripts/run_neural_coin_dice.py | SnowflyLXF/FedDICE | a63a3233037e37ae27d6c130f37ffc4b92190d5e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running CoinDICE with neural network function approximators.
The default parameters here should reproduce the published reacher results. Make
sure to generate the reacher dataset prior to running this script (see
`scripts/create_dataset.py`). Furthermore, the user will need to feed in an
appropriate `divergence_limit`, which should be set to a desired chi2 percentile
divided by the size of the offline dataset (see paper for details). For example,
if a 90% confidence interval is desired and the offline dataset is 25
trajectories of length 100, then the divergence_limit should be 2.7055 / 2500.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import os
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_v2_behavior()
import tensorflow_probability as tfp
import pickle
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from dice_rl.environments.env_policies import get_target_policy
from dice_rl.estimators.neural_coin_dice import NeuralCoinDice
from dice_rl.estimators import estimator as estimator_lib
from dice_rl.networks.value_network import ValueNetwork
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
# BEGIN GOOGLE-INTERNAL
import google3.learning.deepmind.xmanager2.client.google as xm
# END GOOGLE-INTERNAL
FLAGS = flags.FLAGS
# Environment / dataset selection.
flags.DEFINE_string('env_name', 'reacher', 'Environment name.')
flags.DEFINE_integer('seed', 0, 'Initial random seed.')
flags.DEFINE_integer('num_trajectory', 25,
                     'Number of trajectories to collect.')
flags.DEFINE_integer('max_trajectory_length', 100,
                     'Cutoff trajectory at this step.')
flags.DEFINE_float('alpha', 0.0,
                   'How close to target policy.')
flags.DEFINE_bool('tabular_obs', False,
                  'Whether to use tabular observations.')
flags.DEFINE_string('load_dir', None,
                    'Directory to load dataset from.')
flags.DEFINE_string('save_dir', None,
                    'Directory to save results to.')
# Optimization hyper-parameters.
flags.DEFINE_float('gamma', 0.99,
                   'Discount factor.')
flags.DEFINE_float('nu_learning_rate', 0.01, 'Learning rate for nu.')
flags.DEFINE_float('zeta_learning_rate', 0.001, 'Learning rate for zeta.')
flags.DEFINE_float('nu_regularizer', 0.0, 'Ortho regularization on nu.')
flags.DEFINE_float('zeta_regularizer', 0.0, 'Ortho regularization on zeta.')
flags.DEFINE_float('weight_learning_rate', 0.001, 'Learning rate for weights.')
# See module docstring: should be set to the desired chi2 percentile divided
# by the size of the offline dataset.
flags.DEFINE_float('divergence_limit', 0.001, 'Divergence limit.')
flags.DEFINE_float('algae_alpha', 0.01, 'Regularizer on Df(dpi|dD).')
flags.DEFINE_float('f_exponent', 1.5, 'Exponent for f function.')
flags.DEFINE_bool('primal_form', True,
                  'Whether to use primal form of loss for nu.')
flags.DEFINE_integer('num_steps', 200000, 'Number of training steps.')
flags.DEFINE_integer('batch_size', 4096, 'Batch size.')
def main(argv):
  """Load the offline dataset, build the nu/zeta/weight networks, and train
  NeuralCoinDice, periodically printing/logging confidence-interval
  estimates and optionally saving them to ``save_dir``."""
  env_name = FLAGS.env_name
  seed = FLAGS.seed
  tabular_obs = FLAGS.tabular_obs
  num_trajectory = FLAGS.num_trajectory
  max_trajectory_length = FLAGS.max_trajectory_length
  alpha = FLAGS.alpha
  load_dir = FLAGS.load_dir
  save_dir = FLAGS.save_dir
  gamma = FLAGS.gamma
  assert 0 <= gamma < 1.
  nu_learning_rate = FLAGS.nu_learning_rate
  zeta_learning_rate = FLAGS.zeta_learning_rate
  nu_regularizer = FLAGS.nu_regularizer
  zeta_regularizer = FLAGS.zeta_regularizer
  weight_learning_rate = FLAGS.weight_learning_rate
  divergence_limit = FLAGS.divergence_limit
  algae_alpha = FLAGS.algae_alpha
  f_exponent = FLAGS.f_exponent
  primal_form = FLAGS.primal_form
  batch_size = FLAGS.batch_size
  num_steps = FLAGS.num_steps

  target_policy = get_target_policy(load_dir, env_name, tabular_obs)

  # The dataset directory name encodes the generation parameters; it must
  # match the naming used by scripts/create_dataset.py.
  hparam_str = ('{ENV_NAME}_tabular{TAB}_alpha{ALPHA}_seed{SEED}_'
                'numtraj{NUM_TRAJ}_maxtraj{MAX_TRAJ}').format(
                    ENV_NAME=env_name,
                    TAB=tabular_obs,
                    ALPHA=alpha,
                    SEED=seed,
                    NUM_TRAJ=num_trajectory,
                    MAX_TRAJ=max_trajectory_length)
  directory = os.path.join(load_dir, hparam_str)
  print('Loading dataset.')
  dataset = Dataset.load(directory)
  all_steps = dataset.get_all_steps()
  max_reward = tf.reduce_max(all_steps.reward)
  min_reward = tf.reduce_min(all_steps.reward)
  print('num loaded steps', dataset.num_steps)
  print('num loaded total steps', dataset.num_total_steps)
  print('num loaded episodes', dataset.num_episodes)
  print('num loaded total episodes', dataset.num_total_episodes)
  print('min reward', min_reward, 'max reward', max_reward)

  # Behavior-data baseline estimate, for reference in the logs.
  estimate = estimator_lib.get_fullbatch_average(dataset, gamma=gamma)
  print('data per step avg', estimate)

  # Training hyper-parameters encoded into the summary/results directory.
  train_hparam_str = ('nlr{NU_LR}_zlr{Z_LR}_batch{BATCH_SIZE}_'
                      'gam{GAMMA}_nreg{NU_REG}_zreg{Z_REG}_algae{ALGAE_ALPHA}_'
                      'prim{PRIMAL}_div{DIV}').format(
                          NU_LR=nu_learning_rate,
                          Z_LR=zeta_learning_rate,
                          BATCH_SIZE=batch_size,
                          GAMMA=gamma,
                          NU_REG=nu_regularizer,
                          Z_REG=zeta_regularizer,
                          ALGAE_ALPHA=algae_alpha,
                          PRIMAL=primal_form,
                          DIV=divergence_limit)
  if save_dir is not None:
    save_dir = os.path.join(save_dir, hparam_str, train_hparam_str)
    summary_writer = tf.summary.create_file_writer(logdir=save_dir)
  else:
    summary_writer = tf.summary.create_noop_writer()

  activation_fn = tf.nn.relu
  kernel_initializer = tf.keras.initializers.TruncatedNormal(
      stddev=0.5, seed=1)
  hidden_dims = (64,)
  n_intervals = 1
  # nu/zeta output 2*2*n_intervals values: (upper, lower) x (unbiased,
  # biased) heads per interval; the weight network outputs 2*n_intervals.
  nu_network = ValueNetwork((dataset.spec.observation, dataset.spec.action),
                            fc_layer_params=hidden_dims,
                            activation_fn=activation_fn,
                            kernel_initializer=kernel_initializer,
                            last_kernel_initializer=None,
                            output_dim=2 * 2 * n_intervals)
  zeta_network = ValueNetwork((dataset.spec.observation, dataset.spec.action),
                              fc_layer_params=hidden_dims,
                              activation_fn=activation_fn,
                              kernel_initializer=kernel_initializer,
                              last_kernel_initializer=None,
                              output_dim=2 * 2 * n_intervals)
  weight_network = ValueNetwork((dataset.spec.observation,  # initial state
                                 dataset.spec.observation,  # cur state
                                 dataset.spec.action,  # cur action
                                 dataset.spec.observation),  # next state
                                fc_layer_params=hidden_dims,
                                activation_fn=activation_fn,
                                kernel_initializer=kernel_initializer,
                                last_kernel_initializer=None,
                                output_dim=2 * n_intervals)
  nu_optimizer = tf.keras.optimizers.Adam(nu_learning_rate, beta_2=0.99)
  zeta_optimizer = tf.keras.optimizers.Adam(zeta_learning_rate, beta_2=0.99)
  weight_optimizer = tf.keras.optimizers.Adam(weight_learning_rate, beta_2=0.99)

  estimator = NeuralCoinDice(dataset.spec,
                             nu_network, zeta_network,
                             weight_network,
                             nu_optimizer, zeta_optimizer,
                             weight_optimizer,
                             gamma=gamma,
                             divergence_limit=divergence_limit,
                             f_exponent=f_exponent,
                             primal_form=primal_form,
                             nu_regularizer=nu_regularizer,
                             zeta_regularizer=zeta_regularizer,
                             algae_alpha=algae_alpha * np.array([1, 1]),
                             unbias_algae_alpha=False,
                             closed_form_weights=True,
                             num_samples=None)

  global_step = tf.Variable(0, dtype=tf.int64)
  tf.summary.experimental.set_step(global_step)

  @tf.function
  def one_step(transitions_batch, initial_steps_batch):
    # Single compiled training step; summaries are only recorded every
    # 25 global steps to limit logging overhead.
    global_step.assign_add(1)
    with tf.summary.record_if(tf.math.mod(global_step, 25) == 0):
      # Keep only the first step of each sampled episode as initial state.
      initial_steps_batch = tf.nest.map_structure(lambda t: t[:, 0, ...],
                                                  initial_steps_batch)
      losses, _ = estimator.train_step(initial_steps_batch, transitions_batch,
                                       target_policy)
    return losses

  with summary_writer.as_default():
    running_losses = []
    running_estimates = []
    for step in range(num_steps):
      transitions_batch = dataset.get_step(batch_size, num_steps=2)
      initial_steps_batch, _ = dataset.get_episode(
          batch_size, truncate_episode_at=1)
      losses = one_step(transitions_batch, initial_steps_batch)
      running_losses.append([t.numpy() for t in losses])
      if step % 500 == 0 or step == num_steps - 1:
        print('step', step, 'losses', np.mean(running_losses, 0))
        # First entry of the averaged losses holds the interval estimate.
        estimate = np.mean(running_losses, 0)[0]
        for idx, est in enumerate(estimate):
          tf.summary.scalar('estimate%d' % idx, est)
        running_estimates.append(estimate)
        print('estimated confidence interval %s' % estimate)
        print('avg last 3 estimated confidence interval %s' %
              np.mean(running_estimates[-3:], axis=0))
        running_losses = []
    if save_dir is not None:
      results_filename = os.path.join(save_dir, 'results.npy')
      with tf.io.gfile.GFile(results_filename, 'w') as f:
        np.save(f, running_estimates)
  print('Done!')


if __name__ == '__main__':
  app.run(main)
| 43.722449 | 80 | 0.659354 |
30bd6b3747e955022c8a03921fa630d63795fb3b | 990 | py | Python | DQMOffline/Muon/python/MuDepositEnergyMonitoring_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DQMOffline/Muon/python/MuDepositEnergyMonitoring_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DQMOffline/Muon/python/MuDepositEnergyMonitoring_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# MuDepositEnergyMonitoring
muDepEnergyMonitoring = cms.EDAnalyzer(
    "MuDepositEnergyMonitoring",
    # General configuration.
    CosmicsCollectionLabel = cms.InputTag("muons"),
    AlgoName = cms.string('sta'),
    debug = cms.bool(True),
    OutputMEsInRootFile = cms.bool(False),
    OutputFileName = cms.string('MuDepositEnergyMonitoring.root'),
    # ECAL deposit histograms: full deposit and 3x3 (S9) cluster.
    emSizeBin = cms.int32(1000),
    emSizeMin = cms.double(-0.5),
    emSizeMax = cms.double(3.0),
    emS9SizeBin = cms.int32(1000),
    emS9SizeMin = cms.double(-0.5),
    emS9SizeMax = cms.double(3.0),
    # HCAL deposit histograms: full deposit and 3x3 (S9) cluster.
    hadSizeBin = cms.int32(1000),
    hadSizeMin = cms.double(-0.5),
    hadSizeMax = cms.double(3.0),
    hadS9SizeBin = cms.int32(1000),
    hadS9SizeMin = cms.double(-0.5),
    hadS9SizeMax = cms.double(3.0),
    # HO deposit histograms: full deposit and 3x3 (S9) cluster.
    hoSizeBin = cms.int32(1000),
    hoSizeMin = cms.double(-0.5),
    hoSizeMax = cms.double(3.0),
    hoS9SizeBin = cms.int32(1000),
    hoS9SizeMin = cms.double(-0.5),
    hoS9SizeMax = cms.double(3.0)
)
| 30.9375 | 67 | 0.672727 |
54767a9264e15e80e41aa3383d51633551eda2d0 | 5,706 | py | Python | data/p3BR/R2/benchmark/startQiskit_noisy124.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_noisy124.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_noisy124.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=21
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'  # diagram path prefix; referenced only by the commented-out draw() calls
def bitwise_xor(s: str, t: str) -> str:
    """Bitwise XOR of two bit strings; the resulting string is reversed."""
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the phase-free oracle O_f: flip the target qubit for every
    n-bit basis state x with f(x) == "1".

    For each such x, X gates map |x> onto the all-ones control pattern, a
    multi-controlled Toffoli flips the target, and the X gates are undone.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # map |rep> -> |1...1> on the controls
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # undo the mapping so other basis states are unaffected
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the Bernstein-Vazirani circuit on n+1 qubits for oracle f.

    NOTE(review): the block of "# number=..." gates below looks like an
    auto-generated mutation prefix -- preserve its exact order; it is part
    of the benchmark's identity.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1])  # number=1
    prog.h(input_qubit[2])  # number=18
    prog.cz(input_qubit[0], input_qubit[2])  # number=19
    prog.h(input_qubit[2])  # number=20
    prog.x(input_qubit[2])  # number=12
    prog.cx(input_qubit[0], input_qubit[2])  # number=13
    prog.h(input_qubit[1])  # number=7
    prog.cz(input_qubit[2], input_qubit[1])  # number=8
    prog.h(input_qubit[1])  # number=9
    prog.cx(input_qubit[2], input_qubit[1])  # number=4
    prog.y(input_qubit[1])  # number=14
    prog.cx(input_qubit[2], input_qubit[1])  # number=10
    prog.z(input_qubit[2])  # number=3
    prog.x(input_qubit[1])  # number=17
    prog.y(input_qubit[2])  # number=5
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate ``prog`` and return a {'|bits>': amplitude} mapping covering
    every computational basis state."""
    backend = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, backend).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    labeled = {}
    for index in range(2 ** n_qubits):
        ket = "|" + np.binary_repr(index, n_qubits) + ">"
        labeled[ket] = amplitudes[index]
    return labeled
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run ``prog`` on the named Aer backend and package the results.

    Returns a dict with the raw measurement counts, the labeled statevector,
    the bit string from the most frequent measurement reversed into register
    order ("a"), and the caller-provided "b".
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # most_common(1) gives the dominant bitstring; [::-1] restores qubit order
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    # Inner product with a = "011", then offset by b = "1".
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    # Inner product with a = "000", then offset by b = "0".
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    # Inner product with a = "111", then offset by b = "1".
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Bernstein-Vazirani instance: hidden string a = "11", offset b = "1".
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot = 4000
    writefile = open("../data/startQiskit_noisy124.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Noisy simulation against the mocked 5-qubit Yorktown backend.
    backend = FakeYorktown()
    circuit1 = transpile(prog, FakeYorktown())
    # Extra gates applied after transpilation (part of the benchmark variant).
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1, backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 29.564767 | 140 | 0.629863 |
9c5a90bbef3056d993d1ad1ca4a68fbc053fed8e | 718 | py | Python | venv/Lib/site-packages/PySide2/_config.py | okaykacar/TennisBall-Tracking | 1c4d1eb8c376c835ae8942580104d688399534e4 | [
"Unlicense"
] | 1 | 2022-03-14T22:41:42.000Z | 2022-03-14T22:41:42.000Z | venv/Lib/site-packages/PySide2/_config.py | okaykacar/TennisBall-Tracking | 1c4d1eb8c376c835ae8942580104d688399534e4 | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/PySide2/_config.py | okaykacar/TennisBall-Tracking | 1c4d1eb8c376c835ae8942580104d688399534e4 | [
"Unlicense"
] | null | null | null | built_modules = list(name for name in
"Core;Gui;Widgets;PrintSupport;Sql;Network;Test;Concurrent;WinExtras;Xml;XmlPatterns;Help;Multimedia;MultimediaWidgets;OpenGL;OpenGLFunctions;Positioning;Location;Qml;Quick;QuickControls2;QuickWidgets;RemoteObjects;Scxml;Script;ScriptTools;Sensors;SerialPort;TextToSpeech;Charts;Svg;DataVisualization;UiTools;AxContainer;WebChannel;WebEngineCore;WebEngine;WebEngineWidgets;WebSockets;3DCore;3DRender;3DInput;3DLogic;3DAnimation;3DExtras"
.split(";"))
# Generated build metadata for this PySide2 installation.
shiboken_library_soversion = str(5.15)  # str(5.15) -> '5.15'
pyside_library_soversion = str(5.15)
version = "5.15.2"
version_info = (5, 15, 2, "", "")
__build_date__ = '2020-11-12T16:42:21+00:00'
__setup_py_package_version__ = '5.15.2'
| 42.235294 | 441 | 0.807799 |
745818396da810a5e44152fc7e4c2db9e1b6ad7d | 137 | py | Python | Desafio1.py | rsmelocunha/Python-projects | 1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093 | [
"MIT"
] | null | null | null | Desafio1.py | rsmelocunha/Python-projects | 1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093 | [
"MIT"
] | null | null | null | Desafio1.py | rsmelocunha/Python-projects | 1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093 | [
"MIT"
] | null | null | null | nome=input('Qual é o seu nome? ').strip()
print(' ', '\033[1;31m', nome, '\033[m', '\033[32m', '\n', 'Prazer em te conhecer!', '\033[m') | 68.5 | 95 | 0.554745 |
1781193f61f1476a57760cf8073b06b877358d59 | 10,566 | py | Python | openstackclient/network/v2/network_rbac.py | cloudification-io/python-openstackclient | e07324e30fbb24e89fd63d1c5a5fe485f693a45c | [
"Apache-2.0"
] | 5 | 2015-02-26T18:03:07.000Z | 2017-05-01T20:17:20.000Z | openstackclient/network/v2/network_rbac.py | cloudification-io/python-openstackclient | e07324e30fbb24e89fd63d1c5a5fe485f693a45c | [
"Apache-2.0"
] | null | null | null | openstackclient/network/v2/network_rbac.py | cloudification-io/python-openstackclient | e07324e30fbb24e89fd63d1c5a5fe485f693a45c | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""RBAC action implementations"""
import logging
from osc_lib.cli import format_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import sdk_utils
LOG = logging.getLogger(__name__)

# Column formatters applied when rendering RBAC policy attributes.
_formatters = {
    'location': format_columns.DictColumn,
}
def _get_columns(item):
    """Return display/attribute column tuples for an RBAC policy resource,
    renaming legacy tenant attributes to their project equivalents."""
    attr_renames = {
        'target_tenant': 'target_project_id',
        'tenant_id': 'project_id',
    }
    return sdk_utils.get_osc_show_columns_for_sdk_resource(item, attr_renames)
def _get_attrs(client_manager, parsed_args):
    """Translate parsed CLI arguments into RBAC policy attributes."""
    network_client = client_manager.network
    identity_client = client_manager.identity

    # Resolve the affected object; parsed_args.type is restricted by the
    # parser to exactly these three choices.
    finders = {
        'network': network_client.find_network,
        'qos_policy': network_client.find_qos_policy,
        'security_group': network_client.find_security_group,
    }
    rbac_obj = finders[parsed_args.type](
        parsed_args.rbac_object, ignore_missing=False)

    attrs = {
        'object_type': parsed_args.type,
        'action': parsed_args.action,
        'object_id': rbac_obj.id,
    }

    # Target: either a specific project or the '*' wildcard; the parser
    # makes these mutually exclusive and requires one of them.
    if parsed_args.target_project is not None:
        target_id = identity_common.find_project(
            identity_client,
            parsed_args.target_project,
            parsed_args.target_project_domain,
        ).id
    elif parsed_args.target_all_projects:
        target_id = '*'
    attrs['target_tenant'] = target_id

    # Optional explicit owner project.
    if parsed_args.project is not None:
        attrs['tenant_id'] = identity_common.find_project(
            identity_client,
            parsed_args.project,
            parsed_args.project_domain,
        ).id
    return attrs
# TODO(abhiraut): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class CreateNetworkRBAC(command.ShowOne):
    _description = _("Create network RBAC policy")

    def get_parser(self, prog_name):
        """Build the argument parser for ``network rbac create``."""
        parser = super(CreateNetworkRBAC, self).get_parser(prog_name)
        parser.add_argument(
            'rbac_object',
            metavar="<rbac-object>",
            help=_("The object to which this RBAC policy affects (name or ID)")
        )
        parser.add_argument(
            '--type',
            metavar="<type>",
            required=True,
            choices=['security_group', 'qos_policy', 'network'],
            help=_('Type of the object that RBAC policy '
                   'affects ("security_group", "qos_policy" or "network")')
        )
        parser.add_argument(
            '--action',
            metavar="<action>",
            required=True,
            choices=['access_as_external', 'access_as_shared'],
            help=_('Action for the RBAC policy '
                   '("access_as_external" or "access_as_shared")')
        )
        # Exactly one of --target-project / --target-all-projects is required.
        target_project_group = parser.add_mutually_exclusive_group(
            required=True)
        target_project_group.add_argument(
            '--target-project',
            metavar="<target-project>",
            help=_('The project to which the RBAC policy '
                   'will be enforced (name or ID)')
        )
        target_project_group.add_argument(
            '--target-all-projects',
            action='store_true',
            help=_('Allow creating RBAC policy for all projects.')
        )
        parser.add_argument(
            '--target-project-domain',
            metavar='<target-project-domain>',
            help=_('Domain the target project belongs to (name or ID). '
                   'This can be used in case collisions between project names '
                   'exist.'),
        )
        parser.add_argument(
            '--project',
            metavar="<project>",
            help=_('The owner project (name or ID)')
        )
        identity_common.add_project_domain_option_to_parser(parser)
        return parser

    def take_action(self, parsed_args):
        """Create the RBAC policy and return it formatted for display."""
        client = self.app.client_manager.network
        attrs = _get_attrs(self.app.client_manager, parsed_args)
        obj = client.create_rbac_policy(**attrs)
        display_columns, columns = _get_columns(obj)
        data = utils.get_item_properties(obj, columns, formatters=_formatters)
        return display_columns, data
class DeleteNetworkRBAC(command.Command):
    _description = _("Delete network RBAC policy(s)")

    def get_parser(self, prog_name):
        """Build the argument parser for ``network rbac delete``."""
        parser = super(DeleteNetworkRBAC, self).get_parser(prog_name)
        parser.add_argument(
            'rbac_policy',
            metavar="<rbac-policy>",
            nargs='+',
            help=_("RBAC policy(s) to delete (ID only)")
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each requested policy; raise once at the end if any failed."""
        client = self.app.client_manager.network
        failures = 0
        for policy_id in parsed_args.rbac_policy:
            try:
                policy = client.find_rbac_policy(policy_id,
                                                 ignore_missing=False)
                client.delete_rbac_policy(policy)
            except Exception as e:
                failures += 1
                LOG.error(_("Failed to delete RBAC policy with "
                            "ID '%(rbac)s': %(e)s"),
                          {'rbac': policy_id, 'e': e})
        if failures:
            total = len(parsed_args.rbac_policy)
            msg = (_("%(result)s of %(total)s RBAC policies failed "
                     "to delete.") % {'result': failures, 'total': total})
            raise exceptions.CommandError(msg)
class ListNetworkRBAC(command.Lister):
    _description = _("List network RBAC policies")

    def get_parser(self, prog_name):
        """Add the optional filters (--type, --action) and --long."""
        parser = super(ListNetworkRBAC, self).get_parser(prog_name)
        parser.add_argument(
            '--type',
            metavar='<type>',
            choices=['security_group', 'qos_policy', 'network'],
            help=_('List network RBAC policies according to '
                   'given object type ("security_group", "qos_policy" '
                   'or "network")')
        )
        parser.add_argument(
            '--action',
            metavar='<action>',
            choices=['access_as_external', 'access_as_shared'],
            help=_('List network RBAC policies according to given '
                   'action ("access_as_external" or "access_as_shared")')
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_("List additional fields in output")
        )
        return parser

    def take_action(self, parsed_args):
        """Return (headers, rows) for the policies matching the filters."""
        client = self.app.client_manager.network

        columns = ['id', 'object_type', 'object_id']
        column_headers = ['ID', 'Object Type', 'Object ID']
        if parsed_args.long:
            # Only expose the action column when extra detail was requested.
            columns.append('action')
            column_headers.append('Action')

        # Build the server-side query from whichever filters were supplied.
        filters = {}
        if parsed_args.type is not None:
            filters['object_type'] = parsed_args.type
        if parsed_args.action is not None:
            filters['action'] = parsed_args.action

        policies = client.rbac_policies(**filters)
        rows = (utils.get_item_properties(policy, tuple(columns))
                for policy in policies)
        return tuple(column_headers), rows
# TODO(abhiraut): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class SetNetworkRBAC(command.Command):
    _description = _("Set network RBAC policy properties")

    def get_parser(self, prog_name):
        """Add the policy ID and the optional target-project arguments."""
        parser = super(SetNetworkRBAC, self).get_parser(prog_name)
        parser.add_argument(
            'rbac_policy',
            metavar="<rbac-policy>",
            help=_("RBAC policy to be modified (ID only)")
        )
        parser.add_argument(
            '--target-project',
            metavar="<target-project>",
            help=_('The project to which the RBAC policy '
                   'will be enforced (name or ID)')
        )
        parser.add_argument(
            '--target-project-domain',
            metavar='<target-project-domain>',
            help=_('Domain the target project belongs to (name or ID). '
                   'This can be used in case collisions between project names '
                   'exist.'),
        )
        return parser

    def take_action(self, parsed_args):
        """Update the policy's target project, if one was given."""
        client = self.app.client_manager.network
        policy = client.find_rbac_policy(parsed_args.rbac_policy,
                                         ignore_missing=False)
        attrs = {}
        if parsed_args.target_project:
            # Resolve the project name/ID via the identity service.
            identity_client = self.app.client_manager.identity
            project = identity_common.find_project(
                identity_client,
                parsed_args.target_project,
                parsed_args.target_project_domain,
            )
            # The Network API still uses the legacy "tenant" field name here.
            attrs['target_tenant'] = project.id
        client.update_rbac_policy(policy, **attrs)
class ShowNetworkRBAC(command.ShowOne):
    _description = _("Display network RBAC policy details")

    def get_parser(self, prog_name):
        """Add the single positional policy ID argument."""
        parser = super(ShowNetworkRBAC, self).get_parser(prog_name)
        parser.add_argument(
            'rbac_policy',
            metavar="<rbac-policy>",
            help=_("RBAC policy (ID only)")
        )
        return parser

    def take_action(self, parsed_args):
        """Look up the policy and return its displayable properties."""
        client = self.app.client_manager.network
        policy = client.find_rbac_policy(parsed_args.rbac_policy,
                                         ignore_missing=False)
        display_columns, attr_columns = _get_columns(policy)
        properties = utils.get_item_properties(
            policy, attr_columns, formatters=_formatters)
        return display_columns, properties
| 34.756579 | 79 | 0.601079 |
a1424327f2cafde7eeb71366a33011d60ed80971 | 1,314 | py | Python | observations/r/sp500_w90.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | observations/r/sp500_w90.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | observations/r/sp500_w90.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def sp500_w90(path):
  """Closing Numbers for S and P 500 Index - First 100 Days of 1990

  Closing numbers for S and P 500 Index, Jan. 1, 1990 through early 2000.
  Derived from SP500 in the MASS library.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `sp500_w90.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 100 rows and 2 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd

  data_dir = os.path.expanduser(path)
  filename = 'sp500_w90.csv'
  csv_path = os.path.join(data_dir, filename)
  if not os.path.exists(csv_path):
    # First use: fetch the dataset and cache it under `path`.
    url = 'http://dustintran.com/data/r/DAAG/SP500W90.csv'
    maybe_download_and_extract(data_dir, url,
                               save_file_name='sp500_w90.csv',
                               resume=False)

  frame = pd.read_csv(csv_path, index_col=0, parse_dates=True)
  return frame.values, {'columns': frame.columns}
689eae0ef29835bcd96ca03c387c9d1cbd73f028 | 4,093 | py | Python | clinica/iotools/converters/adni_to_bids/adni_modalities/adni_pib_pet.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | 135 | 2019-05-17T14:16:40.000Z | 2022-03-19T03:08:05.000Z | clinica/iotools/converters/adni_to_bids/adni_modalities/adni_pib_pet.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | 391 | 2019-06-03T09:32:17.000Z | 2022-03-31T15:10:26.000Z | clinica/iotools/converters/adni_to_bids/adni_modalities/adni_pib_pet.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | 57 | 2019-05-20T08:38:01.000Z | 2022-02-11T12:14:32.000Z | # coding: utf-8
"""Module for converting PIB PET of ADNI."""
def convert_adni_pib_pet(
    source_dir, csv_dir, dest_dir, conversion_dir, subjs_list=None, mod_to_update=False
):
    """Convert PIB PET images of ADNI into BIDS format.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        conversion_dir: path to the TSV files including the paths to original images
        subjs_list: subjects list
        mod_to_update: If True, pre-existing images in the BIDS directory will be erased and extracted again.
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import paths_to_bids
    from clinica.utils.stream import cprint

    if not subjs_list:
        # No explicit subject selection: default to every participant
        # listed in ADNIMERGE.
        adni_merge = pd.read_csv(
            path.join(csv_dir, "ADNIMERGE.csv"), sep=",", low_memory=False
        )
        subjs_list = list(adni_merge.PTID.unique())

    cprint(
        f"Calculating paths of PIB PET images. Output will be stored in {conversion_dir}."
    )
    image_paths = compute_pib_pet_paths(
        source_dir, csv_dir, dest_dir, subjs_list, conversion_dir
    )
    cprint("Paths of PIB PET images found. Exporting images into BIDS ...")
    paths_to_bids(image_paths, dest_dir, "pib", mod_to_update=mod_to_update)
    cprint(msg="PIB PET conversion done.", lvl="debug")
def compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list, conversion_dir):
    """Compute the paths to the PIB PET images and store them in a TSV file.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        subjs_list: subjects list
        conversion_dir: path to the TSV files including the paths to original images

    Returns:
        images: a dataframe with all the paths to the PET images that will be converted into BIDS
    """
    from os import path

    import pandas as pd

    from clinica.iotools.converters.adni_to_bids.adni_utils import (
        find_image_path,
        get_images_pet,
    )

    columns = [
        "Phase",
        "Subject_ID",
        "VISCODE",
        "Visit",
        "Sequence",
        "Scan_Date",
        "Study_ID",
        "Series_ID",
        "Image_ID",
        "Original",
    ]

    # Clinical metadata used to locate and quality-filter the scans.
    qc = pd.read_csv(path.join(csv_dir, "PIBQC.csv"), sep=",", low_memory=False)
    meta_list = pd.read_csv(
        path.join(csv_dir, "PET_META_LIST.csv"), sep=",", low_memory=False
    )

    per_subject_dfs = []
    for subject in subjs_list:
        subject_meta = meta_list[meta_list["Subject"] == subject]
        if subject_meta.empty:
            continue

        # Keep only scans that passed quality control for this subject
        # (RID is the numeric tail of the subject identifier).
        subject_qc = qc[(qc.PASS == 1) & (qc.RID == int(subject[-4:]))]

        subject_dfs = get_images_pet(
            subject,
            subject_qc,
            subject_meta,
            columns,
            "PIB-PET",
            ["PIB Co-registered, Averaged"],
            viscode_field="VISCODE",
        )
        if subject_dfs:
            per_subject_dfs += subject_dfs

    if per_subject_dfs:
        images_df = pd.concat(per_subject_dfs, ignore_index=True)
    else:
        images_df = pd.DataFrame(columns=columns)

    # Known conversion failures to exclude (currently none).
    conversion_errors = []
    if not images_df.empty:
        error_ind = images_df.index[
            images_df.apply(
                lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1
            )
        ]
        images_df.drop(error_ind, inplace=True)

    images = find_image_path(images_df, source_dir, "PIB", "I", "Image_ID")
    images.to_csv(path.join(conversion_dir, "pib_pet_paths.tsv"), sep="\t", index=False)

    return images
| 31.728682 | 109 | 0.64989 |
51abfe09d60d3f9fd0b655e74ea3ed893f16f210 | 45 | py | Python | crane_controllers/external/casadi-3.4.5/swig/extending_casadi/__init__.py | tingelst/crane | e14bca2bd4e2397dce09180029223832aad9b070 | [
"MIT"
] | 2 | 2021-03-22T08:50:29.000Z | 2021-08-18T03:04:18.000Z | crane_controllers/external/casadi-3.4.5/swig/extending_casadi/__init__.py | tingelst/crane | e14bca2bd4e2397dce09180029223832aad9b070 | [
"MIT"
] | null | null | null | crane_controllers/external/casadi-3.4.5/swig/extending_casadi/__init__.py | tingelst/crane | e14bca2bd4e2397dce09180029223832aad9b070 | [
"MIT"
] | 2 | 2022-01-14T04:28:41.000Z | 2022-01-14T05:29:01.000Z | import casadi
from extending_casadi import *
| 15 | 30 | 0.844444 |
ef82642ba4afc31f453b8c32fd1327396d0c66b4 | 24,236 | py | Python | models_SHOT_convex/syn20hfsg.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | null | null | null | models_SHOT_convex/syn20hfsg.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | null | null | null | models_SHOT_convex/syn20hfsg.py | grossmann-group/pyomo-MINLP-benchmarking | 714f0a0dffd61675649a805683c0627af6b4929e | [
"MIT"
] | null | null | null | # MINLP written by GAMS Convert at 01/15/21 11:37:32
#
# Equation counts
# Total E G L N X C B
# 234 107 27 100 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 152 132 20 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 534 492 42 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()

# Continuous variables x2..x132 (machine-generated by GAMS Convert; see the
# header comment). All are nonnegative; a handful carry explicit upper
# bounds, recorded here instead of being spelled out line by line.
_upper_bound = {2: 10, 13: 7, 30: 5, 31: 5}
for _i in range(2, 133):
    # bounds=(0, None) for unbounded-above variables, matching the original.
    setattr(m, "x%d" % _i,
            Var(within=Reals, bounds=(0, _upper_bound.get(_i)), initialize=0))

# Binary selection variables b133..b152.
for _i in range(133, 153):
    setattr(m, "b%d" % _i, Var(within=Binary, bounds=(0, 1), initialize=0))
# ---------------------------------------------------------------------------
# Objective and constraints, machine-generated by GAMS Convert (see header).
# NOTE(review): the interpretation comments below are inferred from the
# algebraic structure only - confirm against the original GAMS source.
# ---------------------------------------------------------------------------
# Objective (maximize): positive coefficients on selected continuous flows
# minus fixed charges on the binary selection variables b133..b152.
m.obj = Objective(expr= 5*m.x8 + 200*m.x38 + 250*m.x39 + 200*m.x40 + 700*m.x41 + 400*m.x42 + 500*m.x43 + 400*m.x44
                      + 600*m.x45 + 700*m.x46 - 5*m.b133 - 8*m.b134 - 6*m.b135 - 10*m.b136 - 6*m.b137 - 7*m.b138
                      - 4*m.b139 - 5*m.b140 - 2*m.b141 - 4*m.b142 - 3*m.b143 - 7*m.b144 - 3*m.b145 - 2*m.b146
                      - 4*m.b147 - 2*m.b148 - 3*m.b149 - 5*m.b150 - 2*m.b151 - 8*m.b152, sense=maximize)

# Linear equality balances linking the continuous variables (stream
# splits/merges, presumably - each sums one variable against two or three).
m.c2 = Constraint(expr= m.x2 - m.x3 - m.x4 == 0)
m.c3 = Constraint(expr= - m.x5 - m.x6 + m.x7 == 0)
m.c4 = Constraint(expr= m.x7 - m.x8 - m.x9 == 0)
m.c5 = Constraint(expr= m.x9 - m.x10 - m.x11 - m.x12 == 0)
m.c6 = Constraint(expr= m.x14 - m.x17 - m.x18 == 0)
m.c7 = Constraint(expr= m.x16 - m.x19 - m.x20 - m.x21 == 0)
m.c8 = Constraint(expr= m.x24 - m.x28 - m.x29 == 0)
m.c9 = Constraint(expr= - m.x25 - m.x31 + m.x32 == 0)
m.c10 = Constraint(expr= m.x26 - m.x33 - m.x34 == 0)
m.c11 = Constraint(expr= m.x27 - m.x35 - m.x36 - m.x37 == 0)

# Per-unit blocks gated by one binary each (b133..b152). Each block pairs a
# yield relation (either linear or a log-based concave one in perspective
# form - the (0.001 + 0.999*b) factor keeps the division well defined when
# the unit is off) with zero-forcing equalities and big-M style bounds.
m.c12 = Constraint(expr=(m.x51/(0.001 + 0.999*m.b133) - log(1 + m.x47/(0.001 + 0.999*m.b133)))*(0.001 + 0.999*m.b133)
                        <= 0)
m.c13 = Constraint(expr= m.x48 == 0)
m.c14 = Constraint(expr= m.x52 == 0)
m.c15 = Constraint(expr= m.x3 - m.x47 - m.x48 == 0)
m.c16 = Constraint(expr= m.x5 - m.x51 - m.x52 == 0)
m.c17 = Constraint(expr= m.x47 - 10*m.b133 <= 0)
m.c18 = Constraint(expr= m.x48 + 10*m.b133 <= 10)
m.c19 = Constraint(expr= m.x51 - 2.39789527279837*m.b133 <= 0)
m.c20 = Constraint(expr= m.x52 + 2.39789527279837*m.b133 <= 2.39789527279837)
m.c21 = Constraint(expr=(m.x53/(0.001 + 0.999*m.b134) - 1.2*log(1 + m.x49/(0.001 + 0.999*m.b134)))*(0.001 + 0.999*m.b134
                        ) <= 0)
m.c22 = Constraint(expr= m.x50 == 0)
m.c23 = Constraint(expr= m.x54 == 0)
m.c24 = Constraint(expr= m.x4 - m.x49 - m.x50 == 0)
m.c25 = Constraint(expr= m.x6 - m.x53 - m.x54 == 0)
m.c26 = Constraint(expr= m.x49 - 10*m.b134 <= 0)
m.c27 = Constraint(expr= m.x50 + 10*m.b134 <= 10)
m.c28 = Constraint(expr= m.x53 - 2.87747432735804*m.b134 <= 0)
m.c29 = Constraint(expr= m.x54 + 2.87747432735804*m.b134 <= 2.87747432735804)
m.c30 = Constraint(expr= - 0.75*m.x55 + m.x63 == 0)
m.c31 = Constraint(expr= m.x56 == 0)
m.c32 = Constraint(expr= m.x64 == 0)
m.c33 = Constraint(expr= m.x10 - m.x55 - m.x56 == 0)
m.c34 = Constraint(expr= m.x14 - m.x63 - m.x64 == 0)
m.c35 = Constraint(expr= m.x55 - 2.87747432735804*m.b135 <= 0)
m.c36 = Constraint(expr= m.x56 + 2.87747432735804*m.b135 <= 2.87747432735804)
m.c37 = Constraint(expr= m.x63 - 2.15810574551853*m.b135 <= 0)
m.c38 = Constraint(expr= m.x64 + 2.15810574551853*m.b135 <= 2.15810574551853)
m.c39 = Constraint(expr=(m.x65/(0.001 + 0.999*m.b136) - 1.5*log(1 + m.x57/(0.001 + 0.999*m.b136)))*(0.001 + 0.999*m.b136
                        ) <= 0)
m.c40 = Constraint(expr= m.x58 == 0)
m.c41 = Constraint(expr= m.x67 == 0)
m.c42 = Constraint(expr= m.x11 - m.x57 - m.x58 == 0)
m.c43 = Constraint(expr= m.x15 - m.x65 - m.x67 == 0)
m.c44 = Constraint(expr= m.x57 - 2.87747432735804*m.b136 <= 0)
m.c45 = Constraint(expr= m.x58 + 2.87747432735804*m.b136 <= 2.87747432735804)
m.c46 = Constraint(expr= m.x65 - 2.03277599268042*m.b136 <= 0)
m.c47 = Constraint(expr= m.x67 + 2.03277599268042*m.b136 <= 2.03277599268042)
m.c48 = Constraint(expr= - m.x59 + m.x69 == 0)
m.c49 = Constraint(expr= - 0.5*m.x61 + m.x69 == 0)
m.c50 = Constraint(expr= m.x60 == 0)
m.c51 = Constraint(expr= m.x62 == 0)
m.c52 = Constraint(expr= m.x70 == 0)
m.c53 = Constraint(expr= m.x12 - m.x59 - m.x60 == 0)
m.c54 = Constraint(expr= m.x13 - m.x61 - m.x62 == 0)
m.c55 = Constraint(expr= m.x16 - m.x69 - m.x70 == 0)
m.c56 = Constraint(expr= m.x59 - 2.87747432735804*m.b137 <= 0)
m.c57 = Constraint(expr= m.x60 + 2.87747432735804*m.b137 <= 2.87747432735804)
m.c58 = Constraint(expr= m.x61 - 7*m.b137 <= 0)
m.c59 = Constraint(expr= m.x62 + 7*m.b137 <= 7)
m.c60 = Constraint(expr= m.x69 - 3.5*m.b137 <= 0)
m.c61 = Constraint(expr= m.x70 + 3.5*m.b137 <= 3.5)
m.c62 = Constraint(expr=(m.x81/(0.001 + 0.999*m.b138) - 1.25*log(1 + m.x71/(0.001 + 0.999*m.b138)))*(0.001 + 0.999*
                        m.b138) <= 0)
m.c63 = Constraint(expr= m.x72 == 0)
m.c64 = Constraint(expr= m.x83 == 0)
m.c65 = Constraint(expr= m.x17 - m.x71 - m.x72 == 0)
m.c66 = Constraint(expr= m.x22 - m.x81 - m.x83 == 0)
m.c67 = Constraint(expr= m.x71 - 2.15810574551853*m.b138 <= 0)
m.c68 = Constraint(expr= m.x72 + 2.15810574551853*m.b138 <= 2.15810574551853)
m.c69 = Constraint(expr= m.x81 - 1.43746550029693*m.b138 <= 0)
m.c70 = Constraint(expr= m.x83 + 1.43746550029693*m.b138 <= 1.43746550029693)
m.c71 = Constraint(expr=(m.x85/(0.001 + 0.999*m.b139) - 0.9*log(1 + m.x73/(0.001 + 0.999*m.b139)))*(0.001 + 0.999*m.b139
                        ) <= 0)
m.c72 = Constraint(expr= m.x74 == 0)
m.c73 = Constraint(expr= m.x87 == 0)
m.c74 = Constraint(expr= m.x18 - m.x73 - m.x74 == 0)
m.c75 = Constraint(expr= m.x23 - m.x85 - m.x87 == 0)
m.c76 = Constraint(expr= m.x73 - 2.15810574551853*m.b139 <= 0)
m.c77 = Constraint(expr= m.x74 + 2.15810574551853*m.b139 <= 2.15810574551853)
m.c78 = Constraint(expr= m.x85 - 1.03497516021379*m.b139 <= 0)
m.c79 = Constraint(expr= m.x87 + 1.03497516021379*m.b139 <= 1.03497516021379)
m.c80 = Constraint(expr=(m.x89/(0.001 + 0.999*m.b140) - log(1 + m.x66/(0.001 + 0.999*m.b140)))*(0.001 + 0.999*m.b140)
                        <= 0)
m.c81 = Constraint(expr= m.x68 == 0)
m.c82 = Constraint(expr= m.x90 == 0)
m.c83 = Constraint(expr= m.x15 - m.x66 - m.x68 == 0)
m.c84 = Constraint(expr= m.x24 - m.x89 - m.x90 == 0)
m.c85 = Constraint(expr= m.x66 - 2.03277599268042*m.b140 <= 0)
m.c86 = Constraint(expr= m.x68 + 2.03277599268042*m.b140 <= 2.03277599268042)
m.c87 = Constraint(expr= m.x89 - 1.10947836929589*m.b140 <= 0)
m.c88 = Constraint(expr= m.x90 + 1.10947836929589*m.b140 <= 1.10947836929589)
m.c89 = Constraint(expr= - 0.9*m.x75 + m.x91 == 0)
m.c90 = Constraint(expr= m.x76 == 0)
m.c91 = Constraint(expr= m.x92 == 0)
m.c92 = Constraint(expr= m.x19 - m.x75 - m.x76 == 0)
m.c93 = Constraint(expr= m.x25 - m.x91 - m.x92 == 0)
m.c94 = Constraint(expr= m.x75 - 3.5*m.b141 <= 0)
m.c95 = Constraint(expr= m.x76 + 3.5*m.b141 <= 3.5)
m.c96 = Constraint(expr= m.x91 - 3.15*m.b141 <= 0)
m.c97 = Constraint(expr= m.x92 + 3.15*m.b141 <= 3.15)
m.c98 = Constraint(expr= - 0.6*m.x77 + m.x93 == 0)
m.c99 = Constraint(expr= m.x78 == 0)
m.c100 = Constraint(expr= m.x94 == 0)
m.c101 = Constraint(expr= m.x20 - m.x77 - m.x78 == 0)
m.c102 = Constraint(expr= m.x26 - m.x93 - m.x94 == 0)
m.c103 = Constraint(expr= m.x77 - 3.5*m.b142 <= 0)
m.c104 = Constraint(expr= m.x78 + 3.5*m.b142 <= 3.5)
m.c105 = Constraint(expr= m.x93 - 2.1*m.b142 <= 0)
m.c106 = Constraint(expr= m.x94 + 2.1*m.b142 <= 2.1)
m.c107 = Constraint(expr=(m.x95/(0.001 + 0.999*m.b143) - 1.1*log(1 + m.x79/(0.001 + 0.999*m.b143)))*(0.001 + 0.999*
                         m.b143) <= 0)
m.c108 = Constraint(expr= m.x80 == 0)
m.c109 = Constraint(expr= m.x96 == 0)
m.c110 = Constraint(expr= m.x21 - m.x79 - m.x80 == 0)
m.c111 = Constraint(expr= m.x27 - m.x95 - m.x96 == 0)
m.c112 = Constraint(expr= m.x79 - 3.5*m.b143 <= 0)
m.c113 = Constraint(expr= m.x80 + 3.5*m.b143 <= 3.5)
m.c114 = Constraint(expr= m.x95 - 1.6544851364539*m.b143 <= 0)
m.c115 = Constraint(expr= m.x96 + 1.6544851364539*m.b143 <= 1.6544851364539)
m.c116 = Constraint(expr= - 0.9*m.x82 + m.x115 == 0)
m.c117 = Constraint(expr= - m.x101 + m.x115 == 0)
m.c118 = Constraint(expr= m.x84 == 0)
m.c119 = Constraint(expr= m.x102 == 0)
m.c120 = Constraint(expr= m.x116 == 0)
m.c121 = Constraint(expr= m.x22 - m.x82 - m.x84 == 0)
m.c122 = Constraint(expr= m.x30 - m.x101 - m.x102 == 0)
m.c123 = Constraint(expr= m.x38 - m.x115 - m.x116 == 0)
m.c124 = Constraint(expr= m.x82 - 1.43746550029693*m.b144 <= 0)
m.c125 = Constraint(expr= m.x84 + 1.43746550029693*m.b144 <= 1.43746550029693)
m.c126 = Constraint(expr= m.x101 - 5*m.b144 <= 0)
m.c127 = Constraint(expr= m.x102 + 5*m.b144 <= 5)
m.c128 = Constraint(expr= m.x115 - 5*m.b144 <= 0)
m.c129 = Constraint(expr= m.x116 + 5*m.b144 <= 5)
m.c130 = Constraint(expr=(m.x117/(0.001 + 0.999*m.b145) - log(1 + m.x86/(0.001 + 0.999*m.b145)))*(0.001 + 0.999*m.b145)
                         <= 0)
m.c131 = Constraint(expr= m.x88 == 0)
m.c132 = Constraint(expr= m.x118 == 0)
m.c133 = Constraint(expr= m.x23 - m.x86 - m.x88 == 0)
m.c134 = Constraint(expr= m.x39 - m.x117 - m.x118 == 0)
m.c135 = Constraint(expr= m.x86 - 1.03497516021379*m.b145 <= 0)
m.c136 = Constraint(expr= m.x88 + 1.03497516021379*m.b145 <= 1.03497516021379)
m.c137 = Constraint(expr= m.x117 - 0.710483612536911*m.b145 <= 0)
m.c138 = Constraint(expr= m.x118 + 0.710483612536911*m.b145 <= 0.710483612536911)
m.c139 = Constraint(expr=(m.x119/(0.001 + 0.999*m.b146) - 0.7*log(1 + m.x97/(0.001 + 0.999*m.b146)))*(0.001 + 0.999*
                         m.b146) <= 0)
m.c140 = Constraint(expr= m.x98 == 0)
m.c141 = Constraint(expr= m.x120 == 0)
m.c142 = Constraint(expr= m.x28 - m.x97 - m.x98 == 0)
m.c143 = Constraint(expr= m.x40 - m.x119 - m.x120 == 0)
m.c144 = Constraint(expr= m.x97 - 1.10947836929589*m.b146 <= 0)
m.c145 = Constraint(expr= m.x98 + 1.10947836929589*m.b146 <= 1.10947836929589)
m.c146 = Constraint(expr= m.x119 - 0.522508489006913*m.b146 <= 0)
m.c147 = Constraint(expr= m.x120 + 0.522508489006913*m.b146 <= 0.522508489006913)
m.c148 = Constraint(expr=(m.x121/(0.001 + 0.999*m.b147) - 0.65*log(1 + m.x99/(0.001 + 0.999*m.b147)))*(0.001 + 0.999*
                         m.b147) <= 0)
m.c149 = Constraint(expr=(m.x121/(0.001 + 0.999*m.b147) - 0.65*log(1 + m.x103/(0.001 + 0.999*m.b147)))*(0.001 + 0.999*
                         m.b147) <= 0)
m.c150 = Constraint(expr= m.x100 == 0)
m.c151 = Constraint(expr= m.x104 == 0)
m.c152 = Constraint(expr= m.x122 == 0)
m.c153 = Constraint(expr= m.x29 - m.x99 - m.x100 == 0)
m.c154 = Constraint(expr= m.x32 - m.x103 - m.x104 == 0)
m.c155 = Constraint(expr= m.x41 - m.x121 - m.x122 == 0)
m.c156 = Constraint(expr= m.x99 - 1.10947836929589*m.b147 <= 0)
m.c157 = Constraint(expr= m.x100 + 1.10947836929589*m.b147 <= 1.10947836929589)
m.c158 = Constraint(expr= m.x103 - 8.15*m.b147 <= 0)
m.c159 = Constraint(expr= m.x104 + 8.15*m.b147 <= 8.15)
m.c160 = Constraint(expr= m.x121 - 1.43894002153683*m.b147 <= 0)
m.c161 = Constraint(expr= m.x122 + 1.43894002153683*m.b147 <= 1.43894002153683)
m.c162 = Constraint(expr= - m.x105 + m.x123 == 0)
m.c163 = Constraint(expr= m.x106 == 0)
m.c164 = Constraint(expr= m.x124 == 0)
m.c165 = Constraint(expr= m.x33 - m.x105 - m.x106 == 0)
m.c166 = Constraint(expr= m.x42 - m.x123 - m.x124 == 0)
m.c167 = Constraint(expr= m.x105 - 2.1*m.b148 <= 0)
m.c168 = Constraint(expr= m.x106 + 2.1*m.b148 <= 2.1)
m.c169 = Constraint(expr= m.x123 - 2.1*m.b148 <= 0)
m.c170 = Constraint(expr= m.x124 + 2.1*m.b148 <= 2.1)
m.c171 = Constraint(expr= - m.x107 + m.x125 == 0)
m.c172 = Constraint(expr= m.x108 == 0)
m.c173 = Constraint(expr= m.x126 == 0)
m.c174 = Constraint(expr= m.x34 - m.x107 - m.x108 == 0)
m.c175 = Constraint(expr= m.x43 - m.x125 - m.x126 == 0)
m.c176 = Constraint(expr= m.x107 - 2.1*m.b149 <= 0)
m.c177 = Constraint(expr= m.x108 + 2.1*m.b149 <= 2.1)
m.c178 = Constraint(expr= m.x125 - 2.1*m.b149 <= 0)
m.c179 = Constraint(expr= m.x126 + 2.1*m.b149 <= 2.1)
m.c180 = Constraint(expr=(m.x127/(0.001 + 0.999*m.b150) - 0.75*log(1 + m.x109/(0.001 + 0.999*m.b150)))*(0.001 + 0.999*
                         m.b150) <= 0)
m.c181 = Constraint(expr= m.x110 == 0)
m.c182 = Constraint(expr= m.x128 == 0)
m.c183 = Constraint(expr= m.x35 - m.x109 - m.x110 == 0)
m.c184 = Constraint(expr= m.x44 - m.x127 - m.x128 == 0)
m.c185 = Constraint(expr= m.x109 - 1.6544851364539*m.b150 <= 0)
m.c186 = Constraint(expr= m.x110 + 1.6544851364539*m.b150 <= 1.6544851364539)
m.c187 = Constraint(expr= m.x127 - 0.732188035236726*m.b150 <= 0)
m.c188 = Constraint(expr= m.x128 + 0.732188035236726*m.b150 <= 0.732188035236726)
m.c189 = Constraint(expr=(m.x129/(0.001 + 0.999*m.b151) - 0.8*log(1 + m.x111/(0.001 + 0.999*m.b151)))*(0.001 + 0.999*
                         m.b151) <= 0)
m.c190 = Constraint(expr= m.x112 == 0)
m.c191 = Constraint(expr= m.x130 == 0)
m.c192 = Constraint(expr= m.x36 - m.x111 - m.x112 == 0)
m.c193 = Constraint(expr= m.x45 - m.x129 - m.x130 == 0)
m.c194 = Constraint(expr= m.x111 - 1.6544851364539*m.b151 <= 0)
m.c195 = Constraint(expr= m.x112 + 1.6544851364539*m.b151 <= 1.6544851364539)
m.c196 = Constraint(expr= m.x129 - 0.781000570919175*m.b151 <= 0)
m.c197 = Constraint(expr= m.x130 + 0.781000570919175*m.b151 <= 0.781000570919175)
m.c198 = Constraint(expr=(m.x131/(0.001 + 0.999*m.b152) - 0.85*log(1 + m.x113/(0.001 + 0.999*m.b152)))*(0.001 + 0.999*
                         m.b152) <= 0)
m.c199 = Constraint(expr= m.x114 == 0)
m.c200 = Constraint(expr= m.x132 == 0)
m.c201 = Constraint(expr= m.x37 - m.x113 - m.x114 == 0)
m.c202 = Constraint(expr= m.x46 - m.x131 - m.x132 == 0)
m.c203 = Constraint(expr= m.x113 - 1.6544851364539*m.b152 <= 0)
m.c204 = Constraint(expr= m.x114 + 1.6544851364539*m.b152 <= 1.6544851364539)
m.c205 = Constraint(expr= m.x131 - 0.829813106601623*m.b152 <= 0)
m.c206 = Constraint(expr= m.x132 + 0.829813106601623*m.b152 <= 0.829813106601623)

# Logical constraints among the binary selection variables: c207 is an
# exclusive choice between b133/b134; the remaining inequalities encode
# implication chains between upstream and downstream unit selections.
m.c207 = Constraint(expr= m.b133 + m.b134 == 1)
m.c208 = Constraint(expr= - m.b135 + m.b138 + m.b139 >= 0)
m.c209 = Constraint(expr= - m.b138 + m.b144 >= 0)
m.c210 = Constraint(expr= - m.b139 + m.b145 >= 0)
m.c211 = Constraint(expr= - m.b136 + m.b140 >= 0)
m.c212 = Constraint(expr= - m.b140 + m.b146 + m.b147 >= 0)
m.c213 = Constraint(expr= - m.b137 + m.b141 + m.b142 + m.b143 >= 0)
m.c214 = Constraint(expr= - m.b141 + m.b147 >= 0)
m.c215 = Constraint(expr= - m.b142 + m.b148 + m.b149 >= 0)
m.c216 = Constraint(expr= - m.b143 + m.b150 + m.b151 + m.b152 >= 0)
m.c217 = Constraint(expr= m.b133 + m.b134 - m.b135 >= 0)
m.c218 = Constraint(expr= m.b133 + m.b134 - m.b136 >= 0)
m.c219 = Constraint(expr= m.b133 + m.b134 - m.b137 >= 0)
m.c220 = Constraint(expr= m.b135 - m.b138 >= 0)
m.c221 = Constraint(expr= m.b135 - m.b139 >= 0)
m.c222 = Constraint(expr= m.b136 - m.b140 >= 0)
m.c223 = Constraint(expr= m.b137 - m.b141 >= 0)
m.c224 = Constraint(expr= m.b137 - m.b142 >= 0)
m.c225 = Constraint(expr= m.b137 - m.b143 >= 0)
m.c226 = Constraint(expr= m.b138 - m.b144 >= 0)
m.c227 = Constraint(expr= m.b139 - m.b145 >= 0)
m.c228 = Constraint(expr= m.b140 - m.b146 >= 0)
m.c229 = Constraint(expr= m.b140 - m.b147 >= 0)
m.c230 = Constraint(expr= m.b142 - m.b148 >= 0)
m.c231 = Constraint(expr= m.b142 - m.b149 >= 0)
m.c232 = Constraint(expr= m.b143 - m.b150 >= 0)
m.c233 = Constraint(expr= m.b143 - m.b151 >= 0)
m.c234 = Constraint(expr= m.b143 - m.b152 >= 0)
cf769136d937afa6c813cae6f2ac6b9974ebc4f5 | 756,114 | py | Python | cart_venv/Lib/site-packages/tensorflow_core/python/ops/gen_nn_ops.py | juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow- | 654be60f7986ac9bb7ce1d080ddee377c3389f93 | [
"MIT"
] | null | null | null | cart_venv/Lib/site-packages/tensorflow_core/python/ops/gen_nn_ops.py | juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow- | 654be60f7986ac9bb7ce1d080ddee377c3389f93 | [
"MIT"
] | null | null | null | cart_venv/Lib/site-packages/tensorflow_core/python/ops/gen_nn_ops.py | juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow- | 654be60f7986ac9bb7ce1d080ddee377c3389f93 | [
"MIT"
] | null | null | null | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.tf_export import kwarg_only as _kwarg_only
from tensorflow.tools.docs import doc_controls as _doc_controls
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Performs average pooling on the input.
  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.
  Args:
    value: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, height, width, channels]`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the sliding window for each dimension of `value`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of `value`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `value`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: call straight into the C layer. The flat argument
    # list (tensor inputs first, then alternating attr-name/value pairs)
    # is the TFE_Py_FastPathExecute calling convention; order matters.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "AvgPool",
        name, _ctx.post_execution_callbacks, value, "ksize", ksize, "strides",
        strides, "padding", padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # Fast path rejected the arguments; retry via the Python slow path.
      try:
        return avg_pool_eager_fallback(
            value, ksize=ksize, strides=strides, padding=padding,
            data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then build and record the op.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'avg_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'avg_pool' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "AvgPool", value=value, ksize=ksize, strides=strides, padding=padding,
                   data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"))
  # Register inputs/attrs with autodiff so gradients can be computed.
  _execute.record_gradient(
      "AvgPool", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias exported as tf.raw_ops.AvgPool; wrapped to accept
# keyword arguments only and hidden from generated API docs.
def AvgPool(value, ksize, strides, padding, data_format="NHWC", name=None):
  return avg_pool(value=value, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
AvgPool.__doc__ = avg_pool.__doc__
AvgPool = _doc_controls.do_not_generate_docs(_kwarg_only(AvgPool))
tf_export("raw_ops.AvgPool")(AvgPool)
def avg_pool_eager_fallback(value, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function avg_pool
  """
  # Validates/canonicalizes attrs in Python, then runs the op through the
  # generic eager executor instead of the C fast path.
  _ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'avg_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'avg_pool' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # Infer the dtype attr T from the input tensor.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)
  _inputs_flat = [value]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
  "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"AvgPool", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "AvgPool", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Performs 3D average pooling on the input.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path into the C layer; the interleaved input/attr argument
    # order is the TFE_Py_FastPathExecute calling convention.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "AvgPool3D", name, _ctx.post_execution_callbacks, input, "ksize",
        ksize, "strides", strides, "padding", padding, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      try:
        return avg_pool3d_eager_fallback(
            input, ksize=ksize, strides=strides, padding=padding,
            data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then build and record the op.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'avg_pool3d' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'avg_pool3d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "AvgPool3D", input=input, ksize=ksize, strides=strides,
                     padding=padding, data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "AvgPool3D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias exported as tf.raw_ops.AvgPool3D; keyword-arguments only,
# hidden from generated API docs.
def AvgPool3D(input, ksize, strides, padding, data_format="NDHWC", name=None):
  return avg_pool3d(input=input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
AvgPool3D.__doc__ = avg_pool3d.__doc__
AvgPool3D = _doc_controls.do_not_generate_docs(_kwarg_only(AvgPool3D))
tf_export("raw_ops.AvgPool3D")(AvgPool3D)
def avg_pool3d_eager_fallback(input, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function avg_pool3d
  """
  # Python-side attr canonicalization, then dispatch via the generic
  # eager executor.
  _ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'avg_pool3d' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'avg_pool3d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # Infer the dtype attr T from the input tensor.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
  "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"AvgPool3D", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "AvgPool3D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def avg_pool3d_grad(orig_input_shape, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Computes gradients of average pooling function.
  Args:
    orig_input_shape: A `Tensor` of type `int32`.
      The original input dimensions.
    grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Output backprop of shape `[batch, depth, rows, cols, channels]`.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path into the C layer; the interleaved input/attr argument
    # order is the TFE_Py_FastPathExecute calling convention.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "AvgPool3DGrad", name, _ctx.post_execution_callbacks,
        orig_input_shape, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      try:
        return avg_pool3d_grad_eager_fallback(
            orig_input_shape, grad, ksize=ksize, strides=strides,
            padding=padding, data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then build and record the op.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'avg_pool3d_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'avg_pool3d_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "AvgPool3DGrad", orig_input_shape=orig_input_shape, grad=grad,
                         ksize=ksize, strides=strides, padding=padding,
                         data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "AvgPool3DGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias exported as tf.raw_ops.AvgPool3DGrad; keyword-arguments only,
# hidden from generated API docs.
def AvgPool3DGrad(orig_input_shape, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  return avg_pool3d_grad(orig_input_shape=orig_input_shape, grad=grad, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
AvgPool3DGrad.__doc__ = avg_pool3d_grad.__doc__
AvgPool3DGrad = _doc_controls.do_not_generate_docs(_kwarg_only(AvgPool3DGrad))
tf_export("raw_ops.AvgPool3DGrad")(AvgPool3DGrad)
def avg_pool3d_grad_eager_fallback(orig_input_shape, grad, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function avg_pool3d_grad
  """
  # Python-side attr canonicalization, then dispatch via the generic
  # eager executor.
  _ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'avg_pool3d_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'avg_pool3d_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # T is inferred from grad; orig_input_shape is always int32.
  _attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx)
  orig_input_shape = _ops.convert_to_tensor(orig_input_shape, _dtypes.int32)
  _inputs_flat = [orig_input_shape, grad]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
  "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"AvgPool3DGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "AvgPool3DGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def avg_pool_grad(orig_input_shape, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes gradients of the average pooling function.
  Args:
    orig_input_shape: A `Tensor` of type `int32`.
      1-D. Shape of the original input to `avg_pool`.
    grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.
      the output of `avg_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the sliding window for each dimension of the input.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path into the C layer; the interleaved input/attr argument
    # order is the TFE_Py_FastPathExecute calling convention.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "AvgPoolGrad", name, _ctx.post_execution_callbacks, orig_input_shape,
        grad, "ksize", ksize, "strides", strides, "padding", padding,
        "data_format", data_format)
      return _result
    except _core._FallbackException:
      try:
        return avg_pool_grad_eager_fallback(
            orig_input_shape, grad, ksize=ksize, strides=strides,
            padding=padding, data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then build and record the op.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'avg_pool_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'avg_pool_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "AvgPoolGrad", orig_input_shape=orig_input_shape, grad=grad,
                       ksize=ksize, strides=strides, padding=padding,
                       data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "AvgPoolGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias exported as tf.raw_ops.AvgPoolGrad; keyword-arguments only,
# hidden from generated API docs.
def AvgPoolGrad(orig_input_shape, grad, ksize, strides, padding, data_format="NHWC", name=None):
  return avg_pool_grad(orig_input_shape=orig_input_shape, grad=grad, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
AvgPoolGrad.__doc__ = avg_pool_grad.__doc__
AvgPoolGrad = _doc_controls.do_not_generate_docs(_kwarg_only(AvgPoolGrad))
tf_export("raw_ops.AvgPoolGrad")(AvgPoolGrad)
def avg_pool_grad_eager_fallback(orig_input_shape, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function avg_pool_grad
  """
  # Python-side attr canonicalization, then dispatch via the generic
  # eager executor.
  _ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'avg_pool_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'avg_pool_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # T is inferred from grad; orig_input_shape is always int32.
  _attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx)
  orig_input_shape = _ops.convert_to_tensor(orig_input_shape, _dtypes.int32)
  _inputs_flat = [orig_input_shape, grad]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
  "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"AvgPoolGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "AvgPoolGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def _batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None):
  r"""Batch normalization.
  This op is deprecated. Prefer `tf.nn.batch_normalization`.
  Args:
    t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A 4D input Tensor.
    m: A `Tensor`. Must have the same type as `t`.
      A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    v: A `Tensor`. Must have the same type as `t`.
      A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    beta: A `Tensor`. Must have the same type as `t`.
      A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    gamma: A `Tensor`. Must have the same type as `t`.
      A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    variance_epsilon: A `float`. A small float number to avoid dividing by 0.
    scale_after_normalization: A `bool`.
      A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `t`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path into the C layer; the interleaved input/attr argument
    # order is the TFE_Py_FastPathExecute calling convention.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "BatchNormWithGlobalNormalization", name,
        _ctx.post_execution_callbacks, t, m, v, beta, gamma,
        "variance_epsilon", variance_epsilon, "scale_after_normalization",
        scale_after_normalization)
      return _result
    except _core._FallbackException:
      try:
        return _batch_norm_with_global_normalization_eager_fallback(
            t, m, v, beta, gamma, variance_epsilon=variance_epsilon,
            scale_after_normalization=scale_after_normalization, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize the two non-tensor attrs, then build the op.
  variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
  scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
  _, _, _op = _op_def_lib._apply_op_helper(
        "BatchNormWithGlobalNormalization", t=t, m=m, v=v, beta=beta,
                                            gamma=gamma,
                                            variance_epsilon=variance_epsilon,
                                            scale_after_normalization=scale_after_normalization,
                                            name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "variance_epsilon",
            _op.get_attr("variance_epsilon"), "scale_after_normalization",
            _op.get_attr("scale_after_normalization"))
  _execute.record_gradient(
      "BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias exported as tf.raw_ops.BatchNormWithGlobalNormalization;
# keyword-arguments only, hidden from generated API docs.
def BatchNormWithGlobalNormalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None):
  return _batch_norm_with_global_normalization(t=t, m=m, v=v, beta=beta, gamma=gamma, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization, name=name)
BatchNormWithGlobalNormalization.__doc__ = _batch_norm_with_global_normalization.__doc__
BatchNormWithGlobalNormalization = _doc_controls.do_not_generate_docs(_kwarg_only(BatchNormWithGlobalNormalization))
tf_export("raw_ops.BatchNormWithGlobalNormalization")(BatchNormWithGlobalNormalization)
def _batch_norm_with_global_normalization_eager_fallback(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function _batch_norm_with_global_normalization
  """
  # Python-side attr canonicalization, then dispatch via the generic
  # eager executor.
  _ctx = ctx if ctx else _context.context()
  variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
  scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
  # All five tensor inputs share a single dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([t, m, v, beta, gamma], _ctx)
  (t, m, v, beta, gamma) = _inputs_T
  _inputs_flat = [t, m, v, beta, gamma]
  _attrs = ("T", _attr_T, "variance_epsilon", variance_epsilon,
  "scale_after_normalization", scale_after_normalization)
  _result = _execute.execute(b"BatchNormWithGlobalNormalization", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Named container for the five outputs of BatchNormWithGlobalNormalizationGrad:
# gradients w.r.t. input (dx), mean (dm), variance (dv), beta (db), gamma (dg).
_batch_norm_with_global_normalization_grad_outputs = ["dx", "dm", "dv", "db",
                                                      "dg"]
_BatchNormWithGlobalNormalizationGradOutput = _collections.namedtuple(
    "BatchNormWithGlobalNormalizationGrad",
    _batch_norm_with_global_normalization_grad_outputs)
def batch_norm_with_global_normalization_grad(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name=None):
  r"""Gradients for batch normalization.
  This op is deprecated. See `tf.nn.batch_normalization`.
  Args:
    t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A 4D input Tensor.
    m: A `Tensor`. Must have the same type as `t`.
      A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    v: A `Tensor`. Must have the same type as `t`.
      A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    gamma: A `Tensor`. Must have the same type as `t`.
      A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this Tensor will be multiplied
      with the normalized Tensor.
    backprop: A `Tensor`. Must have the same type as `t`. 4D backprop Tensor.
    variance_epsilon: A `float`. A small float number to avoid dividing by 0.
    scale_after_normalization: A `bool`.
      A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (dx, dm, dv, db, dg).
    dx: A `Tensor`. Has the same type as `t`.
    dm: A `Tensor`. Has the same type as `t`.
    dv: A `Tensor`. Has the same type as `t`.
    db: A `Tensor`. Has the same type as `t`.
    dg: A `Tensor`. Has the same type as `t`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path into the C layer; the interleaved input/attr argument
    # order is the TFE_Py_FastPathExecute calling convention.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "BatchNormWithGlobalNormalizationGrad", name,
        _ctx.post_execution_callbacks, t, m, v, gamma, backprop,
        "variance_epsilon", variance_epsilon, "scale_after_normalization",
        scale_after_normalization)
      # Wrap the five raw results in the named output tuple.
      _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return batch_norm_with_global_normalization_grad_eager_fallback(
            t, m, v, gamma, backprop, variance_epsilon=variance_epsilon,
            scale_after_normalization=scale_after_normalization, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize the two non-tensor attrs, then build the op.
  variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
  scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
  _, _, _op = _op_def_lib._apply_op_helper(
        "BatchNormWithGlobalNormalizationGrad", t=t, m=m, v=v, gamma=gamma,
                                                backprop=backprop,
                                                variance_epsilon=variance_epsilon,
                                                scale_after_normalization=scale_after_normalization,
                                                name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "variance_epsilon",
            _op.get_attr("variance_epsilon"), "scale_after_normalization",
            _op.get_attr("scale_after_normalization"))
  _execute.record_gradient(
      "BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result, name)
  _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
  return _result
# Raw-op alias exported as tf.raw_ops.BatchNormWithGlobalNormalizationGrad;
# keyword-arguments only, hidden from generated API docs.
def BatchNormWithGlobalNormalizationGrad(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name=None):
  return batch_norm_with_global_normalization_grad(t=t, m=m, v=v, gamma=gamma, backprop=backprop, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization, name=name)
BatchNormWithGlobalNormalizationGrad.__doc__ = batch_norm_with_global_normalization_grad.__doc__
BatchNormWithGlobalNormalizationGrad = _doc_controls.do_not_generate_docs(_kwarg_only(BatchNormWithGlobalNormalizationGrad))
tf_export("raw_ops.BatchNormWithGlobalNormalizationGrad")(BatchNormWithGlobalNormalizationGrad)
def batch_norm_with_global_normalization_grad_eager_fallback(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function batch_norm_with_global_normalization_grad
  """
  # Python-side attr canonicalization, then dispatch via the generic
  # eager executor; returns the five gradients as a named tuple.
  _ctx = ctx if ctx else _context.context()
  variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
  scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
  # All five tensor inputs share a single dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([t, m, v, gamma, backprop], _ctx)
  (t, m, v, gamma, backprop) = _inputs_T
  _inputs_flat = [t, m, v, gamma, backprop]
  _attrs = ("T", _attr_T, "variance_epsilon", variance_epsilon,
  "scale_after_normalization", scale_after_normalization)
  _result = _execute.execute(b"BatchNormWithGlobalNormalizationGrad", 5,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result, name)
  _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
  return _result
def bias_add(value, bias, data_format="NHWC", name=None):
  r"""Adds `bias` to `value`.
  This is a special case of `tf.add` where `bias` is restricted to be 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Args:
    value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Any number of dimensions.
    bias: A `Tensor`. Must have the same type as `value`.
      1-D with size the last dimension of `value`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the bias tensor will be added to the last dimension
      of the value tensor.
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, in_channels, in_height, in_width].
      The tensor will be added to "in_channels", the third-to-the-last
      dimension.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `value`.
  """
  # Reuse the thread-local eager context when one has already been created.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the fast-path C extension first; on _FallbackException
    # retry through the slower pure-Python dispatch below.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "BiasAdd",
        name, _ctx.post_execution_callbacks, value, bias, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      try:
        return bias_add_eager_fallback(
            value, bias, data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the requested op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "BiasAdd", value=value, bias=bias, data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "data_format",
            _op.get_attr("data_format"))
  _execute.record_gradient(
      "BiasAdd", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def BiasAdd(value, bias, data_format="NHWC", name=None):
  # CamelCase raw-op alias forwarding to the snake_case implementation.
  # (No docstring on purpose -- __doc__ is copied from bias_add below.)
  return bias_add(value=value, bias=bias, data_format=data_format, name=name)
BiasAdd.__doc__ = bias_add.__doc__
# Hide from generated docs, apply _kwarg_only wrapping, export as
# tf.raw_ops.BiasAdd.
BiasAdd = _doc_controls.do_not_generate_docs(_kwarg_only(BiasAdd))
tf_export("raw_ops.BiasAdd")(BiasAdd)
def bias_add_eager_fallback(value, bias, data_format="NHWC", name=None, ctx=None):
  r"""Eager-mode slow path for bias_add.

  Normalizes the `data_format` attribute, matches `value` and `bias` to a
  common dtype, and dispatches BiasAdd through _execute.execute instead of
  the fast-path C extension.
  """
  _ctx = ctx or _context.context()
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  _attr_T, (value, bias) = _execute.args_to_matching_eager([value, bias], _ctx)
  flat_inputs = [value, bias]
  op_attrs = ("T", _attr_T, "data_format", data_format)
  outputs = _execute.execute(b"BiasAdd", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("BiasAdd", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element list.
  out, = outputs
  return out
def bias_add_grad(out_backprop, data_format="NHWC", name=None):
  r"""The backward operation for "BiasAdd" on the "bias" tensor.
  It accumulates all the values from out_backprop into the feature dimension.
  For NHWC data format, the feature dimension is the last. For NCHW data format,
  the feature dimension is the third-to-last.
  Args:
    out_backprop: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Any number of dimensions.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the bias tensor will be added to the last dimension
      of the value tensor.
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, in_channels, in_height, in_width].
      The tensor will be added to "in_channels", the third-to-the-last
      dimension.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `out_backprop`.
  """
  # Reuse the thread-local eager context when one has already been created.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: fast-path C extension first, Python fallback second.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "BiasAddGrad", name, _ctx.post_execution_callbacks, out_backprop,
        "data_format", data_format)
      return _result
    except _core._FallbackException:
      try:
        return bias_add_grad_eager_fallback(
            out_backprop, data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the requested op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "BiasAddGrad", out_backprop=out_backprop, data_format=data_format,
        name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "data_format",
            _op.get_attr("data_format"))
  _execute.record_gradient(
      "BiasAddGrad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def BiasAddGrad(out_backprop, data_format="NHWC", name=None):
  # CamelCase raw-op alias forwarding to the snake_case implementation.
  # (No docstring on purpose -- __doc__ is copied from bias_add_grad below.)
  return bias_add_grad(out_backprop=out_backprop, data_format=data_format, name=name)
BiasAddGrad.__doc__ = bias_add_grad.__doc__
# Hide from generated docs, apply _kwarg_only wrapping, export as
# tf.raw_ops.BiasAddGrad.
BiasAddGrad = _doc_controls.do_not_generate_docs(_kwarg_only(BiasAddGrad))
tf_export("raw_ops.BiasAddGrad")(BiasAddGrad)
def bias_add_grad_eager_fallback(out_backprop, data_format="NHWC", name=None, ctx=None):
  r"""Eager-mode slow path for bias_add_grad.

  Normalizes the `data_format` attribute and dispatches BiasAddGrad through
  _execute.execute instead of the fast-path C extension.
  """
  _ctx = ctx or _context.context()
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  _attr_T, (out_backprop,) = _execute.args_to_matching_eager(
      [out_backprop], _ctx)
  flat_inputs = [out_backprop]
  op_attrs = ("T", _attr_T, "data_format", data_format)
  outputs = _execute.execute(b"BiasAddGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("BiasAddGrad", flat_inputs, op_attrs, outputs,
                           name)
  # Single-output op: unwrap the one-element list.
  out, = outputs
  return out
def bias_add_v1(value, bias, name=None):
  r"""Adds `bias` to `value`.
  This is a deprecated version of BiasAdd and will be soon removed.
  This is a special case of `tf.add` where `bias` is restricted to be 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Args:
    value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Any number of dimensions.
    bias: A `Tensor`. Must have the same type as `value`.
      1-D with size the last dimension of `value`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `value`.
  """
  # Reuse the thread-local eager context when one has already been created.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: fast-path C extension first, Python fallback second.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "BiasAddV1", name, _ctx.post_execution_callbacks, value, bias)
      return _result
    except _core._FallbackException:
      try:
        return bias_add_v1_eager_fallback(
            value, bias, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the requested op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "BiasAddV1", value=value, bias=bias, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "BiasAddV1", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def BiasAddV1(value, bias, name=None):
  # CamelCase raw-op alias forwarding to the snake_case implementation.
  # (No docstring on purpose -- __doc__ is copied from bias_add_v1 below.)
  return bias_add_v1(value=value, bias=bias, name=name)
BiasAddV1.__doc__ = bias_add_v1.__doc__
# Hide from generated docs, apply _kwarg_only wrapping, export as
# tf.raw_ops.BiasAddV1.
BiasAddV1 = _doc_controls.do_not_generate_docs(_kwarg_only(BiasAddV1))
tf_export("raw_ops.BiasAddV1")(BiasAddV1)
def bias_add_v1_eager_fallback(value, bias, name=None, ctx=None):
  r"""Eager-mode slow path for bias_add_v1.

  Matches `value` and `bias` to a common dtype and dispatches BiasAddV1
  through _execute.execute instead of the fast-path C extension.
  """
  _ctx = ctx or _context.context()
  _attr_T, (value, bias) = _execute.args_to_matching_eager([value, bias], _ctx)
  flat_inputs = [value, bias]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"BiasAddV1", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("BiasAddV1", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element list.
  out, = outputs
  return out
def conv2d(input, filter, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:
  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.
  In detail, with the default NHWC format,
      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                          filter[di, dj, q, k]
  Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filter: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: A list of `ints`.
      1-D tensor of length 4. The stride of the sliding window for each
      dimension of `input`. The dimension order is determined by the value of
      `data_format`, see below for details.
    padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
      The type of padding algorithm to use.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    explicit_paddings: An optional list of `ints`. Defaults to `[]`.
      If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
      dimension, the amount of padding inserted before and after the dimension is
      `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
      `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
      [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Reuse the thread-local eager context when one has already been created.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: the fast-path C extension takes attrs as alternating
    # name/value positional arguments; on _FallbackException retry through
    # the pure-Python dispatch below.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Conv2D",
        name, _ctx.post_execution_callbacks, input, filter, "strides",
        strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding,
        "explicit_paddings", explicit_paddings, "data_format", data_format,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      try:
        return conv2d_eager_fallback(
            input, filter, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu,
            padding=padding, explicit_paddings=explicit_paddings,
            data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the requested op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Validate/normalize every attribute before building the op node.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if use_cudnn_on_gpu is None:
    use_cudnn_on_gpu = True
  use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
  if explicit_paddings is None:
    explicit_paddings = []
  if not isinstance(explicit_paddings, (list, tuple)):
    raise TypeError(
        "Expected list for 'explicit_paddings' argument to "
        "'conv2d' Op, not %r." % explicit_paddings)
  explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv2d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "Conv2D", input=input, filter=filter, strides=strides,
        padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu,
        explicit_paddings=explicit_paddings,
        data_format=data_format, dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op so record_gradient sees the
  # canonicalized values.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "use_cudnn_on_gpu", _op.get_attr("use_cudnn_on_gpu"), "padding",
            _op.get_attr("padding"), "explicit_paddings",
            _op.get_attr("explicit_paddings"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "Conv2D", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def Conv2D(input, filter, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  # CamelCase raw-op alias forwarding to the snake_case implementation.
  # (No docstring on purpose -- __doc__ is copied from conv2d below.)
  return conv2d(input=input, filter=filter, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name)
Conv2D.__doc__ = conv2d.__doc__
# Hide from generated docs, apply _kwarg_only wrapping, export as
# tf.raw_ops.Conv2D.
Conv2D = _doc_controls.do_not_generate_docs(_kwarg_only(Conv2D))
tf_export("raw_ops.Conv2D")(Conv2D)
def conv2d_eager_fallback(input, filter, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv2d

  Validates and normalizes every attribute the same way conv2d()'s
  graph-mode branch does, then dispatches Conv2D through _execute.execute
  instead of the fast-path C extension.
  """
  _ctx = ctx if ctx else _context.context()
  # Attribute validation mirrors conv2d()'s graph-mode branch; the order of
  # checks determines which TypeError fires first.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if use_cudnn_on_gpu is None:
    use_cudnn_on_gpu = True
  use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
  if explicit_paddings is None:
    explicit_paddings = []
  if not isinstance(explicit_paddings, (list, tuple)):
    raise TypeError(
        "Expected list for 'explicit_paddings' argument to "
        "'conv2d' Op, not %r." % explicit_paddings)
  explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv2d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Coerce input/filter to a common dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
  (input, filter) = _inputs_T
  _inputs_flat = [input, filter]
  _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu",
  use_cudnn_on_gpu, "padding", padding, "explicit_paddings",
  explicit_paddings, "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"Conv2D", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv2D", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def conv2d_backprop_filter(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes the gradients of convolution with respect to the filter.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, out_channels]` tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified with
      format.
    padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
      The type of padding algorithm to use.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    explicit_paddings: An optional list of `ints`. Defaults to `[]`.
      If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
      dimension, the amount of padding inserted before and after the dimension is
      `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
      `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
      [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Reuse the thread-local eager context when one has already been created.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: fast-path C extension first, Python fallback second.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Conv2DBackpropFilter", name, _ctx.post_execution_callbacks, input,
        filter_sizes, out_backprop, "strides", strides, "use_cudnn_on_gpu",
        use_cudnn_on_gpu, "padding", padding, "explicit_paddings",
        explicit_paddings, "data_format", data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      try:
        return conv2d_backprop_filter_eager_fallback(
            input, filter_sizes, out_backprop, strides=strides,
            use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding,
            explicit_paddings=explicit_paddings, data_format=data_format,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the requested op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Validate/normalize every attribute before building the op node.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv2d_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if use_cudnn_on_gpu is None:
    use_cudnn_on_gpu = True
  use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
  if explicit_paddings is None:
    explicit_paddings = []
  if not isinstance(explicit_paddings, (list, tuple)):
    raise TypeError(
        "Expected list for 'explicit_paddings' argument to "
        "'conv2d_backprop_filter' Op, not %r." % explicit_paddings)
  explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv2d_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "Conv2DBackpropFilter", input=input, filter_sizes=filter_sizes,
        out_backprop=out_backprop, strides=strides,
        padding=padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        explicit_paddings=explicit_paddings,
        data_format=data_format, dilations=dilations,
        name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op so record_gradient sees the
  # canonicalized values.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "use_cudnn_on_gpu", _op.get_attr("use_cudnn_on_gpu"), "padding",
            _op.get_attr("padding"), "explicit_paddings",
            _op.get_attr("explicit_paddings"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "Conv2DBackpropFilter", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def Conv2DBackpropFilter(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  # CamelCase raw-op alias forwarding to the snake_case implementation.
  # (No docstring on purpose -- __doc__ is copied below.)
  return conv2d_backprop_filter(input=input, filter_sizes=filter_sizes, out_backprop=out_backprop, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name)
Conv2DBackpropFilter.__doc__ = conv2d_backprop_filter.__doc__
# Hide from generated docs, apply _kwarg_only wrapping, export as
# tf.raw_ops.Conv2DBackpropFilter.
Conv2DBackpropFilter = _doc_controls.do_not_generate_docs(_kwarg_only(Conv2DBackpropFilter))
tf_export("raw_ops.Conv2DBackpropFilter")(Conv2DBackpropFilter)
def conv2d_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv2d_backprop_filter

  Validates and normalizes every attribute the same way the graph-mode
  branch does, then dispatches Conv2DBackpropFilter through
  _execute.execute instead of the fast-path C extension.
  """
  _ctx = ctx if ctx else _context.context()
  # Attribute validation mirrors the graph-mode branch; check order
  # determines which TypeError fires first.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv2d_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if use_cudnn_on_gpu is None:
    use_cudnn_on_gpu = True
  use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
  if explicit_paddings is None:
    explicit_paddings = []
  if not isinstance(explicit_paddings, (list, tuple)):
    raise TypeError(
        "Expected list for 'explicit_paddings' argument to "
        "'conv2d_backprop_filter' Op, not %r." % explicit_paddings)
  explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv2d_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Only input/out_backprop share the "T" attr; filter_sizes is always int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], _ctx)
  (input, out_backprop) = _inputs_T
  filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
  _inputs_flat = [input, filter_sizes, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu",
  use_cudnn_on_gpu, "padding", padding, "explicit_paddings",
  explicit_paddings, "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"Conv2DBackpropFilter", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv2DBackpropFilter", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def conv2d_backprop_input(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes the gradients of convolution with respect to the input.
  Args:
    input_sizes: A `Tensor` of type `int32`.
      An integer vector representing the shape of `input`,
      where `input` is a 4-D `[batch, height, width, channels]` tensor.
    filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified with
      format.
    padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
      The type of padding algorithm to use.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    explicit_paddings: An optional list of `ints`. Defaults to `[]`.
      If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
      dimension, the amount of padding inserted before and after the dimension is
      `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
      `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
      [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  # Reuse the thread-local eager context when one has already been created.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: fast-path C extension first, Python fallback second.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Conv2DBackpropInput", name, _ctx.post_execution_callbacks,
        input_sizes, filter, out_backprop, "strides", strides,
        "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding,
        "explicit_paddings", explicit_paddings, "data_format", data_format,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      try:
        return conv2d_backprop_input_eager_fallback(
            input_sizes, filter, out_backprop, strides=strides,
            use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding,
            explicit_paddings=explicit_paddings, data_format=data_format,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Re-raise op failures with the requested op name appended for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Validate/normalize every attribute before building the op node.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv2d_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if use_cudnn_on_gpu is None:
    use_cudnn_on_gpu = True
  use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
  if explicit_paddings is None:
    explicit_paddings = []
  if not isinstance(explicit_paddings, (list, tuple)):
    raise TypeError(
        "Expected list for 'explicit_paddings' argument to "
        "'conv2d_backprop_input' Op, not %r." % explicit_paddings)
  explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv2d_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "Conv2DBackpropInput", input_sizes=input_sizes, filter=filter,
        out_backprop=out_backprop, strides=strides,
        padding=padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        explicit_paddings=explicit_paddings,
        data_format=data_format, dilations=dilations,
        name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op so record_gradient sees the
  # canonicalized values.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "use_cudnn_on_gpu", _op.get_attr("use_cudnn_on_gpu"), "padding",
            _op.get_attr("padding"), "explicit_paddings",
            _op.get_attr("explicit_paddings"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "Conv2DBackpropInput", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result
def Conv2DBackpropInput(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  # CamelCase raw-op alias forwarding to the snake_case implementation.
  # (No docstring on purpose -- __doc__ is copied below.)
  return conv2d_backprop_input(input_sizes=input_sizes, filter=filter, out_backprop=out_backprop, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations, name=name)
Conv2DBackpropInput.__doc__ = conv2d_backprop_input.__doc__
# Hide from generated docs, apply _kwarg_only wrapping, export as
# tf.raw_ops.Conv2DBackpropInput.
Conv2DBackpropInput = _doc_controls.do_not_generate_docs(_kwarg_only(Conv2DBackpropInput))
tf_export("raw_ops.Conv2DBackpropInput")(Conv2DBackpropInput)
def conv2d_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=None, data_format="NHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv2d_backprop_input

  Validates and canonicalizes the attribute arguments in Python, coerces
  the tensor inputs, then dispatches the Conv2DBackpropInput op through
  _execute.execute and records a gradient.

  Fix: the generated signature used mutable list defaults
  (explicit_paddings=[], dilations=[1, 1, 1, 1]). They are replaced with
  None here; the body already maps None to exactly those values, so the
  change is behavior-preserving for every caller.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv2d_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if use_cudnn_on_gpu is None:
    use_cudnn_on_gpu = True
  use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
  if explicit_paddings is None:
    explicit_paddings = []
  if not isinstance(explicit_paddings, (list, tuple)):
    raise TypeError(
        "Expected list for 'explicit_paddings' argument to "
        "'conv2d_backprop_input' Op, not %r." % explicit_paddings)
  explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv2d_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # filter and out_backprop must share dtype T; input_sizes is always int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], _ctx)
  (filter, out_backprop) = _inputs_T
  input_sizes = _ops.convert_to_tensor(input_sizes, _dtypes.int32)
  _inputs_flat = [input_sizes, filter, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu",
  use_cudnn_on_gpu, "padding", padding, "explicit_paddings",
  explicit_paddings, "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"Conv2DBackpropInput", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv2DBackpropInput", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def conv3d(input, filter, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
  r"""Computes a 3-D convolution given 5-D `input` and `filter` tensors.
  In signal processing, cross-correlation is a measure of similarity of
  two waveforms as a function of a time-lag applied to one of them. This
  is also known as a sliding dot product or sliding inner-product.
  Our Conv3D implements a form of cross-correlation.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Shape `[batch, in_depth, in_height, in_width, in_channels]`.
    filter: A `Tensor`. Must have the same type as `input`.
      Shape `[filter_depth, filter_height, filter_width, in_channels,
      out_channels]`. `in_channels` must match between `input` and `filter`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
      1-D tensor of length 5. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: hand all args to the C layer in a single call.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Conv3D",
        name, _ctx.post_execution_callbacks, input, filter, "strides",
        strides, "padding", padding, "data_format", data_format, "dilations",
        dilations)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the arguments; retry via the Python slow path,
        # which canonicalizes attrs before executing.
        return conv3d_eager_fallback(
            input, filter, strides=strides, padding=padding,
            data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Append the requested op name to the error message for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs in Python, then build the op.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3D", input=input, filter=filter, strides=strides,
                  padding=padding, data_format=data_format,
                  dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read attrs from the created op so the recorded gradient sees the
  # canonical values.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "Conv3D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias: exported as tf.raw_ops.Conv3D.
def Conv3D(input, filter, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation defined above.
  return conv3d(input=input, filter=filter, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, and wrap it with _kwarg_only (named so; presumably enforces
# keyword-only calls — defined elsewhere in this module).
Conv3D.__doc__ = conv3d.__doc__
Conv3D = _doc_controls.do_not_generate_docs(_kwarg_only(Conv3D))
tf_export("raw_ops.Conv3D")(Conv3D)
def conv3d_eager_fallback(input, filter, strides, padding, data_format="NDHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d

  Validates and canonicalizes the attribute arguments in Python, coerces
  the tensor inputs to a matching dtype, then dispatches the Conv3D op
  through _execute.execute and records a gradient.

  Fix: replaced the mutable list default dilations=[1, 1, 1, 1, 1] with
  None; the body already maps None to that exact list, so the change is
  behavior-preserving for every caller.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # input and filter must share dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
  (input, filter) = _inputs_T
  _inputs_flat = [input, filter]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"Conv3D", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv3D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def conv3d_backprop_filter(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None):
  r"""Computes the gradients of 3-D convolution with respect to the filter.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      Shape `[batch, depth, rows, cols, in_channels]`.
    filter: A `Tensor`. Must have the same type as `input`.
      Shape `[depth, rows, cols, in_channels, out_channels]`.
      `in_channels` must match between `input` and `filter`.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
      out_channels]`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: single C call; on _FallbackException retry through the
  # Python slow path (conv3d_backprop_filter_eager_fallback).
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Conv3DBackpropFilter", name, _ctx.post_execution_callbacks, input,
        filter, out_backprop, "strides", strides, "padding", padding,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      try:
        return conv3d_backprop_filter_eager_fallback(
            input, filter, out_backprop, strides=strides, padding=padding,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Append the requested op name to the error message for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs in Python, then build the op.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3DBackpropFilter", input=input, filter=filter,
                                out_backprop=out_backprop, strides=strides,
                                padding=padding, dilations=dilations,
                                name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read attrs from the created op so the recorded gradient sees the
  # canonical values.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "Conv3DBackpropFilter", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias: exported as tf.raw_ops.Conv3DBackpropFilter.
def Conv3DBackpropFilter(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation defined above.
  return conv3d_backprop_filter(input=input, filter=filter, out_backprop=out_backprop, strides=strides, padding=padding, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, and wrap it with _kwarg_only (named so; presumably enforces
# keyword-only calls — defined elsewhere in this module).
Conv3DBackpropFilter.__doc__ = conv3d_backprop_filter.__doc__
Conv3DBackpropFilter = _doc_controls.do_not_generate_docs(_kwarg_only(Conv3DBackpropFilter))
tf_export("raw_ops.Conv3DBackpropFilter")(Conv3DBackpropFilter)
def conv3d_backprop_filter_eager_fallback(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d_backprop_filter

  Canonicalizes attrs in Python, coerces the three tensor inputs to a
  matching dtype, then executes the Conv3DBackpropFilter op.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(stride, "strides") for stride in strides]
  padding = _execute.make_str(padding, "padding")
  dilations = [1, 1, 1, 1, 1] if dilations is None else dilations
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(d, "dilations") for d in dilations]
  # The three tensor inputs must share a single dtype T.
  _attr_T, tensor_args = _execute.args_to_matching_eager(
      [input, filter, out_backprop], eager_ctx)
  input, filter, out_backprop = tensor_args
  flat_inputs = [input, filter, out_backprop]
  op_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
              "dilations", dilations)
  outputs = _execute.execute(b"Conv3DBackpropFilter", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Conv3DBackpropFilter", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
@_dispatch.add_dispatch_list
@tf_export(v1=['nn.conv3d_backprop_filter', 'nn.conv3d_backprop_filter_v2'])
@deprecated_endpoints('nn.conv3d_backprop_filter', 'nn.conv3d_backprop_filter_v2')
def conv3d_backprop_filter_v2(input, filter_sizes, out_backprop, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
  r"""Computes the gradients of 3-D convolution with respect to the filter.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Shape `[batch, depth, rows, cols, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 5-D
      `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
      tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
      out_channels]`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
      1-D tensor of length 5. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: one C call. On _FallbackException retry the Python
  # slow path; on TypeError/ValueError offer the args to registered
  # dispatch handlers before re-raising.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Conv3DBackpropFilterV2", name, _ctx.post_execution_callbacks, input,
        filter_sizes, out_backprop, "strides", strides, "padding", padding,
        "data_format", data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      try:
        return conv3d_backprop_filter_v2_eager_fallback(
            input, filter_sizes, out_backprop, strides=strides,
            padding=padding, data_format=data_format, dilations=dilations,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              conv3d_backprop_filter_v2, input=input,
                                         filter_sizes=filter_sizes,
                                         out_backprop=out_backprop,
                                         strides=strides, padding=padding,
                                         data_format=data_format,
                                         dilations=dilations, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Append the requested op name to the error message for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then build the op; dispatch
  # handlers get a chance here too on TypeError/ValueError.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_filter_v2' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_filter_v2' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3DBackpropFilterV2", input=input, filter_sizes=filter_sizes,
                                  out_backprop=out_backprop, strides=strides,
                                  padding=padding, data_format=data_format,
                                  dilations=dilations, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          conv3d_backprop_filter_v2, input=input, filter_sizes=filter_sizes,
                                     out_backprop=out_backprop,
                                     strides=strides, padding=padding,
                                     data_format=data_format,
                                     dilations=dilations, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read attrs from the created op so the recorded gradient sees the
  # canonical values.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias: exported as tf.raw_ops.Conv3DBackpropFilterV2.
def Conv3DBackpropFilterV2(input, filter_sizes, out_backprop, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation defined above.
  return conv3d_backprop_filter_v2(input=input, filter_sizes=filter_sizes, out_backprop=out_backprop, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, and wrap it with _kwarg_only (named so; presumably enforces
# keyword-only calls — defined elsewhere in this module).
Conv3DBackpropFilterV2.__doc__ = conv3d_backprop_filter_v2.__doc__
Conv3DBackpropFilterV2 = _doc_controls.do_not_generate_docs(_kwarg_only(Conv3DBackpropFilterV2))
tf_export("raw_ops.Conv3DBackpropFilterV2")(Conv3DBackpropFilterV2)
def conv3d_backprop_filter_v2_eager_fallback(input, filter_sizes, out_backprop, strides, padding, data_format="NDHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d_backprop_filter_v2

  Validates and canonicalizes the attribute arguments in Python, coerces
  the tensor inputs, then dispatches the Conv3DBackpropFilterV2 op through
  _execute.execute and records a gradient.

  Fix: replaced the mutable list default dilations=[1, 1, 1, 1, 1] with
  None; the body already maps None to that exact list, so the change is
  behavior-preserving for every caller.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_filter_v2' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_filter_v2' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # input and out_backprop share dtype T; filter_sizes is always int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], _ctx)
  (input, out_backprop) = _inputs_T
  filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
  _inputs_flat = [input, filter_sizes, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"Conv3DBackpropFilterV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def conv3d_backprop_input(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None):
  r"""Computes the gradients of 3-D convolution with respect to the input.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      Shape `[batch, depth, rows, cols, in_channels]`.
    filter: A `Tensor`. Must have the same type as `input`.
      Shape `[depth, rows, cols, in_channels, out_channels]`.
      `in_channels` must match between `input` and `filter`.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
      out_channels]`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: single C call; on _FallbackException retry through the
  # Python slow path (conv3d_backprop_input_eager_fallback).
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Conv3DBackpropInput", name, _ctx.post_execution_callbacks, input,
        filter, out_backprop, "strides", strides, "padding", padding,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      try:
        return conv3d_backprop_input_eager_fallback(
            input, filter, out_backprop, strides=strides, padding=padding,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Append the requested op name to the error message for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs in Python, then build the op.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3DBackpropInput", input=input, filter=filter,
                               out_backprop=out_backprop, strides=strides,
                               padding=padding, dilations=dilations,
                               name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read attrs from the created op so the recorded gradient sees the
  # canonical values.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "Conv3DBackpropInput", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias: exported as tf.raw_ops.Conv3DBackpropInput.
def Conv3DBackpropInput(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation defined above.
  return conv3d_backprop_input(input=input, filter=filter, out_backprop=out_backprop, strides=strides, padding=padding, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, and wrap it with _kwarg_only (named so; presumably enforces
# keyword-only calls — defined elsewhere in this module).
Conv3DBackpropInput.__doc__ = conv3d_backprop_input.__doc__
Conv3DBackpropInput = _doc_controls.do_not_generate_docs(_kwarg_only(Conv3DBackpropInput))
tf_export("raw_ops.Conv3DBackpropInput")(Conv3DBackpropInput)
def conv3d_backprop_input_eager_fallback(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d_backprop_input

  Canonicalizes attrs in Python, coerces the three tensor inputs to a
  matching dtype, then executes the Conv3DBackpropInput op.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(stride, "strides") for stride in strides]
  padding = _execute.make_str(padding, "padding")
  dilations = [1, 1, 1, 1, 1] if dilations is None else dilations
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(d, "dilations") for d in dilations]
  # The three tensor inputs must share a single dtype T.
  _attr_T, tensor_args = _execute.args_to_matching_eager(
      [input, filter, out_backprop], eager_ctx)
  input, filter, out_backprop = tensor_args
  flat_inputs = [input, filter, out_backprop]
  op_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
              "dilations", dilations)
  outputs = _execute.execute(b"Conv3DBackpropInput", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Conv3DBackpropInput", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def conv3d_backprop_input_v2(input_sizes, filter, out_backprop, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
  r"""Computes the gradients of 3-D convolution with respect to the input.
  Args:
    input_sizes: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      An integer vector representing the tensor shape of `input`,
      where `input` is a 5-D
      `[batch, depth, rows, cols, in_channels]` tensor.
    filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Shape `[depth, rows, cols, in_channels, out_channels]`.
      `in_channels` must match between `input` and `filter`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
      out_channels]`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
      1-D tensor of length 5. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: single C call; on _FallbackException retry through the
  # Python slow path (conv3d_backprop_input_v2_eager_fallback).
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Conv3DBackpropInputV2", name, _ctx.post_execution_callbacks,
        input_sizes, filter, out_backprop, "strides", strides, "padding",
        padding, "data_format", data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      try:
        return conv3d_backprop_input_v2_eager_fallback(
            input_sizes, filter, out_backprop, strides=strides,
            padding=padding, data_format=data_format, dilations=dilations,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Append the requested op name to the error message for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs in Python, then build the op.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_input_v2' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_input_v2' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "Conv3DBackpropInputV2", input_sizes=input_sizes, filter=filter,
                                 out_backprop=out_backprop, strides=strides,
                                 padding=padding, data_format=data_format,
                                 dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read attrs from the created op (including the input_sizes dtype
  # attr Tshape) so the recorded gradient sees the canonical values.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"), "Tshape", _op._get_attr_type("Tshape"))
  _execute.record_gradient(
      "Conv3DBackpropInputV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias: exported as tf.raw_ops.Conv3DBackpropInputV2.
def Conv3DBackpropInputV2(input_sizes, filter, out_backprop, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation defined above.
  return conv3d_backprop_input_v2(input_sizes=input_sizes, filter=filter, out_backprop=out_backprop, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, and wrap it with _kwarg_only (named so; presumably enforces
# keyword-only calls — defined elsewhere in this module).
Conv3DBackpropInputV2.__doc__ = conv3d_backprop_input_v2.__doc__
Conv3DBackpropInputV2 = _doc_controls.do_not_generate_docs(_kwarg_only(Conv3DBackpropInputV2))
tf_export("raw_ops.Conv3DBackpropInputV2")(Conv3DBackpropInputV2)
def conv3d_backprop_input_v2_eager_fallback(input_sizes, filter, out_backprop, strides, padding, data_format="NDHWC", dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conv3d_backprop_input_v2

  Validates and canonicalizes the attribute arguments in Python, coerces
  the tensor inputs, then dispatches the Conv3DBackpropInputV2 op through
  _execute.execute and records a gradient.

  Fix: replaced the mutable list default dilations=[1, 1, 1, 1, 1] with
  None; the body already maps None to that exact list, so the change is
  behavior-preserving for every caller.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'conv3d_backprop_input_v2' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'conv3d_backprop_input_v2' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # filter and out_backprop share dtype T; input_sizes gets its own dtype
  # attr Tshape (int32 by default).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], _ctx)
  (filter, out_backprop) = _inputs_T
  _attr_Tshape, (input_sizes,) = _execute.args_to_matching_eager([input_sizes], _ctx, _dtypes.int32)
  _inputs_flat = [input_sizes, filter, out_backprop]
  _attrs = ("T", _attr_T, "strides", strides, "padding", padding,
  "data_format", data_format, "dilations", dilations, "Tshape", _attr_Tshape)
  _result = _execute.execute(b"Conv3DBackpropInputV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conv3DBackpropInputV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def data_format_dim_map(x, src_format="NHWC", dst_format="NCHW", name=None):
  r"""Returns the dimension index in the destination data format given the one in
  the source data format.
  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A Tensor with each element as a dimension index in source data format.
      Must be in the range [-4, 4).
    src_format: An optional `string`. Defaults to `"NHWC"`.
      source data format.
    dst_format: An optional `string`. Defaults to `"NCHW"`.
      destination data format.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: single C call; on _FallbackException retry through the
  # Python slow path (data_format_dim_map_eager_fallback).
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "DataFormatDimMap", name, _ctx.post_execution_callbacks, x,
        "src_format", src_format, "dst_format", dst_format)
      return _result
    except _core._FallbackException:
      try:
        return data_format_dim_map_eager_fallback(
            x, src_format=src_format, dst_format=dst_format, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Append the requested op name to the error message for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize the format strings, then build the op.
  if src_format is None:
    src_format = "NHWC"
  src_format = _execute.make_str(src_format, "src_format")
  if dst_format is None:
    dst_format = "NCHW"
  dst_format = _execute.make_str(dst_format, "dst_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "DataFormatDimMap", x=x, src_format=src_format, dst_format=dst_format,
                            name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read attrs from the created op so the recorded gradient sees the
  # canonical values.
  _attrs = ("T", _op._get_attr_type("T"), "src_format",
            _op.get_attr("src_format"), "dst_format",
            _op.get_attr("dst_format"))
  _execute.record_gradient(
      "DataFormatDimMap", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op alias; registered below under tf.raw_ops.DataFormatDimMap.
def DataFormatDimMap(x, src_format="NHWC", dst_format="NCHW", name=None):
  return data_format_dim_map(x=x, src_format=src_format, dst_format=dst_format, name=name)
DataFormatDimMap.__doc__ = data_format_dim_map.__doc__
# Excluded from generated API docs; wrapped with _kwarg_only before export.
DataFormatDimMap = _doc_controls.do_not_generate_docs(_kwarg_only(DataFormatDimMap))
tf_export("raw_ops.DataFormatDimMap")(DataFormatDimMap)
def data_format_dim_map_eager_fallback(x, src_format="NHWC", dst_format="NCHW", name=None, ctx=None):
  r"""Eager-mode slow path for data_format_dim_map.

  Canonicalizes the attributes by hand and executes the DataFormatDimMap op
  directly through the eager executor.
  """
  # Prefer the caller-supplied context over the ambient one.
  _ctx = ctx or _context.context()
  # Substitute defaults and coerce the format attrs to canonical strings.
  if src_format is None:
    src_format = "NHWC"
  if dst_format is None:
    dst_format = "NCHW"
  src_format = _execute.make_str(src_format, "src_format")
  dst_format = _execute.make_str(dst_format, "dst_format")
  # Match `x` against the op's accepted dtypes (int32 preferred).
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx, _dtypes.int32)
  _flat_inputs = [x]
  _op_attrs = ("T", _attr_T, "src_format", src_format, "dst_format",
               dst_format)
  _result = _execute.execute(b"DataFormatDimMap", 1, inputs=_flat_inputs,
                             attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "DataFormatDimMap", _flat_inputs, _op_attrs, _result, name)
  # Single-output op: unwrap the result list.
  (_result,) = _result
  return _result
def data_format_vec_permute(x, src_format="NHWC", dst_format="NCHW", name=None):
  r"""Returns the permuted vector/tensor in the destination data format given the
  one in the source data format.
  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Vector of size 4 or Tensor of shape (4, 2) in source data format.
    src_format: An optional `string`. Defaults to `"NHWC"`.
      source data format.
    dst_format: An optional `string`. Defaults to `"NCHW"`.
      destination data format.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute the op directly through the C extension.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "DataFormatVecPermute", name, _ctx.post_execution_callbacks, x,
        "src_format", src_format, "dst_format", dst_format)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python eager fallback.
      try:
        return data_format_vec_permute_eager_fallback(
            x, src_format=src_format, dst_format=dst_format, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if src_format is None:
    src_format = "NHWC"
  src_format = _execute.make_str(src_format, "src_format")
  if dst_format is None:
    dst_format = "NCHW"
  dst_format = _execute.make_str(dst_format, "dst_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "DataFormatVecPermute", x=x, src_format=src_format,
                                dst_format=dst_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "src_format",
            _op.get_attr("src_format"), "dst_format",
            _op.get_attr("dst_format"))
  _execute.record_gradient(
      "DataFormatVecPermute", _inputs_flat, _attrs, _result, name)
  # The op has exactly one output; unwrap it from the list.
  _result, = _result
  return _result
# Raw-op alias; registered below under tf.raw_ops.DataFormatVecPermute.
def DataFormatVecPermute(x, src_format="NHWC", dst_format="NCHW", name=None):
  return data_format_vec_permute(x=x, src_format=src_format, dst_format=dst_format, name=name)
DataFormatVecPermute.__doc__ = data_format_vec_permute.__doc__
# Excluded from generated API docs; wrapped with _kwarg_only before export.
DataFormatVecPermute = _doc_controls.do_not_generate_docs(_kwarg_only(DataFormatVecPermute))
tf_export("raw_ops.DataFormatVecPermute")(DataFormatVecPermute)
def data_format_vec_permute_eager_fallback(x, src_format="NHWC", dst_format="NCHW", name=None, ctx=None):
  r"""Eager-mode slow path for data_format_vec_permute.

  Canonicalizes the attributes by hand and executes the DataFormatVecPermute
  op directly through the eager executor.
  """
  # Prefer the caller-supplied context over the ambient one.
  _ctx = ctx or _context.context()
  # Substitute defaults and coerce the format attrs to canonical strings.
  if src_format is None:
    src_format = "NHWC"
  if dst_format is None:
    dst_format = "NCHW"
  src_format = _execute.make_str(src_format, "src_format")
  dst_format = _execute.make_str(dst_format, "dst_format")
  # Match `x` against the op's accepted dtypes (int32 preferred).
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx, _dtypes.int32)
  _flat_inputs = [x]
  _op_attrs = ("T", _attr_T, "src_format", src_format, "dst_format",
               dst_format)
  _result = _execute.execute(b"DataFormatVecPermute", 1, inputs=_flat_inputs,
                             attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "DataFormatVecPermute", _flat_inputs, _op_attrs, _result, name)
  # Single-output op: unwrap the result list.
  (_result,) = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export(v1=['nn.depthwise_conv2d_native'])
@deprecated_endpoints('nn.depthwise_conv2d_native')
def depthwise_conv2d_native(input, filter, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
  `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
  a different filter to each input channel (expanding from 1 channel to
  `channel_multiplier` channels for each), then concatenates the results
  together. Thus, the output has `in_channels * channel_multiplier` channels.
  ```
  for k in 0..in_channels-1
    for q in 0..channel_multiplier-1
      output[b, i, j, k * channel_multiplier + q] =
        sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                     filter[di, dj, k, q]
  ```
  Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    filter: A `Tensor`. Must have the same type as `input`.
    strides: A list of `ints`.
      1-D of length 4. The stride of the sliding window for each dimension
      of `input`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute the op directly through the C extension.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "DepthwiseConv2dNative", name, _ctx.post_execution_callbacks, input,
        filter, "strides", strides, "padding", padding, "data_format",
        data_format, "dilations", dilations)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python eager fallback.
      try:
        return depthwise_conv2d_native_eager_fallback(
            input, filter, strides=strides, padding=padding,
            data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      # Conversion failed: offer the call to registered dispatch handlers.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              depthwise_conv2d_native, input=input, filter=filter,
                                       strides=strides, padding=padding,
                                       data_format=data_format,
                                       dilations=dilations, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Graph construction may also fail on exotic input types; let dispatch
  # handlers have a chance before re-raising.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "DepthwiseConv2dNative", input=input, filter=filter, strides=strides,
                                 padding=padding, data_format=data_format,
                                 dilations=dilations, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          depthwise_conv2d_native, input=input, filter=filter,
                                   strides=strides, padding=padding,
                                   data_format=data_format,
                                   dilations=dilations, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "DepthwiseConv2dNative", _inputs_flat, _attrs, _result, name)
  # The op has exactly one output; unwrap it from the list.
  _result, = _result
  return _result
# Raw-op alias; registered below under tf.raw_ops.DepthwiseConv2dNative.
def DepthwiseConv2dNative(input, filter, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  return depthwise_conv2d_native(input=input, filter=filter, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name)
DepthwiseConv2dNative.__doc__ = depthwise_conv2d_native.__doc__
# Excluded from generated API docs; wrapped with _kwarg_only before export.
DepthwiseConv2dNative = _doc_controls.do_not_generate_docs(_kwarg_only(DepthwiseConv2dNative))
tf_export("raw_ops.DepthwiseConv2dNative")(DepthwiseConv2dNative)
def depthwise_conv2d_native_eager_fallback(input, filter, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""Eager-mode slow path for depthwise_conv2d_native.

  Validates and canonicalizes the attributes, then executes the
  DepthwiseConv2dNative op directly through the eager executor.
  """
  # Prefer the caller-supplied context over the ambient one.
  _ctx = ctx or _context.context()
  # `strides` must be a sequence of ints.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native' Op, not %r." % strides)
  strides = [_execute.make_int(_s, "strides") for _s in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  # `dilations` defaults to no dilation and must also be a sequence of ints.
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native' Op, not %r." % dilations)
  dilations = [_execute.make_int(_d, "dilations") for _d in dilations]
  # Promote `input` and `filter` to a single matching dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
  input, filter = _inputs_T
  _flat_inputs = [input, filter]
  _op_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
               "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"DepthwiseConv2dNative", 1,
                             inputs=_flat_inputs, attrs=_op_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "DepthwiseConv2dNative", _flat_inputs, _op_attrs, _result, name)
  # Single-output op: unwrap the result list.
  (_result,) = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('nn.depthwise_conv2d_backprop_filter', v1=['nn.depthwise_conv2d_native_backprop_filter', 'nn.depthwise_conv2d_backprop_filter'])
@deprecated_endpoints('nn.depthwise_conv2d_native_backprop_filter')
def depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes the gradients of depthwise convolution with respect to the filter.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape based on `data_format`. For example, if
      `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
      in_width, in_channels]` tensor.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape based on `data_format`.
      For example, if `data_format` is 'NHWC' then
      out_backprop shape is `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute the op directly through the C extension.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "DepthwiseConv2dNativeBackpropFilter", name,
        _ctx.post_execution_callbacks, input, filter_sizes, out_backprop,
        "strides", strides, "padding", padding, "data_format", data_format,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python eager fallback.
      try:
        return depthwise_conv2d_native_backprop_filter_eager_fallback(
            input, filter_sizes, out_backprop, strides=strides,
            padding=padding, data_format=data_format, dilations=dilations,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      # Conversion failed: offer the call to registered dispatch handlers.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              depthwise_conv2d_native_backprop_filter, input=input,
                                                       filter_sizes=filter_sizes,
                                                       out_backprop=out_backprop,
                                                       strides=strides,
                                                       padding=padding,
                                                       data_format=data_format,
                                                       dilations=dilations,
                                                       name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Graph construction may also fail on exotic input types; let dispatch
  # handlers have a chance before re-raising.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "DepthwiseConv2dNativeBackpropFilter", input=input,
                                               filter_sizes=filter_sizes,
                                               out_backprop=out_backprop,
                                               strides=strides,
                                               padding=padding,
                                               data_format=data_format,
                                               dilations=dilations, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          depthwise_conv2d_native_backprop_filter, input=input,
                                                   filter_sizes=filter_sizes,
                                                   out_backprop=out_backprop,
                                                   strides=strides,
                                                   padding=padding,
                                                   data_format=data_format,
                                                   dilations=dilations,
                                                   name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result, name)
  # The op has exactly one output; unwrap it from the list.
  _result, = _result
  return _result
# Raw-op alias; registered below under tf.raw_ops.DepthwiseConv2dNativeBackpropFilter.
def DepthwiseConv2dNativeBackpropFilter(input, filter_sizes, out_backprop, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  return depthwise_conv2d_native_backprop_filter(input=input, filter_sizes=filter_sizes, out_backprop=out_backprop, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name)
DepthwiseConv2dNativeBackpropFilter.__doc__ = depthwise_conv2d_native_backprop_filter.__doc__
# Excluded from generated API docs; wrapped with _kwarg_only before export.
DepthwiseConv2dNativeBackpropFilter = _doc_controls.do_not_generate_docs(_kwarg_only(DepthwiseConv2dNativeBackpropFilter))
tf_export("raw_ops.DepthwiseConv2dNativeBackpropFilter")(DepthwiseConv2dNativeBackpropFilter)
def depthwise_conv2d_native_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""Eager-mode slow path for depthwise_conv2d_native_backprop_filter.

  Validates and canonicalizes the attributes, then executes the
  DepthwiseConv2dNativeBackpropFilter op directly via the eager executor.
  """
  # Prefer the caller-supplied context over the ambient one.
  _ctx = ctx or _context.context()
  # `strides` must be a sequence of ints.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(_s, "strides") for _s in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  # `dilations` defaults to no dilation and must also be a sequence of ints.
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native_backprop_filter' Op, not %r." % dilations)
  dilations = [_execute.make_int(_d, "dilations") for _d in dilations]
  # `input` and `out_backprop` share dtype T; `filter_sizes` is always int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [input, out_backprop], _ctx)
  input, out_backprop = _inputs_T
  filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
  _flat_inputs = [input, filter_sizes, out_backprop]
  _op_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
               "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"DepthwiseConv2dNativeBackpropFilter", 1,
                             inputs=_flat_inputs, attrs=_op_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "DepthwiseConv2dNativeBackpropFilter", _flat_inputs, _op_attrs,
      _result, name)
  # Single-output op: unwrap the result list.
  (_result,) = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('nn.depthwise_conv2d_backprop_input', v1=['nn.depthwise_conv2d_native_backprop_input', 'nn.depthwise_conv2d_backprop_input'])
@deprecated_endpoints('nn.depthwise_conv2d_native_backprop_input')
def depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  r"""Computes the gradients of depthwise convolution with respect to the input.
  Args:
    input_sizes: A `Tensor` of type `int32`.
      An integer vector representing the shape of `input`, based
      on `data_format`. For example, if `data_format` is 'NHWC' then
      `input` is a 4-D `[batch, height, width, channels]` tensor.
    filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape
      `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      4-D with shape based on `data_format`.
      For example, if `data_format` is 'NHWC' then
      out_backprop shape is `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each filter
      element on that dimension. The dimension order is determined by the value of
      `data_format`, see above for details. Dilations in the batch and depth
      dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute the op directly through the C extension.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "DepthwiseConv2dNativeBackpropInput", name,
        _ctx.post_execution_callbacks, input_sizes, filter, out_backprop,
        "strides", strides, "padding", padding, "data_format", data_format,
        "dilations", dilations)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python eager fallback.
      try:
        return depthwise_conv2d_native_backprop_input_eager_fallback(
            input_sizes, filter, out_backprop, strides=strides,
            padding=padding, data_format=data_format, dilations=dilations,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      # Conversion failed: offer the call to registered dispatch handlers.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              depthwise_conv2d_native_backprop_input, input_sizes=input_sizes,
                                                      filter=filter,
                                                      out_backprop=out_backprop,
                                                      strides=strides,
                                                      padding=padding,
                                                      data_format=data_format,
                                                      dilations=dilations,
                                                      name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Graph construction may also fail on exotic input types; let dispatch
  # handlers have a chance before re-raising.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "DepthwiseConv2dNativeBackpropInput", input_sizes=input_sizes,
                                              filter=filter,
                                              out_backprop=out_backprop,
                                              strides=strides,
                                              padding=padding,
                                              data_format=data_format,
                                              dilations=dilations, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          depthwise_conv2d_native_backprop_input, input_sizes=input_sizes,
                                                  filter=filter,
                                                  out_backprop=out_backprop,
                                                  strides=strides,
                                                  padding=padding,
                                                  data_format=data_format,
                                                  dilations=dilations,
                                                  name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"), "dilations",
            _op.get_attr("dilations"))
  _execute.record_gradient(
      "DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result, name)
  # The op has exactly one output; unwrap it from the list.
  _result, = _result
  return _result
# Raw-op alias; registered below under tf.raw_ops.DepthwiseConv2dNativeBackpropInput.
def DepthwiseConv2dNativeBackpropInput(input_sizes, filter, out_backprop, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
  return depthwise_conv2d_native_backprop_input(input_sizes=input_sizes, filter=filter, out_backprop=out_backprop, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name)
DepthwiseConv2dNativeBackpropInput.__doc__ = depthwise_conv2d_native_backprop_input.__doc__
# Excluded from generated API docs; wrapped with _kwarg_only before export.
DepthwiseConv2dNativeBackpropInput = _doc_controls.do_not_generate_docs(_kwarg_only(DepthwiseConv2dNativeBackpropInput))
tf_export("raw_ops.DepthwiseConv2dNativeBackpropInput")(DepthwiseConv2dNativeBackpropInput)
def depthwise_conv2d_native_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides, padding, data_format="NHWC", dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""Eager-mode slow path for depthwise_conv2d_native_backprop_input.

  Validates and canonicalizes the attributes, then executes the
  DepthwiseConv2dNativeBackpropInput op directly via the eager executor.
  """
  # Prefer the caller-supplied context over the ambient one.
  _ctx = ctx or _context.context()
  # `strides` must be a sequence of ints.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'depthwise_conv2d_native_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(_s, "strides") for _s in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  # `dilations` defaults to no dilation and must also be a sequence of ints.
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'depthwise_conv2d_native_backprop_input' Op, not %r." % dilations)
  dilations = [_execute.make_int(_d, "dilations") for _d in dilations]
  # `filter` and `out_backprop` share dtype T; `input_sizes` is always int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [filter, out_backprop], _ctx)
  filter, out_backprop = _inputs_T
  input_sizes = _ops.convert_to_tensor(input_sizes, _dtypes.int32)
  _flat_inputs = [input_sizes, filter, out_backprop]
  _op_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
               "data_format", data_format, "dilations", dilations)
  _result = _execute.execute(b"DepthwiseConv2dNativeBackpropInput", 1,
                             inputs=_flat_inputs, attrs=_op_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "DepthwiseConv2dNativeBackpropInput", _flat_inputs, _op_attrs,
      _result, name)
  # Single-output op: unwrap the result list.
  (_result,) = _result
  return _result
def dilation2d(input, filter, strides, rates, padding, name=None):
  r"""Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
  The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
  input channel is processed independently of the others with its own structuring
  function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
  tensor depend on the `padding` algorithm. We currently only support the default
  "NHWC" `data_format`.
  In detail, the grayscale morphological 2-D dilation is the max-sum correlation
  (for consistency with `conv2d`, we use unmirrored filters):
      output[b, y, x, c] =
         max_{dy, dx} input[b,
                            strides[1] * y + rates[1] * dy,
                            strides[2] * x + rates[2] * dx,
                            c] +
                      filter[dy, dx, c]
  Max-pooling is a special case when the filter has size equal to the pooling
  kernel size and contains all zeros.
  Note on duality: The dilation of `input` by the `filter` is equal to the
  negation of the erosion of `-input` by the reflected `filter`.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filter: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input
      tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      The input stride for atrous morphological dilation. Must be:
      `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute the op directly through the C extension.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Dilation2D", name, _ctx.post_execution_callbacks, input, filter,
        "strides", strides, "rates", rates, "padding", padding)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the Python eager fallback.
      try:
        return dilation2d_eager_fallback(
            input, filter, strides=strides, rates=rates, padding=padding,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C-layer error as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  if not isinstance(rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d' Op, not %r." % rates)
  rates = [_execute.make_int(_i, "rates") for _i in rates]
  padding = _execute.make_str(padding, "padding")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Dilation2D", input=input, filter=filter, strides=strides,
                      rates=rates, padding=padding, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "rates", _op.get_attr("rates"), "padding",
            _op.get_attr("padding"))
  _execute.record_gradient(
      "Dilation2D", _inputs_flat, _attrs, _result, name)
  # The op has exactly one output; unwrap it from the list.
  _result, = _result
  return _result
# Raw-op alias; registered below under tf.raw_ops.Dilation2D.
def Dilation2D(input, filter, strides, rates, padding, name=None):
  return dilation2d(input=input, filter=filter, strides=strides, rates=rates, padding=padding, name=name)
Dilation2D.__doc__ = dilation2d.__doc__
# Excluded from generated API docs; wrapped with _kwarg_only before export.
Dilation2D = _doc_controls.do_not_generate_docs(_kwarg_only(Dilation2D))
tf_export("raw_ops.Dilation2D")(Dilation2D)
def dilation2d_eager_fallback(input, filter, strides, rates, padding, name=None, ctx=None):
  r"""Eager-mode slow path for dilation2d.

  Validates and canonicalizes the attributes, then executes the Dilation2D op
  directly through the eager executor.
  """
  # Prefer the caller-supplied context over the ambient one.
  _ctx = ctx or _context.context()
  # `strides` and `rates` must both be sequences of ints.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d' Op, not %r." % strides)
  strides = [_execute.make_int(_s, "strides") for _s in strides]
  if not isinstance(rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d' Op, not %r." % rates)
  rates = [_execute.make_int(_r, "rates") for _r in rates]
  padding = _execute.make_str(padding, "padding")
  # Promote `input` and `filter` to a single matching dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
  input, filter = _inputs_T
  _flat_inputs = [input, filter]
  _op_attrs = ("T", _attr_T, "strides", strides, "rates", rates, "padding",
               padding)
  _result = _execute.execute(b"Dilation2D", 1, inputs=_flat_inputs,
                             attrs=_op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Dilation2D", _flat_inputs, _op_attrs, _result, name)
  # Single-output op: unwrap the result list.
  (_result,) = _result
  return _result
def dilation2d_backprop_filter(input, filter, out_backprop, strides, rates, padding, name=None):
  r"""Computes the gradient of morphological 2-D dilation with respect to the filter.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filter: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Three execution paths: (1) eager fast path through the C extension,
  # (2) eager Python fallback, (3) graph construction via the op-def library.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Dilation2DBackpropFilter", name, _ctx.post_execution_callbacks,
        input, filter, out_backprop, "strides", strides, "rates", rates,
        "padding", padding)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      try:
        return dilation2d_backprop_filter_eager_fallback(
            input, filter, out_backprop, strides=strides, rates=rates,
            padding=padding, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate/coerce the attrs, then build a Dilation2DBackpropFilter node.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  if not isinstance(rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d_backprop_filter' Op, not %r." % rates)
  rates = [_execute.make_int(_i, "rates") for _i in rates]
  padding = _execute.make_str(padding, "padding")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Dilation2DBackpropFilter", input=input, filter=filter,
                                    out_backprop=out_backprop,
                                    strides=strides, rates=rates,
                                    padding=padding, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read the attrs from the created op so recorded values match the graph.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "rates", _op.get_attr("rates"), "padding",
            _op.get_attr("padding"))
  _execute.record_gradient(
      "Dilation2DBackpropFilter", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Dilation2DBackpropFilter(input, filter, out_backprop, strides, rates, padding, name=None):
  # Raw-op alias: forward every argument to the snake_case implementation.
  return dilation2d_backprop_filter(input, filter, out_backprop, strides,
                                    rates, padding, name)
# Register the CamelCase alias as a keyword-only raw op, share the snake_case
# docstring, and keep it out of the generated API documentation.
Dilation2DBackpropFilter.__doc__ = dilation2d_backprop_filter.__doc__
Dilation2DBackpropFilter = _doc_controls.do_not_generate_docs(_kwarg_only(Dilation2DBackpropFilter))
tf_export("raw_ops.Dilation2DBackpropFilter")(Dilation2DBackpropFilter)
def dilation2d_backprop_filter_eager_fallback(input, filter, out_backprop, strides, rates, padding, name=None, ctx=None):
  r"""Eager-mode slow path for `dilation2d_backprop_filter`.

  Coerces the list/string attributes, resolves a common dtype for the three
  tensor inputs, and runs the Dilation2DBackpropFilter kernel directly.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d_backprop_filter' Op, not %r." % strides)
  strides = [_execute.make_int(s, "strides") for s in strides]
  if not isinstance(rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d_backprop_filter' Op, not %r." % rates)
  rates = [_execute.make_int(r, "rates") for r in rates]
  padding = _execute.make_str(padding, "padding")
  type_attr, matched = _execute.args_to_matching_eager(
      [input, filter, out_backprop], eager_ctx)
  input, filter, out_backprop = matched
  flat_inputs = [input, filter, out_backprop]
  op_attrs = ("T", type_attr, "strides", strides, "rates", rates, "padding",
              padding)
  outputs = _execute.execute(b"Dilation2DBackpropFilter", 1,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Dilation2DBackpropFilter", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def dilation2d_backprop_input(input, filter, out_backprop, strides, rates, padding, name=None):
  r"""Computes the gradient of morphological 2-D dilation with respect to the input.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filter: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Three execution paths: (1) eager fast path through the C extension,
  # (2) eager Python fallback, (3) graph construction via the op-def library.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Dilation2DBackpropInput", name, _ctx.post_execution_callbacks, input,
        filter, out_backprop, "strides", strides, "rates", rates, "padding",
        padding)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      try:
        return dilation2d_backprop_input_eager_fallback(
            input, filter, out_backprop, strides=strides, rates=rates,
            padding=padding, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate/coerce the attrs, then build a Dilation2DBackpropInput node.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  if not isinstance(rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d_backprop_input' Op, not %r." % rates)
  rates = [_execute.make_int(_i, "rates") for _i in rates]
  padding = _execute.make_str(padding, "padding")
  _, _, _op = _op_def_lib._apply_op_helper(
        "Dilation2DBackpropInput", input=input, filter=filter,
                                   out_backprop=out_backprop, strides=strides,
                                   rates=rates, padding=padding, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read the attrs from the created op so recorded values match the graph.
  _attrs = ("T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"),
            "rates", _op.get_attr("rates"), "padding",
            _op.get_attr("padding"))
  _execute.record_gradient(
      "Dilation2DBackpropInput", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Dilation2DBackpropInput(input, filter, out_backprop, strides, rates, padding, name=None):
  # Raw-op alias: forward every argument to the snake_case implementation.
  return dilation2d_backprop_input(input, filter, out_backprop, strides,
                                   rates, padding, name)
# Register the CamelCase alias as a keyword-only raw op, share the snake_case
# docstring, and keep it out of the generated API documentation.
Dilation2DBackpropInput.__doc__ = dilation2d_backprop_input.__doc__
Dilation2DBackpropInput = _doc_controls.do_not_generate_docs(_kwarg_only(Dilation2DBackpropInput))
tf_export("raw_ops.Dilation2DBackpropInput")(Dilation2DBackpropInput)
def dilation2d_backprop_input_eager_fallback(input, filter, out_backprop, strides, rates, padding, name=None, ctx=None):
  r"""Eager-mode slow path for `dilation2d_backprop_input`.

  Coerces the list/string attributes, resolves a common dtype for the three
  tensor inputs, and runs the Dilation2DBackpropInput kernel directly.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'dilation2d_backprop_input' Op, not %r." % strides)
  strides = [_execute.make_int(s, "strides") for s in strides]
  if not isinstance(rates, (list, tuple)):
    raise TypeError(
        "Expected list for 'rates' argument to "
        "'dilation2d_backprop_input' Op, not %r." % rates)
  rates = [_execute.make_int(r, "rates") for r in rates]
  padding = _execute.make_str(padding, "padding")
  type_attr, matched = _execute.args_to_matching_eager(
      [input, filter, out_backprop], eager_ctx)
  input, filter, out_backprop = matched
  flat_inputs = [input, filter, out_backprop]
  op_attrs = ("T", type_attr, "strides", strides, "rates", rates, "padding",
              padding)
  outputs = _execute.execute(b"Dilation2DBackpropInput", 1,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Dilation2DBackpropInput", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
@_dispatch.add_dispatch_list
@tf_export('nn.elu')
def elu(features, name=None):
  r"""Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
  See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
  ](http://arxiv.org/abs/1511.07289)
  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  # Three execution paths: (1) eager fast path through the C extension,
  # (2) eager Python fallback, (3) graph construction via the op-def library.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Elu",
        name, _ctx.post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      try:
        return elu_eager_fallback(
            features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # `features` may be a type handled by a registered dispatcher
        # (see `_dispatch.add_dispatch_list`); give dispatchers a chance
        # before re-raising.
        result = _dispatch.dispatch(
              elu, features=features, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Elu", features=features, name=name)
  except (TypeError, ValueError):
    # Same dispatcher escape hatch for graph mode.
    result = _dispatch.dispatch(
          elu, features=features, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Elu", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Elu(features, name=None):
  # Raw-op alias: forward every argument to the snake_case implementation.
  return elu(features, name)
# Register the CamelCase alias as a keyword-only raw op, share the snake_case
# docstring, and keep it out of the generated API documentation.
Elu.__doc__ = elu.__doc__
Elu = _doc_controls.do_not_generate_docs(_kwarg_only(Elu))
tf_export("raw_ops.Elu")(Elu)
def elu_eager_fallback(features, name=None, ctx=None):
  r"""Eager-mode slow path for `elu`.

  Resolves the dtype of `features` and runs the Elu kernel directly through
  `_execute.execute`.
  """
  eager_ctx = ctx or _context.context()
  type_attr, (features,) = _execute.args_to_matching_eager(
      [features], eager_ctx)
  flat_inputs = [features]
  op_attrs = ("T", type_attr)
  outputs = _execute.execute(b"Elu", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Elu", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def elu_grad(gradients, outputs, name=None):
  r"""Computes gradients for the exponential linear (Elu) operation.
  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding Elu operation.
    outputs: A `Tensor`. Must have the same type as `gradients`.
      The outputs of the corresponding Elu operation.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  # Three execution paths: (1) eager fast path through the C extension,
  # (2) eager Python fallback, (3) graph construction via the op-def library.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "EluGrad",
        name, _ctx.post_execution_callbacks, gradients, outputs)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      try:
        return elu_grad_eager_fallback(
            gradients, outputs, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: build an EluGrad node.
  _, _, _op = _op_def_lib._apply_op_helper(
        "EluGrad", gradients=gradients, outputs=outputs, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "EluGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def EluGrad(gradients, outputs, name=None):
  # Raw-op alias: forward every argument to the snake_case implementation.
  return elu_grad(gradients, outputs, name)
# Register the CamelCase alias as a keyword-only raw op, share the snake_case
# docstring, and keep it out of the generated API documentation.
EluGrad.__doc__ = elu_grad.__doc__
EluGrad = _doc_controls.do_not_generate_docs(_kwarg_only(EluGrad))
tf_export("raw_ops.EluGrad")(EluGrad)
def elu_grad_eager_fallback(gradients, outputs, name=None, ctx=None):
  r"""Eager-mode slow path for `elu_grad`.

  Resolves a common dtype for `gradients` and `outputs`, then runs the
  EluGrad kernel directly through `_execute.execute`.
  """
  eager_ctx = ctx or _context.context()
  type_attr, matched = _execute.args_to_matching_eager(
      [gradients, outputs], eager_ctx)
  gradients, outputs = matched
  flat_inputs = [gradients, outputs]
  op_attrs = ("T", type_attr)
  results = _execute.execute(b"EluGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("EluGrad", flat_inputs, op_attrs, results, name)
  result, = results
  return result
# FractionalAvgPool returns three tensors; wrap them in a namedtuple so
# callers can use output names instead of positional indices.
_fractional_avg_pool_outputs = ["output", "row_pooling_sequence",
                               "col_pooling_sequence"]
_FractionalAvgPoolOutput = _collections.namedtuple(
    "FractionalAvgPool", _fractional_avg_pool_outputs)
def fractional_avg_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None):
  r"""Performs fractional average pooling on the input.
  Fractional average pooling is similar to Fractional max pooling in the pooling
  region generation step. The only difference is that after pooling regions are
  generated, a mean operation is performed instead of a max operation in each
  pooling region.
  Args:
    value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
      4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length `>= 4`.
      Pooling ratio for each dimension of `value`, currently only
      supports row and col dimension and should be >= 1.0. For example, a valid
      pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
      must be 1.0 because we don't allow pooling on batch and channels
      dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
      respectively.
    pseudo_random: An optional `bool`. Defaults to `False`.
      When set to True, generates the pooling sequence in a
      pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
      Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
      difference between pseudorandom and random.
    overlapping: An optional `bool`. Defaults to `False`.
      When set to True, it means when pooling, the values at the boundary
      of adjacent pooling cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
      The result would be [41/3, 26/3] for fractional avg pooling.
    deterministic: An optional `bool`. Defaults to `False`.
      When set to True, a fixed pooling region will be used when
      iterating over a FractionalAvgPool node in the computation graph. Mainly used
      in unit test to make FractionalAvgPool deterministic.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      An second seed to avoid seed collision.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, row_pooling_sequence, col_pooling_sequence).
    output: A `Tensor`. Has the same type as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Three execution paths: (1) eager fast path through the C extension,
  # (2) eager Python fallback, (3) graph construction via the op-def library.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FractionalAvgPool", name, _ctx.post_execution_callbacks, value,
        "pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
        "overlapping", overlapping, "deterministic", deterministic, "seed",
        seed, "seed2", seed2)
      # Fast path returns a flat sequence; rewrap it in the named output tuple.
      _result = _FractionalAvgPoolOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return fractional_avg_pool_eager_fallback(
            value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random,
            overlapping=overlapping, deterministic=deterministic, seed=seed,
            seed2=seed2, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate/coerce the attrs (substituting defaults for None),
  # then build a FractionalAvgPool node.
  if not isinstance(pooling_ratio, (list, tuple)):
    raise TypeError(
        "Expected list for 'pooling_ratio' argument to "
        "'fractional_avg_pool' Op, not %r." % pooling_ratio)
  pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio]
  if pseudo_random is None:
    pseudo_random = False
  pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random")
  if overlapping is None:
    overlapping = False
  overlapping = _execute.make_bool(overlapping, "overlapping")
  if deterministic is None:
    deterministic = False
  deterministic = _execute.make_bool(deterministic, "deterministic")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FractionalAvgPool", value=value, pooling_ratio=pooling_ratio,
                             pseudo_random=pseudo_random,
                             overlapping=overlapping,
                             deterministic=deterministic, seed=seed,
                             seed2=seed2, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read the attrs from the created op so recorded values match the graph.
  _attrs = ("pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random",
            _op.get_attr("pseudo_random"), "overlapping",
            _op.get_attr("overlapping"), "deterministic",
            _op.get_attr("deterministic"), "seed", _op.get_attr("seed"),
            "seed2", _op.get_attr("seed2"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "FractionalAvgPool", _inputs_flat, _attrs, _result, name)
  _result = _FractionalAvgPoolOutput._make(_result)
  return _result
def FractionalAvgPool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None):
  # Raw-op alias: forward every argument to the snake_case implementation.
  return fractional_avg_pool(value, pooling_ratio, pseudo_random, overlapping,
                             deterministic, seed, seed2, name)
# Register the CamelCase alias as a keyword-only raw op, share the snake_case
# docstring, and keep it out of the generated API documentation.
FractionalAvgPool.__doc__ = fractional_avg_pool.__doc__
FractionalAvgPool = _doc_controls.do_not_generate_docs(_kwarg_only(FractionalAvgPool))
tf_export("raw_ops.FractionalAvgPool")(FractionalAvgPool)
def fractional_avg_pool_eager_fallback(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None, ctx=None):
  r"""Eager-mode slow path for `fractional_avg_pool`.

  Coerces the attributes, resolves the dtype of `value`, and runs the
  FractionalAvgPool kernel directly through `_execute.execute`.
  """
  eager_ctx = ctx or _context.context()
  if not isinstance(pooling_ratio, (list, tuple)):
    raise TypeError(
        "Expected list for 'pooling_ratio' argument to "
        "'fractional_avg_pool' Op, not %r." % pooling_ratio)
  pooling_ratio = [_execute.make_float(ratio, "pooling_ratio")
                   for ratio in pooling_ratio]
  # Normalize each optional attribute: substitute its default for None,
  # then coerce to the attr's declared type.
  pseudo_random = _execute.make_bool(
      False if pseudo_random is None else pseudo_random, "pseudo_random")
  overlapping = _execute.make_bool(
      False if overlapping is None else overlapping, "overlapping")
  deterministic = _execute.make_bool(
      False if deterministic is None else deterministic, "deterministic")
  seed = _execute.make_int(0 if seed is None else seed, "seed")
  seed2 = _execute.make_int(0 if seed2 is None else seed2, "seed2")
  type_attr, (value,) = _execute.args_to_matching_eager([value], eager_ctx)
  flat_inputs = [value]
  op_attrs = ("pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
              "overlapping", overlapping, "deterministic", deterministic,
              "seed", seed, "seed2", seed2, "T", type_attr)
  results = _execute.execute(b"FractionalAvgPool", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "FractionalAvgPool", flat_inputs, op_attrs, results, name)
  return _FractionalAvgPoolOutput._make(results)
def fractional_avg_pool_grad(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None):
  r"""Computes gradient of the FractionalAvgPool function.
  Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
  FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
  out_backprop to those indices that form the same pooling cell. Therefore, we
  just need to know the shape of original input tensor, instead of the whole
  tensor.
  Args:
    orig_input_tensor_shape: A `Tensor` of type `int64`.
      Original input tensor shape for `fractional_avg_pool`
    out_backprop: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
      4-D with shape `[batch, height, width, channels]`.  Gradients
      w.r.t. the output of `fractional_avg_pool`.
    row_pooling_sequence: A `Tensor` of type `int64`.
      row pooling sequence, form pooling region with
      col_pooling_sequence.
    col_pooling_sequence: A `Tensor` of type `int64`.
      column pooling sequence, form pooling region with
      row_pooling sequence.
    overlapping: An optional `bool`. Defaults to `False`.
      When set to True, it means when pooling, the values at the boundary
      of adjacent pooling cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
      The result would be [41/3, 26/3] for fractional avg pooling.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `out_backprop`.
  """
  # Three execution paths: (1) eager fast path through the C extension,
  # (2) eager Python fallback, (3) graph construction via the op-def library.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FractionalAvgPoolGrad", name, _ctx.post_execution_callbacks,
        orig_input_tensor_shape, out_backprop, row_pooling_sequence,
        col_pooling_sequence, "overlapping", overlapping)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      try:
        return fractional_avg_pool_grad_eager_fallback(
            orig_input_tensor_shape, out_backprop, row_pooling_sequence,
            col_pooling_sequence, overlapping=overlapping, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: coerce the attr (substituting the default for None), then
  # build a FractionalAvgPoolGrad node.
  if overlapping is None:
    overlapping = False
  overlapping = _execute.make_bool(overlapping, "overlapping")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FractionalAvgPoolGrad", orig_input_tensor_shape=orig_input_tensor_shape,
                                 out_backprop=out_backprop,
                                 row_pooling_sequence=row_pooling_sequence,
                                 col_pooling_sequence=col_pooling_sequence,
                                 overlapping=overlapping, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("overlapping", _op.get_attr("overlapping"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "FractionalAvgPoolGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def FractionalAvgPoolGrad(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None):
  # Raw-op alias: forward every argument to the snake_case implementation.
  return fractional_avg_pool_grad(orig_input_tensor_shape, out_backprop,
                                  row_pooling_sequence, col_pooling_sequence,
                                  overlapping, name)
# Register the CamelCase alias as a keyword-only raw op, share the snake_case
# docstring, and keep it out of the generated API documentation.
FractionalAvgPoolGrad.__doc__ = fractional_avg_pool_grad.__doc__
FractionalAvgPoolGrad = _doc_controls.do_not_generate_docs(_kwarg_only(FractionalAvgPoolGrad))
tf_export("raw_ops.FractionalAvgPoolGrad")(FractionalAvgPoolGrad)
def fractional_avg_pool_grad_eager_fallback(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None, ctx=None):
  r"""Eager-mode slow path for `fractional_avg_pool_grad`.

  Coerces the `overlapping` attr, converts the int64 inputs, and runs the
  FractionalAvgPoolGrad kernel directly through `_execute.execute`.
  """
  eager_ctx = ctx or _context.context()
  overlapping = _execute.make_bool(
      False if overlapping is None else overlapping, "overlapping")
  type_attr, (out_backprop,) = _execute.args_to_matching_eager(
      [out_backprop], eager_ctx)
  # The shape and pooling-sequence inputs are always int64 tensors.
  orig_input_tensor_shape = _ops.convert_to_tensor(
      orig_input_tensor_shape, _dtypes.int64)
  row_pooling_sequence = _ops.convert_to_tensor(
      row_pooling_sequence, _dtypes.int64)
  col_pooling_sequence = _ops.convert_to_tensor(
      col_pooling_sequence, _dtypes.int64)
  flat_inputs = [orig_input_tensor_shape, out_backprop, row_pooling_sequence,
                 col_pooling_sequence]
  op_attrs = ("overlapping", overlapping, "T", type_attr)
  results = _execute.execute(b"FractionalAvgPoolGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "FractionalAvgPoolGrad", flat_inputs, op_attrs, results, name)
  result, = results
  return result
# FractionalMaxPool returns three tensors; wrap them in a namedtuple so
# callers can use output names instead of positional indices.
_fractional_max_pool_outputs = ["output", "row_pooling_sequence",
                               "col_pooling_sequence"]
_FractionalMaxPoolOutput = _collections.namedtuple(
    "FractionalMaxPool", _fractional_max_pool_outputs)
def fractional_max_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None):
  r"""Performs fractional max pooling on the input.
  Fractional max pooling is slightly different than regular max pooling.  In
  regular max pooling, you downsize an input set by taking the maximum value of
  smaller N x N subsections of the set (often 2x2), and try to reduce the set by
  a factor of N, where N is an integer.  Fractional max pooling, as you might
  expect from the word "fractional", means that the overall reduction ratio N
  does not have to be an integer.
  The sizes of the pooling regions are generated randomly but are fairly uniform.
  For example, let's look at the height dimension, and the constraints on the
  list of rows that will be pool boundaries.
  First we define the following:
  1.  input_row_length : the number of rows from the input set
  2.  output_row_length : which will be smaller than the input
  3.  alpha = input_row_length / output_row_length : our reduction ratio
  4.  K = floor(alpha)
  5.  row_pooling_sequence : this is the result list of pool boundary rows
  Then, row_pooling_sequence should satisfy:
  1.  a[0] = 0 : the first value of the sequence is 0
  2.  a[end] = input_row_length : the last value of the sequence is the size
  3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4.  length(row_pooling_sequence) = output_row_length+1
  For more details on fractional max pooling, see this paper:
  [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
  Args:
    value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
      4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length `>= 4`.
      Pooling ratio for each dimension of `value`, currently only
      supports row and col dimension and should be >= 1.0. For example, a valid
      pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
      must be 1.0 because we don't allow pooling on batch and channels
      dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
      respectively.
    pseudo_random: An optional `bool`. Defaults to `False`.
      When set to True, generates the pooling sequence in a
      pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
      Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
      difference between pseudorandom and random.
    overlapping: An optional `bool`. Defaults to `False`.
      When set to True, it means when pooling, the values at the boundary
      of adjacent pooling cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
      The result would be [20, 16] for fractional max pooling.
    deterministic: An optional `bool`. Defaults to `False`.
      When set to True, a fixed pooling region will be used when
      iterating over a FractionalMaxPool node in the computation graph. Mainly used
      in unit test to make FractionalMaxPool deterministic.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed.  Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      An second seed to avoid seed collision.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, row_pooling_sequence, col_pooling_sequence).
    output: A `Tensor`. Has the same type as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Three execution paths: (1) eager fast path through the C extension,
  # (2) eager Python fallback, (3) graph construction via the op-def library.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FractionalMaxPool", name, _ctx.post_execution_callbacks, value,
        "pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
        "overlapping", overlapping, "deterministic", deterministic, "seed",
        seed, "seed2", seed2)
      # Fast path returns a flat sequence; rewrap it in the named output tuple.
      _result = _FractionalMaxPoolOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return fractional_max_pool_eager_fallback(
            value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random,
            overlapping=overlapping, deterministic=deterministic, seed=seed,
            seed2=seed2, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate/coerce the attrs (substituting defaults for None),
  # then build a FractionalMaxPool node.
  if not isinstance(pooling_ratio, (list, tuple)):
    raise TypeError(
        "Expected list for 'pooling_ratio' argument to "
        "'fractional_max_pool' Op, not %r." % pooling_ratio)
  pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio]
  if pseudo_random is None:
    pseudo_random = False
  pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random")
  if overlapping is None:
    overlapping = False
  overlapping = _execute.make_bool(overlapping, "overlapping")
  if deterministic is None:
    deterministic = False
  deterministic = _execute.make_bool(deterministic, "deterministic")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FractionalMaxPool", value=value, pooling_ratio=pooling_ratio,
                             pseudo_random=pseudo_random,
                             overlapping=overlapping,
                             deterministic=deterministic, seed=seed,
                             seed2=seed2, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Re-read the attrs from the created op so recorded values match the graph.
  _attrs = ("pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random",
            _op.get_attr("pseudo_random"), "overlapping",
            _op.get_attr("overlapping"), "deterministic",
            _op.get_attr("deterministic"), "seed", _op.get_attr("seed"),
            "seed2", _op.get_attr("seed2"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "FractionalMaxPool", _inputs_flat, _attrs, _result, name)
  _result = _FractionalMaxPoolOutput._make(_result)
  return _result
def FractionalMaxPool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None):
  # Thin raw-op alias: delegate every argument unchanged to the
  # snake_case implementation.
  return fractional_max_pool(
      value=value,
      pooling_ratio=pooling_ratio,
      pseudo_random=pseudo_random,
      overlapping=overlapping,
      deterministic=deterministic,
      seed=seed,
      seed2=seed2,
      name=name)
# Re-export under tf.raw_ops: share the snake_case docstring, hide from the
# generated API docs, and wrap with _kwarg_only (presumably enforces
# keyword-only invocation -- verify against its definition).
FractionalMaxPool.__doc__ = fractional_max_pool.__doc__
FractionalMaxPool = _doc_controls.do_not_generate_docs(_kwarg_only(FractionalMaxPool))
tf_export("raw_ops.FractionalMaxPool")(FractionalMaxPool)
def fractional_max_pool_eager_fallback(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None, ctx=None):
  r"""Python slow path for FractionalMaxPool in eager mode.

  Normalizes attrs, then executes the op via _execute.execute.
  """
  eager_ctx = ctx if ctx else _context.context()
  if not isinstance(pooling_ratio, (list, tuple)):
    raise TypeError(
        "Expected list for 'pooling_ratio' argument to "
        "'fractional_max_pool' Op, not %r." % pooling_ratio)
  pooling_ratio = [_execute.make_float(_f, "pooling_ratio")
                   for _f in pooling_ratio]
  # Normalize optional attrs, substituting defaults when None is passed.
  pseudo_random = _execute.make_bool(
      False if pseudo_random is None else pseudo_random, "pseudo_random")
  overlapping = _execute.make_bool(
      False if overlapping is None else overlapping, "overlapping")
  deterministic = _execute.make_bool(
      False if deterministic is None else deterministic, "deterministic")
  seed = _execute.make_int(0 if seed is None else seed, "seed")
  seed2 = _execute.make_int(0 if seed2 is None else seed2, "seed2")
  _attr_T, (value,) = _execute.args_to_matching_eager([value], eager_ctx)
  _inputs_flat = [value]
  _attrs = ("pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
            "overlapping", overlapping, "deterministic", deterministic,
            "seed", seed, "seed2", seed2, "T", _attr_T)
  _result = _execute.execute(b"FractionalMaxPool", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "FractionalMaxPool", _inputs_flat, _attrs, _result, name)
  # Wrap the three outputs in the named-output structure.
  return _FractionalMaxPoolOutput._make(_result)
def fractional_max_pool_grad(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None):
  r"""Computes gradient of the FractionalMaxPool function.
  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
      Original input for `fractional_max_pool`
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      Original output for `fractional_max_pool`
    out_backprop: A `Tensor`. Must have the same type as `orig_input`.
      4-D with shape `[batch, height, width, channels]`. Gradients
      w.r.t. the output of `fractional_max_pool`.
    row_pooling_sequence: A `Tensor` of type `int64`.
      row pooling sequence, form pooling region with
      col_pooling_sequence.
    col_pooling_sequence: A `Tensor` of type `int64`.
      column pooling sequence, form pooling region with
      row_pooling sequence.
    overlapping: An optional `bool`. Defaults to `False`.
      When set to True, it means when pooling, the values at the boundary
      of adjacent pooling cells are used by both cells. For example:
      `index 0 1 2 3 4`
      `value 20 5 16 3 7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
      The result would be [20, 16] for fractional max pooling.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Fast path: execute the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FractionalMaxPoolGrad", name, _ctx.post_execution_callbacks,
        orig_input, orig_output, out_backprop, row_pooling_sequence,
        col_pooling_sequence, "overlapping", overlapping)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs: retry via the Python slow path.
        return fractional_max_pool_grad_eager_fallback(
            orig_input, orig_output, out_backprop, row_pooling_sequence,
            col_pooling_sequence, overlapping=overlapping, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message before re-raising.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if overlapping is None:
    overlapping = False
  overlapping = _execute.make_bool(overlapping, "overlapping")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FractionalMaxPoolGrad", orig_input=orig_input,
                                 orig_output=orig_output,
                                 out_backprop=out_backprop,
                                 row_pooling_sequence=row_pooling_sequence,
                                 col_pooling_sequence=col_pooling_sequence,
                                 overlapping=overlapping, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("overlapping", _op.get_attr("overlapping"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "FractionalMaxPoolGrad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def FractionalMaxPoolGrad(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None):
  # Thin raw-op alias: delegate every argument unchanged to the
  # snake_case implementation.
  return fractional_max_pool_grad(
      orig_input=orig_input,
      orig_output=orig_output,
      out_backprop=out_backprop,
      row_pooling_sequence=row_pooling_sequence,
      col_pooling_sequence=col_pooling_sequence,
      overlapping=overlapping,
      name=name)
# Re-export under tf.raw_ops: share the snake_case docstring, hide from the
# generated API docs, and wrap with _kwarg_only (presumably enforces
# keyword-only invocation -- verify against its definition).
FractionalMaxPoolGrad.__doc__ = fractional_max_pool_grad.__doc__
FractionalMaxPoolGrad = _doc_controls.do_not_generate_docs(_kwarg_only(FractionalMaxPoolGrad))
tf_export("raw_ops.FractionalMaxPoolGrad")(FractionalMaxPoolGrad)
def fractional_max_pool_grad_eager_fallback(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None, ctx=None):
  r"""Python slow path for FractionalMaxPoolGrad in eager mode.

  Normalizes attrs, then executes the op via _execute.execute.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Normalize the attr, substituting the default when None is passed.
  overlapping = _execute.make_bool(
      False if overlapping is None else overlapping, "overlapping")
  # Bind orig_input/orig_output/out_backprop to one matching dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [orig_input, orig_output, out_backprop], eager_ctx)
  orig_input, orig_output, out_backprop = _inputs_T
  # Pooling sequences are always int64 tensors.
  row_pooling_sequence = _ops.convert_to_tensor(row_pooling_sequence, _dtypes.int64)
  col_pooling_sequence = _ops.convert_to_tensor(col_pooling_sequence, _dtypes.int64)
  _inputs_flat = [orig_input, orig_output, out_backprop,
                  row_pooling_sequence, col_pooling_sequence]
  _attrs = ("overlapping", overlapping, "T", _attr_T)
  _result = _execute.execute(b"FractionalMaxPoolGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "FractionalMaxPoolGrad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Field names for the FusedBatchNorm op's five outputs.
__fused_batch_norm_outputs = ["y", "batch_mean", "batch_variance",
                             "reserve_space_1", "reserve_space_2"]
# Namedtuple wrapper so callers can access the outputs by name.
_FusedBatchNormOutput = _collections.namedtuple(
    "FusedBatchNorm", __fused_batch_norm_outputs)
def _fused_batch_norm(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Batch normalization.
  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
  Args:
    x: A `Tensor`. Must be one of the following types: `float32`.
      A 4D Tensor for input data.
    scale: A `Tensor`. Must have the same type as `x`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    offset: A `Tensor`. Must have the same type as `x`.
      A 1D Tensor for offset, to shift to the normalized x.
    mean: A `Tensor`. Must have the same type as `x`.
      A 1D Tensor for population mean. Used for inference only;
      must be empty for training.
    variance: A `Tensor`. Must have the same type as `x`.
      A 1D Tensor for population variance. Used for inference only;
      must be empty for training.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for x and y. Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2).
    y: A `Tensor`. Has the same type as `x`.
    batch_mean: A `Tensor`. Has the same type as `x`.
    batch_variance: A `Tensor`. Has the same type as `x`.
    reserve_space_1: A `Tensor`. Has the same type as `x`.
    reserve_space_2: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Fast path: execute the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FusedBatchNorm", name, _ctx.post_execution_callbacks, x, scale,
        offset, mean, variance, "epsilon", epsilon, "data_format",
        data_format, "is_training", is_training)
      _result = _FusedBatchNormOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs: retry via the Python slow path.
        return _fused_batch_norm_eager_fallback(
            x, scale, offset, mean, variance, epsilon=epsilon,
            data_format=data_format, is_training=is_training, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message before re-raising.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if epsilon is None:
    epsilon = 0.0001
  epsilon = _execute.make_float(epsilon, "epsilon")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNorm", x=x, scale=scale, offset=offset, mean=mean,
                          variance=variance, epsilon=epsilon,
                          data_format=data_format, is_training=is_training,
                          name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "epsilon", _op.get_attr("epsilon"),
            "data_format", _op.get_attr("data_format"), "is_training",
            _op.get_attr("is_training"))
  _execute.record_gradient(
      "FusedBatchNorm", _inputs_flat, _attrs, _result, name)
  _result = _FusedBatchNormOutput._make(_result)
  return _result
def FusedBatchNorm(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  # Thin raw-op alias: delegate every argument unchanged to the
  # snake_case implementation.
  return _fused_batch_norm(
      x=x,
      scale=scale,
      offset=offset,
      mean=mean,
      variance=variance,
      epsilon=epsilon,
      data_format=data_format,
      is_training=is_training,
      name=name)
# Re-export under tf.raw_ops: share the snake_case docstring, hide from the
# generated API docs, and wrap with _kwarg_only (presumably enforces
# keyword-only invocation -- verify against its definition).
FusedBatchNorm.__doc__ = _fused_batch_norm.__doc__
FusedBatchNorm = _doc_controls.do_not_generate_docs(_kwarg_only(FusedBatchNorm))
tf_export("raw_ops.FusedBatchNorm")(FusedBatchNorm)
def _fused_batch_norm_eager_fallback(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""Python slow path for FusedBatchNorm in eager mode.

  Normalizes attrs, then executes the op via _execute.execute.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Normalize optional attrs, substituting defaults when None is passed.
  epsilon = _execute.make_float(
      0.0001 if epsilon is None else epsilon, "epsilon")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  is_training = _execute.make_bool(
      True if is_training is None else is_training, "is_training")
  # All five tensor inputs share one matching dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [x, scale, offset, mean, variance], eager_ctx)
  x, scale, offset, mean, variance = _inputs_T
  _inputs_flat = [x, scale, offset, mean, variance]
  _attrs = ("T", _attr_T, "epsilon", epsilon, "data_format", data_format,
            "is_training", is_training)
  _result = _execute.execute(b"FusedBatchNorm", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNorm", _inputs_flat, _attrs, _result, name)
  # Wrap the five outputs in the named-output structure.
  return _FusedBatchNormOutput._make(_result)
# Field names for the FusedBatchNormGrad op's five outputs.
_fused_batch_norm_grad_outputs = ["x_backprop", "scale_backprop",
                                 "offset_backprop", "reserve_space_3",
                                 "reserve_space_4"]
# Namedtuple wrapper so callers can access the outputs by name.
_FusedBatchNormGradOutput = _collections.namedtuple(
    "FusedBatchNormGrad", _fused_batch_norm_grad_outputs)
def fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Gradient for batch normalization.
  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
  Args:
    y_backprop: A `Tensor`. Must be one of the following types: `float32`.
      A 4D Tensor for the gradient with respect to y.
    x: A `Tensor`. Must have the same type as `y_backprop`.
      A 4D Tensor for input data.
    scale: A `Tensor`. Must have the same type as `y_backprop`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    reserve_space_1: A `Tensor`. Must have the same type as `y_backprop`.
      When is_training is True, a 1D Tensor for the computed batch
      mean to be reused in gradient computation. When is_training is
      False, a 1D Tensor for the population mean to be reused in both
      1st and 2nd order gradient computation.
    reserve_space_2: A `Tensor`. Must have the same type as `y_backprop`.
      When is_training is True, a 1D Tensor for the computed batch
      variance (inverted variance in the cuDNN case) to be reused in
      gradient computation. When is_training is False, a 1D Tensor
      for the population variance to be reused in both 1st and 2nd
      order gradient computation.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for y_backprop, x, x_backprop.
      Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4).
    x_backprop: A `Tensor`. Has the same type as `y_backprop`.
    scale_backprop: A `Tensor`. Has the same type as `y_backprop`.
    offset_backprop: A `Tensor`. Has the same type as `y_backprop`.
    reserve_space_3: A `Tensor`. Has the same type as `y_backprop`.
    reserve_space_4: A `Tensor`. Has the same type as `y_backprop`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Fast path: execute the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FusedBatchNormGrad", name, _ctx.post_execution_callbacks, y_backprop,
        x, scale, reserve_space_1, reserve_space_2, "epsilon", epsilon,
        "data_format", data_format, "is_training", is_training)
      _result = _FusedBatchNormGradOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs: retry via the Python slow path.
        return fused_batch_norm_grad_eager_fallback(
            y_backprop, x, scale, reserve_space_1, reserve_space_2,
            epsilon=epsilon, data_format=data_format, is_training=is_training,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message before re-raising.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if epsilon is None:
    epsilon = 0.0001
  epsilon = _execute.make_float(epsilon, "epsilon")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNormGrad", y_backprop=y_backprop, x=x, scale=scale,
                              reserve_space_1=reserve_space_1,
                              reserve_space_2=reserve_space_2,
                              epsilon=epsilon, data_format=data_format,
                              is_training=is_training, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "epsilon", _op.get_attr("epsilon"),
            "data_format", _op.get_attr("data_format"), "is_training",
            _op.get_attr("is_training"))
  _execute.record_gradient(
      "FusedBatchNormGrad", _inputs_flat, _attrs, _result, name)
  _result = _FusedBatchNormGradOutput._make(_result)
  return _result
def FusedBatchNormGrad(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  # Thin raw-op alias: delegate every argument unchanged to the
  # snake_case implementation.
  return fused_batch_norm_grad(
      y_backprop=y_backprop,
      x=x,
      scale=scale,
      reserve_space_1=reserve_space_1,
      reserve_space_2=reserve_space_2,
      epsilon=epsilon,
      data_format=data_format,
      is_training=is_training,
      name=name)
# Re-export under tf.raw_ops: share the snake_case docstring, hide from the
# generated API docs, and wrap with _kwarg_only (presumably enforces
# keyword-only invocation -- verify against its definition).
FusedBatchNormGrad.__doc__ = fused_batch_norm_grad.__doc__
FusedBatchNormGrad = _doc_controls.do_not_generate_docs(_kwarg_only(FusedBatchNormGrad))
tf_export("raw_ops.FusedBatchNormGrad")(FusedBatchNormGrad)
def fused_batch_norm_grad_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""Python slow path for FusedBatchNormGrad in eager mode.

  Normalizes attrs, then executes the op via _execute.execute.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Normalize optional attrs, substituting defaults when None is passed.
  epsilon = _execute.make_float(
      0.0001 if epsilon is None else epsilon, "epsilon")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  is_training = _execute.make_bool(
      True if is_training is None else is_training, "is_training")
  # All five tensor inputs share one matching dtype attr T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [y_backprop, x, scale, reserve_space_1, reserve_space_2], eager_ctx)
  y_backprop, x, scale, reserve_space_1, reserve_space_2 = _inputs_T
  _inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2]
  _attrs = ("T", _attr_T, "epsilon", epsilon, "data_format", data_format,
            "is_training", is_training)
  _result = _execute.execute(b"FusedBatchNormGrad", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNormGrad", _inputs_flat, _attrs, _result, name)
  # Wrap the five outputs in the named-output structure.
  return _FusedBatchNormGradOutput._make(_result)
# Field names for the FusedBatchNormGradV2 op's five outputs.
_fused_batch_norm_grad_v2_outputs = ["x_backprop", "scale_backprop",
                                    "offset_backprop", "reserve_space_3",
                                    "reserve_space_4"]
# Namedtuple wrapper so callers can access the outputs by name.
_FusedBatchNormGradV2Output = _collections.namedtuple(
    "FusedBatchNormGradV2", _fused_batch_norm_grad_v2_outputs)
def fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Gradient for batch normalization.
  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
  Args:
    y_backprop: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      A 4D Tensor for the gradient with respect to y.
    x: A `Tensor`. Must have the same type as `y_backprop`.
      A 4D Tensor for input data.
    scale: A `Tensor` of type `float32`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    reserve_space_1: A `Tensor`. Must be one of the following types: `float32`.
      When is_training is True, a 1D Tensor for the computed batch
      mean to be reused in gradient computation. When is_training is
      False, a 1D Tensor for the population mean to be reused in both
      1st and 2nd order gradient computation.
    reserve_space_2: A `Tensor`. Must have the same type as `reserve_space_1`.
      When is_training is True, a 1D Tensor for the computed batch
      variance (inverted variance in the cuDNN case) to be reused in
      gradient computation. When is_training is False, a 1D Tensor
      for the population variance to be reused in both 1st and 2nd
      order gradient computation.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for y_backprop, x, x_backprop.
      Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4).
    x_backprop: A `Tensor`. Has the same type as `y_backprop`.
    scale_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
    offset_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
    reserve_space_3: A `Tensor`. Has the same type as `reserve_space_1`.
    reserve_space_4: A `Tensor`. Has the same type as `reserve_space_1`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Fast path: execute the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FusedBatchNormGradV2", name, _ctx.post_execution_callbacks,
        y_backprop, x, scale, reserve_space_1, reserve_space_2, "epsilon",
        epsilon, "data_format", data_format, "is_training", is_training)
      _result = _FusedBatchNormGradV2Output._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs: retry via the Python slow path.
        return fused_batch_norm_grad_v2_eager_fallback(
            y_backprop, x, scale, reserve_space_1, reserve_space_2,
            epsilon=epsilon, data_format=data_format, is_training=is_training,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message before re-raising.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if epsilon is None:
    epsilon = 0.0001
  epsilon = _execute.make_float(epsilon, "epsilon")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNormGradV2", y_backprop=y_backprop, x=x, scale=scale,
                                reserve_space_1=reserve_space_1,
                                reserve_space_2=reserve_space_2,
                                epsilon=epsilon, data_format=data_format,
                                is_training=is_training, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"),
            "epsilon", _op.get_attr("epsilon"), "data_format",
            _op.get_attr("data_format"), "is_training",
            _op.get_attr("is_training"))
  _execute.record_gradient(
      "FusedBatchNormGradV2", _inputs_flat, _attrs, _result, name)
  _result = _FusedBatchNormGradV2Output._make(_result)
  return _result
def FusedBatchNormGradV2(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  # Thin raw-op alias: delegate every argument unchanged to the
  # snake_case implementation.
  return fused_batch_norm_grad_v2(
      y_backprop=y_backprop,
      x=x,
      scale=scale,
      reserve_space_1=reserve_space_1,
      reserve_space_2=reserve_space_2,
      epsilon=epsilon,
      data_format=data_format,
      is_training=is_training,
      name=name)
# Re-export under tf.raw_ops: share the snake_case docstring, hide from the
# generated API docs, and wrap with _kwarg_only (presumably enforces
# keyword-only invocation -- verify against its definition).
FusedBatchNormGradV2.__doc__ = fused_batch_norm_grad_v2.__doc__
FusedBatchNormGradV2 = _doc_controls.do_not_generate_docs(_kwarg_only(FusedBatchNormGradV2))
tf_export("raw_ops.FusedBatchNormGradV2")(FusedBatchNormGradV2)
def fused_batch_norm_grad_v2_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""Python slow path for FusedBatchNormGradV2 in eager mode.

  Normalizes attrs, then executes the op via _execute.execute.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Normalize optional attrs, substituting defaults when None is passed.
  epsilon = _execute.make_float(
      0.0001 if epsilon is None else epsilon, "epsilon")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  is_training = _execute.make_bool(
      True if is_training is None else is_training, "is_training")
  # Dtype attr T covers the gradient/input pair; U covers the reserves.
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [y_backprop, x], eager_ctx)
  y_backprop, x = _inputs_T
  _attr_U, _inputs_U = _execute.args_to_matching_eager(
      [reserve_space_1, reserve_space_2], eager_ctx)
  reserve_space_1, reserve_space_2 = _inputs_U
  # scale is always a float32 tensor for this op.
  scale = _ops.convert_to_tensor(scale, _dtypes.float32)
  _inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2]
  _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format",
            data_format, "is_training", is_training)
  _result = _execute.execute(b"FusedBatchNormGradV2", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNormGradV2", _inputs_flat, _attrs, _result, name)
  # Wrap the five outputs in the named-output structure.
  return _FusedBatchNormGradV2Output._make(_result)
# Field names for the FusedBatchNormGradV3 op's five outputs.
_fused_batch_norm_grad_v3_outputs = ["x_backprop", "scale_backprop",
                                    "offset_backprop", "reserve_space_4",
                                    "reserve_space_5"]
# Namedtuple wrapper so callers can access the outputs by name.
_FusedBatchNormGradV3Output = _collections.namedtuple(
    "FusedBatchNormGradV3", _fused_batch_norm_grad_v3_outputs)
def fused_batch_norm_grad_v3(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Gradient for batch normalization.
  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
  Args:
    y_backprop: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      A 4D Tensor for the gradient with respect to y.
    x: A `Tensor`. Must have the same type as `y_backprop`.
      A 4D Tensor for input data.
    scale: A `Tensor` of type `float32`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    reserve_space_1: A `Tensor`. Must be one of the following types: `float32`.
      When is_training is True, a 1D Tensor for the computed batch
      mean to be reused in gradient computation. When is_training is
      False, a 1D Tensor for the population mean to be reused in both
      1st and 2nd order gradient computation.
    reserve_space_2: A `Tensor`. Must have the same type as `reserve_space_1`.
      When is_training is True, a 1D Tensor for the computed batch
      variance (inverted variance in the cuDNN case) to be reused in
      gradient computation. When is_training is False, a 1D Tensor
      for the population variance to be reused in both 1st and 2nd
      order gradient computation.
    reserve_space_3: A `Tensor`. Must have the same type as `reserve_space_1`.
      When is_training is True, a 1D Tensor for some intermediate results to be reused
      in gradient computation. When is_training is False, a dummy empty Tensor will be
      created.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for y_backprop, x, x_backprop.
      Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_4, reserve_space_5).
    x_backprop: A `Tensor`. Has the same type as `y_backprop`.
    scale_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
    offset_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
    reserve_space_4: A `Tensor`. Has the same type as `reserve_space_1`.
    reserve_space_5: A `Tensor`. Has the same type as `reserve_space_1`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Fast path: execute the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FusedBatchNormGradV3", name, _ctx.post_execution_callbacks,
        y_backprop, x, scale, reserve_space_1, reserve_space_2,
        reserve_space_3, "epsilon", epsilon, "data_format", data_format,
        "is_training", is_training)
      _result = _FusedBatchNormGradV3Output._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs: retry via the Python slow path.
        return fused_batch_norm_grad_v3_eager_fallback(
            y_backprop, x, scale, reserve_space_1, reserve_space_2,
            reserve_space_3, epsilon=epsilon, data_format=data_format,
            is_training=is_training, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message before re-raising.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if epsilon is None:
    epsilon = 0.0001
  epsilon = _execute.make_float(epsilon, "epsilon")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNormGradV3", y_backprop=y_backprop, x=x, scale=scale,
                                reserve_space_1=reserve_space_1,
                                reserve_space_2=reserve_space_2,
                                reserve_space_3=reserve_space_3,
                                epsilon=epsilon, data_format=data_format,
                                is_training=is_training, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"),
            "epsilon", _op.get_attr("epsilon"), "data_format",
            _op.get_attr("data_format"), "is_training",
            _op.get_attr("is_training"))
  _execute.record_gradient(
      "FusedBatchNormGradV3", _inputs_flat, _attrs, _result, name)
  _result = _FusedBatchNormGradV3Output._make(_result)
  return _result
def FusedBatchNormGradV3(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  # Thin raw-op alias: delegate every argument unchanged to the
  # snake_case implementation.
  return fused_batch_norm_grad_v3(
      y_backprop=y_backprop,
      x=x,
      scale=scale,
      reserve_space_1=reserve_space_1,
      reserve_space_2=reserve_space_2,
      reserve_space_3=reserve_space_3,
      epsilon=epsilon,
      data_format=data_format,
      is_training=is_training,
      name=name)
# Re-export under tf.raw_ops: share the snake_case docstring, hide from the
# generated API docs, and wrap with _kwarg_only (presumably enforces
# keyword-only invocation -- verify against its definition).
FusedBatchNormGradV3.__doc__ = fused_batch_norm_grad_v3.__doc__
FusedBatchNormGradV3 = _doc_controls.do_not_generate_docs(_kwarg_only(FusedBatchNormGradV3))
tf_export("raw_ops.FusedBatchNormGradV3")(FusedBatchNormGradV3)
def fused_batch_norm_grad_v3_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""Python slow path for FusedBatchNormGradV3 in eager mode.

  Normalizes attrs, then executes the op via _execute.execute.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Normalize optional attrs, substituting defaults when None is passed.
  epsilon = _execute.make_float(
      0.0001 if epsilon is None else epsilon, "epsilon")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  is_training = _execute.make_bool(
      True if is_training is None else is_training, "is_training")
  # Dtype attr T covers the gradient/input pair; U covers the reserves.
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [y_backprop, x], eager_ctx)
  y_backprop, x = _inputs_T
  _attr_U, _inputs_U = _execute.args_to_matching_eager(
      [reserve_space_1, reserve_space_2, reserve_space_3], eager_ctx)
  reserve_space_1, reserve_space_2, reserve_space_3 = _inputs_U
  # scale is always a float32 tensor for this op.
  scale = _ops.convert_to_tensor(scale, _dtypes.float32)
  _inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2,
                  reserve_space_3]
  _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format",
            data_format, "is_training", is_training)
  _result = _execute.execute(b"FusedBatchNormGradV3", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNormGradV3", _inputs_flat, _attrs, _result, name)
  # Wrap the five outputs in the named-output structure.
  return _FusedBatchNormGradV3Output._make(_result)
# Field names for the FusedBatchNormV2 op's five outputs.
_fused_batch_norm_v2_outputs = ["y", "batch_mean", "batch_variance",
                               "reserve_space_1", "reserve_space_2"]
# Namedtuple wrapper so callers can access the outputs by name.
_FusedBatchNormV2Output = _collections.namedtuple(
    "FusedBatchNormV2", _fused_batch_norm_v2_outputs)
def fused_batch_norm_v2(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Batch normalization.
  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
  Args:
    x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      A 4D Tensor for input data.
    scale: A `Tensor`. Must be one of the following types: `float32`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    offset: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for offset, to shift to the normalized x.
    mean: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for population mean. Used for inference only;
      must be empty for training.
    variance: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for population variance. Used for inference only;
      must be empty for training.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for x and y. Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2).
    y: A `Tensor`. Has the same type as `x`.
    batch_mean: A `Tensor`. Has the same type as `scale`.
    batch_variance: A `Tensor`. Has the same type as `scale`.
    reserve_space_1: A `Tensor`. Has the same type as `scale`.
    reserve_space_2: A `Tensor`. Has the same type as `scale`.
  """
  # Use an already-created eager context if available.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path via the C layer. Inputs and attr name/value pairs are
      # interleaved positionally; their order matches the op registration.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FusedBatchNormV2", name, _ctx.post_execution_callbacks, x, scale,
        offset, mean, variance, "epsilon", epsilon, "data_format",
        data_format, "is_training", is_training)
      _result = _FusedBatchNormV2Output._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry via the Python-level
        # eager slow path.
        return fused_batch_norm_v2_eager_fallback(
            x, scale, offset, mean, variance, epsilon=epsilon,
            data_format=data_format, is_training=is_training, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if epsilon is None:
    epsilon = 0.0001
  epsilon = _execute.make_float(epsilon, "epsilon")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNormV2", x=x, scale=scale, offset=offset, mean=mean,
                            variance=variance, epsilon=epsilon,
                            data_format=data_format, is_training=is_training,
                            name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op and record them for gradients.
  _attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"),
            "epsilon", _op.get_attr("epsilon"), "data_format",
            _op.get_attr("data_format"), "is_training",
            _op.get_attr("is_training"))
  _execute.record_gradient(
      "FusedBatchNormV2", _inputs_flat, _attrs, _result, name)
  _result = _FusedBatchNormV2Output._make(_result)
  return _result
def FusedBatchNormV2(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  # Raw-op alias: forward everything to the snake_case implementation.
  return fused_batch_norm_v2(
      x, scale, offset, mean, variance,
      epsilon=epsilon, data_format=data_format, is_training=is_training,
      name=name)
FusedBatchNormV2.__doc__ = fused_batch_norm_v2.__doc__
FusedBatchNormV2 = _doc_controls.do_not_generate_docs(_kwarg_only(FusedBatchNormV2))
tf_export("raw_ops.FusedBatchNormV2")(FusedBatchNormV2)
def fused_batch_norm_v2_eager_fallback(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fused_batch_norm_v2
  """
  _ctx = ctx if ctx else _context.context()
  # Apply attr defaults, then coerce each attr to its declared type.
  if epsilon is None:
    epsilon = 0.0001
  epsilon = _execute.make_float(epsilon, "epsilon")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  # T is x's dtype; U is the dtype shared by scale/offset/mean/variance.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _attr_U, _inputs_U = _execute.args_to_matching_eager([scale, offset, mean, variance], _ctx)
  (scale, offset, mean, variance) = _inputs_U
  _inputs_flat = [x, scale, offset, mean, variance]
  _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format",
  data_format, "is_training", is_training)
  # The op produces 5 outputs; record inputs/attrs for gradient computation.
  _result = _execute.execute(b"FusedBatchNormV2", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNormV2", _inputs_flat, _attrs, _result, name)
  _result = _FusedBatchNormV2Output._make(_result)
  return _result
# Output names and result container for the multi-output FusedBatchNormV3 op
# (adds reserve_space_3 relative to V2).
_fused_batch_norm_v3_outputs = ["y", "batch_mean", "batch_variance",
                                "reserve_space_1", "reserve_space_2",
                                "reserve_space_3"]
_FusedBatchNormV3Output = _collections.namedtuple(
    "FusedBatchNormV3", _fused_batch_norm_v3_outputs)
def fused_batch_norm_v3(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  r"""Batch normalization.
  Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
  Args:
    x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      A 4D Tensor for input data.
    scale: A `Tensor`. Must be one of the following types: `float32`.
      A 1D Tensor for scaling factor, to scale the normalized x.
    offset: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for offset, to shift to the normalized x.
    mean: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for population mean. Used for inference only;
      must be empty for training.
    variance: A `Tensor`. Must have the same type as `scale`.
      A 1D Tensor for population variance. Used for inference only;
      must be empty for training.
    epsilon: An optional `float`. Defaults to `0.0001`.
      A small float number added to the variance of x.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      The data format for x and y. Either "NHWC" (default) or "NCHW".
    is_training: An optional `bool`. Defaults to `True`.
      A bool value to indicate the operation is for training (default)
      or inference.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2, reserve_space_3).
    y: A `Tensor`. Has the same type as `x`.
    batch_mean: A `Tensor`. Has the same type as `scale`.
    batch_variance: A `Tensor`. Has the same type as `scale`.
    reserve_space_1: A `Tensor`. Has the same type as `scale`.
    reserve_space_2: A `Tensor`. Has the same type as `scale`.
    reserve_space_3: A `Tensor`. Has the same type as `scale`.
  """
  # Use an already-created eager context if available.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path via the C layer. Inputs and attr name/value pairs are
      # interleaved positionally; their order matches the op registration.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FusedBatchNormV3", name, _ctx.post_execution_callbacks, x, scale,
        offset, mean, variance, "epsilon", epsilon, "data_format",
        data_format, "is_training", is_training)
      _result = _FusedBatchNormV3Output._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry via the Python-level
        # eager slow path.
        return fused_batch_norm_v3_eager_fallback(
            x, scale, offset, mean, variance, epsilon=epsilon,
            data_format=data_format, is_training=is_training, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if epsilon is None:
    epsilon = 0.0001
  epsilon = _execute.make_float(epsilon, "epsilon")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FusedBatchNormV3", x=x, scale=scale, offset=offset, mean=mean,
                            variance=variance, epsilon=epsilon,
                            data_format=data_format, is_training=is_training,
                            name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op and record them for gradients.
  _attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"),
            "epsilon", _op.get_attr("epsilon"), "data_format",
            _op.get_attr("data_format"), "is_training",
            _op.get_attr("is_training"))
  _execute.record_gradient(
      "FusedBatchNormV3", _inputs_flat, _attrs, _result, name)
  _result = _FusedBatchNormV3Output._make(_result)
  return _result
def FusedBatchNormV3(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
  # Raw-op alias: forward everything to the snake_case implementation.
  return fused_batch_norm_v3(
      x, scale, offset, mean, variance,
      epsilon=epsilon, data_format=data_format, is_training=is_training,
      name=name)
FusedBatchNormV3.__doc__ = fused_batch_norm_v3.__doc__
FusedBatchNormV3 = _doc_controls.do_not_generate_docs(_kwarg_only(FusedBatchNormV3))
tf_export("raw_ops.FusedBatchNormV3")(FusedBatchNormV3)
def fused_batch_norm_v3_eager_fallback(x, scale, offset, mean, variance, epsilon=0.0001, data_format="NHWC", is_training=True, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fused_batch_norm_v3
  """
  _ctx = ctx if ctx else _context.context()
  # Apply attr defaults, then coerce each attr to its declared type.
  if epsilon is None:
    epsilon = 0.0001
  epsilon = _execute.make_float(epsilon, "epsilon")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  # T is x's dtype; U is the dtype shared by scale/offset/mean/variance.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _attr_U, _inputs_U = _execute.args_to_matching_eager([scale, offset, mean, variance], _ctx)
  (scale, offset, mean, variance) = _inputs_U
  _inputs_flat = [x, scale, offset, mean, variance]
  _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format",
  data_format, "is_training", is_training)
  # V3 produces 6 outputs (V2 produces 5: no reserve_space_3).
  _result = _execute.execute(b"FusedBatchNormV3", 6, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FusedBatchNormV3", _inputs_flat, _attrs, _result, name)
  _result = _FusedBatchNormV3Output._make(_result)
  return _result
def fused_pad_conv2d(input, paddings, filter, mode, strides, padding, name=None):
  r"""Performs a padding as a preprocess during a convolution.
  Similar to FusedResizeAndPadConv2d, this op allows for an optimized
  implementation where the spatial padding transformation stage is fused with the
  im2col lookup, but in this case without the bilinear filtering required for
  resizing. Fusing the padding prevents the need to write out the intermediate
  results as whole tensors, reducing memory pressure, and we can get some latency
  gains by merging the transformation calculations.
  The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
  order is used instead.
  Internally this op uses a single per-graph scratch buffer, which means that it
  will block if multiple versions are being run in parallel. This is because this
  operator is primarily an optimization to minimize memory usage.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    paddings: A `Tensor` of type `int32`.
      A two-column matrix specifying the padding sizes. The number of
      rows must be the same as the rank of `input`.
    filter: A `Tensor`. Must have the same type as `input`. 4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
    strides: A list of `ints`.
      1-D of length 4. The stride of the sliding window for each dimension
      of `input`. Must be in the same order as the dimension specified with format.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Use an already-created eager context if available.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path via the C layer. Inputs and attr name/value pairs are
      # interleaved positionally; their order matches the op registration.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FusedPadConv2D", name, _ctx.post_execution_callbacks, input,
        paddings, filter, "mode", mode, "strides", strides, "padding",
        padding)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry via the Python-level
        # eager slow path.
        return fused_pad_conv2d_eager_fallback(
            input, paddings, filter, mode=mode, strides=strides,
            padding=padding, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  mode = _execute.make_str(mode, "mode")
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'fused_pad_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FusedPadConv2D", input=input, paddings=paddings, filter=filter,
                          mode=mode, strides=strides, padding=padding,
                          name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op and record them for gradients.
  _attrs = ("T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"),
            "strides", _op.get_attr("strides"), "padding",
            _op.get_attr("padding"))
  _execute.record_gradient(
      "FusedPadConv2D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def FusedPadConv2D(input, paddings, filter, mode, strides, padding, name=None):
  # Raw-op alias: forward everything to the snake_case implementation.
  return fused_pad_conv2d(
      input, paddings, filter, mode=mode, strides=strides, padding=padding,
      name=name)
FusedPadConv2D.__doc__ = fused_pad_conv2d.__doc__
FusedPadConv2D = _doc_controls.do_not_generate_docs(_kwarg_only(FusedPadConv2D))
tf_export("raw_ops.FusedPadConv2D")(FusedPadConv2D)
def fused_pad_conv2d_eager_fallback(input, paddings, filter, mode, strides, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fused_pad_conv2d
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and coerce the attrs to their declared types.
  mode = _execute.make_str(mode, "mode")
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'fused_pad_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  # T is the dtype shared by input and filter; paddings is always int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
  (input, filter) = _inputs_T
  paddings = _ops.convert_to_tensor(paddings, _dtypes.int32)
  _inputs_flat = [input, paddings, filter]
  _attrs = ("T", _attr_T, "mode", mode, "strides", strides, "padding",
  padding)
  _result = _execute.execute(b"FusedPadConv2D", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FusedPadConv2D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def fused_resize_and_pad_conv2d(input, size, paddings, filter, mode, strides, padding, resize_align_corners=False, name=None):
  r"""Performs a resize and padding as a preprocess during a convolution.
  It's often possible to do spatial transformations more efficiently as part of
  the packing stage of a convolution, so this op allows for an optimized
  implementation where these stages are fused together. This prevents the need to
  write out the intermediate results as whole tensors, reducing memory pressure,
  and we can get some latency gains by merging the transformation calculations.
  The data_format attribute for Conv2D isn't supported by this op, and defaults to
  'NHWC' order.
  Internally this op uses a single per-graph scratch buffer, which means that it
  will block if multiple versions are being run in parallel. This is because this
  operator is primarily an optimization to minimize memory usage.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    size: A `Tensor` of type `int32`.
      A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
      new size for the images.
    paddings: A `Tensor` of type `int32`.
      A two-column matrix specifying the padding sizes. The number of
      rows must be the same as the rank of `input`.
    filter: A `Tensor`. Must have the same type as `input`. 4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
    strides: A list of `ints`.
      1-D of length 4. The stride of the sliding window for each dimension
      of `input`. Must be in the same order as the dimension specified with format.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    resize_align_corners: An optional `bool`. Defaults to `False`.
      If true, the centers of the 4 corner pixels of the input and output tensors are
      aligned, preserving the values at the corner pixels. Defaults to false.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Use an already-created eager context if available.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path via the C layer. Inputs and attr name/value pairs are
      # interleaved positionally; their order matches the op registration.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "FusedResizeAndPadConv2D", name, _ctx.post_execution_callbacks, input,
        size, paddings, filter, "resize_align_corners", resize_align_corners,
        "mode", mode, "strides", strides, "padding", padding)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry via the Python-level
        # eager slow path.
        return fused_resize_and_pad_conv2d_eager_fallback(
            input, size, paddings, filter,
            resize_align_corners=resize_align_corners, mode=mode,
            strides=strides, padding=padding, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  mode = _execute.make_str(mode, "mode")
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'fused_resize_and_pad_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if resize_align_corners is None:
    resize_align_corners = False
  resize_align_corners = _execute.make_bool(resize_align_corners, "resize_align_corners")
  _, _, _op = _op_def_lib._apply_op_helper(
        "FusedResizeAndPadConv2D", input=input, size=size, paddings=paddings,
                                   filter=filter, mode=mode, strides=strides,
                                   padding=padding,
                                   resize_align_corners=resize_align_corners,
                                   name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op and record them for gradients.
  _attrs = ("T", _op._get_attr_type("T"), "resize_align_corners",
            _op.get_attr("resize_align_corners"), "mode",
            _op.get_attr("mode"), "strides", _op.get_attr("strides"),
            "padding", _op.get_attr("padding"))
  _execute.record_gradient(
      "FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def FusedResizeAndPadConv2D(input, size, paddings, filter, mode, strides, padding, resize_align_corners=False, name=None):
  # Raw-op alias: forward everything to the snake_case implementation.
  return fused_resize_and_pad_conv2d(
      input, size, paddings, filter, mode=mode, strides=strides,
      padding=padding, resize_align_corners=resize_align_corners, name=name)
FusedResizeAndPadConv2D.__doc__ = fused_resize_and_pad_conv2d.__doc__
FusedResizeAndPadConv2D = _doc_controls.do_not_generate_docs(_kwarg_only(FusedResizeAndPadConv2D))
tf_export("raw_ops.FusedResizeAndPadConv2D")(FusedResizeAndPadConv2D)
def fused_resize_and_pad_conv2d_eager_fallback(input, size, paddings, filter, mode, strides, padding, resize_align_corners=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function fused_resize_and_pad_conv2d
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and coerce the attrs to their declared types.
  mode = _execute.make_str(mode, "mode")
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'fused_resize_and_pad_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if resize_align_corners is None:
    resize_align_corners = False
  resize_align_corners = _execute.make_bool(resize_align_corners, "resize_align_corners")
  # T is the dtype shared by input and filter; size/paddings are int32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], _ctx)
  (input, filter) = _inputs_T
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  paddings = _ops.convert_to_tensor(paddings, _dtypes.int32)
  _inputs_flat = [input, size, paddings, filter]
  _attrs = ("T", _attr_T, "resize_align_corners", resize_align_corners,
  "mode", mode, "strides", strides, "padding", padding)
  _result = _execute.execute(b"FusedResizeAndPadConv2D", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def in_top_k(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.
  This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
  prediction for the target class is among the top `k` predictions among
  all predictions for example `i`. Note that the behavior of `InTopK` differs
  from the `TopK` op in its handling of ties; if multiple classes have the
  same prediction value and straddle the top-`k` boundary, all of those
  classes are considered to be in the top `k`.
  More formally, let
  \\(predictions_i\\) be the predictions for all classes for example `i`,
  \\(targets_i\\) be the target class for example `i`,
  \\(out_i\\) be the output for example `i`,
  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
  Args:
    predictions: A `Tensor` of type `float32`.
      A `batch_size` x `classes` tensor.
    targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `batch_size` vector of class ids.
    k: An `int`. Number of top elements to look at for computing precision.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `bool`.
  """
  # Use an already-created eager context if available.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path via the C layer; note k is a Python-int attr here
      # (InTopKV2 takes it as a tensor input instead).
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "InTopK",
        name, _ctx.post_execution_callbacks, predictions, targets, "k", k)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry via the Python-level
        # eager slow path.
        return in_top_k_eager_fallback(
            predictions, targets, k=k, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  k = _execute.make_int(k, "k")
  _, _, _op = _op_def_lib._apply_op_helper(
        "InTopK", predictions=predictions, targets=targets, k=k, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op and record them for gradients.
  _attrs = ("k", _op.get_attr("k"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "InTopK", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def InTopK(predictions, targets, k, name=None):
  # Raw-op alias: forward everything to the snake_case implementation.
  return in_top_k(predictions, targets, k, name=name)
InTopK.__doc__ = in_top_k.__doc__
InTopK = _doc_controls.do_not_generate_docs(_kwarg_only(InTopK))
tf_export("raw_ops.InTopK")(InTopK)
def in_top_k_eager_fallback(predictions, targets, k, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function in_top_k
  """
  _ctx = ctx if ctx else _context.context()
  # k is an int attr; T is inferred from targets (int32 by default).
  k = _execute.make_int(k, "k")
  _attr_T, (targets,) = _execute.args_to_matching_eager([targets], _ctx, _dtypes.int32)
  predictions = _ops.convert_to_tensor(predictions, _dtypes.float32)
  _inputs_flat = [predictions, targets]
  _attrs = ("k", k, "T", _attr_T)
  _result = _execute.execute(b"InTopK", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "InTopK", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def in_top_kv2(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.
  This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
  prediction for the target class is among the top `k` predictions among
  all predictions for example `i`. Note that the behavior of `InTopK` differs
  from the `TopK` op in its handling of ties; if multiple classes have the
  same prediction value and straddle the top-`k` boundary, all of those
  classes are considered to be in the top `k`.
  More formally, let
  \\(predictions_i\\) be the predictions for all classes for example `i`,
  \\(targets_i\\) be the target class for example `i`,
  \\(out_i\\) be the output for example `i`,
  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
  Args:
    predictions: A `Tensor` of type `float32`.
      A `batch_size` x `classes` tensor.
    targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `batch_size` vector of class ids.
    k: A `Tensor`. Must have the same type as `targets`.
      Number of top elements to look at for computing precision.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `bool`.
  """
  # Use an already-created eager context if available.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path via the C layer; unlike InTopK, k is a tensor input.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "InTopKV2",
        name, _ctx.post_execution_callbacks, predictions, targets, k)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry via the Python-level
        # eager slow path.
        return in_top_kv2_eager_fallback(
            predictions, targets, k, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "InTopKV2", predictions=predictions, targets=targets, k=k, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op and record them for gradients.
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "InTopKV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def InTopKV2(predictions, targets, k, name=None):
  # Raw-op alias: forward everything to the snake_case implementation.
  return in_top_kv2(predictions, targets, k, name=name)
InTopKV2.__doc__ = in_top_kv2.__doc__
InTopKV2 = _doc_controls.do_not_generate_docs(_kwarg_only(InTopKV2))
tf_export("raw_ops.InTopKV2")(InTopKV2)
def in_top_kv2_eager_fallback(predictions, targets, k, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function in_top_kv2
  """
  _ctx = ctx if ctx else _context.context()
  # T is the dtype shared by targets and k (int32 by default).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([targets, k], _ctx, _dtypes.int32)
  (targets, k) = _inputs_T
  predictions = _ops.convert_to_tensor(predictions, _dtypes.float32)
  _inputs_flat = [predictions, targets, k]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"InTopKV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "InTopKV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('nn.l2_loss')
def l2_loss(t, name=None):
  r"""L2 Loss.
  Computes half the L2 norm of a tensor without the `sqrt`:
  output = sum(t ** 2) / 2
  Args:
    t: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      Typically 2-D, but may have any dimensions.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `t`.
  """
  # Use an already-created eager context if available.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path via the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "L2Loss",
        name, _ctx.post_execution_callbacks, t)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry via the Python-level
        # eager slow path.
        return l2_loss_eager_fallback(
            t, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Give registered dispatchers (e.g. for non-Tensor types) a chance
        # to handle the call before re-raising.
        result = _dispatch.dispatch(
              l2_loss, t=t, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "L2Loss", t=t, name=name)
  except (TypeError, ValueError):
    # Give registered dispatchers a chance to handle the call.
    result = _dispatch.dispatch(
          l2_loss, t=t, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back off the created op and record them for gradients.
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "L2Loss", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def L2Loss(t, name=None):
  # Raw-op alias: forward everything to the snake_case implementation.
  return l2_loss(t, name=name)
L2Loss.__doc__ = l2_loss.__doc__
L2Loss = _doc_controls.do_not_generate_docs(_kwarg_only(L2Loss))
tf_export("raw_ops.L2Loss")(L2Loss)
def l2_loss_eager_fallback(t, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function l2_loss
  """
  _ctx = ctx if ctx else _context.context()
  # T is inferred from the single input tensor.
  _attr_T, (t,) = _execute.args_to_matching_eager([t], _ctx)
  _inputs_flat = [t]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"L2Loss", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "L2Loss", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('nn.local_response_normalization', 'nn.lrn')
def lrn(input, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
  r"""Local Response Normalization.

  The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
  dimension), and each vector is normalized independently. Within a given vector,
  each component is divided by the weighted, squared sum of inputs within
  `depth_radius`. In detail,

      sqr_sum[a, b, c, d] =
          sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
      output = input / (bias + alpha * sqr_sum) ** beta

  For details, see [Krizhevsky et al., ImageNet classification with deep
  convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      4-D.
    depth_radius: An optional `int`. Defaults to `5`.
      0-D. Half-width of the 1-D normalization window.
    bias: An optional `float`. Defaults to `1`.
      An offset (usually positive to avoid dividing by 0).
    alpha: An optional `float`. Defaults to `1`.
      A scale factor, usually positive.
    beta: An optional `float`. Defaults to `0.5`. An exponent.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Use the already-installed context if any; otherwise create one lazily.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "LRN",
        name, _ctx.post_execution_callbacks, input, "depth_radius",
        depth_radius, "bias", bias, "alpha", alpha, "beta", beta)
      return _result
    except _core._FallbackException:
      try:
        # Eager slow path: resolve attrs/dtypes in Python, then execute.
        return lrn_eager_fallback(
            input, depth_radius=depth_radius, bias=bias, alpha=alpha,
            beta=beta, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Give registered dispatch handlers a chance before re-raising the
        # conversion error.
        result = _dispatch.dispatch(
              lrn, input=input, depth_radius=depth_radius, bias=bias,
              alpha=alpha, beta=beta, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Surface the underlying status as the matching Python exception,
      # appending the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: substitute defaults and coerce each attr to its op type.
  if depth_radius is None:
    depth_radius = 5
  depth_radius = _execute.make_int(depth_radius, "depth_radius")
  if bias is None:
    bias = 1
  bias = _execute.make_float(bias, "bias")
  if alpha is None:
    alpha = 1
  alpha = _execute.make_float(alpha, "alpha")
  if beta is None:
    beta = 0.5
  beta = _execute.make_float(beta, "beta")
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "LRN", input=input, depth_radius=depth_radius, bias=bias, alpha=alpha,
        beta=beta, name=name)
  except (TypeError, ValueError):
    # Same dispatch escape hatch as in the eager branch.
    result = _dispatch.dispatch(
          lrn, input=input, depth_radius=depth_radius, bias=bias, alpha=alpha,
          beta=beta, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read attrs back off the created op so the recorded gradient sees the
  # canonicalized values.
  _attrs = ("depth_radius", _op.get_attr("depth_radius"), "bias",
            _op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta",
            _op.get_attr("beta"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "LRN", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def LRN(input, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
  # Raw-op alias: forwards every argument to lrn unchanged.
  return lrn(input=input, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta, name=name)
# Reuse lrn's docstring so both entry points document the same contract.
LRN.__doc__ = lrn.__doc__
# _kwarg_only wraps the callable (presumably enforcing keyword-only calls —
# confirm at its definition); do_not_generate_docs hides it from the API docs.
LRN = _doc_controls.do_not_generate_docs(_kwarg_only(LRN))
tf_export("raw_ops.LRN")(LRN)
def lrn_eager_fallback(input, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None, ctx=None):
  r"""Slow-path eager implementation of lrn.

  Normalizes attr values in Python and runs the LRN op via _execute.
  """
  run_ctx = ctx if ctx else _context.context()
  # Substitute op defaults for None, then coerce each attr to its op type.
  depth_radius = _execute.make_int(
      5 if depth_radius is None else depth_radius, "depth_radius")
  bias = _execute.make_float(1 if bias is None else bias, "bias")
  alpha = _execute.make_float(1 if alpha is None else alpha, "alpha")
  beta = _execute.make_float(0.5 if beta is None else beta, "beta")
  # The dtype attr falls back to float32 when it cannot be inferred.
  attr_t, (input,) = _execute.args_to_matching_eager(
      [input], run_ctx, _dtypes.float32)
  flat_inputs = [input]
  op_attrs = ("depth_radius", depth_radius, "bias", bias, "alpha", alpha,
              "beta", beta, "T", attr_t)
  outputs = _execute.execute(b"LRN", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=run_ctx, name=name)
  _execute.record_gradient(
      "LRN", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def lrn_grad(input_grads, input_image, output_image, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
  r"""Gradients for Local Response Normalization.

  Args:
    input_grads: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      4-D with shape `[batch, height, width, channels]`.
    input_image: A `Tensor`. Must have the same type as `input_grads`.
      4-D with shape `[batch, height, width, channels]`.
    output_image: A `Tensor`. Must have the same type as `input_grads`.
      4-D with shape `[batch, height, width, channels]`.
    depth_radius: An optional `int`. Defaults to `5`. A depth radius.
    bias: An optional `float`. Defaults to `1`.
      An offset (usually > 0 to avoid dividing by 0).
    alpha: An optional `float`. Defaults to `1`.
      A scale factor, usually positive.
    beta: An optional `float`. Defaults to `0.5`. An exponent.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input_grads`.
  """
  # Use the already-installed context if any; otherwise create one lazily.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "LRNGrad",
        name, _ctx.post_execution_callbacks, input_grads, input_image,
        output_image, "depth_radius", depth_radius, "bias", bias, "alpha",
        alpha, "beta", beta)
      return _result
    except _core._FallbackException:
      try:
        # Eager slow path: resolve attrs/dtypes in Python, then execute.
        return lrn_grad_eager_fallback(
            input_grads, input_image, output_image, depth_radius=depth_radius,
            bias=bias, alpha=alpha, beta=beta, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the underlying status as the matching Python exception,
      # appending the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: substitute defaults and coerce each attr to its op type.
  if depth_radius is None:
    depth_radius = 5
  depth_radius = _execute.make_int(depth_radius, "depth_radius")
  if bias is None:
    bias = 1
  bias = _execute.make_float(bias, "bias")
  if alpha is None:
    alpha = 1
  alpha = _execute.make_float(alpha, "alpha")
  if beta is None:
    beta = 0.5
  beta = _execute.make_float(beta, "beta")
  _, _, _op = _op_def_lib._apply_op_helper(
        "LRNGrad", input_grads=input_grads, input_image=input_image,
                   output_image=output_image, depth_radius=depth_radius,
                   bias=bias, alpha=alpha, beta=beta, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read attrs back off the created op so the recorded gradient sees the
  # canonicalized values.
  _attrs = ("depth_radius", _op.get_attr("depth_radius"), "bias",
            _op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta",
            _op.get_attr("beta"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "LRNGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def LRNGrad(input_grads, input_image, output_image, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
  # Raw-op alias: forwards every argument to lrn_grad unchanged.
  return lrn_grad(input_grads=input_grads, input_image=input_image, output_image=output_image, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta, name=name)
# Reuse lrn_grad's docstring so both entry points document the same contract.
LRNGrad.__doc__ = lrn_grad.__doc__
# _kwarg_only wraps the callable (presumably enforcing keyword-only calls —
# confirm at its definition); do_not_generate_docs hides it from the API docs.
LRNGrad = _doc_controls.do_not_generate_docs(_kwarg_only(LRNGrad))
tf_export("raw_ops.LRNGrad")(LRNGrad)
def lrn_grad_eager_fallback(input_grads, input_image, output_image, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None, ctx=None):
  r"""Slow-path eager implementation of lrn_grad.

  Normalizes attr values in Python and runs the LRNGrad op via _execute.
  """
  run_ctx = ctx if ctx else _context.context()
  # Substitute op defaults for None, then coerce each attr to its op type.
  depth_radius = _execute.make_int(
      5 if depth_radius is None else depth_radius, "depth_radius")
  bias = _execute.make_float(1 if bias is None else bias, "bias")
  alpha = _execute.make_float(1 if alpha is None else alpha, "alpha")
  beta = _execute.make_float(0.5 if beta is None else beta, "beta")
  # All three tensors share one dtype attr; default to float32 if unknown.
  attr_t, (input_grads, input_image, output_image) = (
      _execute.args_to_matching_eager(
          [input_grads, input_image, output_image], run_ctx, _dtypes.float32))
  flat_inputs = [input_grads, input_image, output_image]
  op_attrs = ("depth_radius", depth_radius, "bias", bias, "alpha", alpha,
              "beta", beta, "T", attr_t)
  outputs = _execute.execute(b"LRNGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "LRNGrad", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def leaky_relu(features, alpha=0.2, name=None):
  r"""Computes rectified linear: `max(features, features * alpha)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    alpha: An optional `float`. Defaults to `0.2`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  # Use the already-installed context if any; otherwise create one lazily.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "LeakyRelu", name, _ctx.post_execution_callbacks, features, "alpha",
        alpha)
      return _result
    except _core._FallbackException:
      try:
        # Eager slow path: resolve attrs/dtypes in Python, then execute.
        return leaky_relu_eager_fallback(
            features, alpha=alpha, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the underlying status as the matching Python exception,
      # appending the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: substitute the default and coerce the attr to its op type.
  if alpha is None:
    alpha = 0.2
  alpha = _execute.make_float(alpha, "alpha")
  _, _, _op = _op_def_lib._apply_op_helper(
        "LeakyRelu", features=features, alpha=alpha, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read attrs back off the created op so the recorded gradient sees the
  # canonicalized values.
  _attrs = ("alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "LeakyRelu", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def LeakyRelu(features, alpha=0.2, name=None):
  # Raw-op alias: forwards every argument to leaky_relu unchanged.
  return leaky_relu(features=features, alpha=alpha, name=name)
# Reuse leaky_relu's docstring so both entry points document the same contract.
LeakyRelu.__doc__ = leaky_relu.__doc__
# _kwarg_only wraps the callable (presumably enforcing keyword-only calls —
# confirm at its definition); do_not_generate_docs hides it from the API docs.
LeakyRelu = _doc_controls.do_not_generate_docs(_kwarg_only(LeakyRelu))
tf_export("raw_ops.LeakyRelu")(LeakyRelu)
def leaky_relu_eager_fallback(features, alpha=0.2, name=None, ctx=None):
  r"""Slow-path eager implementation of leaky_relu.

  Normalizes the alpha attr in Python and runs the LeakyRelu op via _execute.
  """
  run_ctx = ctx if ctx else _context.context()
  # Substitute the op default for None, then coerce to a float attr.
  alpha = _execute.make_float(0.2 if alpha is None else alpha, "alpha")
  # The dtype attr falls back to float32 when it cannot be inferred.
  attr_t, (features,) = _execute.args_to_matching_eager(
      [features], run_ctx, _dtypes.float32)
  flat_inputs = [features]
  op_attrs = ("alpha", alpha, "T", attr_t)
  outputs = _execute.execute(b"LeakyRelu", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "LeakyRelu", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def leaky_relu_grad(gradients, features, alpha=0.2, name=None):
  r"""Computes rectified linear gradients for a LeakyRelu operation.

  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding LeakyRelu operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding LeakyRelu operation,
      OR the outputs of that operation (both work equivalently).
    alpha: An optional `float`. Defaults to `0.2`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  # Use the already-installed context if any; otherwise create one lazily.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "LeakyReluGrad", name, _ctx.post_execution_callbacks, gradients,
        features, "alpha", alpha)
      return _result
    except _core._FallbackException:
      try:
        # Eager slow path: resolve attrs/dtypes in Python, then execute.
        return leaky_relu_grad_eager_fallback(
            gradients, features, alpha=alpha, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the underlying status as the matching Python exception,
      # appending the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: substitute the default and coerce the attr to its op type.
  if alpha is None:
    alpha = 0.2
  alpha = _execute.make_float(alpha, "alpha")
  _, _, _op = _op_def_lib._apply_op_helper(
        "LeakyReluGrad", gradients=gradients, features=features, alpha=alpha,
                         name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read attrs back off the created op so the recorded gradient sees the
  # canonicalized values.
  _attrs = ("alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "LeakyReluGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def LeakyReluGrad(gradients, features, alpha=0.2, name=None):
  # Raw-op alias: forwards every argument to leaky_relu_grad unchanged.
  return leaky_relu_grad(gradients=gradients, features=features, alpha=alpha, name=name)
# Reuse leaky_relu_grad's docstring so both entry points document the same
# contract.
LeakyReluGrad.__doc__ = leaky_relu_grad.__doc__
# _kwarg_only wraps the callable (presumably enforcing keyword-only calls —
# confirm at its definition); do_not_generate_docs hides it from the API docs.
LeakyReluGrad = _doc_controls.do_not_generate_docs(_kwarg_only(LeakyReluGrad))
tf_export("raw_ops.LeakyReluGrad")(LeakyReluGrad)
def leaky_relu_grad_eager_fallback(gradients, features, alpha=0.2, name=None, ctx=None):
  r"""Slow-path eager implementation of leaky_relu_grad.

  Normalizes the alpha attr in Python and runs the LeakyReluGrad op via
  _execute.
  """
  run_ctx = ctx if ctx else _context.context()
  # Substitute the op default for None, then coerce to a float attr.
  alpha = _execute.make_float(0.2 if alpha is None else alpha, "alpha")
  # Both tensors share one dtype attr; default to float32 if unknown.
  attr_t, (gradients, features) = _execute.args_to_matching_eager(
      [gradients, features], run_ctx, _dtypes.float32)
  flat_inputs = [gradients, features]
  op_attrs = ("alpha", alpha, "T", attr_t)
  outputs = _execute.execute(b"LeakyReluGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "LeakyReluGrad", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def log_softmax(logits, name=None):
  r"""Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      2-D with shape `[batch_size, num_classes]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """
  # Use the already-installed context if any; otherwise create one lazily.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "LogSoftmax", name, _ctx.post_execution_callbacks, logits)
      return _result
    except _core._FallbackException:
      try:
        # Eager slow path: resolve the dtype attr in Python, then execute.
        return log_softmax_eager_fallback(
            logits, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the underlying status as the matching Python exception,
      # appending the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "LogSoftmax", logits=logits, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "LogSoftmax", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def LogSoftmax(logits, name=None):
  # Raw-op alias: forwards every argument to log_softmax unchanged.
  return log_softmax(logits=logits, name=name)
# Reuse log_softmax's docstring so both entry points document the same
# contract.
LogSoftmax.__doc__ = log_softmax.__doc__
# _kwarg_only wraps the callable (presumably enforcing keyword-only calls —
# confirm at its definition); do_not_generate_docs hides it from the API docs.
LogSoftmax = _doc_controls.do_not_generate_docs(_kwarg_only(LogSoftmax))
tf_export("raw_ops.LogSoftmax")(LogSoftmax)
def log_softmax_eager_fallback(logits, name=None, ctx=None):
  r"""Slow-path eager implementation of log_softmax.

  Resolves the "T" dtype attr in Python and runs the LogSoftmax op via
  _execute.
  """
  run_ctx = ctx if ctx else _context.context()
  # Infer the dtype attr from the single input tensor (no default dtype).
  attr_t, (logits,) = _execute.args_to_matching_eager([logits], run_ctx)
  flat_inputs = [logits]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"LogSoftmax", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "LogSoftmax", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def max_pool(input, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Performs max pooling on the input.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.
      4-D input to pool over.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Use the already-installed context if any; otherwise create one lazily.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "MaxPool",
        name, _ctx.post_execution_callbacks, input, "ksize", ksize, "strides",
        strides, "padding", padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      try:
        # Eager slow path: validate/normalize attrs in Python, then execute.
        return max_pool_eager_fallback(
            input, ksize=ksize, strides=strides, padding=padding,
            data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the underlying status as the matching Python exception,
      # appending the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate list-valued attrs, then coerce all attr values.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPool", input=input, ksize=ksize, strides=strides, padding=padding,
                   data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read attrs back off the created op so the recorded gradient sees the
  # canonicalized values.
  _attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"),
            "strides", _op.get_attr("strides"), "padding",
            _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"))
  _execute.record_gradient(
      "MaxPool", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def MaxPool(input, ksize, strides, padding, data_format="NHWC", name=None):
  # Raw-op alias: forwards every argument to max_pool unchanged.
  return max_pool(input=input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
# Reuse max_pool's docstring so both entry points document the same contract.
MaxPool.__doc__ = max_pool.__doc__
# _kwarg_only wraps the callable (presumably enforcing keyword-only calls —
# confirm at its definition); do_not_generate_docs hides it from the API docs.
MaxPool = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPool))
tf_export("raw_ops.MaxPool")(MaxPool)
def max_pool_eager_fallback(input, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""Slow-path eager implementation of max_pool.

  Validates and normalizes attrs in Python and runs the MaxPool op via
  _execute.
  """
  run_ctx = ctx if ctx else _context.context()
  # The window and stride attrs must be real Python sequences of ints.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(dim, "ksize") for dim in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool' Op, not %r." % strides)
  strides = [_execute.make_int(dim, "strides") for dim in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NHWC" if data_format is None else data_format, "data_format")
  # The dtype attr falls back to float32 when it cannot be inferred.
  attr_t, (input,) = _execute.args_to_matching_eager(
      [input], run_ctx, _dtypes.float32)
  flat_inputs = [input]
  op_attrs = ("T", attr_t, "ksize", ksize, "strides", strides, "padding",
              padding, "data_format", data_format)
  outputs = _execute.execute(b"MaxPool", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "MaxPool", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Performs 3D max pooling on the input.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Use the already-installed context if any; otherwise create one lazily.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPool3D", name, _ctx.post_execution_callbacks, input, "ksize",
        ksize, "strides", strides, "padding", padding, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      try:
        # Eager slow path: validate/normalize attrs in Python, then execute.
        return max_pool3d_eager_fallback(
            input, ksize=ksize, strides=strides, padding=padding,
            data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the underlying status as the matching Python exception,
      # appending the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate list-valued attrs, then coerce all attr values.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPool3D", input=input, ksize=ksize, strides=strides,
                     padding=padding, data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read attrs back off the created op so the recorded gradient sees the
  # canonicalized values.
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPool3D", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def MaxPool3D(input, ksize, strides, padding, data_format="NDHWC", name=None):
  # Raw-op alias: forwards every argument to max_pool3d unchanged.
  return max_pool3d(input=input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
# Reuse max_pool3d's docstring so both entry points document the same
# contract.
MaxPool3D.__doc__ = max_pool3d.__doc__
# _kwarg_only wraps the callable (presumably enforcing keyword-only calls —
# confirm at its definition); do_not_generate_docs hides it from the API docs.
MaxPool3D = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPool3D))
tf_export("raw_ops.MaxPool3D")(MaxPool3D)
def max_pool3d_eager_fallback(input, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
  r"""Slow-path eager implementation of max_pool3d.

  Validates and normalizes attrs in Python and runs the MaxPool3D op via
  _execute.
  """
  run_ctx = ctx if ctx else _context.context()
  # The window and stride attrs must be real Python sequences of ints.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d' Op, not %r." % ksize)
  ksize = [_execute.make_int(dim, "ksize") for dim in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d' Op, not %r." % strides)
  strides = [_execute.make_int(dim, "strides") for dim in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NDHWC" if data_format is None else data_format, "data_format")
  # Infer the dtype attr from the input tensor (no default dtype here).
  attr_t, (input,) = _execute.args_to_matching_eager([input], run_ctx)
  flat_inputs = [input]
  op_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
              "data_format", data_format, "T", attr_t)
  outputs = _execute.execute(b"MaxPool3D", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "MaxPool3D", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def max_pool3d_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Computes gradients of max pooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
      Output backprop of shape `[batch, depth, rows, cols, channels]`.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
          [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
          [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  # Use the already-installed context if any; otherwise create one lazily.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPool3DGrad", name, _ctx.post_execution_callbacks, orig_input,
        orig_output, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      try:
        # Eager slow path: validate/normalize attrs in Python, then execute.
        return max_pool3d_grad_eager_fallback(
            orig_input, orig_output, grad, ksize=ksize, strides=strides,
            padding=padding, data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the underlying status as the matching Python exception,
      # appending the op name for easier debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate list-valued attrs, then coerce all attr values.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPool3DGrad", orig_input=orig_input, orig_output=orig_output,
                         grad=grad, ksize=ksize, strides=strides,
                         padding=padding, data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read attrs back off the created op; note this op carries two dtype
  # attrs — "T" for grad and "TInput" for the original input/output pair.
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"), "TInput", _op._get_attr_type("TInput"))
  _execute.record_gradient(
      "MaxPool3DGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def MaxPool3DGrad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  # Raw-op alias: forwards every argument to max_pool3d_grad unchanged.
  return max_pool3d_grad(orig_input=orig_input, orig_output=orig_output, grad=grad, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
# Reuse max_pool3d_grad's docstring so both entry points document the same
# contract.
MaxPool3DGrad.__doc__ = max_pool3d_grad.__doc__
# _kwarg_only wraps the callable (presumably enforcing keyword-only calls —
# confirm at its definition); do_not_generate_docs hides it from the API docs.
MaxPool3DGrad = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPool3DGrad))
tf_export("raw_ops.MaxPool3DGrad")(MaxPool3DGrad)
def max_pool3d_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
  r"""Slow-path eager implementation of max_pool3d_grad.

  Validates and normalizes attrs in Python and runs the MaxPool3DGrad op via
  _execute.
  """
  run_ctx = ctx if ctx else _context.context()
  # The window and stride attrs must be real Python sequences of ints.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(dim, "ksize") for dim in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d_grad' Op, not %r." % strides)
  strides = [_execute.make_int(dim, "strides") for dim in strides]
  padding = _execute.make_str(padding, "padding")
  data_format = _execute.make_str(
      "NDHWC" if data_format is None else data_format, "data_format")
  # Two independent dtype attrs: "T" for grad, "TInput" for the original
  # input/output pair; both default to float32 when not inferable.
  attr_t, (grad,) = _execute.args_to_matching_eager(
      [grad], run_ctx, _dtypes.float32)
  attr_t_input, (orig_input, orig_output) = _execute.args_to_matching_eager(
      [orig_input, orig_output], run_ctx, _dtypes.float32)
  flat_inputs = [orig_input, orig_output, grad]
  op_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
              "data_format", data_format, "T", attr_t, "TInput", attr_t_input)
  outputs = _execute.execute(b"MaxPool3DGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=run_ctx, name=name)
  _execute.record_gradient(
      "MaxPool3DGrad", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
def max_pool3d_grad_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  r"""Computes second-order gradients of the maxpooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      Output backprop of shape `[batch, depth, rows, cols, channels]`.
    ksize: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The size of the window for each dimension of
      the input tensor. Must have `ksize[0] = ksize[4] = 1`.
    strides: A list of `ints` that has length `>= 5`.
      1-D tensor of length 5. The stride of the sliding window for each
      dimension of `input`. Must have `strides[0] = strides[4] = 1`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
      The data format of the input and output data. With the
      default format "NDHWC", the data is stored in the order of:
      [batch, in_depth, in_height, in_width, in_channels].
      Alternatively, the format could be "NCDHW", the data storage order is:
      [batch, in_channels, in_depth, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute via the C extension, skipping Python-level
      # attribute validation.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPool3DGradGrad", name, _ctx.post_execution_callbacks, orig_input,
        orig_output, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry with Python-side coercion.
        return max_pool3d_grad_grad_eager_fallback(
            orig_input, orig_output, grad, ksize=ksize, strides=strides,
            padding=padding, data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception,
      # annotated with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attributes in Python, then add a node to the graph.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d_grad_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d_grad_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPool3DGradGrad", orig_input=orig_input, orig_output=orig_output,
                             grad=grad, ksize=ksize, strides=strides,
                             padding=padding, data_format=data_format,
                             name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Attrs for the gradient recorder are re-read from the created op, not
  # from the Python locals.
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPool3DGradGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op wrapper: keyword-only alias of max_pool3d_grad_grad, exported as
# tf.raw_ops.MaxPool3DGradGrad and excluded from generated API documentation.
def MaxPool3DGradGrad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None):
  return max_pool3d_grad_grad(orig_input=orig_input, orig_output=orig_output, grad=grad, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
MaxPool3DGradGrad.__doc__ = max_pool3d_grad_grad.__doc__
MaxPool3DGradGrad = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPool3DGradGrad))
tf_export("raw_ops.MaxPool3DGradGrad")(MaxPool3DGradGrad)
def max_pool3d_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None, ctx=None):
  r"""Eager-mode slow path for max_pool3d_grad_grad.

  Validates and coerces the attribute arguments in Python, then executes the
  MaxPool3DGradGrad op directly via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool3d_grad_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool3d_grad_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NDHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # All three tensor inputs share the single dtype attr T (no default here,
  # unlike the first-order MaxPool3DGrad fallback).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], _ctx)
  (orig_input, orig_output, grad) = _inputs_T
  _inputs_flat = [orig_input, orig_output, grad]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
            "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"MaxPool3DGradGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPool3DGradGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def max_pool_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes gradients of the maxpooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      4-D. Gradients w.r.t. the output of `max_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
      [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute via the C extension, skipping Python-level
      # attribute validation.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPoolGrad", name, _ctx.post_execution_callbacks, orig_input,
        orig_output, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry with Python-side coercion.
        return max_pool_grad_eager_fallback(
            orig_input, orig_output, grad, ksize=ksize, strides=strides,
            padding=padding, data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception,
      # annotated with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attributes in Python, then add a node to the graph.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGrad", orig_input=orig_input, orig_output=orig_output,
                       grad=grad, ksize=ksize, strides=strides,
                       padding=padding, data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Attrs for the gradient recorder are re-read from the created op, not
  # from the Python locals.
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPoolGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op wrapper: keyword-only alias of max_pool_grad, exported as
# tf.raw_ops.MaxPoolGrad and excluded from generated API documentation.
def MaxPoolGrad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  return max_pool_grad(orig_input=orig_input, orig_output=orig_output, grad=grad, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
MaxPoolGrad.__doc__ = max_pool_grad.__doc__
MaxPoolGrad = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPoolGrad))
tf_export("raw_ops.MaxPoolGrad")(MaxPoolGrad)
def max_pool_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""Eager-mode slow path for max_pool_grad.

  Validates and coerces the attribute arguments in Python, then executes the
  MaxPoolGrad op directly via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # All three tensor inputs share dtype attr T, defaulting to float32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], _ctx, _dtypes.float32)
  (orig_input, orig_output, grad) = _inputs_T
  _inputs_flat = [orig_input, orig_output, grad]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
            "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"MaxPoolGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def max_pool_grad_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes second-order gradients of the maxpooling function.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      4-D. Gradients of gradients w.r.t. the input of `max_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
      [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute via the C extension, skipping Python-level
      # attribute validation.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPoolGradGrad", name, _ctx.post_execution_callbacks, orig_input,
        orig_output, grad, "ksize", ksize, "strides", strides, "padding",
        padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry with Python-side coercion.
        return max_pool_grad_grad_eager_fallback(
            orig_input, orig_output, grad, ksize=ksize, strides=strides,
            padding=padding, data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception,
      # annotated with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attributes in Python, then add a node to the graph.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradGrad", orig_input=orig_input, orig_output=orig_output,
                           grad=grad, ksize=ksize, strides=strides,
                           padding=padding, data_format=data_format,
                           name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Attrs for the gradient recorder are re-read from the created op, not
  # from the Python locals.
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPoolGradGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op wrapper: keyword-only alias of max_pool_grad_grad, exported as
# tf.raw_ops.MaxPoolGradGrad and excluded from generated API documentation.
def MaxPoolGradGrad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  return max_pool_grad_grad(orig_input=orig_input, orig_output=orig_output, grad=grad, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
MaxPoolGradGrad.__doc__ = max_pool_grad_grad.__doc__
MaxPoolGradGrad = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPoolGradGrad))
tf_export("raw_ops.MaxPoolGradGrad")(MaxPoolGradGrad)
def max_pool_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""Eager-mode slow path for max_pool_grad_grad.

  Validates and coerces the attribute arguments in Python, then executes the
  MaxPoolGradGrad op directly via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_grad' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_grad' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # All three tensor inputs share dtype attr T (no default dtype here).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], _ctx)
  (orig_input, orig_output, grad) = _inputs_T
  _inputs_flat = [orig_input, orig_output, grad]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
            "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"MaxPoolGradGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGradGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes second-order gradients of the maxpooling function.

  Unlike MaxPoolGradGrad, the V2 op takes `ksize` and `strides` as int32
  tensors (inputs) rather than as op attributes.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      4-D. Gradients of gradients w.r.t. the input of `max_pool`.
    ksize: A `Tensor` of type `int32`.
      The size of the window for each dimension of the input tensor.
    strides: A `Tensor` of type `int32`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
      [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute via the C extension; ksize/strides are
      # passed as tensor inputs, not attrs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPoolGradGradV2", name, _ctx.post_execution_callbacks, orig_input,
        orig_output, grad, ksize, strides, "padding", padding, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry with Python-side coercion.
        return max_pool_grad_grad_v2_eager_fallback(
            orig_input, orig_output, grad, ksize, strides, padding=padding,
            data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception,
      # annotated with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate string attributes, then add a node to the graph.
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradGradV2", orig_input=orig_input, orig_output=orig_output,
                             grad=grad, ksize=ksize, strides=strides,
                             padding=padding, data_format=data_format,
                             name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Attrs for the gradient recorder are re-read from the created op.
  _attrs = ("padding", _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPoolGradGradV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op wrapper: keyword-only alias of max_pool_grad_grad_v2, exported as
# tf.raw_ops.MaxPoolGradGradV2 and excluded from generated API documentation.
def MaxPoolGradGradV2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  return max_pool_grad_grad_v2(orig_input=orig_input, orig_output=orig_output, grad=grad, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
MaxPoolGradGradV2.__doc__ = max_pool_grad_grad_v2.__doc__
MaxPoolGradGradV2 = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPoolGradGradV2))
tf_export("raw_ops.MaxPoolGradGradV2")(MaxPoolGradGradV2)
def max_pool_grad_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""Eager-mode slow path for max_pool_grad_grad_v2.

  Coerces attributes in Python, converts `ksize`/`strides` to int32 tensor
  inputs, then executes the MaxPoolGradGradV2 op via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], _ctx)
  (orig_input, orig_output, grad) = _inputs_T
  # In the V2 op, ksize and strides are tensor inputs, not attrs.
  ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
  strides = _ops.convert_to_tensor(strides, _dtypes.int32)
  _inputs_flat = [orig_input, orig_output, grad, ksize, strides]
  _attrs = ("padding", padding, "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"MaxPoolGradGradV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGradGradV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def max_pool_grad_grad_with_argmax(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None):
  r"""Computes second-order gradients of the maxpooling function.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input.
    grad: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
      input of `max_pool`.
    argmax: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The indices of the maximum values chosen for each output of `max_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    include_batch_in_index: An optional `bool`. Defaults to `False`.
      Whether to include batch dimension in flattened index of `argmax`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute via the C extension, skipping Python-level
      # attribute validation.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPoolGradGradWithArgmax", name, _ctx.post_execution_callbacks,
        input, grad, argmax, "ksize", ksize, "strides", strides, "padding",
        padding, "include_batch_in_index", include_batch_in_index)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry with Python-side coercion.
        return max_pool_grad_grad_with_argmax_eager_fallback(
            input, grad, argmax, ksize=ksize, strides=strides,
            padding=padding, include_batch_in_index=include_batch_in_index,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception,
      # annotated with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attributes in Python, then add a node to the graph.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_grad_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_grad_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if include_batch_in_index is None:
    include_batch_in_index = False
  include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradGradWithArgmax", input=input, grad=grad, argmax=argmax,
                                     ksize=ksize, strides=strides,
                                     padding=padding,
                                     include_batch_in_index=include_batch_in_index,
                                     name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Attrs for the gradient recorder are re-read from the created op, not
  # from the Python locals.
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "include_batch_in_index", _op.get_attr("include_batch_in_index"),
            "Targmax", _op._get_attr_type("Targmax"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op wrapper: keyword-only alias of max_pool_grad_grad_with_argmax,
# exported as tf.raw_ops.MaxPoolGradGradWithArgmax and excluded from
# generated API documentation.
def MaxPoolGradGradWithArgmax(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None):
  return max_pool_grad_grad_with_argmax(input=input, grad=grad, argmax=argmax, ksize=ksize, strides=strides, padding=padding, include_batch_in_index=include_batch_in_index, name=name)
MaxPoolGradGradWithArgmax.__doc__ = max_pool_grad_grad_with_argmax.__doc__
MaxPoolGradGradWithArgmax = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPoolGradGradWithArgmax))
tf_export("raw_ops.MaxPoolGradGradWithArgmax")(MaxPoolGradGradWithArgmax)
def max_pool_grad_grad_with_argmax_eager_fallback(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None, ctx=None):
  r"""Eager-mode slow path for max_pool_grad_grad_with_argmax.

  Validates and coerces the attribute arguments in Python, then executes the
  MaxPoolGradGradWithArgmax op directly via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_grad_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_grad_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if include_batch_in_index is None:
    include_batch_in_index = False
  include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
  # `argmax` carries its own integer dtype attr (Targmax); `input` and
  # `grad` share dtype attr T.
  _attr_Targmax, (argmax,) = _execute.args_to_matching_eager([argmax], _ctx)
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, grad], _ctx)
  (input, grad) = _inputs_T
  _inputs_flat = [input, grad, argmax]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
            "include_batch_in_index", include_batch_in_index, "Targmax", _attr_Targmax,
            "T", _attr_T)
  _result = _execute.execute(b"MaxPoolGradGradWithArgmax", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Computes gradients of the maxpooling function.

  Unlike MaxPoolGrad, the V2 op takes `ksize` and `strides` as int32
  tensors (inputs) rather than as op attributes.

  Args:
    orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input tensor.
    orig_output: A `Tensor`. Must have the same type as `orig_input`.
      The original output tensor.
    grad: A `Tensor`. Must have the same type as `orig_input`.
      4-D. Gradients w.r.t. the output of `max_pool`.
    ksize: A `Tensor` of type `int32`.
      The size of the window for each dimension of the input tensor.
    strides: A `Tensor` of type `int32`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
      [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
      [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `orig_input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute via the C extension; ksize/strides are
      # passed as tensor inputs, not attrs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPoolGradV2", name, _ctx.post_execution_callbacks, orig_input,
        orig_output, grad, ksize, strides, "padding", padding, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry with Python-side coercion.
        return max_pool_grad_v2_eager_fallback(
            orig_input, orig_output, grad, ksize, strides, padding=padding,
            data_format=data_format, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception,
      # annotated with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate string attributes, then add a node to the graph.
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradV2", orig_input=orig_input, orig_output=orig_output,
                         grad=grad, ksize=ksize, strides=strides,
                         padding=padding, data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Attrs for the gradient recorder are re-read from the created op.
  _attrs = ("padding", _op.get_attr("padding"), "data_format",
            _op.get_attr("data_format"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPoolGradV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Raw-op wrapper: keyword-only alias of max_pool_grad_v2, exported as
# tf.raw_ops.MaxPoolGradV2 and excluded from generated API documentation.
def MaxPoolGradV2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
  return max_pool_grad_v2(orig_input=orig_input, orig_output=orig_output, grad=grad, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)
MaxPoolGradV2.__doc__ = max_pool_grad_v2.__doc__
MaxPoolGradV2 = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPoolGradV2))
tf_export("raw_ops.MaxPoolGradV2")(MaxPoolGradV2)
def max_pool_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""Eager-mode slow path for max_pool_grad_v2.

  Coerces attributes in Python, converts `ksize`/`strides` to int32 tensor
  inputs, then executes the MaxPoolGradV2 op via `_execute.execute`.
  """
  _ctx = ctx if ctx else _context.context()
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # All three tensor inputs share dtype attr T, defaulting to float32.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], _ctx, _dtypes.float32)
  (orig_input, orig_output, grad) = _inputs_T
  # In the V2 op, ksize and strides are tensor inputs, not attrs.
  ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
  strides = _ops.convert_to_tensor(strides, _dtypes.int32)
  _inputs_flat = [orig_input, orig_output, grad, ksize, strides]
  _attrs = ("padding", padding, "data_format", data_format, "T", _attr_T)
  _result = _execute.execute(b"MaxPoolGradV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGradV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def max_pool_grad_with_argmax(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None):
  r"""Computes gradients of the maxpooling function.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The original input.
    grad: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
      output of `max_pool`.
    argmax: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The indices of the maximum values chosen for each output of `max_pool`.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    include_batch_in_index: An optional `bool`. Defaults to `False`.
      Whether to include batch dimension in flattened index of `argmax`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      # Eager fast path: execute via the C extension, skipping Python-level
      # attribute validation.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPoolGradWithArgmax", name, _ctx.post_execution_callbacks, input,
        grad, argmax, "ksize", ksize, "strides", strides, "padding", padding,
        "include_batch_in_index", include_batch_in_index)
      return _result
    except _core._FallbackException:
      try:
        # Fast path rejected the inputs; retry with Python-side coercion.
        return max_pool_grad_with_argmax_eager_fallback(
            input, grad, argmax, ksize=ksize, strides=strides,
            padding=padding, include_batch_in_index=include_batch_in_index,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception,
      # annotated with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode: validate attributes in Python, then add a node to the graph.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if include_batch_in_index is None:
    include_batch_in_index = False
  include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolGradWithArgmax", input=input, grad=grad, argmax=argmax,
                                 ksize=ksize, strides=strides,
                                 padding=padding,
                                 include_batch_in_index=include_batch_in_index,
                                 name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Attrs for the gradient recorder are re-read from the created op, not
  # from the Python locals.
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "include_batch_in_index", _op.get_attr("include_batch_in_index"),
            "Targmax", _op._get_attr_type("Targmax"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def MaxPoolGradWithArgmax(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None):
  # Thin raw-op shim: forward every argument by keyword to the snake_case op.
  call_kwargs = dict(input=input, grad=grad, argmax=argmax, ksize=ksize,
                     strides=strides, padding=padding,
                     include_batch_in_index=include_batch_in_index, name=name)
  return max_pool_grad_with_argmax(**call_kwargs)
# Mirror the docstring, force keyword-only calling, hide from generated docs,
# and register the symbol under the tf.raw_ops export namespace.
MaxPoolGradWithArgmax.__doc__ = max_pool_grad_with_argmax.__doc__
MaxPoolGradWithArgmax = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPoolGradWithArgmax))
tf_export("raw_ops.MaxPoolGradWithArgmax")(MaxPoolGradWithArgmax)
def max_pool_grad_with_argmax_eager_fallback(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_grad_with_argmax
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the list/scalar attributes before execution.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_grad_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_grad_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if include_batch_in_index is None:
    include_batch_in_index = False
  include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
  # Infer dtype attrs from the concrete eager inputs: Targmax from `argmax`,
  # then T jointly from `input` and `grad` (they must match).
  _attr_Targmax, (argmax,) = _execute.args_to_matching_eager([argmax], _ctx)
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, grad], _ctx)
  (input, grad) = _inputs_T
  _inputs_flat = [input, grad, argmax]
  _attrs = ("ksize", ksize, "strides", strides, "padding", padding,
  "include_batch_in_index", include_batch_in_index, "Targmax", _attr_Targmax,
  "T", _attr_T)
  # Execute eagerly; this op has exactly one output tensor.
  _result = _execute.execute(b"MaxPoolGradWithArgmax", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def max_pool_v2(input, ksize, strides, padding, data_format="NHWC", name=None):
  r"""Performs max pooling on the input.
  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.
      4-D input to pool over.
    ksize: A `Tensor` of type `int32`.
      The size of the window for each dimension of the input tensor.
    strides: A `Tensor` of type `int32`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: dispatch directly through the C fast-path API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPoolV2", name, _ctx.post_execution_callbacks, input, ksize,
        strides, "padding", padding, "data_format", data_format)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the slower eager path.
      try:
        return max_pool_v2_eager_fallback(
            input, ksize, strides, padding=padding, data_format=data_format,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolV2", input=input, ksize=ksize, strides=strides,
                     padding=padding, data_format=data_format, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "padding", _op.get_attr("padding"),
            "data_format", _op.get_attr("data_format"))
  _execute.record_gradient(
      "MaxPoolV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def MaxPoolV2(input, ksize, strides, padding, data_format="NHWC", name=None):
  # Thin raw-op shim: forward every argument by keyword to the snake_case op.
  call_kwargs = dict(input=input, ksize=ksize, strides=strides,
                     padding=padding, data_format=data_format, name=name)
  return max_pool_v2(**call_kwargs)
# Mirror the docstring, force keyword-only calling, hide from generated docs,
# and register the symbol under the tf.raw_ops export namespace.
MaxPoolV2.__doc__ = max_pool_v2.__doc__
MaxPoolV2 = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPoolV2))
tf_export("raw_ops.MaxPoolV2")(MaxPoolV2)
def max_pool_v2_eager_fallback(input, ksize, strides, padding, data_format="NHWC", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_v2
  """
  _ctx = ctx if ctx else _context.context()
  # Canonicalize string attributes, applying the documented default.
  padding = _execute.make_str(padding, "padding")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  # Infer T from `input` (float32 default); ksize/strides are int32 tensors.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.float32)
  ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
  strides = _ops.convert_to_tensor(strides, _dtypes.int32)
  _inputs_flat = [input, ksize, strides]
  _attrs = ("T", _attr_T, "padding", padding, "data_format", data_format)
  # Execute eagerly; this op has exactly one output tensor.
  _result = _execute.execute(b"MaxPoolV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Result container for the two outputs (output, argmax) of MaxPoolWithArgmax.
_max_pool_with_argmax_outputs = ["output", "argmax"]
_MaxPoolWithArgmaxOutput = _collections.namedtuple(
    "MaxPoolWithArgmax", _max_pool_with_argmax_outputs)
def max_pool_with_argmax(input, ksize, strides, padding, Targmax=_dtypes.int64, include_batch_in_index=False, name=None):
  r"""Performs max pooling on the input and outputs both max values and indices.
  The indices in `argmax` are flattened, so that a maximum value at position
  `[b, y, x, c]` becomes flattened index:
  `(y * width + x) * channels + c` if `include_batch_in_index` is False;
  `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
  The indices returned are always in `[0, height) x [0, width)` before flattening,
  even if padding is involved and the mathematically correct answer is outside
  (either negative or too large).  This is a bug, but fixing it is difficult to do
  in a safe backwards compatible way, especially due to flattening.
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, height, width, channels]`.  Input to pool over.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    Targmax: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
    include_batch_in_index: An optional `bool`. Defaults to `False`.
      Whether to include batch dimension in flattened index of `argmax`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, argmax).
    output: A `Tensor`. Has the same type as `input`.
    argmax: A `Tensor` of type `Targmax`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: dispatch directly through the C fast-path API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "MaxPoolWithArgmax", name, _ctx.post_execution_callbacks, input,
        "ksize", ksize, "strides", strides, "Targmax", Targmax, "padding",
        padding, "include_batch_in_index", include_batch_in_index)
      _result = _MaxPoolWithArgmaxOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the slower eager path.
      try:
        return max_pool_with_argmax_eager_fallback(
            input, ksize=ksize, strides=strides, Targmax=Targmax,
            padding=padding, include_batch_in_index=include_batch_in_index,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if Targmax is None:
    Targmax = _dtypes.int64
  Targmax = _execute.make_type(Targmax, "Targmax")
  if include_batch_in_index is None:
    include_batch_in_index = False
  include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
  _, _, _op = _op_def_lib._apply_op_helper(
        "MaxPoolWithArgmax", input=input, ksize=ksize, strides=strides,
                             padding=padding, Targmax=Targmax,
                             include_batch_in_index=include_batch_in_index,
                             name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("ksize", _op.get_attr("ksize"), "strides",
            _op.get_attr("strides"), "Targmax", _op._get_attr_type("Targmax"),
            "padding", _op.get_attr("padding"), "include_batch_in_index",
            _op.get_attr("include_batch_in_index"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "MaxPoolWithArgmax", _inputs_flat, _attrs, _result, name)
  _result = _MaxPoolWithArgmaxOutput._make(_result)
  return _result
def MaxPoolWithArgmax(input, ksize, strides, padding, Targmax=_dtypes.int64, include_batch_in_index=False, name=None):
  # Thin raw-op shim: forward every argument by keyword to the snake_case op.
  call_kwargs = dict(input=input, ksize=ksize, strides=strides,
                     padding=padding, Targmax=Targmax,
                     include_batch_in_index=include_batch_in_index, name=name)
  return max_pool_with_argmax(**call_kwargs)
# Mirror the docstring, force keyword-only calling, hide from generated docs,
# and register the symbol under the tf.raw_ops export namespace.
MaxPoolWithArgmax.__doc__ = max_pool_with_argmax.__doc__
MaxPoolWithArgmax = _doc_controls.do_not_generate_docs(_kwarg_only(MaxPoolWithArgmax))
tf_export("raw_ops.MaxPoolWithArgmax")(MaxPoolWithArgmax)
def max_pool_with_argmax_eager_fallback(input, ksize, strides, padding, Targmax=_dtypes.int64, include_batch_in_index=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function max_pool_with_argmax
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the list/scalar attributes before execution.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'max_pool_with_argmax' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'max_pool_with_argmax' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if Targmax is None:
    Targmax = _dtypes.int64
  Targmax = _execute.make_type(Targmax, "Targmax")
  if include_batch_in_index is None:
    include_batch_in_index = False
  include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
  # Infer T from the concrete eager input.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("ksize", ksize, "strides", strides, "Targmax", Targmax, "padding",
  padding, "include_batch_in_index", include_batch_in_index, "T", _attr_T)
  # Execute eagerly; this op has two outputs (output, argmax).
  _result = _execute.execute(b"MaxPoolWithArgmax", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "MaxPoolWithArgmax", _inputs_flat, _attrs, _result, name)
  _result = _MaxPoolWithArgmaxOutput._make(_result)
  return _result
def nth_element(input, n, reverse=False, name=None):
  r"""Finds values of the `n`-th order statistic for the last dimension.
  If the input is a vector (rank-1), finds the entries which is the nth-smallest
  value in the vector and outputs their values as scalar tensor.
  For matrices (resp. higher rank input), computes the entries which is the
  nth-smallest value in each row (resp. vector along the last dimension). Thus,
      values.shape = input.shape[:-1]
  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D or higher with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.
      0-D. Position of sorted vector to select along the last dimension (along
      each row for matrices). Valid range of n is `[0, input.shape[:-1])`
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector and vice
      versa.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: dispatch directly through the C fast-path API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "NthElement", name, _ctx.post_execution_callbacks, input, n,
        "reverse", reverse)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the slower eager path.
      try:
        return nth_element_eager_fallback(
            input, n, reverse=reverse, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if reverse is None:
    reverse = False
  reverse = _execute.make_bool(reverse, "reverse")
  _, _, _op = _op_def_lib._apply_op_helper(
        "NthElement", input=input, n=n, reverse=reverse, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("reverse", _op.get_attr("reverse"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "NthElement", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def NthElement(input, n, reverse=False, name=None):
  # Thin raw-op shim: forward every argument by keyword to the snake_case op.
  call_kwargs = dict(input=input, n=n, reverse=reverse, name=name)
  return nth_element(**call_kwargs)
# Mirror the docstring, force keyword-only calling, hide from generated docs,
# and register the symbol under the tf.raw_ops export namespace.
NthElement.__doc__ = nth_element.__doc__
NthElement = _doc_controls.do_not_generate_docs(_kwarg_only(NthElement))
tf_export("raw_ops.NthElement")(NthElement)
def nth_element_eager_fallback(input, n, reverse=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function nth_element
  """
  _ctx = ctx if ctx else _context.context()
  # Canonicalize the boolean attribute, applying the documented default.
  if reverse is None:
    reverse = False
  reverse = _execute.make_bool(reverse, "reverse")
  # Infer T from `input`; `n` is always an int32 scalar tensor.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  n = _ops.convert_to_tensor(n, _dtypes.int32)
  _inputs_flat = [input, n]
  _attrs = ("reverse", reverse, "T", _attr_T)
  # Execute eagerly; this op has exactly one output tensor.
  _result = _execute.execute(b"NthElement", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "NthElement", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
# Result container for the three outputs of QuantizedAvgPool.
_quantized_avg_pool_outputs = ["output", "min_output", "max_output"]
_QuantizedAvgPoolOutput = _collections.namedtuple(
    "QuantizedAvgPool", _quantized_avg_pool_outputs)
def quantized_avg_pool(input, min_input, max_input, ksize, strides, padding, name=None):
  r"""Produces the average pool of the input tensor for quantized types.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      4-D with shape `[batch, height, width, channels]`.
    min_input: A `Tensor` of type `float32`.
      The float value that the lowest quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the highest quantized input value represents.
    ksize: A list of `ints`.
      The size of the window for each dimension of the input tensor.
      The length must be 4 to match the number of dimensions of the input.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      tensor.  The length must be 4 to match the number of dimensions of the input.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor`. Has the same type as `input`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: dispatch directly through the C fast-path API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedAvgPool", name, _ctx.post_execution_callbacks, input,
        min_input, max_input, "ksize", ksize, "strides", strides, "padding",
        padding)
      _result = _QuantizedAvgPoolOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the slower eager path.
      try:
        return quantized_avg_pool_eager_fallback(
            input, min_input, max_input, ksize=ksize, strides=strides,
            padding=padding, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'quantized_avg_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_avg_pool' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedAvgPool", input=input, min_input=min_input,
                            max_input=max_input, ksize=ksize, strides=strides,
                            padding=padding, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"),
            "strides", _op.get_attr("strides"), "padding",
            _op.get_attr("padding"))
  _execute.record_gradient(
      "QuantizedAvgPool", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedAvgPoolOutput._make(_result)
  return _result
def QuantizedAvgPool(input, min_input, max_input, ksize, strides, padding, name=None):
  # Thin raw-op shim: forward every argument by keyword to the snake_case op.
  call_kwargs = dict(input=input, min_input=min_input, max_input=max_input,
                     ksize=ksize, strides=strides, padding=padding, name=name)
  return quantized_avg_pool(**call_kwargs)
# Mirror the docstring, force keyword-only calling, hide from generated docs,
# and register the symbol under the tf.raw_ops export namespace.
QuantizedAvgPool.__doc__ = quantized_avg_pool.__doc__
QuantizedAvgPool = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedAvgPool))
tf_export("raw_ops.QuantizedAvgPool")(QuantizedAvgPool)
def quantized_avg_pool_eager_fallback(input, min_input, max_input, ksize, strides, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_avg_pool
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the list/scalar attributes before execution.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'quantized_avg_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_avg_pool' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  # Infer the quantized dtype T from `input`; the range bounds are float32.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  _inputs_flat = [input, min_input, max_input]
  _attrs = ("T", _attr_T, "ksize", ksize, "strides", strides, "padding",
  padding)
  # Execute eagerly; this op has three outputs (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedAvgPool", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedAvgPool", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedAvgPoolOutput._make(_result)
  return _result
# Result container for the three outputs of QuantizedBatchNormWithGlobalNormalization.
_quantized_batch_norm_with_global_normalization_outputs = ["result",
                                                          "result_min",
                                                          "result_max"]
_QuantizedBatchNormWithGlobalNormalizationOutput = _collections.namedtuple(
    "QuantizedBatchNormWithGlobalNormalization",
    _quantized_batch_norm_with_global_normalization_outputs)
def quantized_batch_norm_with_global_normalization(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name=None):
  r"""Quantized Batch normalization.
  This op is deprecated and will be removed in the future. Prefer
  `tf.nn.batch_normalization`.
  Args:
    t: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A 4D input Tensor.
    t_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized input.
    t_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized input.
    m: A `Tensor`. Must have the same type as `t`.
      A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    m_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized mean.
    m_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized mean.
    v: A `Tensor`. Must have the same type as `t`.
      A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    v_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized variance.
    v_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized variance.
    beta: A `Tensor`. Must have the same type as `t`.
      A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    beta_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized offset.
    beta_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized offset.
    gamma: A `Tensor`. Must have the same type as `t`.
      A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    gamma_min: A `Tensor` of type `float32`.
      The value represented by the lowest quantized gamma.
    gamma_max: A `Tensor` of type `float32`.
      The value represented by the highest quantized gamma.
    out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
    variance_epsilon: A `float`. A small float number to avoid dividing by 0.
    scale_after_normalization: A `bool`.
      A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (result, result_min, result_max).
    result: A `Tensor` of type `out_type`.
    result_min: A `Tensor` of type `float32`.
    result_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: dispatch directly through the C fast-path API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedBatchNormWithGlobalNormalization", name,
        _ctx.post_execution_callbacks, t, t_min, t_max, m, m_min, m_max, v,
        v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
        "out_type", out_type, "variance_epsilon", variance_epsilon,
        "scale_after_normalization", scale_after_normalization)
      _result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the slower eager path.
      try:
        return quantized_batch_norm_with_global_normalization_eager_fallback(
            t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min,
            beta_max, gamma, gamma_min, gamma_max, out_type=out_type,
            variance_epsilon=variance_epsilon,
            scale_after_normalization=scale_after_normalization, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  out_type = _execute.make_type(out_type, "out_type")
  variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
  scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedBatchNormWithGlobalNormalization", t=t, t_min=t_min,
                                                     t_max=t_max, m=m,
                                                     m_min=m_min, m_max=m_max,
                                                     v=v, v_min=v_min,
                                                     v_max=v_max, beta=beta,
                                                     beta_min=beta_min,
                                                     beta_max=beta_max,
                                                     gamma=gamma,
                                                     gamma_min=gamma_min,
                                                     gamma_max=gamma_max,
                                                     out_type=out_type,
                                                     variance_epsilon=variance_epsilon,
                                                     scale_after_normalization=scale_after_normalization,
                                                     name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type",
            _op._get_attr_type("out_type"), "variance_epsilon",
            _op.get_attr("variance_epsilon"), "scale_after_normalization",
            _op.get_attr("scale_after_normalization"))
  _execute.record_gradient(
      "QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result)
  return _result
def QuantizedBatchNormWithGlobalNormalization(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name=None):
  # Thin raw-op shim: forward every argument by keyword to the snake_case op.
  call_kwargs = dict(t=t, t_min=t_min, t_max=t_max, m=m, m_min=m_min,
                     m_max=m_max, v=v, v_min=v_min, v_max=v_max, beta=beta,
                     beta_min=beta_min, beta_max=beta_max, gamma=gamma,
                     gamma_min=gamma_min, gamma_max=gamma_max,
                     out_type=out_type, variance_epsilon=variance_epsilon,
                     scale_after_normalization=scale_after_normalization,
                     name=name)
  return quantized_batch_norm_with_global_normalization(**call_kwargs)
# Mirror the docstring, force keyword-only calling, hide from generated docs,
# and register the symbol under the tf.raw_ops export namespace.
QuantizedBatchNormWithGlobalNormalization.__doc__ = quantized_batch_norm_with_global_normalization.__doc__
QuantizedBatchNormWithGlobalNormalization = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedBatchNormWithGlobalNormalization))
tf_export("raw_ops.QuantizedBatchNormWithGlobalNormalization")(QuantizedBatchNormWithGlobalNormalization)
def quantized_batch_norm_with_global_normalization_eager_fallback(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_batch_norm_with_global_normalization
  """
  _ctx = ctx if ctx else _context.context()
  # Canonicalize the scalar attributes.
  out_type = _execute.make_type(out_type, "out_type")
  variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
  scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
  # Infer the shared quantized dtype Tinput from all five quantized tensors;
  # every min/max range bound is a plain float32 scalar tensor.
  _attr_Tinput, _inputs_Tinput = _execute.args_to_matching_eager([t, m, v, beta, gamma], _ctx)
  (t, m, v, beta, gamma) = _inputs_Tinput
  t_min = _ops.convert_to_tensor(t_min, _dtypes.float32)
  t_max = _ops.convert_to_tensor(t_max, _dtypes.float32)
  m_min = _ops.convert_to_tensor(m_min, _dtypes.float32)
  m_max = _ops.convert_to_tensor(m_max, _dtypes.float32)
  v_min = _ops.convert_to_tensor(v_min, _dtypes.float32)
  v_max = _ops.convert_to_tensor(v_max, _dtypes.float32)
  beta_min = _ops.convert_to_tensor(beta_min, _dtypes.float32)
  beta_max = _ops.convert_to_tensor(beta_max, _dtypes.float32)
  gamma_min = _ops.convert_to_tensor(gamma_min, _dtypes.float32)
  gamma_max = _ops.convert_to_tensor(gamma_max, _dtypes.float32)
  _inputs_flat = [t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max]
  _attrs = ("Tinput", _attr_Tinput, "out_type", out_type, "variance_epsilon",
  variance_epsilon, "scale_after_normalization", scale_after_normalization)
  # Execute eagerly; this op has three outputs (result, result_min, result_max).
  _result = _execute.execute(b"QuantizedBatchNormWithGlobalNormalization", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result)
  return _result
# Result container for the three outputs of QuantizedBiasAdd.
_quantized_bias_add_outputs = ["output", "min_out", "max_out"]
_QuantizedBiasAddOutput = _collections.namedtuple(
    "QuantizedBiasAdd", _quantized_bias_add_outputs)
def quantized_bias_add(input, bias, min_input, max_input, min_bias, max_bias, out_type, name=None):
  r"""Adds Tensor 'bias' to Tensor 'input' for Quantized types.
  Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A 1D bias Tensor with size matching the last dimension of 'input'.
    min_input: A `Tensor` of type `float32`.
      The float value that the lowest quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the highest quantized input value represents.
    min_bias: A `Tensor` of type `float32`.
      The float value that the lowest quantized bias value represents.
    max_bias: A `Tensor` of type `float32`.
      The float value that the highest quantized bias value represents.
    out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_out, max_out).
    output: A `Tensor` of type `out_type`.
    min_out: A `Tensor` of type `float32`.
    max_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: dispatch directly through the C fast-path API.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedBiasAdd", name, _ctx.post_execution_callbacks, input, bias,
        min_input, max_input, min_bias, max_bias, "out_type", out_type)
      _result = _QuantizedBiasAddOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected these inputs; retry via the slower eager path.
      try:
        return quantized_bias_add_eager_fallback(
            input, bias, min_input, max_input, min_bias, max_bias,
            out_type=out_type, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedBiasAdd", input=input, bias=bias, min_input=min_input,
                            max_input=max_input, min_bias=min_bias,
                            max_bias=max_bias, out_type=out_type, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
            "out_type", _op._get_attr_type("out_type"))
  _execute.record_gradient(
      "QuantizedBiasAdd", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedBiasAddOutput._make(_result)
  return _result
def QuantizedBiasAdd(input, bias, min_input, max_input, min_bias, max_bias, out_type, name=None):
  # Thin raw-op shim: forward every argument by keyword to the snake_case op.
  call_kwargs = dict(input=input, bias=bias, min_input=min_input,
                     max_input=max_input, min_bias=min_bias,
                     max_bias=max_bias, out_type=out_type, name=name)
  return quantized_bias_add(**call_kwargs)
# Mirror the docstring, force keyword-only calling, hide from generated docs,
# and register the symbol under the tf.raw_ops export namespace.
QuantizedBiasAdd.__doc__ = quantized_bias_add.__doc__
QuantizedBiasAdd = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedBiasAdd))
tf_export("raw_ops.QuantizedBiasAdd")(QuantizedBiasAdd)
def quantized_bias_add_eager_fallback(input, bias, min_input, max_input, min_bias, max_bias, out_type, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_bias_add
  """
  _ctx = ctx if ctx else _context.context()
  out_type = _execute.make_type(out_type, "out_type")
  # Infer the T1/T2 dtype attrs from the inputs and coerce the quantization
  # range arguments to float32 tensors.
  _attr_T1, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_T2, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_bias = _ops.convert_to_tensor(min_bias, _dtypes.float32)
  max_bias = _ops.convert_to_tensor(max_bias, _dtypes.float32)
  _inputs_flat = [input, bias, min_input, max_input, min_bias, max_bias]
  # Flat (name, value, ...) attr tuple, consumed positionally by the runtime.
  _attrs = ("T1", _attr_T1, "T2", _attr_T2, "out_type", out_type)
  # 3 == number of op outputs (output, min_out, max_out).
  _result = _execute.execute(b"QuantizedBiasAdd", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedBiasAdd", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedBiasAddOutput._make(_result)
  return _result
# Result type for quantized_conv2d: (output, min_output, max_output).
_quantized_conv2d_outputs = ["output", "min_output", "max_output"]
_QuantizedConv2DOutput = _collections.namedtuple(
    "QuantizedConv2D", _quantized_conv2d_outputs)
def quantized_conv2d(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  r"""Computes a 2D convolution given quantized 4D input and filter tensors.
  The inputs are quantized tensors where the lowest value represents the real
  number of the associated minimum, and the highest represents the maximum.
  This means that you can only interpret the quantized output in the same way, by
  taking the returned minimum and maximum values into account.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      filter's input_depth dimension must match input's depth dimensions.
    min_input: A `Tensor` of type `float32`.
      The float value that the lowest quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the highest quantized input value represents.
    min_filter: A `Tensor` of type `float32`.
      The float value that the lowest quantized filter value represents.
    max_filter: A `Tensor` of type `float32`.
      The float value that the highest quantized filter value represents.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  # Generated-wrapper dispatch: in eager mode take the C++ fast path; a
  # _FallbackException retries through the Python slow path, and a
  # _SymbolicException (symbolic input) falls through to graph building below.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2D", name, _ctx.post_execution_callbacks, input, filter,
        min_input, max_input, min_filter, max_filter, "out_type", out_type,
        "strides", strides, "padding", padding, "dilations", dilations)
      _result = _QuantizedConv2DOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_conv2d_eager_fallback(
            input, filter, min_input, max_input, min_filter, max_filter,
            out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2D", input=input, filter=filter, min_input=min_input,
                           max_input=max_input, min_filter=min_filter,
                           max_filter=max_filter, strides=strides,
                           padding=padding, out_type=out_type,
                           dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read canonicalized attrs back off the created op for gradient recording.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"))
  _execute.record_gradient(
      "QuantizedConv2D", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DOutput._make(_result)
  return _result
def QuantizedConv2D(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  """Raw-op entry point; forwards every argument to `quantized_conv2d`."""
  return quantized_conv2d(input, filter, min_input, max_input, min_filter,
                          max_filter, strides, padding, out_type=out_type,
                          dilations=dilations, name=name)
# Expose the op under tf.raw_ops: share the snake_case wrapper's docstring,
# hide it from generated API docs, and require keyword-only invocation.
QuantizedConv2D.__doc__ = quantized_conv2d.__doc__
QuantizedConv2D = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2D))
tf_export("raw_ops.QuantizedConv2D")(QuantizedConv2D)
def quantized_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d
  """
  _ctx = ctx if ctx else _context.context()
  # Canonicalize list/string/type attrs exactly as the graph path does.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Infer Tinput/Tfilter dtype attrs and coerce range inputs to float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations)
  # 3 == number of op outputs (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedConv2D", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedConv2D", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DOutput._make(_result)
  return _result
# Result type for quantized_conv2d_and_relu: (output, min_output, max_output).
_quantized_conv2d_and_relu_outputs = ["output", "min_output", "max_output"]
_QuantizedConv2DAndReluOutput = _collections.namedtuple(
    "QuantizedConv2DAndRelu", _quantized_conv2d_and_relu_outputs)
def quantized_conv2d_and_relu(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""TODO: add doc.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  # Generated-wrapper dispatch: in eager mode take the C++ fast path; a
  # _FallbackException retries through the Python slow path, and a
  # _SymbolicException (symbolic input) falls through to graph building below.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DAndRelu", name, _ctx.post_execution_callbacks, input,
        filter, min_input, max_input, min_filter, max_filter, "out_type",
        out_type, "strides", strides, "padding", padding, "dilations",
        dilations, "padding_list", padding_list)
      _result = _QuantizedConv2DAndReluOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_conv2d_and_relu_eager_fallback(
            input, filter, min_input, max_input, min_filter, max_filter,
            out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, padding_list=padding_list, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_and_relu' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_and_relu' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_and_relu' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DAndRelu", input=input, filter=filter,
                                  min_input=min_input, max_input=max_input,
                                  min_filter=min_filter,
                                  max_filter=max_filter, strides=strides,
                                  padding=padding, out_type=out_type,
                                  dilations=dilations,
                                  padding_list=padding_list, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read canonicalized attrs back off the created op for gradient recording.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  _execute.record_gradient(
      "QuantizedConv2DAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DAndReluOutput._make(_result)
  return _result
def QuantizedConv2DAndRelu(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  """Raw-op entry point; forwards every argument to `quantized_conv2d_and_relu`."""
  return quantized_conv2d_and_relu(input, filter, min_input, max_input,
                                   min_filter, max_filter, strides, padding,
                                   out_type=out_type, dilations=dilations,
                                   padding_list=padding_list, name=name)
# Expose the op under tf.raw_ops: share the snake_case wrapper's docstring,
# hide it from generated API docs, and require keyword-only invocation.
QuantizedConv2DAndRelu.__doc__ = quantized_conv2d_and_relu.__doc__
QuantizedConv2DAndRelu = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DAndRelu))
tf_export("raw_ops.QuantizedConv2DAndRelu")(QuantizedConv2DAndRelu)
def quantized_conv2d_and_relu_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_and_relu
  """
  _ctx = ctx if ctx else _context.context()
  # Canonicalize list/string/type attrs exactly as the graph path does.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_and_relu' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_and_relu' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_and_relu' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer Tinput/Tfilter dtype attrs and coerce range inputs to float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations,
  "padding_list", padding_list)
  # 3 == number of op outputs (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedConv2DAndRelu", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DAndReluOutput._make(_result)
  return _result
# Result type for quantized_conv2d_and_relu_and_requantize:
# (output, min_output, max_output).
_quantized_conv2d_and_relu_and_requantize_outputs = ["output", "min_output",
                                                    "max_output"]
_QuantizedConv2DAndReluAndRequantizeOutput = _collections.namedtuple(
    "QuantizedConv2DAndReluAndRequantize",
    _quantized_conv2d_and_relu_and_requantize_outputs)
def quantized_conv2d_and_relu_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""TODO: add doc.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    min_freezed_output: A `Tensor` of type `float32`.
    max_freezed_output: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  # Generated-wrapper dispatch: in eager mode take the C++ fast path; a
  # _FallbackException retries through the Python slow path, and a
  # _SymbolicException (symbolic input) falls through to graph building below.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DAndReluAndRequantize", name,
        _ctx.post_execution_callbacks, input, filter, min_input, max_input,
        min_filter, max_filter, min_freezed_output, max_freezed_output,
        "out_type", out_type, "strides", strides, "padding", padding,
        "dilations", dilations, "padding_list", padding_list)
      _result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_conv2d_and_relu_and_requantize_eager_fallback(
            input, filter, min_input, max_input, min_filter, max_filter,
            min_freezed_output, max_freezed_output, out_type=out_type,
            strides=strides, padding=padding, dilations=dilations,
            padding_list=padding_list, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DAndReluAndRequantize", input=input, filter=filter,
                                               min_input=min_input,
                                               max_input=max_input,
                                               min_filter=min_filter,
                                               max_filter=max_filter,
                                               min_freezed_output=min_freezed_output,
                                               max_freezed_output=max_freezed_output,
                                               strides=strides,
                                               padding=padding,
                                               out_type=out_type,
                                               dilations=dilations,
                                               padding_list=padding_list,
                                               name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read canonicalized attrs back off the created op for gradient recording.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  _execute.record_gradient(
      "QuantizedConv2DAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result)
  return _result
def QuantizedConv2DAndReluAndRequantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  """Raw-op entry point; forwards everything to `quantized_conv2d_and_relu_and_requantize`."""
  return quantized_conv2d_and_relu_and_requantize(
      input, filter, min_input, max_input, min_filter, max_filter,
      min_freezed_output, max_freezed_output, strides, padding,
      out_type=out_type, dilations=dilations, padding_list=padding_list,
      name=name)
# Expose the op under tf.raw_ops: share the snake_case wrapper's docstring,
# hide it from generated API docs, and require keyword-only invocation.
QuantizedConv2DAndReluAndRequantize.__doc__ = quantized_conv2d_and_relu_and_requantize.__doc__
QuantizedConv2DAndReluAndRequantize = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DAndReluAndRequantize))
tf_export("raw_ops.QuantizedConv2DAndReluAndRequantize")(QuantizedConv2DAndReluAndRequantize)
def quantized_conv2d_and_relu_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_and_relu_and_requantize
  """
  _ctx = ctx if ctx else _context.context()
  # Canonicalize list/string/type attrs exactly as the graph path does.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer Tinput/Tfilter dtype attrs and coerce range inputs to float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
  max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
  _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations,
  "padding_list", padding_list)
  # 3 == number of op outputs (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedConv2DAndReluAndRequantize", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result)
  return _result
# Result type for quantized_conv2d_and_requantize:
# (output, min_output, max_output).
_quantized_conv2d_and_requantize_outputs = ["output", "min_output",
                                            "max_output"]
_QuantizedConv2DAndRequantizeOutput = _collections.namedtuple(
    "QuantizedConv2DAndRequantize", _quantized_conv2d_and_requantize_outputs)
def quantized_conv2d_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""TODO: add doc.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    min_freezed_output: A `Tensor` of type `float32`.
    max_freezed_output: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint8`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  # Generated-wrapper dispatch: in eager mode take the C++ fast path; a
  # _FallbackException retries through the Python slow path, and a
  # _SymbolicException (symbolic input) falls through to graph building below.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DAndRequantize", name, _ctx.post_execution_callbacks,
        input, filter, min_input, max_input, min_filter, max_filter,
        min_freezed_output, max_freezed_output, "out_type", out_type,
        "strides", strides, "padding", padding, "dilations", dilations,
        "padding_list", padding_list)
      _result = _QuantizedConv2DAndRequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_conv2d_and_requantize_eager_fallback(
            input, filter, min_input, max_input, min_filter, max_filter,
            min_freezed_output, max_freezed_output, out_type=out_type,
            strides=strides, padding=padding, dilations=dilations,
            padding_list=padding_list, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DAndRequantize", input=input, filter=filter,
                                        min_input=min_input,
                                        max_input=max_input,
                                        min_filter=min_filter,
                                        max_filter=max_filter,
                                        min_freezed_output=min_freezed_output,
                                        max_freezed_output=max_freezed_output,
                                        strides=strides, padding=padding,
                                        out_type=out_type,
                                        dilations=dilations,
                                        padding_list=padding_list, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read canonicalized attrs back off the created op for gradient recording.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  _execute.record_gradient(
      "QuantizedConv2DAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DAndRequantizeOutput._make(_result)
  return _result
def QuantizedConv2DAndRequantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  """Raw-op entry point; forwards everything to `quantized_conv2d_and_requantize`."""
  return quantized_conv2d_and_requantize(
      input, filter, min_input, max_input, min_filter, max_filter,
      min_freezed_output, max_freezed_output, strides, padding,
      out_type=out_type, dilations=dilations, padding_list=padding_list,
      name=name)
# Expose the op under tf.raw_ops: share the snake_case wrapper's docstring,
# hide it from generated API docs, and require keyword-only invocation.
QuantizedConv2DAndRequantize.__doc__ = quantized_conv2d_and_requantize.__doc__
QuantizedConv2DAndRequantize = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DAndRequantize))
tf_export("raw_ops.QuantizedConv2DAndRequantize")(QuantizedConv2DAndRequantize)
def quantized_conv2d_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_and_requantize
  """
  _ctx = ctx if ctx else _context.context()
  # Canonicalize list/string/type attrs exactly as the graph path does.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer Tinput/Tfilter dtype attrs and coerce range inputs to float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
  max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
  _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations,
  "padding_list", padding_list)
  # 3 == number of op outputs (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedConv2DAndRequantize", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DAndRequantizeOutput._make(_result)
  return _result
# Result type for quantized_conv2d_per_channel:
# (output, min_output, max_output).
_quantized_conv2d_per_channel_outputs = ["output", "min_output", "max_output"]
_QuantizedConv2DPerChannelOutput = _collections.namedtuple(
    "QuantizedConv2DPerChannel", _quantized_conv2d_per_channel_outputs)
def quantized_conv2d_per_channel(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  r"""Computes QuantizedConv2D per channel.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original input tensor.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original filter tensor.
    min_input: A `Tensor` of type `float32`.
      The minimum value of the input tensor
    max_input: A `Tensor` of type `float32`.
      The maximum value of the input tensor.
    min_filter: A `Tensor` of type `float32`.
      The minimum value of the filter tensor.
    max_filter: A `Tensor` of type `float32`.
      The maximum value of the filter tensor.
    strides: A list of `ints`. list of stride values.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
      The quantized type of output tensor that needs to be converted.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      list of dilation values.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  # NOTE(review): machine-generated op wrapper -- regenerate rather than hand-edit.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DPerChannel", name, _ctx.post_execution_callbacks,
        input, filter, min_input, max_input, min_filter, max_filter,
        "out_type", out_type, "strides", strides, "padding", padding,
        "dilations", dilations)
      _result = _QuantizedConv2DPerChannelOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path refused these arguments; use the slower eager fallback.
      try:
        return quantized_conv2d_per_channel_eager_fallback(
            input, filter, min_input, max_input, min_filter, max_filter,
            out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then add the op to the graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_per_channel' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_per_channel' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DPerChannel", input=input, filter=filter,
        min_input=min_input, max_input=max_input,
        min_filter=min_filter,
        max_filter=max_filter, strides=strides,
        padding=padding, out_type=out_type,
        dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"))
  # Record op metadata so gradients can be computed for this op.
  _execute.record_gradient(
      "QuantizedConv2DPerChannel", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DPerChannelOutput._make(_result)
  return _result
def QuantizedConv2DPerChannel(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  """Raw-op alias: forwards every argument unchanged to quantized_conv2d_per_channel."""
  return quantized_conv2d_per_channel(
      input, filter, min_input, max_input, min_filter, max_filter,
      strides, padding, out_type=out_type, dilations=dilations, name=name)
# Copy the documented docstring onto the raw-op alias, hide the alias from
# generated API docs, and export it as tf.raw_ops.QuantizedConv2DPerChannel.
QuantizedConv2DPerChannel.__doc__ = quantized_conv2d_per_channel.__doc__
QuantizedConv2DPerChannel = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DPerChannel))
tf_export("raw_ops.QuantizedConv2DPerChannel")(QuantizedConv2DPerChannel)
def quantized_conv2d_per_channel_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_per_channel
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attr arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_per_channel' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_per_channel' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Infer input/filter dtypes eagerly; the range tensors are fixed float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations)
  _result = _execute.execute(b"QuantizedConv2DPerChannel", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DPerChannel", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DPerChannelOutput._make(_result)
  return _result
# Output field names and the namedtuple type returned by the
# QuantizedConv2DWithBias wrappers below: (output, min_output, max_output).
_quantized_conv2d_with_bias_outputs = ["output", "min_output", "max_output"]
_QuantizedConv2DWithBiasOutput = _collections.namedtuple(
    "QuantizedConv2DWithBias", _quantized_conv2d_with_bias_outputs)
def quantized_conv2d_with_bias(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""Computes quantized conv2d with an added bias (QuantizedConv2DWithBias op).
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor` of type `float32`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  # NOTE(review): machine-generated op wrapper -- regenerate rather than hand-edit.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DWithBias", name, _ctx.post_execution_callbacks, input,
        filter, bias, min_input, max_input, min_filter, max_filter,
        "out_type", out_type, "strides", strides, "padding", padding,
        "dilations", dilations, "padding_list", padding_list)
      _result = _QuantizedConv2DWithBiasOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path refused these arguments; use the slower eager fallback.
      try:
        return quantized_conv2d_with_bias_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, padding_list=padding_list, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then add the op to the graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DWithBias", input=input, filter=filter, bias=bias,
        min_input=min_input, max_input=max_input,
        min_filter=min_filter,
        max_filter=max_filter, strides=strides,
        padding=padding, out_type=out_type,
        dilations=dilations,
        padding_list=padding_list, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  # Record op metadata so gradients can be computed for this op.
  _execute.record_gradient(
      "QuantizedConv2DWithBias", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasOutput._make(_result)
  return _result
def QuantizedConv2DWithBias(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  """Raw-op alias: forwards every argument unchanged to quantized_conv2d_with_bias."""
  return quantized_conv2d_with_bias(
      input, filter, bias, min_input, max_input, min_filter, max_filter,
      strides, padding, out_type=out_type, dilations=dilations,
      padding_list=padding_list, name=name)
# Copy the documented docstring onto the raw-op alias, hide the alias from
# generated API docs, and export it as tf.raw_ops.QuantizedConv2DWithBias.
QuantizedConv2DWithBias.__doc__ = quantized_conv2d_with_bias.__doc__
QuantizedConv2DWithBias = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DWithBias))
tf_export("raw_ops.QuantizedConv2DWithBias")(QuantizedConv2DWithBias)
def quantized_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_with_bias
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attr arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer input/filter dtypes eagerly; bias and range tensors are float32 here.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  bias = _ops.convert_to_tensor(bias, _dtypes.float32)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations,
  "padding_list", padding_list)
  _result = _execute.execute(b"QuantizedConv2DWithBias", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DWithBias", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasOutput._make(_result)
  return _result
# Output field names and the namedtuple type returned by the
# QuantizedConv2DWithBiasAndRelu wrappers below.
_quantized_conv2d_with_bias_and_relu_outputs = ["output", "min_output",
                                               "max_output"]
_QuantizedConv2DWithBiasAndReluOutput = _collections.namedtuple(
    "QuantizedConv2DWithBiasAndRelu",
    _quantized_conv2d_with_bias_and_relu_outputs)
def quantized_conv2d_with_bias_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""Computes quantized conv2d with bias and ReLU (QuantizedConv2DWithBiasAndRelu op).
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor` of type `float32`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  # NOTE(review): machine-generated op wrapper -- regenerate rather than hand-edit.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DWithBiasAndRelu", name, _ctx.post_execution_callbacks,
        input, filter, bias, min_input, max_input, min_filter, max_filter,
        "out_type", out_type, "strides", strides, "padding", padding,
        "dilations", dilations, "padding_list", padding_list)
      _result = _QuantizedConv2DWithBiasAndReluOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path refused these arguments; use the slower eager fallback.
      try:
        return quantized_conv2d_with_bias_and_relu_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, padding_list=padding_list, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then add the op to the graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_and_relu' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_and_relu' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_and_relu' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DWithBiasAndRelu", input=input, filter=filter,
        bias=bias, min_input=min_input,
        max_input=max_input,
        min_filter=min_filter,
        max_filter=max_filter,
        strides=strides, padding=padding,
        out_type=out_type,
        dilations=dilations,
        padding_list=padding_list,
        name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  # Record op metadata so gradients can be computed for this op.
  _execute.record_gradient(
      "QuantizedConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasAndReluOutput._make(_result)
  return _result
def QuantizedConv2DWithBiasAndRelu(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  """Raw-op alias: forwards every argument unchanged to quantized_conv2d_with_bias_and_relu."""
  return quantized_conv2d_with_bias_and_relu(
      input, filter, bias, min_input, max_input, min_filter, max_filter,
      strides, padding, out_type=out_type, dilations=dilations,
      padding_list=padding_list, name=name)
# Copy the documented docstring onto the raw-op alias, hide the alias from
# generated API docs, and export it as tf.raw_ops.QuantizedConv2DWithBiasAndRelu.
QuantizedConv2DWithBiasAndRelu.__doc__ = quantized_conv2d_with_bias_and_relu.__doc__
QuantizedConv2DWithBiasAndRelu = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DWithBiasAndRelu))
tf_export("raw_ops.QuantizedConv2DWithBiasAndRelu")(QuantizedConv2DWithBiasAndRelu)
def quantized_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_with_bias_and_relu
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attr arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_and_relu' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_and_relu' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_and_relu' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer input/filter dtypes eagerly; bias and range tensors are float32 here.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  bias = _ops.convert_to_tensor(bias, _dtypes.float32)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations,
  "padding_list", padding_list)
  _result = _execute.execute(b"QuantizedConv2DWithBiasAndRelu", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasAndReluOutput._make(_result)
  return _result
# Output field names and the namedtuple type returned by the
# QuantizedConv2DWithBiasAndReluAndRequantize wrappers below.
_quantized_conv2d_with_bias_and_relu_and_requantize_outputs = ["output",
                                                              "min_output",
                                                              "max_output"]
_QuantizedConv2DWithBiasAndReluAndRequantizeOutput = _collections.namedtuple(
    "QuantizedConv2DWithBiasAndReluAndRequantize",
    _quantized_conv2d_with_bias_and_relu_and_requantize_outputs)
def quantized_conv2d_with_bias_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""Computes quantized conv2d with bias, ReLU, and requantization (QuantizedConv2DWithBiasAndReluAndRequantize op).
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    min_freezed_output: A `Tensor` of type `float32`.
    max_freezed_output: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  # NOTE(review): machine-generated op wrapper -- regenerate rather than hand-edit.
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DWithBiasAndReluAndRequantize", name,
        _ctx.post_execution_callbacks, input, filter, bias, min_input,
        max_input, min_filter, max_filter, min_freezed_output,
        max_freezed_output, "out_type", out_type, "strides", strides,
        "padding", padding, "dilations", dilations, "padding_list",
        padding_list)
      _result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path refused these arguments; use the slower eager fallback.
      try:
        return quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            min_freezed_output, max_freezed_output, out_type=out_type,
            strides=strides, padding=padding, dilations=dilations,
            padding_list=padding_list, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/canonicalize attrs, then add the op to the graph.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DWithBiasAndReluAndRequantize", input=input,
        filter=filter,
        bias=bias,
        min_input=min_input,
        max_input=max_input,
        min_filter=min_filter,
        max_filter=max_filter,
        min_freezed_output=min_freezed_output,
        max_freezed_output=max_freezed_output,
        strides=strides,
        padding=padding,
        out_type=out_type,
        dilations=dilations,
        padding_list=padding_list,
        name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "Tbias",
            _op._get_attr_type("Tbias"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  # Record op metadata so gradients can be computed for this op.
  _execute.record_gradient(
      "QuantizedConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
  return _result
def QuantizedConv2DWithBiasAndReluAndRequantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  """Raw-op alias: forwards every argument unchanged to quantized_conv2d_with_bias_and_relu_and_requantize."""
  return quantized_conv2d_with_bias_and_relu_and_requantize(
      input, filter, bias, min_input, max_input, min_filter, max_filter,
      min_freezed_output, max_freezed_output, strides, padding,
      out_type=out_type, dilations=dilations, padding_list=padding_list,
      name=name)
# Copy the documented docstring onto the raw-op alias, hide the alias from
# generated API docs, and export it under tf.raw_ops.
QuantizedConv2DWithBiasAndReluAndRequantize.__doc__ = quantized_conv2d_with_bias_and_relu_and_requantize.__doc__
QuantizedConv2DWithBiasAndReluAndRequantize = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DWithBiasAndReluAndRequantize))
tf_export("raw_ops.QuantizedConv2DWithBiasAndReluAndRequantize")(QuantizedConv2DWithBiasAndReluAndRequantize)
def quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_with_bias_and_relu_and_requantize
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attr arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer input/filter/bias dtypes eagerly (bias may be float32 or qint32 per
  # the docstring); range tensors are fixed float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
  max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
  _attr_Tbias, "out_type", out_type, "strides", strides, "padding", padding,
  "dilations", dilations, "padding_list", padding_list)
  _result = _execute.execute(b"QuantizedConv2DWithBiasAndReluAndRequantize",
                             3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
  return _result
# Output field names and the namedtuple type returned by the
# QuantizedConv2DWithBiasAndRequantize wrappers below.
_quantized_conv2d_with_bias_and_requantize_outputs = ["output", "min_output",
                                                      "max_output"]
_QuantizedConv2DWithBiasAndRequantizeOutput = _collections.namedtuple(
    "QuantizedConv2DWithBiasAndRequantize",
    _quantized_conv2d_with_bias_and_requantize_outputs)
def quantized_conv2d_with_bias_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""Quantized conv2d with a bias add, requantized to `out_type` using the frozen output range.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    min_freezed_output: A `Tensor` of type `float32`.
    max_freezed_output: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint8`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: dispatch straight into the C layer; attrs are passed
  # as alternating name/value pairs after the tensor inputs.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DWithBiasAndRequantize", name,
        _ctx.post_execution_callbacks, input, filter, bias, min_input,
        max_input, min_filter, max_filter, min_freezed_output,
        max_freezed_output, "out_type", out_type, "strides", strides,
        "padding", padding, "dilations", dilations, "padding_list",
        padding_list)
      _result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow eager path: validates/canonicalizes attrs in Python first.
        return quantized_conv2d_with_bias_and_requantize_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            min_freezed_output, max_freezed_output, out_type=out_type,
            strides=strides, padding=padding, dilations=dilations,
            padding_list=padding_list, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph path: validate each attr, then build the op via the op-def library.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DWithBiasAndRequantize", input=input, filter=filter,
                                               bias=bias,
                                               min_input=min_input,
                                               max_input=max_input,
                                               min_filter=min_filter,
                                               max_filter=max_filter,
                                               min_freezed_output=min_freezed_output,
                                               max_freezed_output=max_freezed_output,
                                               strides=strides,
                                               padding=padding,
                                               out_type=out_type,
                                               dilations=dilations,
                                               padding_list=padding_list,
                                               name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Record the attrs actually resolved on the created op for gradient replay.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "Tbias",
            _op._get_attr_type("Tbias"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  _execute.record_gradient(
      "QuantizedConv2DWithBiasAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result)
  return _result
def QuantizedConv2DWithBiasAndRequantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  # Raw-op alias: forwards every argument unchanged to the snake_case wrapper.
  return quantized_conv2d_with_bias_and_requantize(
      input, filter, bias, min_input, max_input, min_filter, max_filter,
      min_freezed_output, max_freezed_output, strides, padding,
      out_type=out_type, dilations=dilations, padding_list=padding_list,
      name=name)
# Mirror the snake_case docstring onto the raw-op alias, force keyword-only
# calling, hide it from generated docs, and export it under tf.raw_ops.
QuantizedConv2DWithBiasAndRequantize.__doc__ = quantized_conv2d_with_bias_and_requantize.__doc__
QuantizedConv2DWithBiasAndRequantize = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DWithBiasAndRequantize))
tf_export("raw_ops.QuantizedConv2DWithBiasAndRequantize")(QuantizedConv2DWithBiasAndRequantize)
def quantized_conv2d_with_bias_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_with_bias_and_requantize
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize attrs exactly as the graph path does.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer dtype attrs for the polymorphic inputs; the range tensors are
  # always float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
  max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
  _attr_Tbias, "out_type", out_type, "strides", strides, "padding", padding,
  "dilations", dilations, "padding_list", padding_list)
  # The op has 3 outputs: output, min_output, max_output.
  _result = _execute.execute(b"QuantizedConv2DWithBiasAndRequantize", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DWithBiasAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result)
  return _result
# Output field names and the named result-tuple type returned by
# QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.
_quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_outputs = ["output",
                                                                         "min_output",
                                                                         "max_output"]
_QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput = _collections.namedtuple(
    "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize",
    _quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_outputs)
def quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""Quantized conv2d with bias, signed summand add and ReLU, requantized to `out_type`.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    min_freezed_output: A `Tensor` of type `float32`.
    max_freezed_output: A `Tensor` of type `float32`.
    summand: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_summand: A `Tensor` of type `float32`.
    max_summand: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: dispatch straight into the C layer.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name,
        _ctx.post_execution_callbacks, input, filter, bias, min_input,
        max_input, min_filter, max_filter, min_freezed_output,
        max_freezed_output, summand, min_summand, max_summand, "out_type",
        out_type, "strides", strides, "padding", padding, "dilations",
        dilations, "padding_list", padding_list)
      _result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow eager path: validates/canonicalizes attrs in Python first.
        return quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            min_freezed_output, max_freezed_output, summand, min_summand,
            max_summand, out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, padding_list=padding_list, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph path: validate each attr, then build the op via the op-def library.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", input=input,
                                                                filter=filter,
                                                                bias=bias,
                                                                min_input=min_input,
                                                                max_input=max_input,
                                                                min_filter=min_filter,
                                                                max_filter=max_filter,
                                                                min_freezed_output=min_freezed_output,
                                                                max_freezed_output=max_freezed_output,
                                                                summand=summand,
                                                                min_summand=min_summand,
                                                                max_summand=max_summand,
                                                                strides=strides,
                                                                padding=padding,
                                                                out_type=out_type,
                                                                dilations=dilations,
                                                                padding_list=padding_list,
                                                                name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Record the attrs actually resolved on the created op for gradient replay.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "Tbias",
            _op._get_attr_type("Tbias"), "Tsummand",
            _op._get_attr_type("Tsummand"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  _execute.record_gradient(
      "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result)
  return _result
def QuantizedConv2DWithBiasSignedSumAndReluAndRequantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  # Raw-op alias: forwards every argument unchanged to the snake_case wrapper.
  return quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(
      input, filter, bias, min_input, max_input, min_filter, max_filter,
      min_freezed_output, max_freezed_output, summand, min_summand,
      max_summand, strides, padding, out_type=out_type, dilations=dilations,
      padding_list=padding_list, name=name)
# Mirror the snake_case docstring onto the raw-op alias, force keyword-only
# calling, hide it from generated docs, and export it under tf.raw_ops.
QuantizedConv2DWithBiasSignedSumAndReluAndRequantize.__doc__ = quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize.__doc__
QuantizedConv2DWithBiasSignedSumAndReluAndRequantize = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DWithBiasSignedSumAndReluAndRequantize))
tf_export("raw_ops.QuantizedConv2DWithBiasSignedSumAndReluAndRequantize")(QuantizedConv2DWithBiasSignedSumAndReluAndRequantize)
def quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize attrs exactly as the graph path does.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer dtype attrs for the polymorphic inputs; the range tensors are
  # always float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  _attr_Tsummand, (summand,) = _execute.args_to_matching_eager([summand], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
  max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
  min_summand = _ops.convert_to_tensor(min_summand, _dtypes.float32)
  max_summand = _ops.convert_to_tensor(max_summand, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
  _attr_Tbias, "Tsummand", _attr_Tsummand, "out_type", out_type, "strides",
  strides, "padding", padding, "dilations", dilations, "padding_list",
  padding_list)
  # The op has 3 outputs: output, min_output, max_output.
  _result = _execute.execute(b"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize",
                             3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result)
  return _result
# Output field names and the named result-tuple type returned by
# QuantizedConv2DWithBiasSumAndRelu.
_quantized_conv2d_with_bias_sum_and_relu_outputs = ["output", "min_output",
                                                   "max_output"]
_QuantizedConv2DWithBiasSumAndReluOutput = _collections.namedtuple(
    "QuantizedConv2DWithBiasSumAndRelu",
    _quantized_conv2d_with_bias_sum_and_relu_outputs)
def quantized_conv2d_with_bias_sum_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""Quantized conv2d with float bias and summand add followed by ReLU (no requantize).
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor` of type `float32`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    summand: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: dispatch straight into the C layer.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DWithBiasSumAndRelu", name,
        _ctx.post_execution_callbacks, input, filter, bias, min_input,
        max_input, min_filter, max_filter, summand, "out_type", out_type,
        "strides", strides, "padding", padding, "dilations", dilations,
        "padding_list", padding_list)
      _result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow eager path: validates/canonicalizes attrs in Python first.
        return quantized_conv2d_with_bias_sum_and_relu_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            summand, out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, padding_list=padding_list, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph path: validate each attr, then build the op via the op-def library.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DWithBiasSumAndRelu", input=input, filter=filter,
                                             bias=bias, min_input=min_input,
                                             max_input=max_input,
                                             min_filter=min_filter,
                                             max_filter=max_filter,
                                             summand=summand, strides=strides,
                                             padding=padding,
                                             out_type=out_type,
                                             dilations=dilations,
                                             padding_list=padding_list,
                                             name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Record the attrs actually resolved on the created op for gradient replay.
  # Note: bias and summand are fixed float32 here, so no Tbias/Tsummand attr.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  _execute.record_gradient(
      "QuantizedConv2DWithBiasSumAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result)
  return _result
def QuantizedConv2DWithBiasSumAndRelu(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  # Raw-op alias: forwards every argument unchanged to the snake_case wrapper.
  return quantized_conv2d_with_bias_sum_and_relu(
      input, filter, bias, min_input, max_input, min_filter, max_filter,
      summand, strides, padding, out_type=out_type, dilations=dilations,
      padding_list=padding_list, name=name)
# Mirror the snake_case docstring onto the raw-op alias, force keyword-only
# calling, hide it from generated docs, and export it under tf.raw_ops.
QuantizedConv2DWithBiasSumAndRelu.__doc__ = quantized_conv2d_with_bias_sum_and_relu.__doc__
QuantizedConv2DWithBiasSumAndRelu = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DWithBiasSumAndRelu))
tf_export("raw_ops.QuantizedConv2DWithBiasSumAndRelu")(QuantizedConv2DWithBiasSumAndRelu)
def quantized_conv2d_with_bias_sum_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_with_bias_sum_and_relu
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize attrs exactly as the graph path does.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer dtype attrs for the polymorphic inputs; bias, the range tensors and
  # summand are fixed float32 for this op variant.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  bias = _ops.convert_to_tensor(bias, _dtypes.float32)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  summand = _ops.convert_to_tensor(summand, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, summand]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
  out_type, "strides", strides, "padding", padding, "dilations", dilations,
  "padding_list", padding_list)
  # The op has 3 outputs: output, min_output, max_output.
  _result = _execute.execute(b"QuantizedConv2DWithBiasSumAndRelu", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DWithBiasSumAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result)
  return _result
# Output field names and the named result-tuple type returned by
# QuantizedConv2DWithBiasSumAndReluAndRequantize.
_quantized_conv2d_with_bias_sum_and_relu_and_requantize_outputs = ["output",
                                                                  "min_output",
                                                                  "max_output"]
_QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput = _collections.namedtuple(
    "QuantizedConv2DWithBiasSumAndReluAndRequantize",
    _quantized_conv2d_with_bias_sum_and_relu_and_requantize_outputs)
def quantized_conv2d_with_bias_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  r"""Quantized conv2d with bias and summand add followed by ReLU, requantized to `out_type`.
  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
    min_input: A `Tensor` of type `float32`.
    max_input: A `Tensor` of type `float32`.
    min_filter: A `Tensor` of type `float32`.
    max_filter: A `Tensor` of type `float32`.
    min_freezed_output: A `Tensor` of type `float32`.
    max_freezed_output: A `Tensor` of type `float32`.
    summand: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_summand: A `Tensor` of type `float32`.
    max_summand: A `Tensor` of type `float32`.
    strides: A list of `ints`.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
    padding_list: An optional list of `ints`. Defaults to `[]`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).
    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: dispatch straight into the C layer.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedConv2DWithBiasSumAndReluAndRequantize", name,
        _ctx.post_execution_callbacks, input, filter, bias, min_input,
        max_input, min_filter, max_filter, min_freezed_output,
        max_freezed_output, summand, min_summand, max_summand, "out_type",
        out_type, "strides", strides, "padding", padding, "dilations",
        dilations, "padding_list", padding_list)
      _result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow eager path: validates/canonicalizes attrs in Python first.
        return quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            min_freezed_output, max_freezed_output, summand, min_summand,
            max_summand, out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, padding_list=padding_list, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Graph path: validate each attr, then build the op via the op-def library.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedConv2DWithBiasSumAndReluAndRequantize", input=input,
                                                          filter=filter,
                                                          bias=bias,
                                                          min_input=min_input,
                                                          max_input=max_input,
                                                          min_filter=min_filter,
                                                          max_filter=max_filter,
                                                          min_freezed_output=min_freezed_output,
                                                          max_freezed_output=max_freezed_output,
                                                          summand=summand,
                                                          min_summand=min_summand,
                                                          max_summand=max_summand,
                                                          strides=strides,
                                                          padding=padding,
                                                          out_type=out_type,
                                                          dilations=dilations,
                                                          padding_list=padding_list,
                                                          name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Record the attrs actually resolved on the created op for gradient replay.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "Tbias",
            _op._get_attr_type("Tbias"), "Tsummand",
            _op._get_attr_type("Tsummand"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"), "padding_list",
            _op.get_attr("padding_list"))
  _execute.record_gradient(
      "QuantizedConv2DWithBiasSumAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result)
  return _result
def QuantizedConv2DWithBiasSumAndReluAndRequantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
  # Raw-op alias: forwards every argument unchanged to the snake_case wrapper.
  return quantized_conv2d_with_bias_sum_and_relu_and_requantize(
      input, filter, bias, min_input, max_input, min_filter, max_filter,
      min_freezed_output, max_freezed_output, summand, min_summand,
      max_summand, strides, padding, out_type=out_type, dilations=dilations,
      padding_list=padding_list, name=name)
# Mirror the snake_case docstring onto the raw-op alias, force keyword-only
# calling, hide it from generated docs, and export it under tf.raw_ops.
QuantizedConv2DWithBiasSumAndReluAndRequantize.__doc__ = quantized_conv2d_with_bias_sum_and_relu_and_requantize.__doc__
QuantizedConv2DWithBiasSumAndReluAndRequantize = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedConv2DWithBiasSumAndReluAndRequantize))
tf_export("raw_ops.QuantizedConv2DWithBiasSumAndReluAndRequantize")(QuantizedConv2DWithBiasSumAndReluAndRequantize)
def quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=None, padding_list=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_conv2d_with_bias_sum_and_relu_and_requantize

  Converts all inputs to tensors in Python, infers the attr dtypes, and
  executes the op eagerly via ``_execute.execute``.
  """
  # NOTE(review): `dilations` and `padding_list` previously used shared
  # mutable list defaults (`[1, 1, 1, 1]` / `[]`); the None sentinels are
  # behavior-identical because both values are normalized below before use.
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attribute arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  if padding_list is None:
    padding_list = []
  if not isinstance(padding_list, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_list' argument to "
        "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % padding_list)
  padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
  # Infer the quantized dtypes from the tensors; all range limits are
  # float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  _attr_Tsummand, (summand,) = _execute.args_to_matching_eager([summand], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
  max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
  min_summand = _ops.convert_to_tensor(min_summand, _dtypes.float32)
  max_summand = _ops.convert_to_tensor(max_summand, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
            _attr_Tbias, "Tsummand", _attr_Tsummand, "out_type", out_type,
            "strides", strides, "padding", padding, "dilations", dilations,
            "padding_list", padding_list)
  # The op produces 3 outputs: (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedConv2DWithBiasSumAndReluAndRequantize",
                             3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedConv2DWithBiasSumAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result)
  return _result
# Structured result for QuantizedDepthwiseConv2D: the quantized output tensor
# plus the float range [min_output, max_output] its values represent.
_quantized_depthwise_conv2d_outputs = ["output", "min_output", "max_output"]
_QuantizedDepthwiseConv2DOutput = _collections.namedtuple(
    "QuantizedDepthwiseConv2D", _quantized_depthwise_conv2d_outputs)
def quantized_depthwise_conv2d(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  r"""Computes quantized depthwise Conv2D.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original input tensor.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original filter tensor.
    min_input: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    min_filter: A `Tensor` of type `float32`.
      The float value that the minimum quantized filter value represents.
    max_filter: A `Tensor` of type `float32`.
      The float value that the maximum quantized filter value represents.
    strides: A list of `ints`. List of stride values.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
      The type of the output.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      List of dilation values.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).

    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Fast path: dispatch straight to the C++ eager runtime. The positional
    # argument order here must match the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedDepthwiseConv2D", name, _ctx.post_execution_callbacks,
        input, filter, min_input, max_input, min_filter, max_filter,
        "out_type", out_type, "strides", strides, "padding", padding,
        "dilations", dilations)
      _result = _QuantizedDepthwiseConv2DOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return quantized_depthwise_conv2d_eager_fallback(
            input, filter, min_input, max_input, min_filter, max_filter,
            out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Validate and canonicalize attributes before building the graph op.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_depthwise_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_depthwise_conv2d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
      "QuantizedDepthwiseConv2D", input=input, filter=filter,
      min_input=min_input, max_input=max_input, min_filter=min_filter,
      max_filter=max_filter, strides=strides, padding=padding,
      out_type=out_type, dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back from the created op so recorded gradients see the
  # canonicalized values.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"))
  _execute.record_gradient(
      "QuantizedDepthwiseConv2D", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedDepthwiseConv2DOutput._make(_result)
  return _result
# CamelCase raw-op alias; exported below as tf.raw_ops.QuantizedDepthwiseConv2D.
def QuantizedDepthwiseConv2D(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation; arguments are
  # forwarded by keyword.
  return quantized_depthwise_conv2d(input=input, filter=filter, min_input=min_input, max_input=max_input, min_filter=min_filter, max_filter=max_filter, strides=strides, padding=padding, out_type=out_type, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, force keyword-only calling, and register it under tf.raw_ops.
QuantizedDepthwiseConv2D.__doc__ = quantized_depthwise_conv2d.__doc__
QuantizedDepthwiseConv2D = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedDepthwiseConv2D))
tf_export("raw_ops.QuantizedDepthwiseConv2D")(QuantizedDepthwiseConv2D)
def quantized_depthwise_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_depthwise_conv2d

  Converts all inputs to tensors in Python, infers the attr dtypes, and
  executes the op eagerly via ``_execute.execute``.
  """
  # NOTE(review): `dilations` previously defaulted to the shared mutable list
  # `[1, 1, 1, 1]`; the None sentinel is behavior-identical because None is
  # normalized below before use.
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attribute arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_depthwise_conv2d' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_depthwise_conv2d' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Infer the quantized dtypes from the tensors; range limits are float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
            out_type, "strides", strides, "padding", padding, "dilations",
            dilations)
  # The op produces 3 outputs: (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedDepthwiseConv2D", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedDepthwiseConv2D", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedDepthwiseConv2DOutput._make(_result)
  return _result
# Structured result for QuantizedDepthwiseConv2DWithBias: the quantized output
# tensor plus the float range [min_output, max_output] its values represent.
_quantized_depthwise_conv2d_with_bias_outputs = ["output", "min_output",
                                                 "max_output"]
_QuantizedDepthwiseConv2DWithBiasOutput = _collections.namedtuple(
    "QuantizedDepthwiseConv2DWithBias",
    _quantized_depthwise_conv2d_with_bias_outputs)
def quantized_depthwise_conv2d_with_bias(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  r"""Computes quantized depthwise Conv2D with Bias.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original input tensor.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original filter tensor.
    bias: A `Tensor` of type `float32`. The original bias tensor.
    min_input: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    min_filter: A `Tensor` of type `float32`.
      The float value that the minimum quantized filter value represents.
    max_filter: A `Tensor` of type `float32`.
      The float value that the maximum quantized filter value represents.
    strides: A list of `ints`. List of stride values.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
      The type of the output.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      List of dilation values.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).

    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Fast path: dispatch straight to the C++ eager runtime. The positional
    # argument order here must match the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedDepthwiseConv2DWithBias", name,
        _ctx.post_execution_callbacks, input, filter, bias, min_input,
        max_input, min_filter, max_filter, "out_type", out_type, "strides",
        strides, "padding", padding, "dilations", dilations)
      _result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return quantized_depthwise_conv2d_with_bias_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Validate and canonicalize attributes before building the graph op.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_depthwise_conv2d_with_bias' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_depthwise_conv2d_with_bias' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
      "QuantizedDepthwiseConv2DWithBias", input=input, filter=filter,
      bias=bias, min_input=min_input, max_input=max_input,
      min_filter=min_filter, max_filter=max_filter, strides=strides,
      padding=padding, out_type=out_type, dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back from the created op so recorded gradients see the
  # canonicalized values.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"))
  _execute.record_gradient(
      "QuantizedDepthwiseConv2DWithBias", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result)
  return _result
# CamelCase raw-op alias; exported below as
# tf.raw_ops.QuantizedDepthwiseConv2DWithBias.
def QuantizedDepthwiseConv2DWithBias(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation; arguments are
  # forwarded by keyword.
  return quantized_depthwise_conv2d_with_bias(input=input, filter=filter, bias=bias, min_input=min_input, max_input=max_input, min_filter=min_filter, max_filter=max_filter, strides=strides, padding=padding, out_type=out_type, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, force keyword-only calling, and register it under tf.raw_ops.
QuantizedDepthwiseConv2DWithBias.__doc__ = quantized_depthwise_conv2d_with_bias.__doc__
QuantizedDepthwiseConv2DWithBias = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedDepthwiseConv2DWithBias))
tf_export("raw_ops.QuantizedDepthwiseConv2DWithBias")(QuantizedDepthwiseConv2DWithBias)
def quantized_depthwise_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_depthwise_conv2d_with_bias

  Converts all inputs to tensors in Python, infers the attr dtypes, and
  executes the op eagerly via ``_execute.execute``.
  """
  # NOTE(review): `dilations` previously defaulted to the shared mutable list
  # `[1, 1, 1, 1]`; the None sentinel is behavior-identical because None is
  # normalized below before use.
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attribute arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_depthwise_conv2d_with_bias' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_depthwise_conv2d_with_bias' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Infer the quantized dtypes from the tensors; bias and range limits are
  # float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  bias = _ops.convert_to_tensor(bias, _dtypes.float32)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
            out_type, "strides", strides, "padding", padding, "dilations",
            dilations)
  # The op produces 3 outputs: (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedDepthwiseConv2DWithBias", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedDepthwiseConv2DWithBias", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result)
  return _result
# Structured result for QuantizedDepthwiseConv2DWithBiasAndRelu: the quantized
# output tensor plus the float range [min_output, max_output] its values
# represent.
_quantized_depthwise_conv2d_with_bias_and_relu_outputs = ["output",
                                                          "min_output",
                                                          "max_output"]
_QuantizedDepthwiseConv2DWithBiasAndReluOutput = _collections.namedtuple(
    "QuantizedDepthwiseConv2DWithBiasAndRelu",
    _quantized_depthwise_conv2d_with_bias_and_relu_outputs)
def quantized_depthwise_conv2d_with_bias_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  r"""Computes quantized depthwise Conv2D with Bias and Relu.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original input tensor.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original filter tensor.
    bias: A `Tensor` of type `float32`. The original bias tensor.
    min_input: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    min_filter: A `Tensor` of type `float32`.
      The float value that the minimum quantized filter value represents.
    max_filter: A `Tensor` of type `float32`.
      The float value that the maximum quantized filter value represents.
    strides: A list of `ints`. List of stride values.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
      The type of the output.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      List of dilation values.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).

    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Fast path: dispatch straight to the C++ eager runtime. The positional
    # argument order here must match the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedDepthwiseConv2DWithBiasAndRelu", name,
        _ctx.post_execution_callbacks, input, filter, bias, min_input,
        max_input, min_filter, max_filter, "out_type", out_type, "strides",
        strides, "padding", padding, "dilations", dilations)
      _result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            out_type=out_type, strides=strides, padding=padding,
            dilations=dilations, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Validate and canonicalize attributes before building the graph op.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
      "QuantizedDepthwiseConv2DWithBiasAndRelu", input=input, filter=filter,
      bias=bias, min_input=min_input, max_input=max_input,
      min_filter=min_filter, max_filter=max_filter, strides=strides,
      padding=padding, out_type=out_type, dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back from the created op so recorded gradients see the
  # canonicalized values.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"))
  _execute.record_gradient(
      "QuantizedDepthwiseConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result)
  return _result
# CamelCase raw-op alias; exported below as
# tf.raw_ops.QuantizedDepthwiseConv2DWithBiasAndRelu.
def QuantizedDepthwiseConv2DWithBiasAndRelu(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation; arguments are
  # forwarded by keyword.
  return quantized_depthwise_conv2d_with_bias_and_relu(input=input, filter=filter, bias=bias, min_input=min_input, max_input=max_input, min_filter=min_filter, max_filter=max_filter, strides=strides, padding=padding, out_type=out_type, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, force keyword-only calling, and register it under tf.raw_ops.
QuantizedDepthwiseConv2DWithBiasAndRelu.__doc__ = quantized_depthwise_conv2d_with_bias_and_relu.__doc__
QuantizedDepthwiseConv2DWithBiasAndRelu = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedDepthwiseConv2DWithBiasAndRelu))
tf_export("raw_ops.QuantizedDepthwiseConv2DWithBiasAndRelu")(QuantizedDepthwiseConv2DWithBiasAndRelu)
def quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_depthwise_conv2d_with_bias_and_relu

  Converts all inputs to tensors in Python, infers the attr dtypes, and
  executes the op eagerly via ``_execute.execute``.
  """
  # NOTE(review): `dilations` previously defaulted to the shared mutable list
  # `[1, 1, 1, 1]`; the None sentinel is behavior-identical because None is
  # normalized below before use.
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attribute arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.qint32
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Infer the quantized dtypes from the tensors; bias and range limits are
  # float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  bias = _ops.convert_to_tensor(bias, _dtypes.float32)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
            out_type, "strides", strides, "padding", padding, "dilations",
            dilations)
  # The op produces 3 outputs: (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedDepthwiseConv2DWithBiasAndRelu", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedDepthwiseConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result)
  return _result
# Structured result for QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize:
# the requantized output tensor plus the float range
# [min_output, max_output] its values represent.
_quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_outputs = ["output",
                                                                         "min_output",
                                                                         "max_output"]
_QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput = _collections.namedtuple(
    "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
    _quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_outputs)
def quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], name=None):
  r"""Computes quantized depthwise Conv2D with Bias, Relu and Requantize.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original input tensor.
    filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The original filter tensor.
    bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
      The original bias tensor.
    min_input: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    min_filter: A `Tensor` of type `float32`.
      The float value that the minimum quantized filter value represents.
    max_filter: A `Tensor` of type `float32`.
      The float value that the maximum quantized filter value represents.
    min_freezed_output: A `Tensor` of type `float32`.
      The minimum float value of the output tensor.
    max_freezed_output: A `Tensor` of type `float32`.
      The maximum float value of the output tensor.
    strides: A list of `ints`. List of stride values.
    padding: A `string` from: `"SAME", "VALID"`.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
      The type of the output.
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      List of dilation values.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).

    output: A `Tensor` of type `out_type`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Fast path: dispatch straight to the C++ eager runtime. The positional
    # argument order here must match the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name,
        _ctx.post_execution_callbacks, input, filter, bias, min_input,
        max_input, min_filter, max_filter, min_freezed_output,
        max_freezed_output, "out_type", out_type, "strides", strides,
        "padding", padding, "dilations", dilations)
      _result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow path: convert inputs in Python, then execute eagerly.
        return quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(
            input, filter, bias, min_input, max_input, min_filter, max_filter,
            min_freezed_output, max_freezed_output, out_type=out_type,
            strides=strides, padding=padding, dilations=dilations, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Validate and canonicalize attributes before building the graph op.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  _, _, _op = _op_def_lib._apply_op_helper(
      "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", input=input,
      filter=filter, bias=bias, min_input=min_input, max_input=max_input,
      min_filter=min_filter, max_filter=max_filter,
      min_freezed_output=min_freezed_output,
      max_freezed_output=max_freezed_output, strides=strides,
      padding=padding, out_type=out_type, dilations=dilations, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Read the attrs back from the created op so recorded gradients see the
  # canonicalized values.
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
            _op._get_attr_type("Tfilter"), "Tbias",
            _op._get_attr_type("Tbias"), "out_type",
            _op._get_attr_type("out_type"), "strides",
            _op.get_attr("strides"), "padding", _op.get_attr("padding"),
            "dilations", _op.get_attr("dilations"))
  _execute.record_gradient(
      "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
  return _result
# CamelCase raw-op alias; exported below as
# tf.raw_ops.QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.
def QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], name=None):
  # Thin pass-through to the snake_case implementation; arguments are
  # forwarded by keyword.
  return quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(input=input, filter=filter, bias=bias, min_input=min_input, max_input=max_input, min_filter=min_filter, max_filter=max_filter, min_freezed_output=min_freezed_output, max_freezed_output=max_freezed_output, strides=strides, padding=padding, out_type=out_type, dilations=dilations, name=name)
# Share the implementation's docstring, hide the alias from generated API
# docs, force keyword-only calling, and register it under tf.raw_ops.
QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.__doc__ = quantized_depthwise_conv2d_with_bias_and_relu_and_requantize.__doc__
QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize))
tf_export("raw_ops.QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize")(QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize)
def quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=None, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_depthwise_conv2d_with_bias_and_relu_and_requantize

  Converts all inputs to tensors in Python, infers the attr dtypes, and
  executes the op eagerly via ``_execute.execute``.
  """
  # NOTE(review): `dilations` previously defaulted to the shared mutable list
  # `[1, 1, 1, 1]`; the None sentinel is behavior-identical because None is
  # normalized below before use.
  _ctx = ctx if ctx else _context.context()
  # Validate and canonicalize the attribute arguments.
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  if dilations is None:
    dilations = [1, 1, 1, 1]
  if not isinstance(dilations, (list, tuple)):
    raise TypeError(
        "Expected list for 'dilations' argument to "
        "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations)
  dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
  # Infer the quantized dtypes from the tensors; range limits are float32.
  # Tbias is inferred separately because bias may be float32 or qint32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], _ctx)
  _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
  max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
  min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
  max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
  _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
  _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
            _attr_Tbias, "out_type", out_type, "strides", strides, "padding",
            padding, "dilations", dilations)
  # The op produces 3 outputs: (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
                             3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
  return _result
# Structured result for QuantizedMatMulWithBias: the quantized product plus the
# float range [min_out, max_out] its values represent.
_quantized_mat_mul_with_bias_outputs = ["out", "min_out", "max_out"]
_QuantizedMatMulWithBiasOutput = _collections.namedtuple(
    "QuantizedMatMulWithBias", _quantized_mat_mul_with_bias_outputs)
def quantized_mat_mul_with_bias(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
  r"""Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
  add.

  The inputs must be two-dimensional matrices and 1D bias vector. And the inner
  dimension of `a` (after being transposed if `transpose_a` is non-zero) must
  match the outer dimension of `b` (after being transposed if `transposed_b` is
  non-zero). Then do broadcast add operation with bias values on the matrix
  multiplication result. The bias size must match inner dimension of `b`.

  Args:
    a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
    b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
      A 1D bias tensor with size matching inner dimension of `b` (after being
      transposed if `transposed_b` is non-zero).
    min_a: A `Tensor` of type `float32`.
      The float value that the lowest quantized `a` value represents.
    max_a: A `Tensor` of type `float32`.
      The float value that the highest quantized `a` value represents.
    min_b: A `Tensor` of type `float32`.
      The float value that the lowest quantized `b` value represents.
    max_b: A `Tensor` of type `float32`.
      The float value that the highest quantized `b` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    transpose_a: An optional `bool`. Defaults to `False`.
      If true, `a` is transposed before multiplication.
    transpose_b: An optional `bool`. Defaults to `False`.
      If true, `b` is transposed before multiplication.
    input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`.
      Input data quantization mode. Either MIN_FIRST(default) or SCALED.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out, min_out, max_out).

    out: A `Tensor` of type `Toutput`.
    min_out: A `Tensor` of type `float32`.
    max_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path: dispatch straight to the C layer. Attrs are passed as
    # alternating name/value pairs, so this argument order is protocol-critical.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedMatMulWithBias", name, _ctx.post_execution_callbacks, a, b,
        bias, min_a, max_a, min_b, max_b, "Toutput", Toutput, "transpose_a",
        transpose_a, "transpose_b", transpose_b, "input_quant_mode",
        input_quant_mode)
      _result = _QuantizedMatMulWithBiasOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        # Slow eager path (inputs need conversion before execution).
        return quantized_mat_mul_with_bias_eager_fallback(
            a, b, bias, min_a, max_a, min_b, max_b, Toutput=Toutput,
            transpose_a=transpose_a, transpose_b=transpose_b,
            input_quant_mode=input_quant_mode, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Normalize attr defaults/values before building the graph node.
  if Toutput is None:
    Toutput = _dtypes.qint32
  Toutput = _execute.make_type(Toutput, "Toutput")
  if transpose_a is None:
    transpose_a = False
  transpose_a = _execute.make_bool(transpose_a, "transpose_a")
  if transpose_b is None:
    transpose_b = False
  transpose_b = _execute.make_bool(transpose_b, "transpose_b")
  if input_quant_mode is None:
    input_quant_mode = "MIN_FIRST"
  input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMatMulWithBias", a=a, b=b, bias=bias, min_a=min_a,
                                   max_a=max_a, min_b=min_b, max_b=max_b,
                                   Toutput=Toutput, transpose_a=transpose_a,
                                   transpose_b=transpose_b,
                                   input_quant_mode=input_quant_mode,
                                   name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # Resolved attr values are read back from the created op for gradient replay.
  _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
            "Tbias", _op._get_attr_type("Tbias"), "Toutput",
            _op._get_attr_type("Toutput"), "transpose_a",
            _op.get_attr("transpose_a"), "transpose_b",
            _op.get_attr("transpose_b"), "input_quant_mode",
            _op.get_attr("input_quant_mode"))
  _execute.record_gradient(
      "QuantizedMatMulWithBias", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedMatMulWithBiasOutput._make(_result)
  return _result
def QuantizedMatMulWithBias(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
  # Raw-op endpoint: forwards every argument, unchanged, to the snake_case op.
  return quantized_mat_mul_with_bias(
      a, b, bias, min_a, max_a, min_b, max_b,
      Toutput=Toutput, transpose_a=transpose_a, transpose_b=transpose_b,
      input_quant_mode=input_quant_mode, name=name)
# Share the wrapped op's docstring, hide from generated docs, force kwargs,
# and export under the raw_ops namespace.
QuantizedMatMulWithBias.__doc__ = quantized_mat_mul_with_bias.__doc__
QuantizedMatMulWithBias = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedMatMulWithBias))
tf_export("raw_ops.QuantizedMatMulWithBias")(QuantizedMatMulWithBias)
def quantized_mat_mul_with_bias_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_mat_mul_with_bias
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize attr defaults/values.
  if Toutput is None:
    Toutput = _dtypes.qint32
  Toutput = _execute.make_type(Toutput, "Toutput")
  if transpose_a is None:
    transpose_a = False
  transpose_a = _execute.make_bool(transpose_a, "transpose_a")
  if transpose_b is None:
    transpose_b = False
  transpose_b = _execute.make_bool(transpose_b, "transpose_b")
  if input_quant_mode is None:
    input_quant_mode = "MIN_FIRST"
  input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
  # Convert inputs to eager tensors, inferring the T1/T2/Tbias type attrs.
  _attr_T1, (a,) = _execute.args_to_matching_eager([a], _ctx)
  _attr_T2, (b,) = _execute.args_to_matching_eager([b], _ctx)
  _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
  max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
  min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
  max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
  _inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b]
  _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput",
  Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b,
  "input_quant_mode", input_quant_mode)
  # 3 == number of op outputs (out, min_out, max_out).
  _result = _execute.execute(b"QuantizedMatMulWithBias", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedMatMulWithBias", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedMatMulWithBiasOutput._make(_result)
  return _result
# Named result tuple for the QuantizedMatMulWithBiasAndRelu op.
_quantized_mat_mul_with_bias_and_relu_outputs = ["out", "min_out", "max_out"]
_QuantizedMatMulWithBiasAndReluOutput = _collections.namedtuple(
    "QuantizedMatMulWithBiasAndRelu",
    _quantized_mat_mul_with_bias_and_relu_outputs)
def quantized_mat_mul_with_bias_and_relu(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
  r"""Perform a quantized matrix multiplication of `a` by the matrix `b` with bias
  add and relu fusion.

  The inputs must be two-dimensional matrices and 1D bias vector. And the inner
  dimension of `a` (after being transposed if `transpose_a` is non-zero) must
  match the outer dimension of `b` (after being transposed if `transposed_b` is
  non-zero). Then do broadcast add operation with bias values on the matrix
  multiplication result. The bias size must match inner dimension of `b`. Then do
  relu activation to get non-negative result.

  Args:
    a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
    b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
    bias: A `Tensor` of type `float32`.
      A 1D bias tensor with size matching with inner dimension of `b` (after being
      transposed if `transposed_b` is non-zero).
    min_a: A `Tensor` of type `float32`.
      The float value that the lowest quantized `a` value represents.
    max_a: A `Tensor` of type `float32`.
      The float value that the highest quantized `a` value represents.
    min_b: A `Tensor` of type `float32`.
      The float value that the lowest quantized `b` value represents.
    max_b: A `Tensor` of type `float32`.
      The float value that the highest quantized `b` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    transpose_a: An optional `bool`. Defaults to `False`.
      If true, `a` is transposed before multiplication.
    transpose_b: An optional `bool`. Defaults to `False`.
      If true, `b` is transposed before multiplication.
    input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`.
      Input data quantization mode. Either MIN_FIRST(default) or SCALED.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out, min_out, max_out).

    out: A `Tensor` of type `Toutput`.
    min_out: A `Tensor` of type `float32`.
    max_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path; positional attr name/value interleaving is
    # protocol-critical.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedMatMulWithBiasAndRelu", name, _ctx.post_execution_callbacks,
        a, b, bias, min_a, max_a, min_b, max_b, "Toutput", Toutput,
        "transpose_a", transpose_a, "transpose_b", transpose_b,
        "input_quant_mode", input_quant_mode)
      _result = _QuantizedMatMulWithBiasAndReluOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_mat_mul_with_bias_and_relu_eager_fallback(
            a, b, bias, min_a, max_a, min_b, max_b, Toutput=Toutput,
            transpose_a=transpose_a, transpose_b=transpose_b,
            input_quant_mode=input_quant_mode, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Normalize attr defaults/values before building the graph node.
  if Toutput is None:
    Toutput = _dtypes.qint32
  Toutput = _execute.make_type(Toutput, "Toutput")
  if transpose_a is None:
    transpose_a = False
  transpose_a = _execute.make_bool(transpose_a, "transpose_a")
  if transpose_b is None:
    transpose_b = False
  transpose_b = _execute.make_bool(transpose_b, "transpose_b")
  if input_quant_mode is None:
    input_quant_mode = "MIN_FIRST"
  input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMatMulWithBiasAndRelu", a=a, b=b, bias=bias, min_a=min_a,
                                          max_a=max_a, min_b=min_b,
                                          max_b=max_b, Toutput=Toutput,
                                          transpose_a=transpose_a,
                                          transpose_b=transpose_b,
                                          input_quant_mode=input_quant_mode,
                                          name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  # No Tbias attr here: this op's bias input is fixed to float32.
  _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
            "Toutput", _op._get_attr_type("Toutput"), "transpose_a",
            _op.get_attr("transpose_a"), "transpose_b",
            _op.get_attr("transpose_b"), "input_quant_mode",
            _op.get_attr("input_quant_mode"))
  _execute.record_gradient(
      "QuantizedMatMulWithBiasAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedMatMulWithBiasAndReluOutput._make(_result)
  return _result
def QuantizedMatMulWithBiasAndRelu(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
  # Raw-op endpoint: forwards every argument, unchanged, to the snake_case op.
  return quantized_mat_mul_with_bias_and_relu(
      a, b, bias, min_a, max_a, min_b, max_b,
      Toutput=Toutput, transpose_a=transpose_a, transpose_b=transpose_b,
      input_quant_mode=input_quant_mode, name=name)
# Share the wrapped op's docstring, hide from generated docs, force kwargs,
# and export under the raw_ops namespace.
QuantizedMatMulWithBiasAndRelu.__doc__ = quantized_mat_mul_with_bias_and_relu.__doc__
QuantizedMatMulWithBiasAndRelu = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedMatMulWithBiasAndRelu))
tf_export("raw_ops.QuantizedMatMulWithBiasAndRelu")(QuantizedMatMulWithBiasAndRelu)
def quantized_mat_mul_with_bias_and_relu_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_mat_mul_with_bias_and_relu
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize attr defaults/values.
  if Toutput is None:
    Toutput = _dtypes.qint32
  Toutput = _execute.make_type(Toutput, "Toutput")
  if transpose_a is None:
    transpose_a = False
  transpose_a = _execute.make_bool(transpose_a, "transpose_a")
  if transpose_b is None:
    transpose_b = False
  transpose_b = _execute.make_bool(transpose_b, "transpose_b")
  if input_quant_mode is None:
    input_quant_mode = "MIN_FIRST"
  input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
  # Convert inputs; bias is fixed float32 for this fused op (no Tbias attr).
  _attr_T1, (a,) = _execute.args_to_matching_eager([a], _ctx)
  _attr_T2, (b,) = _execute.args_to_matching_eager([b], _ctx)
  bias = _ops.convert_to_tensor(bias, _dtypes.float32)
  min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
  max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
  min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
  max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
  _inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b]
  _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput, "transpose_a",
  transpose_a, "transpose_b", transpose_b, "input_quant_mode",
  input_quant_mode)
  # 3 == number of op outputs (out, min_out, max_out).
  _result = _execute.execute(b"QuantizedMatMulWithBiasAndRelu", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedMatMulWithBiasAndRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedMatMulWithBiasAndReluOutput._make(_result)
  return _result
# Named result tuple for the QuantizedMatMulWithBiasAndReluAndRequantize op.
_quantized_mat_mul_with_bias_and_relu_and_requantize_outputs = ["out",
                                                               "min_out",
                                                               "max_out"]
_QuantizedMatMulWithBiasAndReluAndRequantizeOutput = _collections.namedtuple(
    "QuantizedMatMulWithBiasAndReluAndRequantize",
    _quantized_mat_mul_with_bias_and_relu_and_requantize_outputs)
def quantized_mat_mul_with_bias_and_relu_and_requantize(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput=_dtypes.quint8, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
  r"""Perform a quantized matrix multiplication of `a` by the matrix `b` with bias
  add and relu and requantize fusion.

  The inputs must be two-dimensional matrices and 1D bias vector. And the inner
  dimension of `a` (after being transposed if `transpose_a` is non-zero) must
  match the outer dimension of `b` (after being transposed if `transposed_b` is
  non-zero). Then do broadcast add operation with bias values on the matrix
  multiplication result. The bias size must match inner dimension of `b`. Then do
  relu activation to get non-negative result. Then do requantize operation to get
  final uint8 result.

  Args:
    a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
    b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
    bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
      A 1D bias tensor with size matching with inner dimension of `b` (after being
      transposed if `transposed_b` is non-zero).
    min_a: A `Tensor` of type `float32`.
      The float value that the lowest quantized `a` value represents.
    max_a: A `Tensor` of type `float32`.
      The float value that the highest quantized `a` value represents.
    min_b: A `Tensor` of type `float32`.
      The float value that the lowest quantized `b` value represents.
    max_b: A `Tensor` of type `float32`.
      The float value that the highest quantized `b` value represents.
    min_freezed_output: A `Tensor` of type `float32`.
      The float value that the highest quantized output value after requantize.
    max_freezed_output: A `Tensor` of type `float32`.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    transpose_a: An optional `bool`. Defaults to `False`.
      If true, `a` is transposed before multiplication.
    transpose_b: An optional `bool`. Defaults to `False`.
      If true, `b` is transposed before multiplication.
    input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`.
      Input data quantization mode. Either MIN_FIRST(default) or SCALED.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out, min_out, max_out).

    out: A `Tensor` of type `Toutput`.
    min_out: A `Tensor` of type `float32`.
    max_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path; positional attr name/value interleaving is
    # protocol-critical.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedMatMulWithBiasAndReluAndRequantize", name,
        _ctx.post_execution_callbacks, a, b, bias, min_a, max_a, min_b, max_b,
        min_freezed_output, max_freezed_output, "Toutput", Toutput,
        "transpose_a", transpose_a, "transpose_b", transpose_b,
        "input_quant_mode", input_quant_mode)
      _result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(
            a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output,
            max_freezed_output, Toutput=Toutput, transpose_a=transpose_a,
            transpose_b=transpose_b, input_quant_mode=input_quant_mode,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Normalize attr defaults/values before building the graph node.
  if Toutput is None:
    Toutput = _dtypes.quint8
  Toutput = _execute.make_type(Toutput, "Toutput")
  if transpose_a is None:
    transpose_a = False
  transpose_a = _execute.make_bool(transpose_a, "transpose_a")
  if transpose_b is None:
    transpose_b = False
  transpose_b = _execute.make_bool(transpose_b, "transpose_b")
  if input_quant_mode is None:
    input_quant_mode = "MIN_FIRST"
  input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMatMulWithBiasAndReluAndRequantize", a=a, b=b, bias=bias,
                                                       min_a=min_a,
                                                       max_a=max_a,
                                                       min_b=min_b,
                                                       max_b=max_b,
                                                       min_freezed_output=min_freezed_output,
                                                       max_freezed_output=max_freezed_output,
                                                       Toutput=Toutput,
                                                       transpose_a=transpose_a,
                                                       transpose_b=transpose_b,
                                                       input_quant_mode=input_quant_mode,
                                                       name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
            "Tbias", _op._get_attr_type("Tbias"), "Toutput",
            _op._get_attr_type("Toutput"), "transpose_a",
            _op.get_attr("transpose_a"), "transpose_b",
            _op.get_attr("transpose_b"), "input_quant_mode",
            _op.get_attr("input_quant_mode"))
  _execute.record_gradient(
      "QuantizedMatMulWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result)
  return _result
def QuantizedMatMulWithBiasAndReluAndRequantize(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput=_dtypes.quint8, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
  # Raw-op endpoint: forwards every argument, unchanged, to the snake_case op.
  return quantized_mat_mul_with_bias_and_relu_and_requantize(
      a, b, bias, min_a, max_a, min_b, max_b,
      min_freezed_output, max_freezed_output,
      Toutput=Toutput, transpose_a=transpose_a, transpose_b=transpose_b,
      input_quant_mode=input_quant_mode, name=name)
# Share the wrapped op's docstring, hide from generated docs, force kwargs,
# and export under the raw_ops namespace.
QuantizedMatMulWithBiasAndReluAndRequantize.__doc__ = quantized_mat_mul_with_bias_and_relu_and_requantize.__doc__
QuantizedMatMulWithBiasAndReluAndRequantize = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedMatMulWithBiasAndReluAndRequantize))
tf_export("raw_ops.QuantizedMatMulWithBiasAndReluAndRequantize")(QuantizedMatMulWithBiasAndReluAndRequantize)
def quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput=_dtypes.quint8, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_mat_mul_with_bias_and_relu_and_requantize
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize attr defaults/values.
  if Toutput is None:
    Toutput = _dtypes.quint8
  Toutput = _execute.make_type(Toutput, "Toutput")
  if transpose_a is None:
    transpose_a = False
  transpose_a = _execute.make_bool(transpose_a, "transpose_a")
  if transpose_b is None:
    transpose_b = False
  transpose_b = _execute.make_bool(transpose_b, "transpose_b")
  if input_quant_mode is None:
    input_quant_mode = "MIN_FIRST"
  input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
  # Convert inputs to eager tensors, inferring the T1/T2/Tbias type attrs.
  _attr_T1, (a,) = _execute.args_to_matching_eager([a], _ctx)
  _attr_T2, (b,) = _execute.args_to_matching_eager([b], _ctx)
  _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], _ctx)
  min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
  max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
  min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
  max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
  min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
  max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
  _inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output]
  _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput",
  Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b,
  "input_quant_mode", input_quant_mode)
  # 3 == number of op outputs (out, min_out, max_out).
  _result = _execute.execute(b"QuantizedMatMulWithBiasAndReluAndRequantize",
                             3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizedMatMulWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result)
  return _result
# Named result tuple for the QuantizedMaxPool op: (output, min_output, max_output).
_quantized_max_pool_outputs = ["output", "min_output", "max_output"]
_QuantizedMaxPoolOutput = _collections.namedtuple(
    "QuantizedMaxPool", _quantized_max_pool_outputs)
def quantized_max_pool(input, min_input, max_input, ksize, strides, padding, name=None):
  r"""Produces the max pool of the input tensor for quantized types.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
    min_input: A `Tensor` of type `float32`.
      The float value that the lowest quantized input value represents.
    max_input: A `Tensor` of type `float32`.
      The float value that the highest quantized input value represents.
    ksize: A list of `ints`.
      The size of the window for each dimension of the input tensor.
      The length must be 4 to match the number of dimensions of the input.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      tensor. The length must be 4 to match the number of dimensions of the input.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, min_output, max_output).

    output: A `Tensor`. Has the same type as `input`.
    min_output: A `Tensor` of type `float32`.
    max_output: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path; positional attr name/value interleaving is
    # protocol-critical.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedMaxPool", name, _ctx.post_execution_callbacks, input,
        min_input, max_input, "ksize", ksize, "strides", strides, "padding",
        padding)
      _result = _QuantizedMaxPoolOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_max_pool_eager_fallback(
            input, min_input, max_input, ksize=ksize, strides=strides,
            padding=padding, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Validate and normalize the list-valued attrs before graph construction.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'quantized_max_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_max_pool' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMaxPool", input=input, min_input=min_input,
                            max_input=max_input, ksize=ksize, strides=strides,
                            padding=padding, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"),
            "strides", _op.get_attr("strides"), "padding",
            _op.get_attr("padding"))
  _execute.record_gradient(
      "QuantizedMaxPool", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedMaxPoolOutput._make(_result)
  return _result
def QuantizedMaxPool(input, min_input, max_input, ksize, strides, padding, name=None):
  # Raw-op endpoint: forwards every argument, unchanged, to the snake_case op.
  return quantized_max_pool(
      input, min_input, max_input,
      ksize=ksize, strides=strides, padding=padding, name=name)
# Share the wrapped op's docstring, hide from generated docs, force kwargs,
# and export under the raw_ops namespace.
QuantizedMaxPool.__doc__ = quantized_max_pool.__doc__
QuantizedMaxPool = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedMaxPool))
tf_export("raw_ops.QuantizedMaxPool")(QuantizedMaxPool)
def quantized_max_pool_eager_fallback(input, min_input, max_input, ksize, strides, padding, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_max_pool
  """
  _ctx = ctx if ctx else _context.context()
  # Validate and normalize the list-valued attrs.
  if not isinstance(ksize, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksize' argument to "
        "'quantized_max_pool' Op, not %r." % ksize)
  ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'quantized_max_pool' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  # Convert inputs to eager tensors, inferring the T type attr from `input`.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
  max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
  _inputs_flat = [input, min_input, max_input]
  _attrs = ("T", _attr_T, "ksize", ksize, "strides", strides, "padding",
  padding)
  # 3 == number of op outputs (output, min_output, max_output).
  _result = _execute.execute(b"QuantizedMaxPool", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedMaxPool", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedMaxPoolOutput._make(_result)
  return _result
# Named result tuple for the QuantizedRelu op.
_quantized_relu_outputs = ["activations", "min_activations",
                          "max_activations"]
_QuantizedReluOutput = _collections.namedtuple(
    "QuantizedRelu", _quantized_relu_outputs)
def quantized_relu(features, min_features, max_features, out_type=_dtypes.quint8, name=None):
  r"""Computes Quantized Rectified Linear: `max(features, 0)`

  Args:
    features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_features: A `Tensor` of type `float32`.
      The float value that the lowest quantized value represents.
    max_features: A `Tensor` of type `float32`.
      The float value that the highest quantized value represents.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (activations, min_activations, max_activations).

    activations: A `Tensor` of type `out_type`.
    min_activations: A `Tensor` of type `float32`.
    max_activations: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager fast path; positional attr name/value interleaving is
    # protocol-critical.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedRelu", name, _ctx.post_execution_callbacks, features,
        min_features, max_features, "out_type", out_type)
      _result = _QuantizedReluOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_relu_eager_fallback(
            features, min_features, max_features, out_type=out_type,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  # Normalize the out_type attr before building the graph node.
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedRelu", features=features, min_features=min_features,
                         max_features=max_features, out_type=out_type,
                         name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type",
            _op._get_attr_type("out_type"))
  _execute.record_gradient(
      "QuantizedRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedReluOutput._make(_result)
  return _result
def QuantizedRelu(features, min_features, max_features, out_type=_dtypes.quint8, name=None):
  # Raw-op endpoint: forwards every argument, unchanged, to the snake_case op.
  return quantized_relu(
      features, min_features, max_features,
      out_type=out_type, name=name)
# Share the wrapped op's docstring, hide from generated docs, force kwargs,
# and export under the raw_ops namespace.
QuantizedRelu.__doc__ = quantized_relu.__doc__
QuantizedRelu = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedRelu))
tf_export("raw_ops.QuantizedRelu")(QuantizedRelu)
def quantized_relu_eager_fallback(features, min_features, max_features, out_type=_dtypes.quint8, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function quantized_relu
  """
  _ctx = ctx if ctx else _context.context()
  # Normalize the out_type attr.
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  # Convert inputs to eager tensors, inferring Tinput from `features`.
  _attr_Tinput, (features,) = _execute.args_to_matching_eager([features], _ctx)
  min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
  max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
  _inputs_flat = [features, min_features, max_features]
  _attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
  # 3 == number of op outputs (activations, min_activations, max_activations).
  _result = _execute.execute(b"QuantizedRelu", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizedRelu", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedReluOutput._make(_result)
  return _result
# Named result tuple for the QuantizedRelu6 op.
_quantized_relu6_outputs = ["activations", "min_activations",
                           "max_activations"]
_QuantizedRelu6Output = _collections.namedtuple(
    "QuantizedRelu6", _quantized_relu6_outputs)
def quantized_relu6(features, min_features, max_features, out_type=_dtypes.quint8, name=None):
  r"""Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
  Args:
    features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_features: A `Tensor` of type `float32`.
      The float value that the lowest quantized value represents.
    max_features: A `Tensor` of type `float32`.
      The float value that the highest quantized value represents.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (activations, min_activations, max_activations).
    activations: A `Tensor` of type `out_type`.
    min_activations: A `Tensor` of type `float32`.
    max_activations: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedRelu6", name, _ctx.post_execution_callbacks, features,
        min_features, max_features, "out_type", out_type)
      _result = _QuantizedRelu6Output._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_relu6_eager_fallback(
            features, min_features, max_features, out_type=out_type,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedRelu6", features=features, min_features=min_features,
                          max_features=max_features, out_type=out_type,
                          name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type",
            _op._get_attr_type("out_type"))
  _execute.record_gradient(
      "QuantizedRelu6", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedRelu6Output._make(_result)
  return _result
def QuantizedRelu6(features, min_features, max_features, out_type=_dtypes.quint8, name=None):
  # Thin raw-op alias: forwards every argument to quantized_relu6 unchanged.
  return quantized_relu6(features=features, min_features=min_features, max_features=max_features, out_type=out_type, name=name)
QuantizedRelu6.__doc__ = quantized_relu6.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.QuantizedRelu6.
QuantizedRelu6 = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedRelu6))
tf_export("raw_ops.QuantizedRelu6")(QuantizedRelu6)
def quantized_relu6_eager_fallback(features, min_features, max_features, out_type=_dtypes.quint8, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_relu6.

  Builds the inputs/attrs by hand and executes QuantizedRelu6 directly.
  """
  eager_ctx = ctx or _context.context()
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  # Infer the Tinput attr from the features tensor.
  attr_tinput, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  # The quantization-range inputs are converted to float32 tensors.
  min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
  max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
  flat_inputs = [features, min_features, max_features]
  op_attrs = ("Tinput", attr_tinput, "out_type", out_type)
  tensors = _execute.execute(b"QuantizedRelu6", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QuantizedRelu6", flat_inputs, op_attrs, tensors, name)
  return _QuantizedRelu6Output._make(tensors)
# Field names and namedtuple type for the QuantizedReluX op's result.
_quantized_relu_x_outputs = ["activations", "min_activations",
                            "max_activations"]
_QuantizedReluXOutput = _collections.namedtuple(
    "QuantizedReluX", _quantized_relu_x_outputs)
def quantized_relu_x(features, max_value, min_features, max_features, out_type=_dtypes.quint8, name=None):
  r"""Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
  Args:
    features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    max_value: A `Tensor` of type `float32`.
    min_features: A `Tensor` of type `float32`.
      The float value that the lowest quantized value represents.
    max_features: A `Tensor` of type `float32`.
      The float value that the highest quantized value represents.
    out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (activations, min_activations, max_activations).
    activations: A `Tensor` of type `out_type`.
    min_activations: A `Tensor` of type `float32`.
    max_activations: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "QuantizedReluX", name, _ctx.post_execution_callbacks, features,
        max_value, min_features, max_features, "out_type", out_type)
      _result = _QuantizedReluXOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return quantized_relu_x_eager_fallback(
            features, max_value, min_features, max_features,
            out_type=out_type, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedReluX", features=features, max_value=max_value,
                          min_features=min_features,
                          max_features=max_features, out_type=out_type,
                          name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type",
            _op._get_attr_type("out_type"))
  _execute.record_gradient(
      "QuantizedReluX", _inputs_flat, _attrs, _result, name)
  _result = _QuantizedReluXOutput._make(_result)
  return _result
def QuantizedReluX(features, max_value, min_features, max_features, out_type=_dtypes.quint8, name=None):
  # Thin raw-op alias: forwards every argument to quantized_relu_x unchanged.
  return quantized_relu_x(features=features, max_value=max_value, min_features=min_features, max_features=max_features, out_type=out_type, name=name)
QuantizedReluX.__doc__ = quantized_relu_x.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.QuantizedReluX.
QuantizedReluX = _doc_controls.do_not_generate_docs(_kwarg_only(QuantizedReluX))
tf_export("raw_ops.QuantizedReluX")(QuantizedReluX)
def quantized_relu_x_eager_fallback(features, max_value, min_features, max_features, out_type=_dtypes.quint8, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_relu_x.

  Builds the inputs/attrs by hand and executes QuantizedReluX directly.
  """
  eager_ctx = ctx or _context.context()
  if out_type is None:
    out_type = _dtypes.quint8
  out_type = _execute.make_type(out_type, "out_type")
  # Infer the Tinput attr from the features tensor.
  attr_tinput, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  # The clamp value and quantization-range inputs are float32 tensors.
  max_value = _ops.convert_to_tensor(max_value, _dtypes.float32)
  min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
  max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
  flat_inputs = [features, max_value, min_features, max_features]
  op_attrs = ("Tinput", attr_tinput, "out_type", out_type)
  tensors = _execute.execute(b"QuantizedReluX", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QuantizedReluX", flat_inputs, op_attrs, tensors, name)
  return _QuantizedReluXOutput._make(tensors)
@_dispatch.add_dispatch_list
@tf_export('nn.relu')
def relu(features, name=None):
  r"""Computes rectified linear: `max(features, 0)`.
  Args:
    features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Relu",
        name, _ctx.post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      try:
        return relu_eager_fallback(
            features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Let registered dispatch handlers try the call before re-raising.
        result = _dispatch.dispatch(
              relu, features=features, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Relu", features=features, name=name)
  except (TypeError, ValueError):
    # Let registered dispatch handlers try the call before re-raising.
    result = _dispatch.dispatch(
          relu, features=features, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Relu", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Relu(features, name=None):
  # Thin raw-op alias: forwards every argument to relu unchanged.
  return relu(features=features, name=name)
Relu.__doc__ = relu.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.Relu.
Relu = _doc_controls.do_not_generate_docs(_kwarg_only(Relu))
tf_export("raw_ops.Relu")(Relu)
def relu_eager_fallback(features, name=None, ctx=None):
  r"""Eager-mode slow path for relu.

  Builds the inputs/attrs by hand and executes Relu directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the T attr from the features tensor.
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  flat_inputs = [features]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"Relu", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Relu", flat_inputs, op_attrs, tensors, name)
  result, = tensors
  return result
def relu6(features, name=None):
  r"""Computes rectified linear 6: `min(max(features, 0), 6)`.
  Args:
    features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Relu6",
        name, _ctx.post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      try:
        return relu6_eager_fallback(
            features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Relu6", features=features, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Relu6", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Relu6(features, name=None):
  # Thin raw-op alias: forwards every argument to relu6 unchanged.
  return relu6(features=features, name=name)
Relu6.__doc__ = relu6.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.Relu6.
Relu6 = _doc_controls.do_not_generate_docs(_kwarg_only(Relu6))
tf_export("raw_ops.Relu6")(Relu6)
def relu6_eager_fallback(features, name=None, ctx=None):
  r"""Eager-mode slow path for relu6.

  Builds the inputs/attrs by hand and executes Relu6 directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the T attr from the features tensor.
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  flat_inputs = [features]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"Relu6", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Relu6", flat_inputs, op_attrs, tensors, name)
  result, = tensors
  return result
def relu6_grad(gradients, features, name=None):
  r"""Computes rectified linear 6 gradients for a Relu6 operation.
  Args:
    gradients: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The backpropagated gradients to the corresponding Relu6 operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding Relu6 operation, or
      its output; using either one produces the same result.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "Relu6Grad", name, _ctx.post_execution_callbacks, gradients, features)
      return _result
    except _core._FallbackException:
      try:
        return relu6_grad_eager_fallback(
            gradients, features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Relu6Grad", gradients=gradients, features=features, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Relu6Grad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Relu6Grad(gradients, features, name=None):
  # Thin raw-op alias: forwards every argument to relu6_grad unchanged.
  return relu6_grad(gradients=gradients, features=features, name=name)
Relu6Grad.__doc__ = relu6_grad.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.Relu6Grad.
Relu6Grad = _doc_controls.do_not_generate_docs(_kwarg_only(Relu6Grad))
tf_export("raw_ops.Relu6Grad")(Relu6Grad)
def relu6_grad_eager_fallback(gradients, features, name=None, ctx=None):
  r"""Eager-mode slow path for relu6_grad.

  Builds the inputs/attrs by hand and executes Relu6Grad directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the shared T attr from both inputs.
  attr_t, matched = _execute.args_to_matching_eager([gradients, features], eager_ctx)
  gradients, features = matched
  flat_inputs = [gradients, features]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"Relu6Grad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Relu6Grad", flat_inputs, op_attrs, tensors, name)
  result, = tensors
  return result
def relu_grad(gradients, features, name=None):
  r"""Computes rectified linear gradients for a Relu operation.
  Args:
    gradients: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      The backpropagated gradients to the corresponding Relu operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding Relu operation, OR
      the outputs of that operation (both work equivalently).
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "ReluGrad",
        name, _ctx.post_execution_callbacks, gradients, features)
      return _result
    except _core._FallbackException:
      try:
        return relu_grad_eager_fallback(
            gradients, features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "ReluGrad", gradients=gradients, features=features, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "ReluGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def ReluGrad(gradients, features, name=None):
  # Thin raw-op alias: forwards every argument to relu_grad unchanged.
  return relu_grad(gradients=gradients, features=features, name=name)
ReluGrad.__doc__ = relu_grad.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.ReluGrad.
ReluGrad = _doc_controls.do_not_generate_docs(_kwarg_only(ReluGrad))
tf_export("raw_ops.ReluGrad")(ReluGrad)
def relu_grad_eager_fallback(gradients, features, name=None, ctx=None):
  r"""Eager-mode slow path for relu_grad.

  Builds the inputs/attrs by hand and executes ReluGrad directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the shared T attr from both inputs.
  attr_t, matched = _execute.args_to_matching_eager([gradients, features], eager_ctx)
  gradients, features = matched
  flat_inputs = [gradients, features]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"ReluGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "ReluGrad", flat_inputs, op_attrs, tensors, name)
  result, = tensors
  return result
@_dispatch.add_dispatch_list
@tf_export('nn.selu')
def selu(features, name=None):
  r"""Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
  if < 0, `scale * features` otherwise.
  To be used together with
  `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
  For correct dropout, use `tf.contrib.nn.alpha_dropout`.
  See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Selu",
        name, _ctx.post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      try:
        return selu_eager_fallback(
            features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Let registered dispatch handlers try the call before re-raising.
        result = _dispatch.dispatch(
              selu, features=features, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Selu", features=features, name=name)
  except (TypeError, ValueError):
    # Let registered dispatch handlers try the call before re-raising.
    result = _dispatch.dispatch(
          selu, features=features, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Selu", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Selu(features, name=None):
  # Thin raw-op alias: forwards every argument to selu unchanged.
  return selu(features=features, name=name)
Selu.__doc__ = selu.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.Selu.
Selu = _doc_controls.do_not_generate_docs(_kwarg_only(Selu))
tf_export("raw_ops.Selu")(Selu)
def selu_eager_fallback(features, name=None, ctx=None):
  r"""Eager-mode slow path for selu.

  Builds the inputs/attrs by hand and executes Selu directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the T attr from the features tensor.
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  flat_inputs = [features]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"Selu", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Selu", flat_inputs, op_attrs, tensors, name)
  result, = tensors
  return result
def selu_grad(gradients, outputs, name=None):
  r"""Computes gradients for the scaled exponential linear (Selu) operation.
  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding Selu operation.
    outputs: A `Tensor`. Must have the same type as `gradients`.
      The outputs of the corresponding Selu operation.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "SeluGrad",
        name, _ctx.post_execution_callbacks, gradients, outputs)
      return _result
    except _core._FallbackException:
      try:
        return selu_grad_eager_fallback(
            gradients, outputs, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "SeluGrad", gradients=gradients, outputs=outputs, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "SeluGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def SeluGrad(gradients, outputs, name=None):
  # Thin raw-op alias: forwards every argument to selu_grad unchanged.
  return selu_grad(gradients=gradients, outputs=outputs, name=name)
SeluGrad.__doc__ = selu_grad.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.SeluGrad.
SeluGrad = _doc_controls.do_not_generate_docs(_kwarg_only(SeluGrad))
tf_export("raw_ops.SeluGrad")(SeluGrad)
def selu_grad_eager_fallback(gradients, outputs, name=None, ctx=None):
  r"""Eager-mode slow path for selu_grad.

  Builds the inputs/attrs by hand and executes SeluGrad directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the shared T attr from both inputs.
  attr_t, matched = _execute.args_to_matching_eager([gradients, outputs], eager_ctx)
  gradients, outputs = matched
  flat_inputs = [gradients, outputs]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"SeluGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SeluGrad", flat_inputs, op_attrs, tensors, name)
  result, = tensors
  return result
def softmax(logits, name=None):
  r"""Computes softmax activations.
  For each batch `i` and class `j` we have
  $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      2-D with shape `[batch_size, num_classes]`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `logits`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Softmax",
        name, _ctx.post_execution_callbacks, logits)
      return _result
    except _core._FallbackException:
      try:
        return softmax_eager_fallback(
            logits, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "Softmax", logits=logits, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Softmax", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Softmax(logits, name=None):
  # Thin raw-op alias: forwards every argument to softmax unchanged.
  return softmax(logits=logits, name=name)
Softmax.__doc__ = softmax.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.Softmax.
Softmax = _doc_controls.do_not_generate_docs(_kwarg_only(Softmax))
tf_export("raw_ops.Softmax")(Softmax)
def softmax_eager_fallback(logits, name=None, ctx=None):
  r"""Eager-mode slow path for softmax.

  Builds the inputs/attrs by hand and executes Softmax directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the T attr from the logits tensor.
  attr_t, (logits,) = _execute.args_to_matching_eager([logits], eager_ctx)
  flat_inputs = [logits]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"Softmax", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Softmax", flat_inputs, op_attrs, tensors, name)
  result, = tensors
  return result
# Field names and namedtuple type for the SoftmaxCrossEntropyWithLogits op's result.
_softmax_cross_entropy_with_logits_outputs = ["loss", "backprop"]
_SoftmaxCrossEntropyWithLogitsOutput = _collections.namedtuple(
    "SoftmaxCrossEntropyWithLogits",
    _softmax_cross_entropy_with_logits_outputs)
def softmax_cross_entropy_with_logits(features, labels, name=None):
  r"""Computes softmax cross entropy cost and gradients to backpropagate.
  Inputs are the logits, not probabilities.
  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      batch_size x num_classes matrix
    labels: A `Tensor`. Must have the same type as `features`.
      batch_size x num_classes matrix
      The caller must ensure that each batch of labels represents a valid
      probability distribution.
    name: A name for the operation (optional).
  Returns:
    A tuple of `Tensor` objects (loss, backprop).
    loss: A `Tensor`. Has the same type as `features`.
    backprop: A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "SoftmaxCrossEntropyWithLogits", name, _ctx.post_execution_callbacks,
        features, labels)
      _result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return softmax_cross_entropy_with_logits_eager_fallback(
            features, labels, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "SoftmaxCrossEntropyWithLogits", features=features, labels=labels,
                                         name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "SoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result, name)
  _result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result)
  return _result
def SoftmaxCrossEntropyWithLogits(features, labels, name=None):
  # Thin raw-op alias: forwards every argument to softmax_cross_entropy_with_logits unchanged.
  return softmax_cross_entropy_with_logits(features=features, labels=labels, name=name)
SoftmaxCrossEntropyWithLogits.__doc__ = softmax_cross_entropy_with_logits.__doc__
# Hide from generated docs, force keyword-only calling, export under tf.raw_ops.
SoftmaxCrossEntropyWithLogits = _doc_controls.do_not_generate_docs(_kwarg_only(SoftmaxCrossEntropyWithLogits))
tf_export("raw_ops.SoftmaxCrossEntropyWithLogits")(SoftmaxCrossEntropyWithLogits)
def softmax_cross_entropy_with_logits_eager_fallback(features, labels, name=None, ctx=None):
  r"""Eager-mode slow path for softmax_cross_entropy_with_logits.

  Builds the inputs/attrs by hand and executes SoftmaxCrossEntropyWithLogits
  directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the shared T attr from both inputs.
  attr_t, matched = _execute.args_to_matching_eager([features, labels], eager_ctx)
  features, labels = matched
  flat_inputs = [features, labels]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"SoftmaxCrossEntropyWithLogits", 2,
                             inputs=flat_inputs, attrs=op_attrs, ctx=eager_ctx,
                             name=name)
  _execute.record_gradient(
      "SoftmaxCrossEntropyWithLogits", flat_inputs, op_attrs, tensors, name)
  return _SoftmaxCrossEntropyWithLogitsOutput._make(tensors)
@_dispatch.add_dispatch_list
@tf_export('math.softplus', 'nn.softplus')
def softplus(features, name=None):
  r"""Computes softplus: `log(exp(features) + 1)`.
  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Softplus",
        name, _ctx.post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      try:
        return softplus_eager_fallback(
            features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Let registered dispatch handlers try the call before re-raising.
        result = _dispatch.dispatch(
              softplus, features=features, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Softplus", features=features, name=name)
  except (TypeError, ValueError):
    # Let registered dispatch handlers try the call before re-raising.
    result = _dispatch.dispatch(
          softplus, features=features, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Softplus", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def Softplus(features, name=None):
  # Thin raw-op alias: forwards every argument to softplus unchanged.
  return softplus(features=features, name=name)
Softplus.__doc__ = softplus.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.Softplus.
Softplus = _doc_controls.do_not_generate_docs(_kwarg_only(Softplus))
tf_export("raw_ops.Softplus")(Softplus)
def softplus_eager_fallback(features, name=None, ctx=None):
  r"""Eager-mode slow path for softplus.

  Builds the inputs/attrs by hand and executes Softplus directly.
  """
  eager_ctx = ctx or _context.context()
  # Infer the T attr from the features tensor.
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  flat_inputs = [features]
  op_attrs = ("T", attr_t)
  tensors = _execute.execute(b"Softplus", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Softplus", flat_inputs, op_attrs, tensors, name)
  result, = tensors
  return result
def softplus_grad(gradients, features, name=None):
  r"""Computes softplus gradients for a softplus operation.
  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding softplus operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding softplus operation.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context or _context.context()
  # Eager fast path: execute via the TFE C API; fall back to Python on failure.
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "SoftplusGrad", name, _ctx.post_execution_callbacks, gradients,
        features)
      return _result
    except _core._FallbackException:
      try:
        return softplus_grad_eager_fallback(
            gradients, features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      # Attach the op name to the message, then re-raise the C++ status
      # as the matching Python exception type.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Add nodes to the TensorFlow graph.
  _, _, _op = _op_def_lib._apply_op_helper(
        "SoftplusGrad", gradients=gradients, features=features, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "SoftplusGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result
def SoftplusGrad(gradients, features, name=None):
  # Thin raw-op alias: forwards every argument to softplus_grad unchanged.
  return softplus_grad(gradients=gradients, features=features, name=name)
SoftplusGrad.__doc__ = softplus_grad.__doc__
# Hide from generated docs, force keyword-only calling, export as tf.raw_ops.SoftplusGrad.
SoftplusGrad = _doc_controls.do_not_generate_docs(_kwarg_only(SoftplusGrad))
tf_export("raw_ops.SoftplusGrad")(SoftplusGrad)
def softplus_grad_eager_fallback(gradients, features, name=None, ctx=None):
  r"""Eager-mode slow path for softplus_grad.

  Coerces both inputs to a matching dtype, runs the SoftplusGrad kernel,
  records the gradient, and returns the single output tensor.
  """
  eager_ctx = ctx if ctx else _context.context()
  attr_t, matched = _execute.args_to_matching_eager([gradients, features], eager_ctx)
  gradients, features = matched
  flat_inputs = [gradients, features]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"SoftplusGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SoftplusGrad", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element list.
  result, = outputs
  return result
@_dispatch.add_dispatch_list
@tf_export('nn.softsign', 'math.softsign')
def softsign(features, name=None):
  r"""Computes softsign: `features / (abs(features) + 1)`.

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "Softsign",
        name, _ctx.post_execution_callbacks, features)
      return _result
    except _core._FallbackException:
      try:
        return softsign_eager_fallback(
            features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Inputs the op cannot handle directly: give registered dispatch
        # handlers a chance before re-raising.
        result = _dispatch.dispatch(
              softsign, features=features, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode (or eager fell through): add a Softsign node.
  try:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Softsign", features=features, name=name)
  except (TypeError, ValueError):
    # Same dispatch escape hatch as the eager path.
    result = _dispatch.dispatch(
          softsign, features=features, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "Softsign", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element output list.
  _result, = _result
  return _result
def Softsign(features, name=None):
  # Thin raw-op alias for softsign; argument docs live on that function.
  return softsign(features=features, name=name)
Softsign.__doc__ = softsign.__doc__
# Hide from generated docs and force keyword-only calls, then export as a raw op.
Softsign = _doc_controls.do_not_generate_docs(_kwarg_only(Softsign))
tf_export("raw_ops.Softsign")(Softsign)
def softsign_eager_fallback(features, name=None, ctx=None):
  r"""Eager-mode slow path for softsign.

  Resolves the input dtype, runs the Softsign kernel, records the
  gradient, and returns the single output tensor.
  """
  eager_ctx = ctx if ctx else _context.context()
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  flat_inputs = [features]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Softsign", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Softsign", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element list.
  result, = outputs
  return result
def softsign_grad(gradients, features, name=None):
  r"""Computes softsign gradients for a softsign operation.

  Args:
    gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      The backpropagated gradients to the corresponding softsign operation.
    features: A `Tensor`. Must have the same type as `gradients`.
      The features passed as input to the corresponding softsign operation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `gradients`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "SoftsignGrad", name, _ctx.post_execution_callbacks, gradients,
        features)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      try:
        return softsign_grad_eager_fallback(
            gradients, features, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode (or eager fell through): add a SoftsignGrad node.
  _, _, _op = _op_def_lib._apply_op_helper(
        "SoftsignGrad", gradients=gradients, features=features, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "SoftsignGrad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element output list.
  _result, = _result
  return _result
def SoftsignGrad(gradients, features, name=None):
  # Thin raw-op alias for softsign_grad; argument docs live on that function.
  return softsign_grad(gradients=gradients, features=features, name=name)
SoftsignGrad.__doc__ = softsign_grad.__doc__
# Hide from generated docs and force keyword-only calls, then export as a raw op.
SoftsignGrad = _doc_controls.do_not_generate_docs(_kwarg_only(SoftsignGrad))
tf_export("raw_ops.SoftsignGrad")(SoftsignGrad)
def softsign_grad_eager_fallback(gradients, features, name=None, ctx=None):
  r"""Eager-mode slow path for softsign_grad.

  Coerces both inputs to a matching dtype, runs the SoftsignGrad kernel,
  records the gradient, and returns the single output tensor.
  """
  eager_ctx = ctx if ctx else _context.context()
  attr_t, matched = _execute.args_to_matching_eager([gradients, features], eager_ctx)
  gradients, features = matched
  flat_inputs = [gradients, features]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"SoftsignGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SoftsignGrad", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element list.
  result, = outputs
  return result
# Output field names for the SparseSoftmaxCrossEntropyWithLogits op; the
# namedtuple lets callers access the two results as .loss and .backprop.
_sparse_softmax_cross_entropy_with_logits_outputs = ["loss", "backprop"]
_SparseSoftmaxCrossEntropyWithLogitsOutput = _collections.namedtuple(
    "SparseSoftmaxCrossEntropyWithLogits",
    _sparse_softmax_cross_entropy_with_logits_outputs)
def sparse_softmax_cross_entropy_with_logits(features, labels, name=None):
  r"""Computes softmax cross entropy cost and gradients to backpropagate.

  Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
  a matrix of label probabilities, but rather a single label per row
  of features. This label is considered to have probability 1.0 for the
  given row.

  Inputs are the logits, not probabilities.

  Args:
    features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
      batch_size x num_classes matrix
    labels: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      batch_size vector with values in [0, num_classes).
      This is the label for the given minibatch entry.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (loss, backprop).

    loss: A `Tensor`. Has the same type as `features`.
    backprop: A `Tensor`. Has the same type as `features`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name,
        "SparseSoftmaxCrossEntropyWithLogits", name,
        _ctx.post_execution_callbacks, features, labels)
      # Wrap the raw output list in the (loss, backprop) namedtuple.
      _result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      try:
        return sparse_softmax_cross_entropy_with_logits_eager_fallback(
            features, labels, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode (or eager fell through): add the op node.
  _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSoftmaxCrossEntropyWithLogits", features=features,
                                               labels=labels, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("T", _op._get_attr_type("T"), "Tlabels",
            _op._get_attr_type("Tlabels"))
  _execute.record_gradient(
      "SparseSoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result, name)
  _result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result)
  return _result
def SparseSoftmaxCrossEntropyWithLogits(features, labels, name=None):
  # Thin raw-op alias; argument docs live on the snake_case function.
  return sparse_softmax_cross_entropy_with_logits(features=features, labels=labels, name=name)
SparseSoftmaxCrossEntropyWithLogits.__doc__ = sparse_softmax_cross_entropy_with_logits.__doc__
# Hide from generated docs and force keyword-only calls, then export as a raw op.
SparseSoftmaxCrossEntropyWithLogits = _doc_controls.do_not_generate_docs(_kwarg_only(SparseSoftmaxCrossEntropyWithLogits))
tf_export("raw_ops.SparseSoftmaxCrossEntropyWithLogits")(SparseSoftmaxCrossEntropyWithLogits)
def sparse_softmax_cross_entropy_with_logits_eager_fallback(features, labels, name=None, ctx=None):
  r"""Eager-mode slow path for sparse_softmax_cross_entropy_with_logits.

  Resolves input dtypes (labels default to int64), runs the kernel,
  records the gradient, and returns a (loss, backprop) namedtuple.
  """
  eager_ctx = ctx if ctx else _context.context()
  attr_t, (features,) = _execute.args_to_matching_eager([features], eager_ctx)
  attr_tlabels, (labels,) = _execute.args_to_matching_eager([labels], eager_ctx, _dtypes.int64)
  flat_inputs = [features, labels]
  op_attrs = ("T", attr_t, "Tlabels", attr_tlabels)
  outputs = _execute.execute(b"SparseSoftmaxCrossEntropyWithLogits", 2,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSoftmaxCrossEntropyWithLogits", flat_inputs, op_attrs, outputs,
      name)
  return _SparseSoftmaxCrossEntropyWithLogitsOutput._make(outputs)
# Output field names for the TopK op; the namedtuple lets callers access
# the two results as .values and .indices.
_top_k_outputs = ["values", "indices"]
_TopKOutput = _collections.namedtuple(
    "TopK", _top_k_outputs)
def top_k(input, k, sorted=True, name=None):
  r"""Finds values and indices of the `k` largest elements for the last dimension.

  If the input is a vector (rank-1), finds the `k` largest entries in the vector
  and outputs their values and indices as vectors. Thus `values[j]` is the
  `j`-th largest entry in `input`, and its index is `indices[j]`.

  For matrices (resp. higher rank input), computes the top `k` entries in each
  row (resp. vector along the last dimension). Thus,

      values.shape = indices.shape = input.shape[:-1] + [k]

  If two elements are equal, the lower-index element appears first.

  If `k` varies dynamically, use `TopKV2` below.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D or higher with last dimension at least `k`.
    k: An `int` that is `>= 0`.
      Number of top elements to look for along the last dimension (along each
      row for matrices).
    sorted: An optional `bool`. Defaults to `True`.
      If true the resulting `k` elements will be sorted by the values in
      descending order.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (values, indices).

    values: A `Tensor`. Has the same type as `input`.
    indices: A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "TopK",
        name, _ctx.post_execution_callbacks, input, "k", k, "sorted", sorted)
      # Wrap the raw output list in the (values, indices) namedtuple.
      _result = _TopKOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      try:
        return top_k_eager_fallback(
            input, k=k, sorted=sorted, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode (or eager fell through): validate attrs and add a TopK node.
  k = _execute.make_int(k, "k")
  if sorted is None:
    sorted = True
  sorted = _execute.make_bool(sorted, "sorted")
  _, _, _op = _op_def_lib._apply_op_helper(
        "TopK", input=input, k=k, sorted=sorted, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("k", _op.get_attr("k"), "sorted", _op.get_attr("sorted"), "T",
            _op._get_attr_type("T"))
  _execute.record_gradient(
      "TopK", _inputs_flat, _attrs, _result, name)
  _result = _TopKOutput._make(_result)
  return _result
def TopK(input, k, sorted=True, name=None):
  # Thin raw-op alias for top_k; argument docs live on that function.
  return top_k(input=input, k=k, sorted=sorted, name=name)
TopK.__doc__ = top_k.__doc__
# Hide from generated docs and force keyword-only calls, then export as a raw op.
TopK = _doc_controls.do_not_generate_docs(_kwarg_only(TopK))
tf_export("raw_ops.TopK")(TopK)
def top_k_eager_fallback(input, k, sorted=True, name=None, ctx=None):
  r"""Eager-mode slow path for top_k.

  Validates the `k` and `sorted` attributes, resolves the input dtype,
  runs the TopK kernel, records the gradient, and returns a
  (values, indices) namedtuple.
  """
  eager_ctx = ctx if ctx else _context.context()
  k = _execute.make_int(k, "k")
  if sorted is None:
    sorted = True
  sorted = _execute.make_bool(sorted, "sorted")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  flat_inputs = [input]
  op_attrs = ("k", k, "sorted", sorted, "T", attr_t)
  outputs = _execute.execute(b"TopK", 2, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "TopK", flat_inputs, op_attrs, outputs, name)
  return _TopKOutput._make(outputs)
# Output field names for the TopKV2 op; the namedtuple lets callers access
# the two results as .values and .indices.
_top_kv2_outputs = ["values", "indices"]
_TopKV2Output = _collections.namedtuple(
    "TopKV2", _top_kv2_outputs)
def top_kv2(input, k, sorted=True, name=None):
  r"""Finds values and indices of the `k` largest elements for the last dimension.

  If the input is a vector (rank-1), finds the `k` largest entries in the vector
  and outputs their values and indices as vectors. Thus `values[j]` is the
  `j`-th largest entry in `input`, and its index is `indices[j]`.

  For matrices (resp. higher rank input), computes the top `k` entries in each
  row (resp. vector along the last dimension). Thus,

      values.shape = indices.shape = input.shape[:-1] + [k]

  If two elements are equal, the lower-index element appears first.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D or higher with last dimension at least `k`.
    k: A `Tensor` of type `int32`.
      0-D. Number of top elements to look for along the last dimension (along each
      row for matrices).
    sorted: An optional `bool`. Defaults to `True`.
      If true the resulting `k` elements will be sorted by the values in
      descending order.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (values, indices).

    values: A `Tensor`. Has the same type as `input`.
    indices: A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  if _ctx is not None and _ctx._thread_local_data.is_eager:
    # Eager mode: try the C fast path first. Unlike TopK, `k` is a tensor
    # input here, not an attribute.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._thread_local_data.device_name, "TopKV2",
        name, _ctx.post_execution_callbacks, input, k, "sorted", sorted)
      # Wrap the raw output list in the (values, indices) namedtuple.
      _result = _TopKV2Output._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      try:
        return top_kv2_eager_fallback(
            input, k, sorted=sorted, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  # Graph mode (or eager fell through): validate attrs and add a TopKV2 node.
  if sorted is None:
    sorted = True
  sorted = _execute.make_bool(sorted, "sorted")
  _, _, _op = _op_def_lib._apply_op_helper(
        "TopKV2", input=input, k=k, sorted=sorted, name=name)
  _result = _op.outputs[:]
  _inputs_flat = _op.inputs
  _attrs = ("sorted", _op.get_attr("sorted"), "T", _op._get_attr_type("T"))
  _execute.record_gradient(
      "TopKV2", _inputs_flat, _attrs, _result, name)
  _result = _TopKV2Output._make(_result)
  return _result
def TopKV2(input, k, sorted=True, name=None):
  # Thin raw-op alias for top_kv2; argument docs live on that function.
  return top_kv2(input=input, k=k, sorted=sorted, name=name)
TopKV2.__doc__ = top_kv2.__doc__
# Hide from generated docs and force keyword-only calls, then export as a raw op.
TopKV2 = _doc_controls.do_not_generate_docs(_kwarg_only(TopKV2))
tf_export("raw_ops.TopKV2")(TopKV2)
def top_kv2_eager_fallback(input, k, sorted=True, name=None, ctx=None):
  r"""Eager-mode slow path for top_kv2.

  Validates the `sorted` attribute, resolves the input dtype, converts
  `k` to an int32 tensor, runs the TopKV2 kernel, records the gradient,
  and returns a (values, indices) namedtuple.
  """
  eager_ctx = ctx if ctx else _context.context()
  if sorted is None:
    sorted = True
  sorted = _execute.make_bool(sorted, "sorted")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  k = _ops.convert_to_tensor(k, _dtypes.int32)
  flat_inputs = [input, k]
  op_attrs = ("sorted", sorted, "T", attr_t)
  outputs = _execute.execute(b"TopKV2", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "TopKV2", flat_inputs, op_attrs, outputs, name)
  return _TopKV2Output._make(outputs)
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parse a serialized OpList, register its ops, and return an OpDefLibrary.

  Args:
    op_list_proto_bytes: Serialized `OpList` protocol buffer bytes.

  Returns:
    An `OpDefLibrary` populated with the ops from the parsed proto.
  """
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
# op {
# name: "AvgPool"
# input_arg {
# name: "value"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "AvgPool3D"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NDHWC"
# }
# allowed_values {
# list {
# s: "NDHWC"
# s: "NCDHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "AvgPool3DGrad"
# input_arg {
# name: "orig_input_shape"
# type: DT_INT32
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NDHWC"
# }
# allowed_values {
# list {
# s: "NDHWC"
# s: "NCDHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "AvgPoolGrad"
# input_arg {
# name: "orig_input_shape"
# type: DT_INT32
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "BatchNormWithGlobalNormalization"
# input_arg {
# name: "t"
# type_attr: "T"
# }
# input_arg {
# name: "m"
# type_attr: "T"
# }
# input_arg {
# name: "v"
# type_attr: "T"
# }
# input_arg {
# name: "beta"
# type_attr: "T"
# }
# input_arg {
# name: "gamma"
# type_attr: "T"
# }
# output_arg {
# name: "result"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "variance_epsilon"
# type: "float"
# }
# attr {
# name: "scale_after_normalization"
# type: "bool"
# }
# deprecation {
# version: 9
# explanation: "Use tf.nn.batch_normalization()"
# }
# }
# op {
# name: "BatchNormWithGlobalNormalizationGrad"
# input_arg {
# name: "t"
# type_attr: "T"
# }
# input_arg {
# name: "m"
# type_attr: "T"
# }
# input_arg {
# name: "v"
# type_attr: "T"
# }
# input_arg {
# name: "gamma"
# type_attr: "T"
# }
# input_arg {
# name: "backprop"
# type_attr: "T"
# }
# output_arg {
# name: "dx"
# type_attr: "T"
# }
# output_arg {
# name: "dm"
# type_attr: "T"
# }
# output_arg {
# name: "dv"
# type_attr: "T"
# }
# output_arg {
# name: "db"
# type_attr: "T"
# }
# output_arg {
# name: "dg"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "variance_epsilon"
# type: "float"
# }
# attr {
# name: "scale_after_normalization"
# type: "bool"
# }
# deprecation {
# version: 9
# explanation: "Use tf.nn.batch_normalization()"
# }
# }
# op {
# name: "BiasAdd"
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "bias"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# }
# op {
# name: "BiasAddGrad"
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# }
# op {
# name: "BiasAddV1"
# input_arg {
# name: "value"
# type_attr: "T"
# }
# input_arg {
# name: "bias"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_INT64
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_COMPLEX128
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "Conv2D"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "use_cudnn_on_gpu"
# type: "bool"
# default_value {
# b: true
# }
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# s: "EXPLICIT"
# }
# }
# }
# attr {
# name: "explicit_paddings"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "Conv2DBackpropFilter"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter_sizes"
# type: DT_INT32
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "use_cudnn_on_gpu"
# type: "bool"
# default_value {
# b: true
# }
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# s: "EXPLICIT"
# }
# }
# }
# attr {
# name: "explicit_paddings"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "Conv2DBackpropInput"
# input_arg {
# name: "input_sizes"
# type: DT_INT32
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "use_cudnn_on_gpu"
# type: "bool"
# default_value {
# b: true
# }
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# s: "EXPLICIT"
# }
# }
# }
# attr {
# name: "explicit_paddings"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "Conv3D"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NDHWC"
# }
# allowed_values {
# list {
# s: "NDHWC"
# s: "NCDHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "Conv3DBackpropFilter"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# deprecation {
# version: 10
# explanation: "Use Conv3DBackpropFilterV2"
# }
# }
# op {
# name: "Conv3DBackpropFilterV2"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter_sizes"
# type: DT_INT32
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NDHWC"
# }
# allowed_values {
# list {
# s: "NDHWC"
# s: "NCDHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "Conv3DBackpropInput"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# deprecation {
# version: 10
# explanation: "Use Conv3DBackpropInputV2"
# }
# }
# op {
# name: "Conv3DBackpropInputV2"
# input_arg {
# name: "input_sizes"
# type_attr: "Tshape"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NDHWC"
# }
# allowed_values {
# list {
# s: "NDHWC"
# s: "NCDHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "Tshape"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "DataFormatDimMap"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "src_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# }
# attr {
# name: "dst_format"
# type: "string"
# default_value {
# s: "NCHW"
# }
# }
# }
# op {
# name: "DataFormatVecPermute"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "src_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# }
# attr {
# name: "dst_format"
# type: "string"
# default_value {
# s: "NCHW"
# }
# }
# }
# op {
# name: "DepthwiseConv2dNative"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "DepthwiseConv2dNativeBackpropFilter"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter_sizes"
# type: DT_INT32
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "DepthwiseConv2dNativeBackpropInput"
# input_arg {
# name: "input_sizes"
# type: DT_INT32
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "Dilation2D"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "rates"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# }
# op {
# name: "Dilation2DBackpropFilter"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "filter_backprop"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "rates"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# }
# op {
# name: "Dilation2DBackpropInput"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "in_backprop"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "rates"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# }
# op {
# name: "Elu"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "activations"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "EluGrad"
# input_arg {
# name: "gradients"
# type_attr: "T"
# }
# input_arg {
# name: "outputs"
# type_attr: "T"
# }
# output_arg {
# name: "backprops"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "FractionalAvgPool"
# input_arg {
# name: "value"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# output_arg {
# name: "row_pooling_sequence"
# type: DT_INT64
# }
# output_arg {
# name: "col_pooling_sequence"
# type: DT_INT64
# }
# attr {
# name: "pooling_ratio"
# type: "list(float)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "pseudo_random"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "overlapping"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "deterministic"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "seed"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "seed2"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "FractionalAvgPoolGrad"
# input_arg {
# name: "orig_input_tensor_shape"
# type: DT_INT64
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# input_arg {
# name: "row_pooling_sequence"
# type: DT_INT64
# }
# input_arg {
# name: "col_pooling_sequence"
# type: DT_INT64
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "overlapping"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "FractionalMaxPool"
# input_arg {
# name: "value"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# output_arg {
# name: "row_pooling_sequence"
# type: DT_INT64
# }
# output_arg {
# name: "col_pooling_sequence"
# type: DT_INT64
# }
# attr {
# name: "pooling_ratio"
# type: "list(float)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "pseudo_random"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "overlapping"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "deterministic"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "seed"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "seed2"
# type: "int"
# default_value {
# i: 0
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "FractionalMaxPoolGrad"
# input_arg {
# name: "orig_input"
# type_attr: "T"
# }
# input_arg {
# name: "orig_output"
# type_attr: "T"
# }
# input_arg {
# name: "out_backprop"
# type_attr: "T"
# }
# input_arg {
# name: "row_pooling_sequence"
# type: DT_INT64
# }
# input_arg {
# name: "col_pooling_sequence"
# type: DT_INT64
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "overlapping"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "FusedBatchNorm"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "scale"
# type_attr: "T"
# }
# input_arg {
# name: "offset"
# type_attr: "T"
# }
# input_arg {
# name: "mean"
# type_attr: "T"
# }
# input_arg {
# name: "variance"
# type_attr: "T"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "batch_mean"
# type_attr: "T"
# }
# output_arg {
# name: "batch_variance"
# type_attr: "T"
# }
# output_arg {
# name: "reserve_space_1"
# type_attr: "T"
# }
# output_arg {
# name: "reserve_space_2"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "epsilon"
# type: "float"
# default_value {
# f: 0.0001
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "is_training"
# type: "bool"
# default_value {
# b: true
# }
# }
# }
# op {
# name: "FusedBatchNormGrad"
# input_arg {
# name: "y_backprop"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "scale"
# type_attr: "T"
# }
# input_arg {
# name: "reserve_space_1"
# type_attr: "T"
# }
# input_arg {
# name: "reserve_space_2"
# type_attr: "T"
# }
# output_arg {
# name: "x_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "scale_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "offset_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "reserve_space_3"
# type_attr: "T"
# }
# output_arg {
# name: "reserve_space_4"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "epsilon"
# type: "float"
# default_value {
# f: 0.0001
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "is_training"
# type: "bool"
# default_value {
# b: true
# }
# }
# }
# op {
# name: "FusedBatchNormGradV2"
# input_arg {
# name: "y_backprop"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "scale"
# type: DT_FLOAT
# }
# input_arg {
# name: "reserve_space_1"
# type_attr: "U"
# }
# input_arg {
# name: "reserve_space_2"
# type_attr: "U"
# }
# output_arg {
# name: "x_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "scale_backprop"
# type_attr: "U"
# }
# output_arg {
# name: "offset_backprop"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_3"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_4"
# type_attr: "U"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "U"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "epsilon"
# type: "float"
# default_value {
# f: 0.0001
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "is_training"
# type: "bool"
# default_value {
# b: true
# }
# }
# }
# op {
# name: "FusedBatchNormGradV3"
# input_arg {
# name: "y_backprop"
# type_attr: "T"
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "scale"
# type: DT_FLOAT
# }
# input_arg {
# name: "reserve_space_1"
# type_attr: "U"
# }
# input_arg {
# name: "reserve_space_2"
# type_attr: "U"
# }
# input_arg {
# name: "reserve_space_3"
# type_attr: "U"
# }
# output_arg {
# name: "x_backprop"
# type_attr: "T"
# }
# output_arg {
# name: "scale_backprop"
# type_attr: "U"
# }
# output_arg {
# name: "offset_backprop"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_4"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_5"
# type_attr: "U"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "U"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "epsilon"
# type: "float"
# default_value {
# f: 0.0001
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "is_training"
# type: "bool"
# default_value {
# b: true
# }
# }
# }
# op {
# name: "FusedBatchNormV2"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "scale"
# type_attr: "U"
# }
# input_arg {
# name: "offset"
# type_attr: "U"
# }
# input_arg {
# name: "mean"
# type_attr: "U"
# }
# input_arg {
# name: "variance"
# type_attr: "U"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "batch_mean"
# type_attr: "U"
# }
# output_arg {
# name: "batch_variance"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_1"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_2"
# type_attr: "U"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "U"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "epsilon"
# type: "float"
# default_value {
# f: 0.0001
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "is_training"
# type: "bool"
# default_value {
# b: true
# }
# }
# }
# op {
# name: "FusedBatchNormV3"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "scale"
# type_attr: "U"
# }
# input_arg {
# name: "offset"
# type_attr: "U"
# }
# input_arg {
# name: "mean"
# type_attr: "U"
# }
# input_arg {
# name: "variance"
# type_attr: "U"
# }
# output_arg {
# name: "y"
# type_attr: "T"
# }
# output_arg {
# name: "batch_mean"
# type_attr: "U"
# }
# output_arg {
# name: "batch_variance"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_1"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_2"
# type_attr: "U"
# }
# output_arg {
# name: "reserve_space_3"
# type_attr: "U"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "U"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "epsilon"
# type: "float"
# default_value {
# f: 0.0001
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "is_training"
# type: "bool"
# default_value {
# b: true
# }
# }
# }
# op {
# name: "FusedPadConv2D"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "paddings"
# type: DT_INT32
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "mode"
# type: "string"
# allowed_values {
# list {
# s: "REFLECT"
# s: "SYMMETRIC"
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# }
# op {
# name: "FusedResizeAndPadConv2D"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "size"
# type: DT_INT32
# }
# input_arg {
# name: "paddings"
# type: DT_INT32
# }
# input_arg {
# name: "filter"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "resize_align_corners"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "mode"
# type: "string"
# allowed_values {
# list {
# s: "REFLECT"
# s: "SYMMETRIC"
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# }
# op {
# name: "InTopK"
# input_arg {
# name: "predictions"
# type: DT_FLOAT
# }
# input_arg {
# name: "targets"
# type_attr: "T"
# }
# output_arg {
# name: "precision"
# type: DT_BOOL
# }
# attr {
# name: "k"
# type: "int"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "InTopKV2"
# input_arg {
# name: "predictions"
# type: DT_FLOAT
# }
# input_arg {
# name: "targets"
# type_attr: "T"
# }
# input_arg {
# name: "k"
# type_attr: "T"
# }
# output_arg {
# name: "precision"
# type: DT_BOOL
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_INT32
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "L2Loss"
# input_arg {
# name: "t"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "LRN"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "depth_radius"
# type: "int"
# default_value {
# i: 5
# }
# }
# attr {
# name: "bias"
# type: "float"
# default_value {
# f: 1
# }
# }
# attr {
# name: "alpha"
# type: "float"
# default_value {
# f: 1
# }
# }
# attr {
# name: "beta"
# type: "float"
# default_value {
# f: 0.5
# }
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "LRNGrad"
# input_arg {
# name: "input_grads"
# type_attr: "T"
# }
# input_arg {
# name: "input_image"
# type_attr: "T"
# }
# input_arg {
# name: "output_image"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "depth_radius"
# type: "int"
# default_value {
# i: 5
# }
# }
# attr {
# name: "bias"
# type: "float"
# default_value {
# f: 1
# }
# }
# attr {
# name: "alpha"
# type: "float"
# default_value {
# f: 1
# }
# }
# attr {
# name: "beta"
# type: "float"
# default_value {
# f: 0.5
# }
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "LeakyRelu"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "activations"
# type_attr: "T"
# }
# attr {
# name: "alpha"
# type: "float"
# default_value {
# f: 0.2
# }
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "LeakyReluGrad"
# input_arg {
# name: "gradients"
# type_attr: "T"
# }
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "backprops"
# type_attr: "T"
# }
# attr {
# name: "alpha"
# type: "float"
# default_value {
# f: 0.2
# }
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "LogSoftmax"
# input_arg {
# name: "logits"
# type_attr: "T"
# }
# output_arg {
# name: "logsoftmax"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "MaxPool"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_UINT16
# type: DT_QINT8
# }
# }
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# s: "NCHW_VECT_C"
# }
# }
# }
# }
# op {
# name: "MaxPool3D"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NDHWC"
# }
# allowed_values {
# list {
# s: "NDHWC"
# s: "NCDHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "MaxPool3DGrad"
# input_arg {
# name: "orig_input"
# type_attr: "TInput"
# }
# input_arg {
# name: "orig_output"
# type_attr: "TInput"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NDHWC"
# }
# allowed_values {
# list {
# s: "NDHWC"
# s: "NCDHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# attr {
# name: "TInput"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "MaxPool3DGradGrad"
# input_arg {
# name: "orig_input"
# type_attr: "T"
# }
# input_arg {
# name: "orig_output"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 5
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NDHWC"
# }
# allowed_values {
# list {
# s: "NDHWC"
# s: "NCDHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "MaxPoolGrad"
# input_arg {
# name: "orig_input"
# type_attr: "T"
# }
# input_arg {
# name: "orig_output"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "MaxPoolGradGrad"
# input_arg {
# name: "orig_input"
# type_attr: "T"
# }
# input_arg {
# name: "orig_output"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "MaxPoolGradGradV2"
# input_arg {
# name: "orig_input"
# type_attr: "T"
# }
# input_arg {
# name: "orig_output"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "ksize"
# type: DT_INT32
# }
# input_arg {
# name: "strides"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "MaxPoolGradGradWithArgmax"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "argmax"
# type_attr: "Targmax"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "include_batch_in_index"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Targmax"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "MaxPoolGradV2"
# input_arg {
# name: "orig_input"
# type_attr: "T"
# }
# input_arg {
# name: "orig_output"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "ksize"
# type: DT_INT32
# }
# input_arg {
# name: "strides"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "MaxPoolGradWithArgmax"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "grad"
# type_attr: "T"
# }
# input_arg {
# name: "argmax"
# type_attr: "Targmax"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "include_batch_in_index"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "Targmax"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "MaxPoolV2"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "ksize"
# type: DT_INT32
# }
# input_arg {
# name: "strides"
# type: DT_INT32
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_UINT16
# type: DT_QINT8
# }
# }
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "data_format"
# type: "string"
# default_value {
# s: "NHWC"
# }
# allowed_values {
# list {
# s: "NHWC"
# s: "NCHW"
# s: "NCHW_VECT_C"
# }
# }
# }
# }
# op {
# name: "MaxPoolWithArgmax"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# output_arg {
# name: "argmax"
# type_attr: "Targmax"
# }
# attr {
# name: "ksize"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "strides"
# type: "list(int)"
# has_minimum: true
# minimum: 4
# }
# attr {
# name: "Targmax"
# type: "type"
# default_value {
# type: DT_INT64
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "include_batch_in_index"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "NthElement"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "n"
# type: DT_INT32
# }
# output_arg {
# name: "values"
# type_attr: "T"
# }
# attr {
# name: "reverse"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "QuantizedAvgPool"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "ksize"
# type: "list(int)"
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# }
# op {
# name: "QuantizedBatchNormWithGlobalNormalization"
# input_arg {
# name: "t"
# type_attr: "Tinput"
# }
# input_arg {
# name: "t_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "t_max"
# type: DT_FLOAT
# }
# input_arg {
# name: "m"
# type_attr: "Tinput"
# }
# input_arg {
# name: "m_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "m_max"
# type: DT_FLOAT
# }
# input_arg {
# name: "v"
# type_attr: "Tinput"
# }
# input_arg {
# name: "v_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "v_max"
# type: DT_FLOAT
# }
# input_arg {
# name: "beta"
# type_attr: "Tinput"
# }
# input_arg {
# name: "beta_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "beta_max"
# type: DT_FLOAT
# }
# input_arg {
# name: "gamma"
# type_attr: "Tinput"
# }
# input_arg {
# name: "gamma_min"
# type: DT_FLOAT
# }
# input_arg {
# name: "gamma_max"
# type: DT_FLOAT
# }
# output_arg {
# name: "result"
# type_attr: "out_type"
# }
# output_arg {
# name: "result_min"
# type: DT_FLOAT
# }
# output_arg {
# name: "result_max"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "variance_epsilon"
# type: "float"
# }
# attr {
# name: "scale_after_normalization"
# type: "bool"
# }
# }
# op {
# name: "QuantizedBiasAdd"
# input_arg {
# name: "input"
# type_attr: "T1"
# }
# input_arg {
# name: "bias"
# type_attr: "T2"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_bias"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_bias"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_out"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_out"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "QuantizedConv2D"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DAndRelu"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DAndReluAndRequantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_freezed_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DAndRequantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_freezed_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DPerChannel"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DWithBias"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DWithBiasAndRelu"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DWithBiasAndReluAndRequantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type_attr: "Tbias"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_freezed_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tbias"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_QINT32
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DWithBiasAndRequantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type_attr: "Tbias"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_freezed_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tbias"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_QINT32
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type_attr: "Tbias"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "summand"
# type_attr: "Tsummand"
# }
# input_arg {
# name: "min_summand"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_summand"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tbias"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_QINT32
# }
# }
# }
# attr {
# name: "Tsummand"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DWithBiasSumAndRelu"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "summand"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedConv2DWithBiasSumAndReluAndRequantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type_attr: "Tbias"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "summand"
# type_attr: "Tsummand"
# }
# input_arg {
# name: "min_summand"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_summand"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tbias"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_QINT32
# }
# }
# }
# attr {
# name: "Tsummand"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# attr {
# name: "padding_list"
# type: "list(int)"
# default_value {
# list {
# }
# }
# }
# }
# op {
# name: "QuantizedDepthwiseConv2D"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "QuantizedDepthwiseConv2DWithBias"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "QuantizedDepthwiseConv2DWithBiasAndRelu"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize"
# input_arg {
# name: "input"
# type_attr: "Tinput"
# }
# input_arg {
# name: "filter"
# type_attr: "Tfilter"
# }
# input_arg {
# name: "bias"
# type_attr: "Tbias"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_filter"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_freezed_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tfilter"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tbias"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_QINT32
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# attr {
# name: "dilations"
# type: "list(int)"
# default_value {
# list {
# i: 1
# i: 1
# i: 1
# i: 1
# }
# }
# }
# }
# op {
# name: "QuantizedMatMulWithBias"
# input_arg {
# name: "a"
# type_attr: "T1"
# }
# input_arg {
# name: "b"
# type_attr: "T2"
# }
# input_arg {
# name: "bias"
# type_attr: "Tbias"
# }
# input_arg {
# name: "min_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_b"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_b"
# type: DT_FLOAT
# }
# output_arg {
# name: "out"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_out"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_out"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tbias"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_QINT32
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "input_quant_mode"
# type: "string"
# default_value {
# s: "MIN_FIRST"
# }
# allowed_values {
# list {
# s: "MIN_FIRST"
# s: "SCALED"
# }
# }
# }
# }
# op {
# name: "QuantizedMatMulWithBiasAndRelu"
# input_arg {
# name: "a"
# type_attr: "T1"
# }
# input_arg {
# name: "b"
# type_attr: "T2"
# }
# input_arg {
# name: "bias"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_b"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_b"
# type: DT_FLOAT
# }
# output_arg {
# name: "out"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_out"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_out"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QINT32
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "input_quant_mode"
# type: "string"
# default_value {
# s: "MIN_FIRST"
# }
# allowed_values {
# list {
# s: "MIN_FIRST"
# s: "SCALED"
# }
# }
# }
# }
# op {
# name: "QuantizedMatMulWithBiasAndReluAndRequantize"
# input_arg {
# name: "a"
# type_attr: "T1"
# }
# input_arg {
# name: "b"
# type_attr: "T2"
# }
# input_arg {
# name: "bias"
# type_attr: "Tbias"
# }
# input_arg {
# name: "min_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_a"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_b"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_b"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_freezed_output"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_freezed_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "out"
# type_attr: "Toutput"
# }
# output_arg {
# name: "min_out"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_out"
# type: DT_FLOAT
# }
# attr {
# name: "T1"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "T2"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "Tbias"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_QINT32
# }
# }
# }
# attr {
# name: "Toutput"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "transpose_a"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "transpose_b"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "input_quant_mode"
# type: "string"
# default_value {
# s: "MIN_FIRST"
# }
# allowed_values {
# list {
# s: "MIN_FIRST"
# s: "SCALED"
# }
# }
# }
# }
# op {
# name: "QuantizedMaxPool"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "min_input"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_input"
# type: DT_FLOAT
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# output_arg {
# name: "min_output"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_output"
# type: DT_FLOAT
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "ksize"
# type: "list(int)"
# }
# attr {
# name: "strides"
# type: "list(int)"
# }
# attr {
# name: "padding"
# type: "string"
# allowed_values {
# list {
# s: "SAME"
# s: "VALID"
# }
# }
# }
# }
# op {
# name: "QuantizedRelu"
# input_arg {
# name: "features"
# type_attr: "Tinput"
# }
# input_arg {
# name: "min_features"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_features"
# type: DT_FLOAT
# }
# output_arg {
# name: "activations"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_activations"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_activations"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "QuantizedRelu6"
# input_arg {
# name: "features"
# type_attr: "Tinput"
# }
# input_arg {
# name: "min_features"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_features"
# type: DT_FLOAT
# }
# output_arg {
# name: "activations"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_activations"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_activations"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "QuantizedReluX"
# input_arg {
# name: "features"
# type_attr: "Tinput"
# }
# input_arg {
# name: "max_value"
# type: DT_FLOAT
# }
# input_arg {
# name: "min_features"
# type: DT_FLOAT
# }
# input_arg {
# name: "max_features"
# type: DT_FLOAT
# }
# output_arg {
# name: "activations"
# type_attr: "out_type"
# }
# output_arg {
# name: "min_activations"
# type: DT_FLOAT
# }
# output_arg {
# name: "max_activations"
# type: DT_FLOAT
# }
# attr {
# name: "Tinput"
# type: "type"
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# attr {
# name: "out_type"
# type: "type"
# default_value {
# type: DT_QUINT8
# }
# allowed_values {
# list {
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_QINT16
# type: DT_QUINT16
# }
# }
# }
# }
# op {
# name: "Relu"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "activations"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# type: DT_QINT8
# }
# }
# }
# }
# op {
# name: "Relu6"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "activations"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "Relu6Grad"
# input_arg {
# name: "gradients"
# type_attr: "T"
# }
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "backprops"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "ReluGrad"
# input_arg {
# name: "gradients"
# type_attr: "T"
# }
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "backprops"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
# op {
# name: "Selu"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "activations"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "SeluGrad"
# input_arg {
# name: "gradients"
# type_attr: "T"
# }
# input_arg {
# name: "outputs"
# type_attr: "T"
# }
# output_arg {
# name: "backprops"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Softmax"
# input_arg {
# name: "logits"
# type_attr: "T"
# }
# output_arg {
# name: "softmax"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "SoftmaxCrossEntropyWithLogits"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# input_arg {
# name: "labels"
# type_attr: "T"
# }
# output_arg {
# name: "loss"
# type_attr: "T"
# }
# output_arg {
# name: "backprop"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Softplus"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "activations"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "SoftplusGrad"
# input_arg {
# name: "gradients"
# type_attr: "T"
# }
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "backprops"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "Softsign"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "activations"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "SoftsignGrad"
# input_arg {
# name: "gradients"
# type_attr: "T"
# }
# input_arg {
# name: "features"
# type_attr: "T"
# }
# output_arg {
# name: "backprops"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# }
# op {
# name: "SparseSoftmaxCrossEntropyWithLogits"
# input_arg {
# name: "features"
# type_attr: "T"
# }
# input_arg {
# name: "labels"
# type_attr: "Tlabels"
# }
# output_arg {
# name: "loss"
# type_attr: "T"
# }
# output_arg {
# name: "backprop"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_BFLOAT16
# type: DT_FLOAT
# type: DT_DOUBLE
# }
# }
# }
# attr {
# name: "Tlabels"
# type: "type"
# default_value {
# type: DT_INT64
# }
# allowed_values {
# list {
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# }
# op {
# name: "TopK"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# output_arg {
# name: "values"
# type_attr: "T"
# }
# output_arg {
# name: "indices"
# type: DT_INT32
# }
# attr {
# name: "k"
# type: "int"
# has_minimum: true
# }
# attr {
# name: "sorted"
# type: "bool"
# default_value {
# b: true
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# deprecation {
# version: 7
# explanation: "Use TopKV2 instead"
# }
# }
# op {
# name: "TopKV2"
# input_arg {
# name: "input"
# type_attr: "T"
# }
# input_arg {
# name: "k"
# type: DT_INT32
# }
# output_arg {
# name: "values"
# type_attr: "T"
# }
# output_arg {
# name: "indices"
# type: DT_INT32
# }
# attr {
# name: "sorted"
# type: "bool"
# default_value {
# b: true
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_UINT8
# type: DT_INT16
# type: DT_INT8
# type: DT_INT64
# type: DT_BFLOAT16
# type: DT_UINT16
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# }
# }
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\274\001\n\007AvgPool\022\n\n\005value\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\301\001\n\tAvgPool3D\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\332\001\n\rAvgPool3DGrad\022\024\n\020orig_input_shape\030\003\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\325\001\n\013AvgPoolGrad\022\024\n\020orig_input_shape\030\003\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\343\001\n BatchNormWithGlobalNormalization\022\006\n\001t\"\001T\022\006\n\001m\"\001T\022\006\n\001v\"\001T\022\t\n\004beta\"\001T\022\n\n\005gamma\"\001T\032\013\n\006result\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\020variance_epsilon\022\005float\"!\n\031scale_after_normalization\022\004boolB#\010\t\022\037Use 
tf.nn.batch_normalization()\n\213\002\n$BatchNormWithGlobalNormalizationGrad\022\006\n\001t\"\001T\022\006\n\001m\"\001T\022\006\n\001v\"\001T\022\n\n\005gamma\"\001T\022\r\n\010backprop\"\001T\032\007\n\002dx\"\001T\032\007\n\002dm\"\001T\032\007\n\002dv\"\001T\032\007\n\002db\"\001T\032\007\n\002dg\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\020variance_epsilon\022\005float\"!\n\031scale_after_normalization\022\004boolB#\010\t\022\037Use tf.nn.batch_normalization()\n~\n\007BiasAdd\022\n\n\005value\"\001T\022\t\n\004bias\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\n~\n\013BiasAddGrad\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\nQ\n\tBiasAddV1\022\n\n\005value\"\001T\022\t\n\004bias\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\n\232\002\n\006Conv2D\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\034\n\020use_cudnn_on_gpu\022\004bool\032\002(\001\",\n\007padding\022\006string:\031\n\027\022\004SAME\022\005VALID\022\010EXPLICIT\"\"\n\021explicit_paddings\022\tlist(int)\032\002\n\000\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\300\002\n\024Conv2DBackpropFilter\022\n\n\005input\"\001T\022\020\n\014filter_sizes\030\003\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\034\n\020use_cudnn_on_gpu\022\004bool\032\002(\001\",\n\007padding\022\006string:\031\n\027\022\004SAME\022\005VALID\022\010EXPLICIT\"\"\n\021explicit_paddings\022\tlist(int)\032\002\n\000\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\277\002\n\023Conv2DBackpropInput\022\017\n\013input_sizes\030\003\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\034\n\020use_cudnn_on_gpu\022\004bool\032\002(\001\",\n\007padding\022\006string:\031\n\027\022\004SAME\022\005VALID\022\010EXPLICIT\"\"\n\021explicit_paddings\022\tlist(int)\032\002\n\000\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\326\001\n\006Conv3D\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001\n\344\001\n\024Conv3DBackpropFilter\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\023\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001B\036\010\n\022\032Use Conv3DBackpropFilterV2\n\376\001\n\026Conv3DBackpropFilterV2\022\n\n\005input\"\001T\022\020\n\014filter_sizes\030\003\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001\n\342\001\n\023Conv3DBackpropInput\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\023\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001B\035\010\n\022\031Use 
Conv3DBackpropInputV2\n\237\002\n\025Conv3DBackpropInputV2\022\025\n\013input_sizes\"\006Tshape\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"!\n\tdilations\022\tlist(int)\032\t\n\007\032\005\001\001\001\001\001\"\032\n\006Tshape\022\004type\032\0020\003:\006\n\0042\002\003\t\nu\n\020DataFormatDimMap\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\"\034\n\nsrc_format\022\006string\032\006\022\004NHWC\"\034\n\ndst_format\022\006string\032\006\022\004NCHW\ny\n\024DataFormatVecPermute\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\"\034\n\nsrc_format\022\006string\032\006\022\004NHWC\"\034\n\ndst_format\022\006string\032\006\022\004NCHW\n\335\001\n\025DepthwiseConv2dNative\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\203\002\n#DepthwiseConv2dNativeBackpropFilter\022\n\n\005input\"\001T\022\020\n\014filter_sizes\030\003\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\202\002\n\"DepthwiseConv2dNativeBackpropInput\022\017\n\013input_sizes\030\003\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\245\001\n\nDilation2D\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\317\001\n\030Dilation2DBackpropFilter\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\024\n\017filter_backprop\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\312\001\n\027Dilation2DBackpropInput\022\n\n\005input\"\001T\022\013\n\006filter\"\001T\022\021\n\014out_backprop\"\001T\032\020\n\013in_backprop\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\007strides\022\tlist(int)(\0010\004\"\026\n\005rates\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n;\n\003Elu\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nL\n\007EluGrad\022\016\n\tgradients\"\001T\022\014\n\007outputs\"\001T\032\016\n\tbackprops\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\211\002\n\021FractionalAvgPool\02
2\n\n\005value\"\001T\032\013\n\006output\"\001T\032\030\n\024row_pooling_sequence\030\t\032\030\n\024col_pooling_sequence\030\t\" \n\rpooling_ratio\022\013list(float)(\0010\004\"\031\n\rpseudo_random\022\004bool\032\002(\000\"\027\n\013overlapping\022\004bool\032\002(\000\"\031\n\rdeterministic\022\004bool\032\002(\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\023\n\001T\022\004type:\010\n\0062\004\001\002\003\t\n\266\001\n\025FractionalAvgPoolGrad\022\033\n\027orig_input_tensor_shape\030\t\022\021\n\014out_backprop\"\001T\022\030\n\024row_pooling_sequence\030\t\022\030\n\024col_pooling_sequence\030\t\032\013\n\006output\"\001T\"\027\n\013overlapping\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\001\002\003\t\n\211\002\n\021FractionalMaxPool\022\n\n\005value\"\001T\032\013\n\006output\"\001T\032\030\n\024row_pooling_sequence\030\t\032\030\n\024col_pooling_sequence\030\t\" \n\rpooling_ratio\022\013list(float)(\0010\004\"\031\n\rpseudo_random\022\004bool\032\002(\000\"\027\n\013overlapping\022\004bool\032\002(\000\"\031\n\rdeterministic\022\004bool\032\002(\000\"\017\n\004seed\022\003int\032\002\030\000\"\020\n\005seed2\022\003int\032\002\030\000\"\023\n\001T\022\004type:\010\n\0062\004\001\002\003\t\n\274\001\n\025FractionalMaxPoolGrad\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\021\n\014out_backprop\"\001T\022\030\n\024row_pooling_sequence\030\t\022\030\n\024col_pooling_sequence\030\t\032\013\n\006output\"\001T\"\027\n\013overlapping\022\004bool\032\002(\000\"\023\n\001T\022\004type:\010\n\0062\004\001\002\003\t\n\230\002\n\016FusedBatchNorm\022\006\n\001x\"\001T\022\n\n\005scale\"\001T\022\013\n\006offset\"\001T\022\t\n\004mean\"\001T\022\r\n\010variance\"\001T\032\006\n\001y\"\001T\032\017\n\nbatch_mean\"\001T\032\023\n\016batch_variance\"\001T\032\024\n\017reserve_space_1\"\001T\032\024\n\017reserve_space_2\"\001T\"\020\n\001T\022\004type:\005\n\0032\001\001\"\027\n\007epsilo
n\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\300\002\n\022FusedBatchNormGrad\022\017\n\ny_backprop\"\001T\022\006\n\001x\"\001T\022\n\n\005scale\"\001T\022\024\n\017reserve_space_1\"\001T\022\024\n\017reserve_space_2\"\001T\032\017\n\nx_backprop\"\001T\032\023\n\016scale_backprop\"\001T\032\024\n\017offset_backprop\"\001T\032\024\n\017reserve_space_3\"\001T\032\024\n\017reserve_space_4\"\001T\"\020\n\001T\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\325\002\n\024FusedBatchNormGradV2\022\017\n\ny_backprop\"\001T\022\006\n\001x\"\001T\022\t\n\005scale\030\001\022\024\n\017reserve_space_1\"\001U\022\024\n\017reserve_space_2\"\001U\032\017\n\nx_backprop\"\001T\032\023\n\016scale_backprop\"\001U\032\024\n\017offset_backprop\"\001U\032\024\n\017reserve_space_3\"\001U\032\024\n\017reserve_space_4\"\001U\"\022\n\001T\022\004type:\007\n\0052\003\023\016\001\"\020\n\001U\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\353\002\n\024FusedBatchNormGradV3\022\017\n\ny_backprop\"\001T\022\006\n\001x\"\001T\022\t\n\005scale\030\001\022\024\n\017reserve_space_1\"\001U\022\024\n\017reserve_space_2\"\001U\022\024\n\017reserve_space_3\"\001U\032\017\n\nx_backprop\"\001T\032\023\n\016scale_backprop\"\001U\032\024\n\017offset_backprop\"\001U\032\024\n\017reserve_space_4\"\001U\032\024\n\017reserve_space_5\"\001U\"\022\n\001T\022\004type:\007\n\0052\003\023\016\001\"\020\n\001U\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\00
4NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\256\002\n\020FusedBatchNormV2\022\006\n\001x\"\001T\022\n\n\005scale\"\001U\022\013\n\006offset\"\001U\022\t\n\004mean\"\001U\022\r\n\010variance\"\001U\032\006\n\001y\"\001T\032\017\n\nbatch_mean\"\001U\032\023\n\016batch_variance\"\001U\032\024\n\017reserve_space_1\"\001U\032\024\n\017reserve_space_2\"\001U\"\022\n\001T\022\004type:\007\n\0052\003\023\016\001\"\020\n\001U\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\304\002\n\020FusedBatchNormV3\022\006\n\001x\"\001T\022\n\n\005scale\"\001U\022\013\n\006offset\"\001U\022\t\n\004mean\"\001U\022\r\n\010variance\"\001U\032\006\n\001y\"\001T\032\017\n\nbatch_mean\"\001U\032\023\n\016batch_variance\"\001U\032\024\n\017reserve_space_1\"\001U\032\024\n\017reserve_space_2\"\001U\032\024\n\017reserve_space_3\"\001U\"\022\n\001T\022\004type:\007\n\0052\003\023\016\001\"\020\n\001U\022\004type:\005\n\0032\001\001\"\027\n\007epsilon\022\005float\032\005%\027\267\3218\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\027\n\013is_training\022\004bool\032\002(\001\n\272\001\n\016FusedPadConv2D\022\n\n\005input\"\001T\022\014\n\010paddings\030\003\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\023\001\002\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\357\001\n\027FusedResizeAndPadConv2D\022\n\n\005input\"\001T\022\010\n\004size\030\003\022\014\n\010paddings\030\003\022\013\n\006filter\"\001T\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\023\001\002\" 
\n\024resize_align_corners\022\004bool\032\002(\000\"&\n\004mode\022\006string:\026\n\024\022\007REFLECT\022\tSYMMETRIC\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\nW\n\006InTopK\022\017\n\013predictions\030\001\022\014\n\007targets\"\001T\032\r\n\tprecision\030\n\"\010\n\001k\022\003int\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\nW\n\010InTopKV2\022\017\n\013predictions\030\001\022\014\n\007targets\"\001T\022\006\n\001k\"\001T\032\r\n\tprecision\030\n\"\025\n\001T\022\004type\032\0020\003:\006\n\0042\002\003\t\n2\n\006L2Loss\022\006\n\001t\"\001T\032\013\n\006output\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\222\001\n\003LRN\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\027\n\014depth_radius\022\003int\032\002\030\005\"\024\n\004bias\022\005float\032\005%\000\000\200?\"\025\n\005alpha\022\005float\032\005%\000\000\200?\"\024\n\004beta\022\005float\032\005%\000\000\000?\"\026\n\001T\022\004type\032\0020\001:\007\n\0052\003\023\016\001\n\301\001\n\007LRNGrad\022\020\n\013input_grads\"\001T\022\020\n\013input_image\"\001T\022\021\n\014output_image\"\001T\032\013\n\006output\"\001T\"\027\n\014depth_radius\022\003int\032\002\030\005\"\024\n\004bias\022\005float\032\005%\000\000\200?\"\025\n\005alpha\022\005float\032\005%\000\000\200?\"\024\n\004beta\022\005float\032\005%\000\000\000?\"\026\n\001T\022\004type\032\0020\001:\007\n\0052\003\023\016\001\n\\\n\tLeakyRelu\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\025\n\005alpha\022\005float\032\005%\315\314L>\"\027\n\001T\022\004type\032\0020\001:\010\n\0062\004\023\016\001\002\nn\n\rLeakyReluGrad\022\016\n\tgradients\"\001T\022\r\n\010features\"\001T\032\016\n\tbackprops\"\001T\"\025\n\005alpha\022\005float\032\005%\315\314L>\"\027\n\001T\022\004type\032\0020\001:\010\n\0062\004\023\016\001\002\n?\n\nLogSoftmax\022\013\n\006logits\"\001T\032\017\n\nlogsoftmax\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\
023\016\001\002\n\324\001\n\007MaxPool\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\036\n\001T\022\004type\032\0020\001:\017\n\r2\013\023\016\001\002\003\t\004\005\006\021\013\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n\300\001\n\tMaxPool3D\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\022\n\001T\022\004type:\007\n\0052\003\023\016\001\n\221\002\n\rMaxPool3DGrad\022\024\n\norig_input\"\006TInput\022\025\n\013orig_output\"\006TInput\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\026\n\001T\022\004type\032\0020\001:\007\n\0052\003\023\016\001\"\033\n\006TInput\022\004type\032\0020\001:\007\n\0052\003\023\016\001\n\363\001\n\021MaxPool3DGradGrad\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\005\"\030\n\007strides\022\tlist(int)(\0010\005\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"0\n\013data_format\022\006string\032\007\022\005NDHWC:\020\n\016\022\005NDHWC\022\005NCDHW\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\356\001\n\013MaxPoolGrad\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030
\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\037\n\001T\022\004type\032\0020\001:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\356\001\n\017MaxPoolGradGrad\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\326\001\n\021MaxPoolGradGradV2\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\022\t\n\005ksize\030\003\022\013\n\007strides\030\003\032\013\n\006output\"\001T\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\202\002\n\031MaxPoolGradGradWithArgmax\022\n\n\005input\"\001T\022\t\n\004grad\"\001T\022\021\n\006argmax\"\007Targmax\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"\"\n\026include_batch_in_index\022\004bool\032\002(\000\"\027\n\007Targmax\022\004type:\006\n\0042\002\003\t\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\326\001\n\rMaxPoolGradV2\022\017\n\norig_input\"\001T\022\020\n\013orig_output\"\001T\022\t\n\004grad\"\001T\022\t\n\005ksize\030\003\022\013\n\007strides\030\003\032\013\n\006output\"\001T\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"-\n\013data_format\022\006string\032\006\022\
004NHWC:\016\n\014\022\004NHWC\022\004NCHW\"\037\n\001T\022\004type\032\0020\001:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\376\001\n\025MaxPoolGradWithArgmax\022\n\n\005input\"\001T\022\t\n\004grad\"\001T\022\021\n\006argmax\"\007Targmax\032\013\n\006output\"\001T\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"\"\n\026include_batch_in_index\022\004bool\032\002(\000\"\027\n\007Targmax\022\004type:\006\n\0042\002\003\t\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\274\001\n\tMaxPoolV2\022\n\n\005input\"\001T\022\t\n\005ksize\030\003\022\013\n\007strides\030\003\032\013\n\006output\"\001T\"\036\n\001T\022\004type\032\0020\001:\017\n\r2\013\023\016\001\002\003\t\004\005\006\021\013\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\":\n\013data_format\022\006string\032\006\022\004NHWC:\033\n\031\022\004NHWC\022\004NCHW\022\013NCHW_VECT_C\n\363\001\n\021MaxPoolWithArgmax\022\n\n\005input\"\001T\032\013\n\006output\"\001T\032\021\n\006argmax\"\007Targmax\"\026\n\005ksize\022\tlist(int)(\0010\004\"\030\n\007strides\022\tlist(int)(\0010\004\"\033\n\007Targmax\022\004type\032\0020\t:\006\n\0042\002\003\t\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\"\"\n\026include_batch_in_index\022\004bool\032\002(\000\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n^\n\nNthElement\022\n\n\005input\"\001T\022\005\n\001n\030\003\032\013\n\006values\"\001T\"\023\n\007reverse\022\004bool\032\002(\000\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\315\001\n\020QuantizedAvgPool\022\n\n\005input\"\001T\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\032\013\n\006output\"\001T\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"\022\n\005ksize\0
22\tlist(int)\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\231\003\n)QuantizedBatchNormWithGlobalNormalization\022\013\n\001t\"\006Tinput\022\t\n\005t_min\030\001\022\t\n\005t_max\030\001\022\013\n\001m\"\006Tinput\022\t\n\005m_min\030\001\022\t\n\005m_max\030\001\022\013\n\001v\"\006Tinput\022\t\n\005v_min\030\001\022\t\n\005v_max\030\001\022\016\n\004beta\"\006Tinput\022\014\n\010beta_min\030\001\022\014\n\010beta_max\030\001\022\017\n\005gamma\"\006Tinput\022\r\n\tgamma_min\030\001\022\r\n\tgamma_max\030\001\032\022\n\006result\"\010out_type\032\016\n\nresult_min\030\001\032\016\n\nresult_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\"\031\n\020variance_epsilon\022\005float\"!\n\031scale_after_normalization\022\004bool\n\336\001\n\020QuantizedBiasAdd\022\013\n\005input\"\002T1\022\n\n\004bias\"\002T2\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\014\n\010min_bias\030\001\022\014\n\010max_bias\030\001\032\022\n\006output\"\010out_type\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n\333\002\n\017QuantizedConv2D\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\201\003\n\026QuantizedConv2DAndRelu\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\276\003\n#QuantizedConv2DAndReluAndRequantize\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\022\026\n\022min_freezed_output\030\001\022\026\n\022max_freezed_output\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\267\003\n\034QuantizedConv2DAndRequantize\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\022\026\n\022min_freezed_output\030\001\022\026\n\022max_freezed_output\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\013:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\345\002\n\031QuantizedConv2DPerChannel\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\214\003\n\027QuantizedConv2DWithBias\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\010\n\004bias\030\001\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\223\003\n\036QuantizedConv2DWithBiasAndRelu\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\010\n\004bias\030\001\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\354\003\n+QuantizedConv2DWithBiasAndReluAndRequantize\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\004bias\"\005Tbias\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\022\026\n\022min_freezed_output\030\001\022\026\n\022max_freezed_output\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\005Tbias\022\004type:\006\n\0042\002\001\r\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\345\003\n$QuantizedConv2DWithBiasAndRequantize\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\004bias\"\005Tbias\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\022\026\n\022min_freezed_output\030\001\022\026\n\022max_freezed_output\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\005Tbias\022\004type:\006\n\0042\002\001\r\"\037\n\010out_type\022\004type\032\0020\013:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\311\004\n4QuantizedConv2DWithBiasSignedSumAndReluAndRequantize\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\004bias\"\005Tbias\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\022\026\n\022min_freezed_output\030\001\022\026\n\022max_freezed_output\030\001\022\023\n\007summand\"\010Tsummand\022\017\n\013min_summand\030\001\022\017\n\013max_summand\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\005Tbias\022\004type:\006\n\0042\002\001\r\"\033\n\010Tsummand\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\243\003\n!QuantizedConv2DWithBiasSumAndRelu\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\010\n\004bias\030\001\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\022\013\n\007summand\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\303\004\n.QuantizedConv2DWithBiasSumAndReluAndRequantize\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\004bias\"\005Tbias\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\022\026\n\022min_freezed_output\030\001\022\026\n\022max_freezed_output\030\001\022\023\n\007summand\"\010Tsummand\022\017\n\013min_summand\030\001\022\017\n\013max_summand\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\005Tbias\022\004type:\006\n\0042\002\001\r\"\033\n\010Tsummand\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\"\035\n\014padding_list\022\tlist(int)\032\002\n\000\n\344\002\n\030QuantizedDepthwiseConv2D\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\366\002\n 
QuantizedDepthwiseConv2DWithBias\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\010\n\004bias\030\001\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\375\002\n\'QuantizedDepthwiseConv2DWithBiasAndRelu\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\010\n\004bias\030\001\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" 
\n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\326\003\n4QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize\022\017\n\005input\"\006Tinput\022\021\n\006filter\"\007Tfilter\022\r\n\004bias\"\005Tbias\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\022\016\n\nmin_filter\030\001\022\016\n\nmax_filter\030\001\022\026\n\022min_freezed_output\030\001\022\026\n\022max_freezed_output\030\001\032\022\n\006output\"\010out_type\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\032\n\007Tfilter\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\005Tbias\022\004type:\006\n\0042\002\001\r\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\" \n\tdilations\022\tlist(int)\032\010\n\006\032\004\001\001\001\001\n\347\002\n\027QuantizedMatMulWithBias\022\007\n\001a\"\002T1\022\007\n\001b\"\002T2\022\r\n\004bias\"\005Tbias\022\t\n\005min_a\030\001\022\t\n\005max_a\030\001\022\t\n\005min_b\030\001\022\t\n\005max_b\030\001\032\016\n\003out\"\007Toutput\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\005Tbias\022\004type:\006\n\0042\002\001\r\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\">\n\020input_quant_mode\022\006string\032\013\022\tMIN_FIRST:\025\n\023\022\tMIN_FIRST\022\006SCALED\n\322\002\n\036QuantizedMatMulWithBiasAndRelu\022\007\n\001a\"\002T1\022\007\n\001b\"\002T2\022\010\n\004bias\030\001\022\t\n\005min_a\030\001\022\t\n\005max_a\030\001\022\t\n\005min_b\030\001\022\t\n\005max_b\030\001\032\016\n\003out\"\007Toutput\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004ty
pe:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\">\n\020input_quant_mode\022\006string\032\013\022\tMIN_FIRST:\025\n\023\022\tMIN_FIRST\022\006SCALED\n\253\003\n+QuantizedMatMulWithBiasAndReluAndRequantize\022\007\n\001a\"\002T1\022\007\n\001b\"\002T2\022\r\n\004bias\"\005Tbias\022\t\n\005min_a\030\001\022\t\n\005max_a\030\001\022\t\n\005min_b\030\001\022\t\n\005max_b\030\001\022\026\n\022min_freezed_output\030\001\022\026\n\022max_freezed_output\030\001\032\016\n\003out\"\007Toutput\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\005Tbias\022\004type:\006\n\0042\002\001\r\"\036\n\007Toutput\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\">\n\020input_quant_mode\022\006string\032\013\022\tMIN_FIRST:\025\n\023\022\tMIN_FIRST\022\006SCALED\n\315\001\n\020QuantizedMaxPool\022\n\n\005input\"\001T\022\r\n\tmin_input\030\001\022\r\n\tmax_input\030\001\032\013\n\006output\"\001T\032\016\n\nmin_output\030\001\032\016\n\nmax_output\030\001\"\024\n\001T\022\004type:\t\n\0072\005\013\014\r\017\020\"\022\n\005ksize\022\tlist(int)\"\024\n\007strides\022\tlist(int)\"\"\n\007padding\022\006string:\017\n\r\022\004SAME\022\005VALID\n\306\001\n\rQuantizedRelu\022\022\n\010features\"\006Tinput\022\020\n\014min_features\030\001\022\020\n\014max_features\030\001\032\027\n\013activations\"\010out_type\032\023\n\017min_activations\030\001\032\023\n\017max_activations\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\307\001\n\016QuantizedRelu6\0
22\022\n\010features\"\006Tinput\022\020\n\014min_features\030\001\022\020\n\014max_features\030\001\032\027\n\013activations\"\010out_type\032\023\n\017min_activations\030\001\032\023\n\017max_activations\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\326\001\n\016QuantizedReluX\022\022\n\010features\"\006Tinput\022\r\n\tmax_value\030\001\022\020\n\014min_features\030\001\022\020\n\014max_features\030\001\032\027\n\013activations\"\010out_type\032\023\n\017min_activations\030\001\032\023\n\017max_activations\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\037\n\010out_type\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\nE\n\004Relu\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\034\n\001T\022\004type:\021\n\0172\r\001\002\003\004\005\006\t\016\021\023\026\027\013\nE\n\005Relu6\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\nW\n\tRelu6Grad\022\016\n\tgradients\"\001T\022\r\n\010features\"\001T\032\016\n\tbackprops\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\nV\n\010ReluGrad\022\016\n\tgradients\"\001T\022\r\n\010features\"\001T\032\016\n\tbackprops\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n<\n\004Selu\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nM\n\010SeluGrad\022\016\n\tgradients\"\001T\022\014\n\007outputs\"\001T\032\016\n\tbackprops\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n9\n\007Softmax\022\013\n\006logits\"\001T\032\014\n\007softmax\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nj\n\035SoftmaxCrossEntropyWithLogits\022\r\n\010features\"\001T\022\013\n\006labels\"\001T\032\t\n\004loss\"\001T\032\r\n\010b
ackprop\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n@\n\010Softplus\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nR\n\014SoftplusGrad\022\016\n\tgradients\"\001T\022\r\n\010features\"\001T\032\016\n\tbackprops\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n@\n\010Softsign\022\r\n\010features\"\001T\032\020\n\013activations\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\nR\n\014SoftsignGrad\022\016\n\tgradients\"\001T\022\r\n\010features\"\001T\032\016\n\tbackprops\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\n\223\001\n#SparseSoftmaxCrossEntropyWithLogits\022\r\n\010features\"\001T\022\021\n\006labels\"\007Tlabels\032\t\n\004loss\"\001T\032\r\n\010backprop\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\023\016\001\002\"\033\n\007Tlabels\022\004type\032\0020\t:\006\n\0042\002\003\t\n\201\001\n\004TopK\022\n\n\005input\"\001T\032\013\n\006values\"\001T\032\013\n\007indices\030\003\"\n\n\001k\022\003int(\001\"\022\n\006sorted\022\004bool\032\002(\001\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027B\026\010\007\022\022Use TopKV2 instead\nf\n\006TopKV2\022\n\n\005input\"\001T\022\005\n\001k\030\003\032\013\n\006values\"\001T\032\013\n\007indices\030\003\"\022\n\006sorted\022\004bool\032\002(\001\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027")
| 39.549848 | 45,459 | 0.657315 |
cce880ed9e020e9c711c787f8e390d525b6d9c5d | 4,487 | py | Python | Aulas/Módulos_Python/CSV-Comma_Separated_Values/main.py | edersonhs/Python3_Basico_Ao_Avancado | a15754a6fbca407d5a7a7ed4116c2710b4635594 | [
"MIT"
] | null | null | null | Aulas/Módulos_Python/CSV-Comma_Separated_Values/main.py | edersonhs/Python3_Basico_Ao_Avancado | a15754a6fbca407d5a7a7ed4116c2710b4635594 | [
"MIT"
] | null | null | null | Aulas/Módulos_Python/CSV-Comma_Separated_Values/main.py | edersonhs/Python3_Basico_Ao_Avancado | a15754a6fbca407d5a7a7ed4116c2710b4635594 | [
"MIT"
] | null | null | null | """
Comma Separated Values - CSV (Valores separados por vírgula)
É um formato de dados muito usado em tabelas (Excel, Google Sheets), base de
dados, clientes de e-mail, etc...
Cada linha do csv representa uma linha da tabela e cada informação separada
por virgula representa uma tabela da coluna da tabela.
"""
import csv

# Path to the sample CSV file read by the examples below.
caminho_csv = r'Aulas\Módulos_Python\CSV-Comma_Separated_Values\clientes.csv'

# ########################### Reading a CSV file ############################
# Per the csv module docs, files passed to csv.reader/csv.writer must be
# opened with newline='' so the csv module controls line endings itself;
# otherwise extra blank rows appear in the output on Windows.
with open(caminho_csv, 'r', encoding='utf8', newline='') as arquivo:
    # csv.reader returns a lazy iterator, so rows can only be consumed
    # while the file is still open (i.e. inside this `with` block).
    dados = csv.reader(arquivo)  # each row becomes a list of column values
    # next(dados)  # uncomment to skip the header row when needed
    for dado in dados:
        print(dado)

print()
# Reading the CSV rows as dictionaries
with open(caminho_csv, 'r', encoding='utf8', newline='') as arquivo:
    # DictReader is also a lazy iterator; rows are only accessible while
    # the file is open, inside this `with` block.
    dados = csv.DictReader(arquivo)  # each row becomes a dict keyed by header
    for dado in dados:
        print(dado)
    # Example of printing selected columns through the dict keys:
    # for dado in dados:
    #     print(dado['Nome'], dado['Sobrenome'], dado['E-mail'])

print()
# ###################### Using the CSV data outside the with ##################
with open(caminho_csv, 'r', encoding='utf8', newline='') as arquivo:
    # Materialize the rows into a list (via a list comprehension) so they
    # remain usable after the file has been closed.
    dados = [x for x in csv.DictReader(arquivo)]
    # `dados` is now a list containing one dict per CSV row.

# ####### Converting Python data back to CSV and writing it to a file ########
with open('clientes2.csv', 'w', encoding='utf8', newline='') as arquivo:
    escreve = csv.writer(  # configure the CSV writer object
        arquivo,  # file object the rows will be written to
        delimiter=',',  # separator placed between the values
        # Quote character wrapped around values, avoiding problems with
        # embedded delimiters (optional).
        quotechar='"',
        # Quote every value, i.e. write each one as a string (optional).
        quoting=csv.QUOTE_ALL
    )
    # Build the first line (the table header).
    chaves = dados[0].keys()  # all dict keys (the column names)
    chaves = list(chaves)  # convert to a list so it can be indexed
    escreve.writerow(  # write the header row to the file
        [  # values for the first line of the CSV file
            chaves[0],  # "Nome" header
            chaves[1],  # "Sobrenome" header
            chaves[2],  # "E-mail" header
            chaves[3]   # "Telefone" header
        ]
    )
    for dado in dados:
        escreve.writerow(  # write one data row per input record
            [  # values to insert in this row of the CSV file
                dado['Nome'],
                dado['Sobrenome'],
                dado['E-mail'],
                dado['Telefone']
            ]
        )
"""
util:
PARA IMPORTAÇÃO LEGIVEL DO CSV NO EXCEL:
O arquivo CSV é salvo com apenas "uma coluna". Digo isso entre aspas
pois na verdade ele está sim separado em mais de uma coluna, só para
entendimento CSV significa Comma Separate Values, ou seja, Valores
Separados por Virgula.
Sendo assim, a vírgula é um delimitador desse tipo de arquivo e a coluna é
separada por virgula, para fazer com que o excel separe as colunas por
virgulas tem várias maneiras, a mais fácil (para mim) é:
1 - Abra um arquivo no Excel e clique em “Dados”. Em seguida selecione a opção
“De Texto”;
2 - Selecione o arquivo CSV desejado e clique em “Importar”;
3 - Depois disso, selecione a opção “Delimitado”. Em seguida, clique em na
opção “Avançar”;
4 - Para que a importação seja feita com sucesso é preciso atentar para um
detalhe importante. Nesta etapa, é imprescindível selecionar a opção “Vírgula”
antes de clicar em “Avançar”.
Com isso, já é possível perceber na opção “Visualização dos Dados” que as
informações começam a aparecer de forma legível.;
5 - Na tela seguinte é disponibilizado um passo considerado opcional. Isso
porque alguns arquivos CSV transformam vírgulas ( , ) em pontos ( . );
Pronto, agora você tem eles separados em colunas no excel.
"""
| 38.025424 | 79 | 0.667484 |
bc1d0de4ccfcf7131de99f45e145d0d2ee582129 | 1,833 | py | Python | workbench/urls.py | nedbat-test-external/xblock-sdk | 8ccf74eace81ba22ea93c42caada11d4c5870e01 | [
"Apache-2.0"
] | null | null | null | workbench/urls.py | nedbat-test-external/xblock-sdk | 8ccf74eace81ba22ea93c42caada11d4c5870e01 | [
"Apache-2.0"
] | null | null | null | workbench/urls.py | nedbat-test-external/xblock-sdk | 8ccf74eace81ba22ea93c42caada11d4c5870e01 | [
"Apache-2.0"
] | null | null | null | """Provide XBlock urls"""
from __future__ import absolute_import
from django.conf.urls import url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from workbench import views
# Populate the admin site with registered ModelAdmin classes.
admin.autodiscover()
urlpatterns = [
    # Workbench landing page.
    url(r'^$', views.index, name='workbench_index'),
    # Scenario rendered with an explicit view name.
    url(
        r'^scenario/(?P<scenario_id>[^/]+)/(?P<view_name>[^/]+)/$',
        views.show_scenario,
        name='scenario'
    ),
    # List of workbench users (views.user_list).
    url(r'^userlist/$',
        views.user_list,
        name='userlist'),
    # Scenario rendered with the default view.
    url(
        r'^scenario/(?P<scenario_id>[^/]+)/$',
        views.show_scenario,
        name='workbench_show_scenario'
    ),
    # Same scenario views, but rendered with the bare block template
    # (passed as an extra kwarg to views.show_scenario).
    url(
        r'^view/(?P<scenario_id>[^/]+)/(?P<view_name>[^/]+)/$',
        views.show_scenario,
        {'template': 'workbench/blockview.html'}
    ),
    url(
        r'^view/(?P<scenario_id>[^/]+)/$',
        views.show_scenario,
        {'template': 'workbench/blockview.html'}
    ),
    # Handler endpoints; the extra dict tells views.handler whether the
    # caller must be authenticated.
    url(
        r'^handler/(?P<usage_id>[^/]+)/(?P<handler_slug>[^/]*)(?:/(?P<suffix>.*))?$',
        views.handler, {'authenticated': True},
        name='handler'
    ),
    # Handler endpoint for block asides (views.aside_handler).
    url(
        r'^aside_handler/(?P<aside_id>[^/]+)/(?P<handler_slug>[^/]*)(?:/(?P<suffix>.*))?$',
        views.aside_handler, {'authenticated': True},
        name='aside_handler'
    ),
    # Same handler view, but with authentication disabled.
    url(
        r'^unauth_handler/(?P<usage_id>[^/]+)/(?P<handler_slug>[^/]*)(?:/(?P<suffix>.*))?$',
        views.handler, {'authenticated': False},
        name='unauth_handler'
    ),
    # Static resources served from a block's package (views.package_resource).
    url(
        r'^resource/(?P<block_type>[^/]+)/(?P<resource>.*)$',
        views.package_resource,
        name='package_resource'
    ),
    # Route for views.reset_state (presumably clears stored workbench
    # state — confirm against the view implementation).
    url(
        r'^reset_state$',
        views.reset_state,
        name='reset_state'
    ),
    # Django admin site.
    url(r'^admin/', admin.site.urls),
]
# Append the staticfiles development-serving URL patterns.
urlpatterns += staticfiles_urlpatterns()
| 26.955882 | 92 | 0.555374 |
cc65e648ff868673a47ee275cdcd5e42725c1a71 | 464 | py | Python | blender/arm/logicnode/value_get_material.py | DsmMatt/armory | 3fa9321016f6e83b2c1009e5ce220566fb35011e | [
"Zlib"
] | 1 | 2018-12-04T05:33:53.000Z | 2018-12-04T05:33:53.000Z | blender/arm/logicnode/value_get_material.py | DsmMatt/armory | 3fa9321016f6e83b2c1009e5ce220566fb35011e | [
"Zlib"
] | null | null | null | blender/arm/logicnode/value_get_material.py | DsmMatt/armory | 3fa9321016f6e83b2c1009e5ce220566fb35011e | [
"Zlib"
] | null | null | null | import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class GetMaterialNode(Node, ArmLogicTreeNode):
    '''Logic node that exposes the material of the object wired into it.'''
    bl_idname = 'LNGetMaterialNode'  # internal Blender node identifier
    bl_label = 'Get Material'  # label shown in the node editor UI
    bl_icon = 'GAME'
    def init(self, context):
        # Socket creation order determines the node's layout:
        # one object input, one material (shader) output.
        self.inputs.new('ArmNodeSocketObject', 'Object')
        self.outputs.new('NodeSocketShader', 'Material')
# Register the node under the 'Value' category in the add-node menu.
add_node(GetMaterialNode, category='Value')
| 27.294118 | 56 | 0.713362 |
ade5a10959cf6d5c546434d3fe4bbe18e4a5647a | 8,170 | py | Python | sharing_portal/forms.py | ChameleonCloud/portal | 92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee | [
"Apache-2.0"
] | 3 | 2015-08-04T20:53:41.000Z | 2020-02-14T22:58:20.000Z | sharing_portal/forms.py | ChameleonCloud/portal | 92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee | [
"Apache-2.0"
] | 103 | 2015-01-15T14:21:00.000Z | 2022-03-31T19:14:20.000Z | sharing_portal/forms.py | ChameleonCloud/portal | 92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee | [
"Apache-2.0"
] | 4 | 2016-02-22T16:48:20.000Z | 2021-01-08T17:13:21.000Z | from django.db.models import Q
from django.core.exceptions import ValidationError
from django import forms
from django.forms import widgets
from projects.models import Project
from util.project_allocation_mapper import ProjectAllocationMapper
from .models import Artifact, ArtifactVersion, Author, Label, DaypassRequest
import logging
LOG = logging.getLogger(__name__)
class ArtifactForm(forms.ModelForm):
    """Edit form for an artifact's basic metadata and labels.

    The "labels" choices are restricted at render time so that only staff
    users are offered the special Chameleon-supported label; the same rule
    is enforced server-side in ``clean_labels``.
    """

    class Meta:
        model = Artifact
        fields = (
            "title",
            "short_description",
            "description",
            "labels",
        )

    def __init__(self, *args, **kwargs):
        # The view passes the current request so the form can inspect the
        # requesting user's staff status.
        self.request = kwargs.pop("request")
        super().__init__(*args, **kwargs)
        # Staff may pick any label; everyone else is never offered the
        # special "Chameleon-supported" label as a choice.
        selectable = (
            Label.objects.all()
            if self.request.user.is_staff
            else Label.objects.filter(~Q(label=Label.CHAMELEON_SUPPORTED))
        )
        self.fields["labels"] = forms.ModelMultipleChoiceField(
            selectable, required=False)

    def clean_labels(self):
        """Reject the Chameleon-supported label for non-staff submitters."""
        labels = self.cleaned_data["labels"]
        wants_chameleon = any(
            chosen.label == Label.CHAMELEON_SUPPORTED for chosen in labels
        )
        if wants_chameleon and not self.request.user.is_staff:
            raise ValidationError("Invalid label")
        return labels
class ArtifactVersionForm(forms.ModelForm):
    """Edit form for an ArtifactVersion; deposition fields are display-only."""
    # These fields are rendered but should not be edited by the user.
    readonly_fields = ('deposition_id', 'deposition_repo',)
    def __init__(self, *args, **kwargs):
        super(ArtifactVersionForm, self).__init__(*args, **kwargs)
        # NOTE(review): the HTML "readonly" attribute is advisory only; a
        # crafted POST can still submit new values -- confirm that is acceptable.
        for readonly_field in self.readonly_fields:
            self.fields[readonly_field].widget.attrs['readonly'] = True
    class Meta:
        model = ArtifactVersion
        fields = ('deposition_id', 'deposition_repo',)
class AuthorForm(forms.ModelForm):
    """Author form that reuses an existing identical (name, affiliation) row."""
    class Meta:
        model = Author
        fields = ('name', 'affiliation',)
    def save(self, commit=True):
        # Perform a lookup to see if this field combo already exists.
        # If it does, then just return it from the DB. Else, create a new
        # entry. This allows users to add new authors, but it avoids creating
        # duplicates.
        try:
            self.instance = self.Meta.model.objects.get(
                name=self.instance.name,
                affiliation=self.instance.affiliation)
        except self.Meta.model.DoesNotExist:
            pass
        return super(AuthorForm, self).save(commit=commit)
# Author rows for the artifact page: shows 2 blank extras, requires at least
# one author, caps at 3, and allows rows to be deleted.
AuthorFormset = forms.modelformset_factory(
    AuthorForm.Meta.model, form=AuthorForm, can_delete=True,
    extra=2, min_num=1, max_num=3)
class ShareArtifactForm(forms.Form):
    """Form controlling an artifact's visibility and sharing settings."""
    class ProjectChoiceField(forms.ModelMultipleChoiceField):
        def label_from_instance(self, project):
            # Prefer the human-friendly nickname; fall back to the charge code.
            return project.nickname or project.charge_code
    is_public = forms.BooleanField(
        label="Enable all users to find and share",
        required=False,
        widget=widgets.CheckboxInput(attrs={"v-model": "is_public"}),
    )
    is_reproducible = forms.BooleanField(
        label="Enable reproducibility requests",
        required=False,
        widget=widgets.CheckboxInput(attrs={"v-model": "is_reproducible"}),
    )
    reproduce_hours = forms.IntegerField(
        label="Hours a user has to reproduce", required=False
    )
    projects = ProjectChoiceField(
        label="Share with projects", required=False, queryset=Project.objects.all()
    )
    # Custom init is required to dynamically fill the projects choice field
    def __init__(self, request, *args, **kwargs):
        super(ShareArtifactForm, self).__init__(*args, **kwargs)
        mapper = ProjectAllocationMapper(request)
        user_projects = [
            (
                project["chargeCode"],
                project["nickname"] if "nickname" in project else project["chargeCode"],
            )
            for project in mapper.get_user_projects(
                request.user.username, to_pytas_model=False
            )
        ]
        # NOTE(review): this adds a *singular* "project" field (the owning
        # project) distinct from the plural "projects" share-list declared
        # above -- confirm both fields are intended.
        self.fields["project"] = forms.ChoiceField(
            label="Belongs to project",
            required=False,
            choices=[(None, "----")] + user_projects,
        )
    def clean(self):
        data = self.cleaned_data
        # Reproducibility requests require both a time budget and a project.
        if data.get("is_reproducible", None) and not data.get("reproduce_hours", None):
            raise forms.ValidationError(
                "You must include hours when enabling reproducibility requests"
            )
        if data.get("is_reproducible", None) and not data.get("project", None):
            raise forms.ValidationError(
                "You must associate this artifact with a project to enable reproducibility requests"
            )
        return data
class ZenodoPublishForm(forms.Form):
    """Per-version form row for requesting a Zenodo DOI for an artifact version."""
    artifact_version_id = forms.CharField(widget=forms.HiddenInput())
    request_doi = forms.BooleanField(required=False)
    def __init__(self, *args, **kwargs):
        # ``model`` is the ArtifactVersion instance this row corresponds to.
        self.model = kwargs.pop('model')
        label = kwargs.pop('label', 'Request DOI')
        force_disable = kwargs.pop('force_disable', False)
        super(ZenodoPublishForm, self).__init__(*args, **kwargs)
        doi_field = self.fields['request_doi']
        # Once a DOI exists (or publishing is blocked), freeze the checkbox.
        doi_field.disabled = self._has_doi() or force_disable
        if self._has_doi():
            doi_field.label = 'Published as {}'.format(self.model.doi)
        else:
            doi_field.label = label
    def get_initial_for_field(self, field, field_name):
        # Seed the hidden id and checkbox state from the linked model.
        if field_name == 'artifact_version_id':
            return self.model.pk
        elif field_name == 'request_doi':
            return self._has_doi()
        return None
    def clean(self):
        """Override clean to avoid setting form data when a DOI is assigned.
        This prevents duplicate requests to try to request a DOI for an
        artifact version.
        """
        if self._has_doi():
            self.cleaned_data = {}
        return super(ZenodoPublishForm, self).clean()
    def _has_doi(self):
        # True when the linked version already has a DOI assigned.
        return self.model.doi is not None
class BaseZenodoPublishFormset(forms.BaseFormSet):
    """Formset of ZenodoPublishForm rows, one per artifact version."""
    def __init__(self, *args, **kwargs):
        artifact_versions = kwargs.pop('artifact_versions')
        if not artifact_versions:
            raise ValueError('artifact_versions must provided')
        self.artifact_versions = artifact_versions
        # Index of the newest version that already has a DOI (-1 if none).
        self.latest_published_version = max([
            i for i, v in enumerate(artifact_versions) if v.doi
        ] or [-1])
        kwargs['initial'] = [{} for _ in artifact_versions]
        super(BaseZenodoPublishFormset, self).__init__(*args, **kwargs)
    def get_form_kwargs(self, index):
        """Pass the linked artifact version model through to the nested form.
        """
        future_version_published = index < self.latest_published_version
        return {
            'model': self.artifact_versions[index],
            # Prevent publishing versions behind the latest published version
            'force_disable': future_version_published,
            'label': ('(cannot request DOI for past versions)'
                      if future_version_published else None)
        }
    @property
    def cleaned_data(self):
        """Override cleaned_data to ignore forms with empty data.
        """
        return [x for x in super(BaseZenodoPublishFormset, self).cleaned_data if x]
# Factory pairing ZenodoPublishForm with the version-aware base formset;
# extra=0 because rows are created solely from the existing versions.
ZenodoPublishFormset = forms.formset_factory(
    ZenodoPublishForm, formset=BaseZenodoPublishFormset, extra=0
)
class RequestDaypassForm(forms.Form):
    """Visitor-facing form for requesting temporary (daypass) access."""
    name = forms.CharField()
    # Pre-filled from the account; rendered but not editable.
    email = forms.CharField(disabled=True, required=False)
    institution = forms.CharField()
    reason = forms.CharField(
        widget=forms.Textarea(attrs={"placeholder": "Reason for request"}),
    )
class ReviewDaypassForm(forms.Form):
    """Staff-facing form for approving/rejecting a pending daypass request."""
    status = forms.ChoiceField(required=True, choices=DaypassRequest.STATUS)
    def clean(self):
        """Disallow saving the review while the status is still "pending"."""
        data = self.cleaned_data
        if data.get("status", None) == DaypassRequest.STATUS_PENDING:
            raise forms.ValidationError("You must set a status")
        # Return the cleaned data explicitly, consistent with
        # ShareArtifactForm.clean() in this module (Django treats a None
        # return the same, but the explicit return avoids surprises in
        # subclasses that call super().clean()).
        return data
| 35.064378 | 100 | 0.647246 |
f1e1f924901bf23328a5cb55a02093e8efac2315 | 2,917 | py | Python | manuka/migrations/env.py | NeCTAR-RC/manuka | 93e2eaa7dd8bbb499132f564b727ea3cb21302d9 | [
"Apache-2.0"
] | 1 | 2021-08-16T14:50:35.000Z | 2021-08-16T14:50:35.000Z | warre/migrations/env.py | NeCTAR-RC/warre | 4ca9bfaa6d0568cc8268d570b36c2c0fcb0f9d8e | [
"Apache-2.0"
] | null | null | null | warre/migrations/env.py | NeCTAR-RC/warre | 4ca9bfaa6d0568cc8268d570b36c2c0fcb0f9d8e | [
"Apache-2.0"
] | null | null | null | from __future__ import with_statement
import logging
from logging.config import fileConfig
from alembic import context
from flask import current_app
from sqlalchemy import engine_from_config
from sqlalchemy import pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
config.set_main_option(
'sqlalchemy.url',
str(current_app.extensions['migrate'].db.engine.url).replace('%', '%%'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the Alembic context with just a database URL -- no Engine or
    DBAPI is required. Calls to context.execute() emit the generated SQL to
    the script output instead of executing it.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.
    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                # Emptying the directives list suppresses the empty revision.
                directives[:] = []
                logger.info('No changes in schema detected.')
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
    )
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            process_revision_directives=process_revision_directives,
            # Flask-Migrate forwards extra configure kwargs (e.g. compare_type).
            **current_app.extensions['migrate'].configure_args
        )
        with context.begin_transaction():
            context.run_migrations()
# Entry point: Alembic executes this module and we pick the mode based on
# whether SQL should be rendered to stdout (offline) or run against a live
# database connection (online).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| 29.765306 | 77 | 0.712376 |
ee54c2fddaaa642b3af67ef545893d9ea40b7c6b | 1,532 | py | Python | tests/cloudformation/checks/resource/aws/test_CloudWatchLogGroupRetention.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 4,013 | 2019-12-09T13:16:54.000Z | 2022-03-31T14:31:01.000Z | tests/cloudformation/checks/resource/aws/test_CloudWatchLogGroupRetention.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 1,258 | 2019-12-17T09:55:51.000Z | 2022-03-31T19:17:17.000Z | tests/cloudformation/checks/resource/aws/test_CloudWatchLogGroupRetention.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 638 | 2019-12-19T08:57:38.000Z | 2022-03-30T21:38:37.000Z | import os
import unittest
from checkov.cloudformation.checks.resource.aws.CloudWatchLogGroupRetention import check
from checkov.cloudformation.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestCloudWatchLogGroupRetention(unittest.TestCase):
    """Runs the CloudWatch log-group retention check against example templates."""
    def test_summary(self):
        runner = Runner()
        current_dir = os.path.dirname(os.path.realpath(__file__))
        test_files_dir = current_dir + "/example_CloudWatchLogGroupRetention"
        report = runner.run(root_folder=test_files_dir,runner_filter=RunnerFilter(checks=[check.id]))
        summary = report.get_summary()
        # Every record in the report must come from the check under test.
        for record in report.failed_checks:
            self.assertEqual(record.check_id, check.id)
        for record in report.passed_checks:
            self.assertEqual(record.check_id, check.id)
        passing_resources = {
            "AWS::Logs::LogGroup.Pass",
        }
        failing_resources = {
            "AWS::Logs::LogGroup.Fail",
        }
        # Idiom fix: set comprehensions instead of set([list-comprehension]).
        passed_check_resources = {c.resource for c in report.passed_checks}
        failed_check_resources = {c.resource for c in report.failed_checks}
        self.assertEqual(summary['passed'], 1)
        self.assertEqual(summary['failed'], 1)
        self.assertEqual(summary['skipped'], 0)
        self.assertEqual(summary['parsing_errors'], 0)
        self.assertEqual(passing_resources, passed_check_resources)
        self.assertEqual(failing_resources, failed_check_resources)
if __name__ == '__main__':
    unittest.main()
| 32.595745 | 101 | 0.693864 |
0ace401e3a8e2f0d96f53e4997531c92b17664a7 | 184,187 | py | Python | lhotse/cut.py | luomingshuang/lhotse | 7c1e3bd33f3b1ef23b67ee3c2e70443bf302e655 | [
"Apache-2.0"
] | null | null | null | lhotse/cut.py | luomingshuang/lhotse | 7c1e3bd33f3b1ef23b67ee3c2e70443bf302e655 | [
"Apache-2.0"
] | null | null | null | lhotse/cut.py | luomingshuang/lhotse | 7c1e3bd33f3b1ef23b67ee3c2e70443bf302e655 | [
"Apache-2.0"
] | null | null | null | import logging
import random
import warnings
from concurrent.futures import Executor, ProcessPoolExecutor
from dataclasses import dataclass, field
from functools import partial, reduce
from itertools import chain, islice
from math import ceil, floor
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
FrozenSet,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from intervaltree import Interval, IntervalTree
from tqdm.auto import tqdm
from typing_extensions import Literal
from lhotse.audio import AudioMixer, AudioSource, Recording, RecordingSet
from lhotse.augmentation import AugmentFn
from lhotse.features import (
FeatureExtractor,
FeatureMixer,
FeatureSet,
Features,
create_default_feature_extractor,
)
from lhotse.features.base import compute_global_stats
from lhotse.features.io import FeaturesWriter, LilcomFilesWriter, LilcomHdf5Writer
from lhotse.serialization import Serializable
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import (
DEFAULT_PADDING_VALUE,
Decibels,
LOG_EPSILON,
NonPositiveEnergyError,
Pathlike,
Seconds,
SetContainingAnything,
TimeSpan,
asdict_nonull,
compute_num_frames,
compute_num_samples,
compute_start_duration_for_extended_cut,
exactly_one_not_null,
fastcopy,
ifnone,
index_by_id_and_check,
measure_overlap,
overlaps,
overspans,
perturb_num_samples,
rich_exception_info,
split_sequence,
uuid4,
)
# One of the design principles for Cuts is a maximally "lazy" implementation, e.g. when mixing Cuts,
# we'd rather sum the feature matrices only after somebody actually calls "load_features". It helps to avoid
# an excessive storage size for data augmented in various ways.
FW = TypeVar("FW", bound=FeaturesWriter)
class Cut:
"""
.. caution::
:class:`~lhotse.cut.Cut` is just an abstract class -- the actual logic is implemented by its child classes (scroll down for references).
:class:`~lhotse.cut.Cut` is a base class for audio cuts.
An "audio cut" is a subset of a :class:`~lhotse.audio.Recording` -- it can also be thought of as a "view"
or a pointer to a chunk of audio.
It is not limited to audio data -- cuts may also point to (sub-spans of) precomputed
:class:`~lhotse.features.base.Features`.
Cuts are different from :class:`~lhotse.supervision.SupervisionSegment` in that they may be arbitrarily
longer or shorter than supervisions; cuts may even contain multiple supervisions for creating contextual
training data, and unsupervised regions that provide real or synthetic acoustic background context
for the supervised segments.
The following example visualizes how a cut may represent a part of a single-channel recording with
two utterances and some background noise in between::
Recording
|-------------------------------------------|
"Hey, Matt!" "Yes?" "Oh, nothing"
|----------| |----| |-----------|
Cut1
|------------------------|
This scenario can be represented in code, using :class:`~lhotse.cut.MonoCut`, as::
>>> from lhotse import Recording, SupervisionSegment, MonoCut
>>> rec = Recording(id='rec1', duration=10.0, sampling_rate=8000, num_samples=80000, sources=[...])
>>> sups = [
... SupervisionSegment(id='sup1', recording_id='rec1', start=0, duration=3.37, text='Hey, Matt!'),
... SupervisionSegment(id='sup2', recording_id='rec1', start=4.5, duration=0.9, text='Yes?'),
... SupervisionSegment(id='sup3', recording_id='rec1', start=6.9, duration=2.9, text='Oh, nothing'),
... ]
>>> cut = MonoCut(id='rec1-cut1', start=0.0, duration=6.0, channel=0, recording=rec,
... supervisions=[sups[0], sups[1]])
.. note::
All Cut classes assume that the :class:`~lhotse.supervision.SupervisionSegment` time boundaries are relative
to the beginning of the cut.
E.g. if the underlying :class:`~lhotse.audio.Recording` starts at 0s (always true), the cut starts at 100s,
and the SupervisionSegment inside the cut starts at 3s, it really did start at 103rd second of the recording.
In some cases, the supervision might have a negative start, or a duration exceeding the duration of the cut;
this means that the supervision in the recording extends beyond the cut.
Cut allows to check and read audio data or features data::
>>> assert cut.has_recording
>>> samples = cut.load_audio()
>>> if cut.has_features:
... feats = cut.load_features()
It can be visualized, and listened to, inside Jupyter Notebooks::
>>> cut.plot_audio()
>>> cut.play_audio()
>>> cut.plot_features()
Cuts can be used with Lhotse's :class:`~lhotse.features.base.FeatureExtractor` to compute features.
>>> from lhotse import Fbank
>>> feats = cut.compute_features(extractor=Fbank())
It is also possible to use a :class:`~lhotse.features.io.FeaturesWriter` to store the features and attach
their manifest to a copy of the cut::
>>> from lhotse import LilcomHdf5Writer
>>> with LilcomHdf5Writer('feats.h5') as storage:
... cut_with_feats = cut.compute_and_store_features(
... extractor=Fbank(),
... storage=storage
... )
Cuts have several methods that allow their manipulation, transformation, and mixing.
Some examples (see the respective methods documentation for details)::
>>> cut_2_to_4s = cut.truncate(offset=2, duration=2)
>>> cut_padded = cut.pad(duration=10.0)
>>> cut_mixed = cut.mix(other_cut, offset_other_by=5.0, snr=20)
>>> cut_append = cut.append(other_cut)
>>> cut_24k = cut.resample(24000)
>>> cut_sp = cut.perturb_speed(1.1)
>>> cut_vp = cut.perturb_volume(2.)
.. note::
All cut transformations are performed lazily, on-the-fly, upon calling ``load_audio`` or ``load_features``.
The stored waveforms and features are untouched.
.. caution::
Operations on cuts are not mutating -- they return modified copies of :class:`.Cut` objects,
leaving the original object unmodified.
A :class:`.Cut` that contains multiple segments (:class:`SupervisionSegment`) can be decayed into
smaller cuts that correspond directly to supervisions::
>>> smaller_cuts = cut.trim_to_supervisions()
Cuts can be detached from parts of their metadata::
>>> cut_no_feat = cut.drop_features()
>>> cut_no_rec = cut.drop_recording()
>>> cut_no_sup = cut.drop_supervisions()
Finally, cuts provide convenience methods to compute feature frame and audio sample masks for supervised regions::
>>> sup_frames = cut.supervisions_feature_mask()
>>> sup_samples = cut.supervisions_audio_mask()
See also:
- :class:`lhotse.cut.MonoCut`
- :class:`lhotse.cut.MixedCut`
- :class:`lhotse.cut.CutSet`
"""
# The following is the list of members and properties implemented by the child classes.
# They are not abstract properties because dataclasses do not work well with the "abc" module.
id: str
start: Seconds
duration: Seconds
sampling_rate: int
supervisions: List[SupervisionSegment]
num_samples: Optional[int]
num_frames: Optional[int]
num_features: Optional[int]
frame_shift: Optional[Seconds]
features_type: Optional[str]
has_recording: bool
has_features: bool
# The following is the list of methods implemented by the child classes.
# They are not abstract methods because dataclasses do not work well with the "abc" module.
# Check a specific child class for their documentation.
from_dict: Callable[[Dict], "Cut"]
load_audio: Callable[[], np.ndarray]
load_features: Callable[[], np.ndarray]
compute_and_store_features: Callable
drop_features: Callable
drop_recording: Callable
drop_supervisions: Callable
truncate: Callable
pad: Callable
resample: Callable
perturb_speed: Callable
perturb_tempo: Callable
perturb_volume: Callable
map_supervisions: Callable
filter_supervisions: Callable
with_features_path_prefix: Callable
with_recording_path_prefix: Callable
def to_dict(self) -> dict:
d = asdict_nonull(self)
return {**d, "type": type(self).__name__}
@property
def trimmed_supervisions(self) -> List[SupervisionSegment]:
"""
Return the supervisions in this Cut that have modified time boundaries so as not to exceed
the Cut's start or end.
Note that when ``cut.supervisions`` is called, the supervisions may have negative ``start``
values that indicate the supervision actually begins before the cut, or ``end`` values
that exceed the Cut's duration (it means the supervision continued in the original recording
after the Cut's ending).
.. caution::
For some tasks such as speech recognition (ASR), trimmed supervisions
could result in corrupted training data. This is because a part of the transcript
might actually reside outside of the cut.
"""
return [s.trim(self.duration) for s in self.supervisions]
    def mix(
        self,
        other: "Cut",
        offset_other_by: Seconds = 0.0,
        snr: Optional[Decibels] = None,
        preserve_id: Optional[str] = None,
    ) -> "MixedCut":
        """Refer to :function:`~lhotse.cut.mix` documentation."""
        # Thin wrapper over the module-level ``mix`` function; the actual
        # mixing is lazy and happens on load_audio()/load_features().
        return mix(
            self, other, offset=offset_other_by, snr=snr, preserve_id=preserve_id
        )
    def append(
        self,
        other: "Cut",
        snr: Optional[Decibels] = None,
        preserve_id: Optional[str] = None,
    ) -> "MixedCut":
        """
        Append the ``other`` Cut after the current Cut. Conceptually the same as ``mix`` but with an offset
        matching the current cuts length. Optionally scale down (positive SNR) or scale up (negative SNR)
        the ``other`` cut.
        Returns a MixedCut, which only keeps the information about the mix; actual mixing is performed
        during the call to ``load_features``.
        :param snr: optional signal-to-noise ratio in dB applied to ``other``.
        :param preserve_id: optional string ("left", "right"). When specified, append will preserve the cut ID
            of the left- or right-hand side argument. Otherwise, a new random ID is generated.
        """
        # "Appending" is mixing with the offset set to this cut's duration.
        return mix(self, other, offset=self.duration, snr=snr, preserve_id=preserve_id)
def compute_features(
self,
extractor: FeatureExtractor,
augment_fn: Optional[AugmentFn] = None,
) -> np.ndarray:
"""
Compute the features from this cut. This cut has to be able to load audio.
:param extractor: a ``FeatureExtractor`` instance used to compute the features.
:param augment_fn: optional ``WavAugmenter`` instance for audio augmentation.
:return: a numpy ndarray with the computed features.
"""
samples = self.load_audio()
if augment_fn is not None:
samples = augment_fn(samples, self.sampling_rate)
return extractor.extract(samples, self.sampling_rate)
    def plot_audio(self):
        """
        Display a plot of the waveform. Requires matplotlib to be installed.
        Supervised regions are shaded in green.
        :return: the matplotlib ``Axes`` with the waveform plot.
        """
        import matplotlib.pyplot as plt
        samples = self.load_audio().squeeze()
        fig, ax = plt.subplots()
        # X axis in seconds, spanning the full cut duration.
        ax.plot(np.linspace(0, self.duration, len(samples)), samples)
        for supervision in self.supervisions:
            # Clamp each supervision to the cut boundaries before shading it.
            supervision = supervision.trim(self.duration)
            ax.axvspan(supervision.start, supervision.end, color="green", alpha=0.1)
        return ax
    def play_audio(self):
        """
        Display a Jupyter widget that allows to listen to the waveform.
        Works only in Jupyter notebook/lab or similar (e.g. Colab).
        """
        from IPython.display import Audio
        # squeeze() drops the leading channel dimension of the loaded audio.
        samples = self.load_audio().squeeze()
        return Audio(samples, rate=self.sampling_rate)
    def plot_features(self):
        """
        Display the feature matrix as an image. Requires matplotlib to be installed.
        """
        import matplotlib.pyplot as plt
        # Transpose to (features, frames) and flip so low bins are at the bottom.
        features = np.flip(self.load_features().transpose(1, 0), 0)
        return plt.matshow(features)
    def plot_alignment(self, alignment_type: str = "word"):
        """
        Display the alignment on top of a spectrogram. Requires matplotlib to be installed.
        :param alignment_type: which alignment layer of the supervision to draw (e.g. "word").
        """
        import matplotlib.pyplot as plt
        from lhotse import Fbank
        from lhotse.utils import compute_num_frames
        assert (
            len(self.supervisions) == 1
        ), "Cannot plot alignment: there has to be exactly one supervision in a Cut."
        sup = self.supervisions[0]
        assert (
            sup.alignment is not None and alignment_type in sup.alignment
        ), f"Cannot plot alignment: missing alignment field or alignment type '{alignment_type}'"
        # Compute a log-Mel spectrogram to use as the background image.
        fbank = Fbank()
        feats = self.compute_features(fbank)
        speaker = sup.speaker
        language = sup.language
        fig = plt.matshow(np.flip(feats.transpose(1, 0), 0))
        plt.title(
            "Cut ID:" + self.id + ", Speaker:" + speaker + ", Language:" + language
        )
        plt.tick_params(
            axis="both",
            which="major",
            labelbottom=True,
            labeltop=False,
            bottom=True,
            top=False,
        )
        for idx, item in enumerate(sup.alignment[alignment_type]):
            # Alternate the label height so adjacent symbols don't overlap.
            is_even = bool(idx % 2)
            end_frame = compute_num_frames(
                item.end,
                frame_shift=fbank.frame_shift,
                sampling_rate=self.sampling_rate,
            )
            plt.text(
                end_frame - 4,
                70 if is_even else 45,
                item.symbol,
                fontsize=12,
                color="w",
                rotation="vertical",
            )
            # Vertical line marking the end of each aligned symbol.
            plt.axvline(end_frame, color="k")
        plt.show()
    def trim_to_supervisions(
        self,
        keep_overlapping: bool = True,
        min_duration: Optional[Seconds] = None,
        context_direction: Literal["center", "left", "right", "random"] = "center",
    ) -> List["Cut"]:
        """
        Splits the current :class:`.Cut` into as many cuts as there are supervisions (:class:`.SupervisionSegment`).
        These cuts have identical start times and durations as the supervisions.
        When there are overlapping supervisions, they can be kept or discarded via ``keep_overlapping`` flag.
        For example, the following cut::
                    Cut
            |-----------------|
             Sup1
            |----|  Sup2
               |-----------|
        is transformed into two cuts::
             Cut1
            |----|
             Sup1
            |----|
               Sup2
               |-|
                    Cut2
               |-----------|
               Sup1
               |-|
                  Sup2
               |-----------|
        :param keep_overlapping: when ``False``, it will discard parts of other supervisions that overlap with the
            main supervision. In the illustration above, it would discard ``Sup2`` in ``Cut1`` and ``Sup1`` in ``Cut2``.
        :param min_duration: An optional duration in seconds; specifying this argument will extend the cuts
            that would have been shorter than ``min_duration`` with actual acoustic context in the recording/features.
            If there are supervisions present in the context, they are kept when ``keep_overlapping`` is true.
            If there is not enough context, the returned cut will be shorter than ``min_duration``.
            If the supervision segment is longer than ``min_duration``, the return cut will be longer.
        :param context_direction: Which direction should the cut be expanded towards to include context.
            The value of "center" implies equal expansion to left and right;
            random uniformly samples a value between "left" and "right".
        :return: a list of cuts.
        """
        cuts = []
        # Pre-index the supervisions once so every truncate() call below can
        # query overlaps efficiently (matters for long recordings).
        supervisions_index = self.index_supervisions(index_mixed_tracks=True)
        for segment in self.supervisions:
            if min_duration is None:
                # Cut boundaries are equal to the supervision segment boundaries.
                new_start, new_duration = segment.start, segment.duration
            else:
                # Cut boundaries will be extended with some acoustic context.
                new_start, new_duration = compute_start_duration_for_extended_cut(
                    start=segment.start,
                    duration=segment.duration,
                    new_duration=min_duration,
                    direction=context_direction,
                )
            cuts.append(
                self.truncate(
                    offset=new_start,
                    duration=new_duration,
                    keep_excessive_supervisions=keep_overlapping,
                    _supervisions_index=supervisions_index,
                )
            )
        return cuts
    def index_supervisions(
        self, index_mixed_tracks: bool = False, keep_ids: Optional[Set[str]] = None
    ) -> Dict[str, IntervalTree]:
        """
        Create a two-level index of supervision segments. It is a mapping from a Cut's ID to an
        interval tree that contains the supervisions of that Cut.
        The interval tree can be efficiently queried for overlapping and/or enveloping segments.
        It helps speed up some operations on Cuts of very long recordings (1h+) that contain many
        supervisions.
        :param index_mixed_tracks: Should the tracks of MixedCut's be indexed as additional, separate entries.
        :param keep_ids: If specified, we will only index the supervisions with the specified IDs.
        :return: a mapping from Cut ID to an interval tree of SupervisionSegments.
        """
        # When no filter is given, use a sentinel set that "contains" any id.
        keep_ids = ifnone(keep_ids, SetContainingAnything())
        indexed = {
            self.id: IntervalTree(
                Interval(s.start, s.end, s)
                for s in self.supervisions
                if s.id in keep_ids
            )
        }
        if index_mixed_tracks:
            # Additionally index each track's cut of a MixedCut under its own id.
            if isinstance(self, MixedCut):
                for track in self.tracks:
                    indexed[track.cut.id] = IntervalTree(
                        Interval(s.start, s.end, s)
                        for s in track.cut.supervisions
                        if s.id in keep_ids
                    )
        return indexed
    def compute_and_store_recording(
        self,
        storage_path: Pathlike,
        augment_fn: Optional[AugmentFn] = None,
    ) -> "MonoCut":
        """
        Store this cut's waveform as audio recording to disk.
        :param storage_path: The path to location where we will store the audio recordings.
        :param augment_fn: an optional callable used for audio augmentation.
            Be careful with the types of augmentations used: if they modify
            the start/end/duration times of the cut and its supervisions,
            you will end up with incorrect supervision information when using this API.
            E.g. for speed perturbation, use ``CutSet.perturb_speed()`` instead.
        :return: a new MonoCut instance.
        """
        storage_path = Path(storage_path)
        samples = self.load_audio()
        if augment_fn is not None:
            samples = augment_fn(samples, self.sampling_rate)
        # Store audio as FLAC
        import soundfile as sf
        sf.write(
            file=str(storage_path),
            data=samples.transpose(),
            samplerate=self.sampling_rate,
            format="FLAC",
        )
        # Build a manifest describing the file that was just written.
        recording = Recording(
            id=storage_path.stem,
            sampling_rate=self.sampling_rate,
            num_samples=samples.shape[1],
            duration=samples.shape[1] / self.sampling_rate,
            sources=[
                AudioSource(
                    type="file",
                    channels=[0],
                    source=str(storage_path),
                )
            ],
        )
        # The returned cut starts at 0 because it covers the whole new file.
        return MonoCut(
            id=self.id,
            start=0,
            duration=recording.duration,
            channel=0,
            supervisions=self.supervisions,
            recording=recording,
            custom=self.custom if hasattr(self, "custom") else None,
        )
def speakers_feature_mask(
self,
min_speaker_dim: Optional[int] = None,
speaker_to_idx_map: Optional[Dict[str, int]] = None,
use_alignment_if_exists: Optional[str] = None,
) -> np.ndarray:
"""
Return a matrix of per-speaker activity in a cut. The matrix shape is (num_speakers, num_frames),
and its values are 0 for nonspeech **frames** and 1 for speech **frames** for each respective speaker.
This is somewhat inspired by the TS-VAD setup: https://arxiv.org/abs/2005.07272
:param min_speaker_dim: optional int, when specified it will enforce that the matrix shape is at least
that value (useful for datasets like CHiME 6 where the number of speakers is always 4, but some cuts
might have less speakers than that).
:param speaker_to_idx_map: optional dict mapping speaker names (strings) to their global indices (ints).
Useful when you want to preserve the order of the speakers (e.g. speaker XYZ is always mapped to index 2)
:param use_alignment_if_exists: optional str, key for alignment type to use for generating the mask. If not
exists, fall back on supervision time spans.
"""
assert self.has_features, (
f"No features available. "
f"Can't compute supervisions feature mask for cut with ID: {self.id}."
)
if speaker_to_idx_map is None:
speaker_to_idx_map = {
spk: idx
for idx, spk in enumerate(
sorted(set(s.speaker for s in self.supervisions))
)
}
num_speakers = len(speaker_to_idx_map)
if min_speaker_dim is not None:
num_speakers = min(min_speaker_dim, num_speakers)
mask = np.zeros((num_speakers, self.num_frames))
for supervision in self.supervisions:
speaker_idx = speaker_to_idx_map[supervision.speaker]
if (
use_alignment_if_exists
and supervision.alignment
and use_alignment_if_exists in supervision.alignment
):
for ali in supervision.alignment[use_alignment_if_exists]:
st = round(ali.start / self.frame_shift) if ali.start > 0 else 0
et = (
round(ali.end / self.frame_shift)
if ali.end < self.duration
else self.num_frames
)
mask[speaker_idx, st:et] = 1
else:
st = (
round(supervision.start / self.frame_shift)
if supervision.start > 0
else 0
)
et = (
round(supervision.end / self.frame_shift)
if supervision.end < self.duration
else self.num_frames
)
mask[speaker_idx, st:et] = 1
return mask
def speakers_audio_mask(
self,
min_speaker_dim: Optional[int] = None,
speaker_to_idx_map: Optional[Dict[str, int]] = None,
use_alignment_if_exists: Optional[str] = None,
) -> np.ndarray:
"""
Return a matrix of per-speaker activity in a cut. The matrix shape is (num_speakers, num_samples),
and its values are 0 for nonspeech **samples** and 1 for speech **samples** for each respective speaker.
This is somewhat inspired by the TS-VAD setup: https://arxiv.org/abs/2005.07272
:param min_speaker_dim: optional int, when specified it will enforce that the matrix shape is at least
that value (useful for datasets like CHiME 6 where the number of speakers is always 4, but some cuts
might have less speakers than that).
:param speaker_to_idx_map: optional dict mapping speaker names (strings) to their global indices (ints).
Useful when you want to preserve the order of the speakers (e.g. speaker XYZ is always mapped to index 2)
:param use_alignment_if_exists: optional str, key for alignment type to use for generating the mask. If not
exists, fall back on supervision time spans.
"""
assert self.has_recording, (
f"No recording available. "
f"Can't compute supervisions audio mask for cut with ID: {self.id}."
)
if speaker_to_idx_map is None:
speaker_to_idx_map = {
spk: idx
for idx, spk in enumerate(
sorted(set(s.speaker for s in self.supervisions))
)
}
num_speakers = len(speaker_to_idx_map)
if min_speaker_dim is not None:
num_speakers = min(min_speaker_dim, num_speakers)
mask = np.zeros((num_speakers, self.num_samples))
for supervision in self.supervisions:
speaker_idx = speaker_to_idx_map[supervision.speaker]
if (
use_alignment_if_exists
and supervision.alignment
and use_alignment_if_exists in supervision.alignment
):
for ali in supervision.alignment[use_alignment_if_exists]:
st = round(ali.start * self.sampling_rate) if ali.start > 0 else 0
et = (
round(ali.end * self.sampling_rate)
if ali.end < self.duration
else self.duration * self.sampling_rate
)
mask[speaker_idx, st:et] = 1
else:
st = (
round(supervision.start * self.sampling_rate)
if supervision.start > 0
else 0
)
et = (
round(supervision.end * self.sampling_rate)
if supervision.end < self.duration
else self.duration * self.sampling_rate
)
mask[speaker_idx, st:et] = 1
return mask
def supervisions_feature_mask(
self, use_alignment_if_exists: Optional[str] = None
) -> np.ndarray:
"""
Return a 1D numpy array with value 1 for **frames** covered by at least one supervision,
and 0 for **frames** not covered by any supervision.
:param use_alignment_if_exists: optional str, key for alignment type to use for generating the mask. If not
exists, fall back on supervision time spans.
"""
return compute_supervisions_frame_mask(
self, use_alignment_if_exists=use_alignment_if_exists
)
def supervisions_audio_mask(
self, use_alignment_if_exists: Optional[str] = None
) -> np.ndarray:
"""
Return a 1D numpy array with value 1 for **samples** covered by at least one supervision,
and 0 for **samples** not covered by any supervision.
:param use_alignment_if_exists: optional str, key for alignment type to use for generating the mask. If not
exists, fall back on supervision time spans.
"""
assert self.has_recording, (
f"No recording available. "
f"Can't compute supervisions audio mask for cut with ID: {self.id}."
)
mask = np.zeros(self.num_samples, dtype=np.float32)
for supervision in self.supervisions:
if (
use_alignment_if_exists
and supervision.alignment
and use_alignment_if_exists in supervision.alignment
):
for ali in supervision.alignment[use_alignment_if_exists]:
st = round(ali.start * self.sampling_rate) if ali.start > 0 else 0
et = (
round(ali.end * self.sampling_rate)
if ali.end < self.duration
else self.duration * self.sampling_rate
)
mask[st:et] = 1.0
else:
st = (
round(supervision.start * self.sampling_rate)
if supervision.start > 0
else 0
)
et = (
round(supervision.end * self.sampling_rate)
if supervision.end < self.duration
else self.duration * self.sampling_rate
)
mask[st:et] = 1.0
return mask
def with_id(self, id_: str) -> "Cut":
"""Return a copy of the Cut with a new ID."""
return fastcopy(self, id=id_)
@dataclass
class MonoCut(Cut):
    """
    :class:`~lhotse.cut.MonoCut` is a :class:`~lhotse.cut.Cut` of a single channel of
    a :class:`~lhotse.audio.Recording`. In addition to Cut, it has a specified channel attribute. This is the most commonly used type of cut.

    Please refer to the documentation of :class:`~lhotse.cut.Cut` to learn more about using cuts.

    See also:

        - :class:`lhotse.cut.Cut`
        - :class:`lhotse.cut.MixedCut`
        - :class:`lhotse.cut.CutSet`
    """

    id: str

    # Begin and duration are needed to specify which chunk of features/recording to load.
    start: Seconds
    duration: Seconds
    channel: int

    # Supervisions that will be used as targets for model training later on. They don't have to cover the whole
    # cut duration. They also might overlap.
    supervisions: List[SupervisionSegment] = field(default_factory=list)

    # The features can span longer than the actual cut - the Features object "knows" its start and end time
    # within the underlying recording. We can expect the interval [begin, begin + duration] to be a subset of the
    # interval represented in features.
    features: Optional[Features] = None

    # For the cases that the model was trained by raw audio instead of features
    recording: Optional[Recording] = None

    # Store anything else the user might want.
    custom: Optional[Dict[str, Any]] = None

    def __setattr__(self, key: str, value: Any):
        """
        This magic function is called when the user tries to set an attribute.
        We use it as syntactic sugar to store custom attributes in ``self.custom``
        field, so that they can be (de)serialized later.
        """
        if key in self.__dataclass_fields__:
            super().__setattr__(key, value)
        else:
            custom = ifnone(self.custom, {})
            custom[key] = value
            self.custom = custom

    def __getattr__(self, name: str) -> Any:
        """
        This magic function is called when the user tries to access an attribute
        of :class:`.MonoCut` that doesn't exist. It is used for accessing the custom
        attributes of cuts.

        We use it to look up the ``custom`` field: when it's None or empty,
        we'll just raise AttributeError as usual.
        If ``name`` is found in ``custom``, we'll return ``custom[name]``.

        If ``name`` starts with "load_", we'll assume the name of the relevant
        attribute comes after that, and that value of that field is of type
        :class:`~lhotse.array.Array` or :class:`~lhotse.array.TemporalArray`.
        We'll return its ``load`` method to call by the user.

        Example of attaching and reading an alignment as TemporalArray::

            >>> cut = MonoCut('cut1', start=0, duration=4, channel=0)
            >>> cut.alignment = TemporalArray(...)
            >>> ali = cut.load_alignment()
        """
        custom = self.custom
        if custom is None:
            raise AttributeError(f"No such attribute: {name}")
        if name in custom:
            # Somebody accesses raw [Temporal]Array manifest
            # or wrote a custom piece of metadata into MonoCut.
            return self.custom[name]
        elif name.startswith("load_"):
            # Return the method for loading [Temporal]Arrays,
            # to be invoked by the user.
            attr_name = name[5:]
            return partial(self.load_custom, attr_name)
        raise AttributeError(f"No such attribute: {name}")

    def load_custom(self, name: str) -> np.ndarray:
        """
        Load custom data as numpy array. The custom data is expected to have
        been stored in cuts ``custom`` field as an :class:`~lhotse.array.Array` or
        :class:`~lhotse.array.TemporalArray` manifest.

        .. note:: It works with Array manifests stored via attribute assignments,
            e.g.: ``cut.my_custom_data = Array(...)``.

        :param name: name of the custom attribute.
        :return: a numpy array with the data.
        """
        from lhotse.array import Array, TemporalArray

        value = self.custom.get(name)
        if isinstance(value, Array):
            # Array does not support temporal slicing: eagerly load the whole array.
            return value.load()
        elif isinstance(value, TemporalArray):
            # TemporalArray supports slicing: load only the span covered by this cut.
            return value.load(start=self.start, duration=self.duration)
        else:
            raise ValueError(
                f"To load {name}, the cut needs to have field {name} (or cut.custom['{name}']) "
                f"defined, and its value has to be a manifest of type Array or TemporalArray."
            )

    @property
    def recording_id(self) -> str:
        return self.recording.id if self.has_recording else self.features.recording_id

    @property
    def end(self) -> Seconds:
        return round(self.start + self.duration, ndigits=8)

    @property
    def has_features(self) -> bool:
        return self.features is not None

    @property
    def has_recording(self) -> bool:
        return self.recording is not None

    @property
    def frame_shift(self) -> Optional[Seconds]:
        return self.features.frame_shift if self.has_features else None

    @property
    def num_frames(self) -> Optional[int]:
        return (
            compute_num_frames(
                duration=self.duration,
                frame_shift=self.frame_shift,
                sampling_rate=self.sampling_rate,
            )
            if self.has_features
            else None
        )

    @property
    def num_samples(self) -> Optional[int]:
        return (
            compute_num_samples(self.duration, self.sampling_rate)
            if self.has_recording
            else None
        )

    @property
    def num_features(self) -> Optional[int]:
        return self.features.num_features if self.has_features else None

    @property
    def features_type(self) -> Optional[str]:
        return self.features.type if self.has_features else None

    @property
    def sampling_rate(self) -> int:
        return (
            self.features.sampling_rate
            if self.has_features
            else self.recording.sampling_rate
        )

    @rich_exception_info
    def load_features(self) -> Optional[np.ndarray]:
        """
        Load the features from the underlying storage and cut them to the relevant
        [begin, duration] region of the current MonoCut.
        """
        if self.has_features:
            feats = self.features.load(start=self.start, duration=self.duration)
            # Note: we forgive off-by-one errors in the feature matrix frames
            # due to various hard-to-predict floating point arithmetic issues.
            # If needed, we will remove or duplicate the last frame to be
            # consistent with the manifests declared "num_frames".
            if feats.shape[0] - self.num_frames == 1:
                feats = feats[: self.num_frames, :]
            elif feats.shape[0] - self.num_frames == -1:
                feats = np.concatenate((feats, feats[-1:, :]), axis=0)
            return feats
        return None

    @rich_exception_info
    def load_audio(self) -> Optional[np.ndarray]:
        """
        Load the audio by locating the appropriate recording in the supplied RecordingSet.
        The audio is trimmed to the [begin, end] range specified by the MonoCut.

        :return: a numpy ndarray with audio samples, with shape (1 <channel>, N <samples>)
        """
        if self.has_recording:
            return self.recording.load_audio(
                channels=self.channel,
                offset=self.start,
                duration=self.duration,
            )
        return None

    def drop_features(self) -> "MonoCut":
        """Return a copy of the current :class:`.MonoCut`, detached from ``features``."""
        assert (
            self.has_recording
        ), f"Cannot detach features from a MonoCut with no Recording (cut ID = {self.id})."
        return fastcopy(self, features=None)

    def drop_recording(self) -> "MonoCut":
        """Return a copy of the current :class:`.MonoCut`, detached from ``recording``."""
        assert (
            self.has_features
        ), f"Cannot detach recording from a MonoCut with no Features (cut ID = {self.id})."
        return fastcopy(self, recording=None)

    def drop_supervisions(self) -> "MonoCut":
        """Return a copy of the current :class:`.MonoCut`, detached from ``supervisions``."""
        return fastcopy(self, supervisions=[])

    def compute_and_store_features(
        self,
        extractor: FeatureExtractor,
        storage: FeaturesWriter,
        augment_fn: Optional[AugmentFn] = None,
        *args,
        **kwargs,
    ) -> Cut:
        """
        Compute the features from this cut, store them on disk, and attach a feature manifest to this cut.
        This cut has to be able to load audio.

        :param extractor: a ``FeatureExtractor`` instance used to compute the features.
        :param storage: a ``FeaturesWriter`` instance used to write the features to a storage.
        :param augment_fn: an optional callable used for audio augmentation.
        :return: a new ``MonoCut`` instance with a ``Features`` manifest attached to it.
        """
        features_info = extractor.extract_from_samples_and_store(
            samples=self.load_audio(),
            storage=storage,
            sampling_rate=self.sampling_rate,
            offset=self.start,
            channel=self.channel,
            augment_fn=augment_fn,
        )
        # The fastest way to instantiate a copy of the cut with a Features object attached
        return fastcopy(self, features=features_info)

    def truncate(
        self,
        *,
        offset: Seconds = 0.0,
        duration: Optional[Seconds] = None,
        keep_excessive_supervisions: bool = True,
        preserve_id: bool = False,
        _supervisions_index: Optional[Dict[str, IntervalTree]] = None,
    ) -> "MonoCut":
        """
        Returns a new MonoCut that is a sub-region of the current MonoCut.

        Note that no operation is done on the actual features or recording -
        it's only during the call to :meth:`MonoCut.load_features` / :meth:`MonoCut.load_audio`
        when the actual changes happen (a subset of features/audio is loaded).

        :param offset: float (seconds), controls the start of the new cut relative to the current MonoCut's start.
            E.g., if the current MonoCut starts at 10.0, and offset is 2.0, the new start is 12.0.
        :param duration: optional float (seconds), controls the duration of the resulting MonoCut.
            By default, the duration is (end of the cut before truncation) - (offset).
        :param keep_excessive_supervisions: bool. Since trimming may happen inside a SupervisionSegment,
            the caller has an option to either keep or discard such supervisions.
        :param preserve_id: bool. Should the truncated cut keep the same ID or get a new, random one.
        :param _supervisions_index: an IntervalTree; when passed, allows to speed up processing of Cuts with a very
            large number of supervisions. Intended as an internal parameter.
        :return: a new MonoCut instance. If the current MonoCut is shorter than the duration, return None.
        """
        # Note: technically, truncate's code can be used for "expanding" the cut as well:
        # In that case, we must ensure that the start of MonoCut is not before the start
        # of the actual Recording, hence max(..., 0).
        new_start = max(self.start + offset, 0)
        until = offset + (duration if duration is not None else self.duration)
        new_duration = self.duration - new_start if duration is None else until - offset
        assert new_duration > 0.0
        duration_past_end = (new_start + new_duration) - (self.start + self.duration)
        if duration_past_end > 0:
            # When the end of the MonoCut has been exceeded, trim the new duration to not exceed the old MonoCut's end.
            new_duration -= duration_past_end
        # Round the duration to avoid the possible loss of a single audio sample due to floating point
        # additions and subtractions.
        new_duration = round(new_duration, ndigits=8)
        if _supervisions_index is None:
            criterion = overlaps if keep_excessive_supervisions else overspans
            new_time_span = TimeSpan(start=0, end=new_duration)
            new_supervisions = (
                segment.with_offset(-offset) for segment in self.supervisions
            )
            supervisions = [
                segment
                for segment in new_supervisions
                if criterion(new_time_span, segment)
            ]
        else:
            tree = _supervisions_index[self.id]
            # Below we select which method should be called on the IntervalTree object.
            # The result of calling that method with a range of (begin, end) is an iterable
            # of Intervals that contain the SupervisionSegments matching our criterion.
            # We call "interval.data" to obtain the underlying SupervisionSegment.
            # Additionally, when the method is tree.envelop, we use a small epsilon to
            # extend the searched boundaries to account for possible float arithmetic errors.
            if keep_excessive_supervisions:
                intervals = tree.overlap(begin=offset, end=offset + new_duration)
            else:
                intervals = tree.envelop(
                    begin=offset - 1e-3, end=offset + new_duration + 1e-3
                )
            supervisions = []
            for interval in intervals:
                # We are going to measure the overlap ratio of the supervision with the "truncated" cut
                # and reject segments that overlap less than 1%. This way we can avoid quirks and errors
                # of limited float precision.
                olap_ratio = measure_overlap(
                    interval.data, TimeSpan(offset, offset + new_duration)
                )
                if olap_ratio > 0.01:
                    supervisions.append(interval.data.with_offset(-offset))
        return fastcopy(
            self,
            id=self.id if preserve_id else str(uuid4()),
            start=new_start,
            duration=new_duration,
            supervisions=sorted(supervisions, key=lambda s: s.start),
        )

    def pad(
        self,
        duration: Seconds = None,
        num_frames: int = None,
        num_samples: int = None,
        pad_feat_value: float = LOG_EPSILON,
        direction: str = "right",
        preserve_id: bool = False,
        pad_value_dict: Optional[Dict[str, Union[int, float]]] = None,
    ) -> Cut:
        """
        Return a new MixedCut, padded with zeros in the recording, and ``pad_feat_value`` in each feature bin.

        The user can choose to pad either to a specific `duration`; a specific number of frames `max_frames`;
        or a specific number of samples `num_samples`. The three arguments are mutually exclusive.

        :param duration: The cut's minimal duration after padding.
        :param num_frames: The cut's total number of frames after padding.
        :param num_samples: The cut's total number of samples after padding.
        :param pad_feat_value: A float value that's used for padding the features.
            By default we assume a log-energy floor of approx. -23 (1e-10 after exp).
        :param direction: string, 'left', 'right' or 'both'. Determines whether the padding is added before or after
            the cut.
        :param preserve_id: When ``True``, preserves the cut ID before padding.
            Otherwise, a new random ID is generated for the padded cut (default).
        :param pad_value_dict: Optional dict that specifies what value should be used
            for padding arrays in custom attributes.
        :return: a padded MixedCut if duration is greater than this cut's duration, otherwise ``self``.
        """
        return pad(
            self,
            duration=duration,
            num_frames=num_frames,
            num_samples=num_samples,
            pad_feat_value=pad_feat_value,
            direction=direction,
            preserve_id=preserve_id,
            pad_value_dict=pad_value_dict,
        )

    def resample(self, sampling_rate: int, affix_id: bool = False) -> "MonoCut":
        """
        Return a new ``MonoCut`` that will lazily resample the audio while reading it.
        This operation will drop the feature manifest, if attached.
        It does not affect the supervision.

        :param sampling_rate: The new sampling rate.
        :param affix_id: Should we modify the ID (useful if both versions of the same
            cut are going to be present in a single manifest).
        :return: a modified copy of the current ``MonoCut``.
        """
        assert self.has_recording, "Cannot resample a MonoCut without Recording."
        return fastcopy(
            self,
            id=f"{self.id}_rs{sampling_rate}" if affix_id else self.id,
            recording=self.recording.resample(sampling_rate),
            features=None,
        )

    def perturb_speed(self, factor: float, affix_id: bool = True) -> "MonoCut":
        """
        Return a new ``MonoCut`` that will lazily perturb the speed while loading audio.
        The ``num_samples``, ``start`` and ``duration`` fields are updated to reflect the
        shrinking/extending effect of speed.
        We are also updating the time markers of the underlying ``Recording`` and the supervisions.

        :param factor: The speed will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
        :param affix_id: When true, we will modify the ``MonoCut.id`` field
            by affixing it with "_sp{factor}".
        :return: a modified copy of the current ``MonoCut``.
        """
        # Pre-conditions
        assert (
            self.has_recording
        ), "Cannot perturb speed on a MonoCut without Recording."
        if self.has_features:
            logging.warning(
                "Attempting to perturb speed on a MonoCut that references pre-computed features. "
                "The feature manifest will be detached, as we do not support feature-domain "
                "speed perturbation."
            )
            self.features = None
        # Actual audio perturbation.
        recording_sp = self.recording.perturb_speed(factor=factor, affix_id=affix_id)
        # Match the supervision's start and duration to the perturbed audio.
        # Since SupervisionSegment "start" is relative to the MonoCut's, it's okay (and necessary)
        # to perturb it as well.
        supervisions_sp = [
            s.perturb_speed(
                factor=factor, sampling_rate=self.sampling_rate, affix_id=affix_id
            )
            for s in self.supervisions
        ]
        # New start and duration have to be computed through num_samples to be accurate
        start_samples = perturb_num_samples(
            compute_num_samples(self.start, self.sampling_rate), factor
        )
        new_start = start_samples / self.sampling_rate
        new_num_samples = perturb_num_samples(self.num_samples, factor)
        new_duration = new_num_samples / self.sampling_rate
        return fastcopy(
            self,
            id=f"{self.id}_sp{factor}" if affix_id else self.id,
            recording=recording_sp,
            supervisions=supervisions_sp,
            duration=new_duration,
            start=new_start,
        )

    def perturb_tempo(self, factor: float, affix_id: bool = True) -> "MonoCut":
        """
        Return a new ``MonoCut`` that will lazily perturb the tempo while loading audio.
        Compared to speed perturbation, tempo preserves pitch.
        The ``num_samples``, ``start`` and ``duration`` fields are updated to reflect the
        shrinking/extending effect of speed.
        We are also updating the time markers of the underlying ``Recording`` and the supervisions.

        :param factor: The tempo will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
        :param affix_id: When true, we will modify the ``MonoCut.id`` field
            by affixing it with "_tp{factor}".
        :return: a modified copy of the current ``MonoCut``.
        """
        # Pre-conditions
        assert (
            self.has_recording
        ), "Cannot perturb tempo on a MonoCut without Recording."
        if self.has_features:
            logging.warning(
                "Attempting to perturb tempo on a MonoCut that references pre-computed features. "
                "The feature manifest will be detached, as we do not support feature-domain "
                "tempo perturbation."
            )
            self.features = None
        # Actual audio perturbation.
        recording_sp = self.recording.perturb_tempo(factor=factor, affix_id=affix_id)
        # Match the supervision's start and duration to the perturbed audio.
        # Since SupervisionSegment "start" is relative to the MonoCut's, it's okay (and necessary)
        # to perturb it as well.
        supervisions_sp = [
            s.perturb_tempo(
                factor=factor, sampling_rate=self.sampling_rate, affix_id=affix_id
            )
            for s in self.supervisions
        ]
        # New start and duration have to be computed through num_samples to be accurate
        start_samples = perturb_num_samples(
            compute_num_samples(self.start, self.sampling_rate), factor
        )
        new_start = start_samples / self.sampling_rate
        new_num_samples = perturb_num_samples(self.num_samples, factor)
        new_duration = new_num_samples / self.sampling_rate
        return fastcopy(
            self,
            id=f"{self.id}_tp{factor}" if affix_id else self.id,
            recording=recording_sp,
            supervisions=supervisions_sp,
            duration=new_duration,
            start=new_start,
        )

    def perturb_volume(self, factor: float, affix_id: bool = True) -> "MonoCut":
        """
        Return a new ``MonoCut`` that will lazily perturb the volume while loading audio.

        :param factor: The volume will be adjusted this many times (e.g. factor=1.1 means 1.1x louder).
        :param affix_id: When true, we will modify the ``MonoCut.id`` field
            by affixing it with "_vp{factor}".
        :return: a modified copy of the current ``MonoCut``.
        """
        # Pre-conditions
        assert (
            self.has_recording
        ), "Cannot perturb volume on a MonoCut without Recording."
        if self.has_features:
            logging.warning(
                "Attempting to perturb volume on a MonoCut that references pre-computed features. "
                "The feature manifest will be detached, as we do not support feature-domain "
                "volume perturbation."
            )
            self.features = None
        # Actual audio perturbation.
        recording_vp = self.recording.perturb_volume(factor=factor, affix_id=affix_id)
        # Match the supervision's id (and it's underlying recording id).
        supervisions_vp = [
            s.perturb_volume(factor=factor, affix_id=affix_id)
            for s in self.supervisions
        ]
        return fastcopy(
            self,
            id=f"{self.id}_vp{factor}" if affix_id else self.id,
            recording=recording_vp,
            supervisions=supervisions_vp,
        )

    def map_supervisions(
        self, transform_fn: Callable[[SupervisionSegment], SupervisionSegment]
    ) -> Cut:
        """
        Modify the SupervisionSegments by `transform_fn` of this MonoCut.

        :param transform_fn: a function that modifies a supervision as an argument.
        :return: a modified MonoCut.
        """
        new_cut = fastcopy(
            self, supervisions=[s.map(transform_fn) for s in self.supervisions]
        )
        return new_cut

    def filter_supervisions(
        self, predicate: Callable[[SupervisionSegment], bool]
    ) -> Cut:
        """
        Modify cut to store only supervisions accepted by `predicate`

        Example:
            >>> cut = cut.filter_supervisions(lambda s: s.id in supervision_ids)
            >>> cut = cut.filter_supervisions(lambda s: s.duration < 5.0)
            >>> cut = cut.filter_supervisions(lambda s: s.text is not None)

        :param predicate: A callable that accepts `SupervisionSegment` and returns bool
        :return: a modified MonoCut
        """
        new_cut = fastcopy(
            self, supervisions=[s for s in self.supervisions if predicate(s)]
        )
        return new_cut

    @staticmethod
    def from_dict(data: dict) -> "MonoCut":
        from lhotse.serialization import deserialize_custom_field

        features = (
            Features.from_dict(data.pop("features")) if "features" in data else None
        )
        recording = (
            Recording.from_dict(data.pop("recording")) if "recording" in data else None
        )
        supervision_infos = data.pop("supervisions") if "supervisions" in data else []
        if "custom" in data:
            deserialize_custom_field(data["custom"])
        return MonoCut(
            **data,
            features=features,
            recording=recording,
            supervisions=[SupervisionSegment.from_dict(s) for s in supervision_infos],
        )

    def with_features_path_prefix(self, path: Pathlike) -> "MonoCut":
        if not self.has_features:
            return self
        return fastcopy(self, features=self.features.with_path_prefix(path))

    def with_recording_path_prefix(self, path: Pathlike) -> "MonoCut":
        if not self.has_recording:
            return self
        return fastcopy(self, recording=self.recording.with_path_prefix(path))
@dataclass
class PaddingCut(Cut):
"""
:class:`~lhotse.cut.PaddingCut` is a dummy :class:`~lhotse.cut.Cut` that doesn't refer to
actual recordings or features --it simply returns zero samples in the time domain
and a specified features value in the feature domain.
Its main role is to be appended to other cuts to make them evenly sized.
Please refer to the documentation of :class:`~lhotse.cut.Cut` to learn more about using cuts.
See also:
- :class:`lhotse.cut.Cut`
- :class:`lhotse.cut.MonoCut`
- :class:`lhotse.cut.MixedCut`
- :class:`lhotse.cut.CutSet`
"""
id: str
duration: Seconds
sampling_rate: int
feat_value: float
# For frequency domain
num_frames: Optional[int] = None
num_features: Optional[int] = None
frame_shift: Optional[float] = None
# For time domain
num_samples: Optional[int] = None
# Dict for storing padding values for custom array attributes
custom: Optional[dict] = None
@property
def start(self) -> Seconds:
return 0
@property
def end(self) -> Seconds:
return self.duration
@property
def supervisions(self):
return []
@property
def has_features(self) -> bool:
return self.num_frames is not None
@property
def has_recording(self) -> bool:
return self.num_samples is not None
# noinspection PyUnusedLocal
def load_features(self, *args, **kwargs) -> Optional[np.ndarray]:
if self.has_features:
return (
np.ones((self.num_frames, self.num_features), np.float32)
* self.feat_value
)
return None
# noinspection PyUnusedLocal
def load_audio(self, *args, **kwargs) -> Optional[np.ndarray]:
if self.has_recording:
return np.zeros(
(1, compute_num_samples(self.duration, self.sampling_rate)), np.float32
)
return None
# noinspection PyUnusedLocal
def truncate(
self,
*,
offset: Seconds = 0.0,
duration: Optional[Seconds] = None,
keep_excessive_supervisions: bool = True,
preserve_id: bool = False,
**kwargs,
) -> "PaddingCut":
new_duration = self.duration - offset if duration is None else duration
assert new_duration > 0.0
return fastcopy(
self,
id=self.id if preserve_id else str(uuid4()),
duration=new_duration,
feat_value=self.feat_value,
num_frames=compute_num_frames(
duration=new_duration,
frame_shift=self.frame_shift,
sampling_rate=self.sampling_rate,
)
if self.num_frames is not None
else None,
num_samples=compute_num_samples(
duration=new_duration, sampling_rate=self.sampling_rate
)
if self.num_samples is not None
else None,
)
def pad(
self,
duration: Seconds = None,
num_frames: int = None,
num_samples: int = None,
pad_feat_value: float = LOG_EPSILON,
direction: str = "right",
preserve_id: bool = False,
pad_value_dict: Optional[Dict[str, Union[int, float]]] = None,
) -> Cut:
"""
Return a new MixedCut, padded with zeros in the recording, and ``pad_feat_value`` in each feature bin.
The user can choose to pad either to a specific `duration`; a specific number of frames `max_frames`;
or a specific number of samples `num_samples`. The three arguments are mutually exclusive.
:param duration: The cut's minimal duration after padding.
:param num_frames: The cut's total number of frames after padding.
:param num_samples: The cut's total number of samples after padding.
:param pad_feat_value: A float value that's used for padding the features.
By default we assume a log-energy floor of approx. -23 (1e-10 after exp).
:param direction: string, 'left', 'right' or 'both'. Determines whether the padding is added before or after
the cut.
:param preserve_id: When ``True``, preserves the cut ID from before padding.
Otherwise, generates a new random ID (default).
:param pad_value_dict: Optional dict that specifies what value should be used
for padding arrays in custom attributes.
:return: a padded MixedCut if duration is greater than this cut's duration, otherwise ``self``.
"""
return pad(
self,
duration=duration,
num_frames=num_frames,
num_samples=num_samples,
pad_feat_value=pad_feat_value,
direction=direction,
preserve_id=preserve_id,
pad_value_dict=pad_value_dict,
)
def resample(self, sampling_rate: int, affix_id: bool = False) -> "PaddingCut":
"""
Return a new ``MonoCut`` that will lazily resample the audio while reading it.
This operation will drop the feature manifest, if attached.
It does not affect the supervision.
:param sampling_rate: The new sampling rate.
:param affix_id: Should we modify the ID (useful if both versions of the same
cut are going to be present in a single manifest).
:return: a modified copy of the current ``MonoCut``.
"""
assert self.has_recording, "Cannot resample a MonoCut without Recording."
return fastcopy(
self,
id=f"{self.id}_rs{sampling_rate}" if affix_id else self.id,
sampling_rate=sampling_rate,
num_samples=compute_num_samples(self.duration, sampling_rate),
num_frames=None,
num_features=None,
frame_shift=None,
)
def perturb_speed(self, factor: float, affix_id: bool = True) -> "PaddingCut":
"""
Return a new ``PaddingCut`` that will "mimic" the effect of speed perturbation
on ``duration`` and ``num_samples``.
:param factor: The speed will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
:param affix_id: When true, we will modify the ``PaddingCut.id`` field
by affixing it with "_sp{factor}".
:return: a modified copy of the current ``PaddingCut``.
"""
# Pre-conditions
if self.has_features:
logging.warning(
"Attempting to perturb speed on a MonoCut that references pre-computed features. "
"The feature manifest will be detached, as we do not support feature-domain "
"speed perturbation."
)
new_num_frames = None
new_num_features = None
new_frame_shift = None
else:
new_num_frames = self.num_frames
new_num_features = self.num_features
new_frame_shift = self.frame_shift
new_num_samples = perturb_num_samples(self.num_samples, factor)
new_duration = new_num_samples / self.sampling_rate
return fastcopy(
self,
id=f"{self.id}_sp{factor}" if affix_id else self.id,
num_samples=new_num_samples,
duration=new_duration,
num_frames=new_num_frames,
num_features=new_num_features,
frame_shift=new_frame_shift,
)
def perturb_tempo(self, factor: float, affix_id: bool = True) -> "PaddingCut":
"""
Return a new ``PaddingCut`` that will "mimic" the effect of tempo perturbation
on ``duration`` and ``num_samples``.
Compared to speed perturbation, tempo preserves pitch.
:param factor: The tempo will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
:param affix_id: When true, we will modify the ``PaddingCut.id`` field
by affixing it with "_tp{factor}".
:return: a modified copy of the current ``PaddingCut``.
"""
# Pre-conditions
if self.has_features:
logging.warning(
"Attempting to perturb tempo on a MonoCut that references pre-computed features. "
"The feature manifest will be detached, as we do not support feature-domain "
"tempo perturbation."
)
new_num_frames = None
new_num_features = None
new_frame_shift = None
else:
new_num_frames = self.num_frames
new_num_features = self.num_features
new_frame_shift = self.frame_shift
new_num_samples = perturb_num_samples(self.num_samples, factor)
new_duration = new_num_samples / self.sampling_rate
return fastcopy(
self,
id=f"{self.id}_tp{factor}" if affix_id else self.id,
num_samples=new_num_samples,
duration=new_duration,
num_frames=new_num_frames,
num_features=new_num_features,
frame_shift=new_frame_shift,
)
def perturb_volume(self, factor: float, affix_id: bool = True) -> "PaddingCut":
"""
Return a new ``PaddingCut`` that will "mimic" the effect of volume perturbation
on amplitude of samples.
:param factor: The volume will be adjusted this many times (e.g. factor=1.1 means 1.1x louder).
:param affix_id: When true, we will modify the ``PaddingCut.id`` field
by affixing it with "_vp{factor}".
:return: a modified copy of the current ``PaddingCut``.
"""
return fastcopy(self, id=f"{self.id}_vp{factor}" if affix_id else self.id)
def drop_features(self) -> "PaddingCut":
"""Return a copy of the current :class:`.PaddingCut`, detached from ``features``."""
assert (
self.has_recording
), f"Cannot detach features from a MonoCut with no Recording (cut ID = {self.id})."
return fastcopy(self, num_frames=None, num_features=None, frame_shift=None)
def drop_recording(self) -> "PaddingCut":
"""Return a copy of the current :class:`.PaddingCut`, detached from ``recording``."""
assert (
self.has_features
), f"Cannot detach recording from a PaddingCut with no Features (cut ID = {self.id})."
return fastcopy(self, num_samples=None)
def drop_supervisions(self) -> "PaddingCut":
"""Return a copy of the current :class:`.PaddingCut`, detached from ``supervisions``."""
return self
def compute_and_store_features(
self, extractor: FeatureExtractor, *args, **kwargs
) -> Cut:
"""
Returns a new PaddingCut with updates information about the feature dimension and number of
feature frames, depending on the ``extractor`` properties.
"""
return fastcopy(
self,
num_features=extractor.feature_dim(self.sampling_rate),
num_frames=compute_num_frames(
duration=self.duration,
frame_shift=extractor.frame_shift,
sampling_rate=self.sampling_rate,
),
frame_shift=extractor.frame_shift,
)
def map_supervisions(self, transform_fn: Callable[[Any], Any]) -> Cut:
"""
Just for consistency with `MonoCut` and `MixedCut`.
:param transform_fn: a dummy function that would be never called actually.
:return: the PaddingCut itself.
"""
return self
def filter_supervisions(
self, predicate: Callable[[SupervisionSegment], bool]
) -> Cut:
"""
Just for consistency with `MonoCut` and `MixedCut`.
:param predicate: A callable that accepts `SupervisionSegment` and returns bool
:return: a modified MonoCut
"""
return self
    @staticmethod
    def from_dict(data: dict) -> "PaddingCut":
        # Deserialize by forwarding the dict's items as constructor kwargs.
        return PaddingCut(**data)
    def with_features_path_prefix(self, path: Pathlike) -> "PaddingCut":
        # No-op: padding stores no feature files, so there is no path to prefix.
        return self
    def with_recording_path_prefix(self, path: Pathlike) -> "PaddingCut":
        # No-op: padding stores no audio files, so there is no path to prefix.
        return self
@dataclass
class MixTrack:
    """
    Represents a single track in a mix of Cuts. Points to a specific MonoCut and holds information on
    how to mix it with other Cuts, relative to the first track in a mix.
    """

    # The actual cut (audio/features payload) of this track.
    cut: Union[MonoCut, PaddingCut]
    # Start of this track relative to the beginning of the mix (seconds).
    offset: Seconds = 0.0
    # Signal-to-noise ratio (dB) relative to the first track; None means "mix as-is".
    snr: Optional[Decibels] = None

    @staticmethod
    def from_dict(data: dict) -> "MixTrack":
        """
        Deserialize a ``MixTrack``. The ``"cut"`` sub-dict is first tried as a
        :class:`MonoCut`, and falls back to :class:`PaddingCut` when the MonoCut
        constructor rejects its fields with a ``TypeError``.
        """
        # Work on a shallow copy so that pop() does not mutate the caller's dict.
        data = dict(data)
        raw_cut = data.pop("cut")
        try:
            cut = MonoCut.from_dict(raw_cut)
        except TypeError:
            cut = PaddingCut.from_dict(raw_cut)
        return MixTrack(cut, **data)
@dataclass
class MixedCut(Cut):
    """
    :class:`~lhotse.cut.MixedCut` is a :class:`~lhotse.cut.Cut` that actually consists of multiple other cuts.
    It can be interpreted as a multi-channel cut, but its primary purpose is to allow
    time-domain and feature-domain augmentation via mixing the training cuts with noise, music, and babble cuts.
    The actual mixing operations are performed on-the-fly.
    Internally, :class:`~lhotse.cut.MixedCut` holds other cuts in multiple tracks (:class:`~lhotse.cut.MixTrack`),
    each with its own offset and SNR that is relative to the first track.
    Please refer to the documentation of :class:`~lhotse.cut.Cut` to learn more about using cuts.
    In addition to methods available in :class:`~lhotse.cut.Cut`, :class:`~lhotse.cut.MixedCut` provides the methods to
    read all of its tracks audio and features as separate channels:
    >>> cut = MixedCut(...)
    >>> mono_features = cut.load_features()
    >>> assert len(mono_features.shape) == 2
    >>> multi_features = cut.load_features(mixed=False)
    >>> # Now, the first dimension is the channel.
    >>> assert len(multi_features.shape) == 3
    See also:
    - :class:`lhotse.cut.Cut`
    - :class:`lhotse.cut.MonoCut`
    - :class:`lhotse.cut.CutSet`
    """
    # Unique identifier of this mixed cut.
    id: str
    # The tracks being mixed; track 0 is the reference for offsets and SNRs.
    tracks: List[MixTrack]
    @property
    def supervisions(self) -> List[SupervisionSegment]:
        """
        Lists the supervisions of the underlying source cuts.
        Each segment start time will be adjusted by the track offset.
        """
        return [
            segment.with_offset(track.offset)
            for track in self.tracks
            for segment in track.cut.supervisions
        ]
    @property
    def start(self) -> Seconds:
        """A MixedCut always starts at 0 in its own coordinate system."""
        return 0
    @property
    def end(self) -> Seconds:
        """End time, equal to the total duration since ``start`` is 0."""
        return self.duration
    @property
    def duration(self) -> Seconds:
        """Total duration of the mix: the latest end among all tracks (offset + cut duration)."""
        track_durations = (track.offset + track.cut.duration for track in self.tracks)
        return round(max(track_durations), ndigits=8)
    @property
    def has_features(self) -> bool:
        """Whether the first non-padding track has a feature manifest attached."""
        return self._first_non_padding_cut.has_features
    @property
    def has_recording(self) -> bool:
        """Whether the first non-padding track has a recording attached."""
        return self._first_non_padding_cut.has_recording
    @property
    def num_frames(self) -> Optional[int]:
        """Number of feature frames of the full mix, or None when no features are attached."""
        if self.has_features:
            return compute_num_frames(
                duration=self.duration,
                frame_shift=self.frame_shift,
                sampling_rate=self.sampling_rate,
            )
        return None
    @property
    def frame_shift(self) -> Optional[Seconds]:
        """Frame shift taken from the first track's cut."""
        return self.tracks[0].cut.frame_shift
    @property
    def sampling_rate(self) -> Optional[int]:
        """Sampling rate taken from the first track's cut."""
        return self.tracks[0].cut.sampling_rate
    @property
    def num_samples(self) -> Optional[int]:
        """Number of audio samples covering the full mix duration."""
        return compute_num_samples(self.duration, self.sampling_rate)
    @property
    def num_features(self) -> Optional[int]:
        """Feature dimension taken from the first track's cut."""
        return self.tracks[0].cut.num_features
    @property
    def features_type(self) -> Optional[str]:
        """Feature type of the first non-padding cut, or None when no features are attached."""
        return self._first_non_padding_cut.features.type if self.has_features else None
    def __getattr__(self, name: str) -> Any:
        """
        This magic function is called when the user tries to access an attribute
        of :class:`.MixedCut` that doesn't exist. It is used for accessing the custom
        attributes of cuts. We support exactly one scenario for mixed cuts:
        If :attr:`tracks` contains exactly one :class:`.MonoCut` object (and an arbitrary
        number of :class:`.PaddingCut` objects), we will look up the custom attributes
        of that cut.
        If one of the custom attributes is of type :class:`~lhotse.array.Array` or
        :class:`~lhotse.array.TemporalArray` we'll also support loading those arrays
        (see example below). Additionally, we will incorporate extra padding as
        dictated by padding cuts.
        Example:
        >>> cut = MonoCut('cut1', start=0, duration=4, channel=0)
        >>> cut.alignment = TemporalArray(...)
        >>> mixed_cut = cut.pad(10, pad_value_dict={'alignment': -1})
        >>> ali = mixed_cut.load_alignment()
        """
        # Python will sometimes try to call undefined magic functions,
        # just fail for them (e.g. __setstate__ when pickling).
        if name.startswith("__"):
            raise AttributeError()
        # Loading a custom array attribute + performing padding.
        if name.startswith("load_"):
            # Strip the "load_" prefix (5 chars) to get the custom attribute name.
            attr_name = name[5:]
            return partial(self.load_custom, attr_name)
        # Returning the contents of "mono_cut.custom[name]",
        # or raising AttributeError.
        try:
            (
                non_padding_idx,
                mono_cut,
            ) = self._assert_mono_cut_with_padding_and_return_it_with_track_index()
            return getattr(mono_cut, name)
        except AssertionError:
            # Translate the failed precondition into the exception type
            # expected from attribute access.
            raise AttributeError(
                f"No such attribute: '{name}' (note: custom attributes are not supported "
                f"when the mixed cut has a different number of MonoCut tracks than one)."
            )
    def load_custom(self, name: str) -> np.ndarray:
        """
        Load custom data as numpy array. The custom data is expected to have
        been stored in cuts ``custom`` field as an :class:`~lhotse.array.Array` or
        :class:`~lhotse.array.TemporalArray` manifest.
        .. note:: It works with Array manifests stored via attribute assignments,
            e.g.: ``cut.my_custom_data = Array(...)``.
        .. warning:: For :class:`.MixedCut`, this will only work if the mixed cut
            consists of a single :class:`.MonoCut` and an arbitrary number of
            :class:`.PaddingCuts`. This is because it is generally undefined how to
            mix arbitrary arrays.
        :param name: name of the custom attribute.
        :return: a numpy array with the data (after padding).
        """
        from lhotse.array import Array, pad_array
        (
            non_padding_idx,
            mono_cut,
        ) = self._assert_mono_cut_with_padding_and_return_it_with_track_index()
        # Load the array and retrieve the manifest from the only non-padding cut.
        # Use getattr to propagate AttributeError if "name" is not defined.
        array = mono_cut.load_custom(name)
        manifest = getattr(mono_cut, name)
        # Check if the corresponding manifest for 'load_something' is of type
        # Array; if yes, just return the loaded data.
        # This is likely an embedding without a temporal dimension.
        if isinstance(manifest, Array):
            return array
        # We are loading an array with a temporal dimension:
        # We need to pad it.
        # The non-padding track's offset tells us how much padding precedes the data.
        left_padding = self.tracks[non_padding_idx].offset
        padded_duration = self.duration
        # The pad value for this attribute is stored on the (first) PaddingCut's
        # custom dict; raises IndexError/KeyError when it was not configured.
        pad_value = [t.cut for t in self.tracks if isinstance(t.cut, PaddingCut)][
            0
        ].custom[name]
        return pad_array(
            array,
            temporal_dim=manifest.temporal_dim,
            frame_shift=manifest.frame_shift,
            offset=left_padding,
            padded_duration=padded_duration,
            pad_value=pad_value,
        )
def _assert_mono_cut_with_padding_and_return_it_with_track_index(
self,
) -> Tuple[int, MonoCut]:
# TODO(pzelasko): consider relaxing this condition to
# supporting mixed cuts that are not overlapping
non_padding_cuts = [
(idx, t.cut)
for idx, t in enumerate(self.tracks)
if isinstance(t.cut, MonoCut)
]
assert (
len(non_padding_cuts) == 1
), f"The cut has {len(non_padding_cuts)} (expected exactly one)"
non_padding_idx, mono_cut = non_padding_cuts[0]
return non_padding_idx, mono_cut
    def truncate(
        self,
        *,
        offset: Seconds = 0.0,
        duration: Optional[Seconds] = None,
        keep_excessive_supervisions: bool = True,
        preserve_id: bool = False,
        _supervisions_index: Optional[Dict[str, IntervalTree]] = None,
    ) -> Cut:
        """
        Returns a new MixedCut that is a sub-region of the current MixedCut. This method truncates the underlying Cuts
        and modifies their offsets in the mix, as needed. Tracks that do not fit in the truncated cut are removed.
        Note that no operation is done on the actual features - it's only during the call to load_features()
        when the actual changes happen (a subset of features is loaded).
        :param offset: float (seconds), controls the start of the new cut relative to the current MixedCut's start.
        :param duration: optional float (seconds), controls the duration of the resulting MixedCut.
            By default, the duration is (end of the cut before truncation) - (offset).
        :param keep_excessive_supervisions: bool. Since trimming may happen inside a SupervisionSegment, the caller has
            an option to either keep or discard such supervisions.
        :param preserve_id: bool. Should the truncated cut keep the same ID or get a new, random one.
        :return: a new MixedCut instance.
        """
        new_tracks = []
        old_duration = self.duration
        new_mix_end = old_duration - offset if duration is None else offset + duration
        for track in sorted(self.tracks, key=lambda t: t.offset):
            # First, determine how much of the beginning of the current track we're going to truncate:
            # when the track offset is larger than the truncation offset, we are not truncating the cut;
            # just decreasing the track offset.
            # 'cut_offset' determines how much we're going to truncate the Cut for the current track.
            cut_offset = max(offset - track.offset, 0)
            # 'track_offset' determines the new track's offset after truncation.
            track_offset = max(track.offset - offset, 0)
            # 'track_end' is expressed relative to the beginning of the mix
            # (not to be confused with the 'start' of the underlying MonoCut)
            track_end = track.offset + track.cut.duration
            if track_end < offset:
                # Omit a MonoCut that ends before the truncation offset.
                continue
            cut_duration_decrease = 0
            if track_end > new_mix_end:
                if duration is not None:
                    cut_duration_decrease = track_end - new_mix_end
                else:
                    # NOTE(review): when ``duration is None``, ``new_mix_end`` is a *length*
                    # (old_duration - offset) while ``track_end`` is a mix coordinate, and
                    # ``track_end - old_duration`` is never positive here -- this looks
                    # inconsistent with the branch above; confirm the intended semantics.
                    cut_duration_decrease = track_end - old_duration
            # Compute the new MonoCut's duration after trimming the start and the end.
            new_duration = track.cut.duration - cut_offset - cut_duration_decrease
            if new_duration <= 0:
                # Omit a MonoCut that is completely outside the time span of the new truncated MixedCut.
                continue
            new_tracks.append(
                MixTrack(
                    cut=track.cut.truncate(
                        offset=cut_offset,
                        duration=new_duration,
                        keep_excessive_supervisions=keep_excessive_supervisions,
                        preserve_id=preserve_id,
                        _supervisions_index=_supervisions_index,
                    ),
                    offset=track_offset,
                    snr=track.snr,
                )
            )
        if len(new_tracks) == 1:
            # The truncation resulted in just a single cut - simply return it.
            return new_tracks[0].cut
        return MixedCut(id=self.id if preserve_id else str(uuid4()), tracks=new_tracks)
    def pad(
        self,
        duration: Seconds = None,
        num_frames: int = None,
        num_samples: int = None,
        pad_feat_value: float = LOG_EPSILON,
        direction: str = "right",
        preserve_id: bool = False,
        pad_value_dict: Optional[Dict[str, Union[int, float]]] = None,
    ) -> Cut:
        """
        Return a new MixedCut, padded with zeros in the recording, and ``pad_feat_value`` in each feature bin.
        The user can choose to pad either to a specific `duration`; a specific number of frames `max_frames`;
        or a specific number of samples `num_samples`. The three arguments are mutually exclusive.
        :param duration: The cut's minimal duration after padding.
        :param num_frames: The cut's total number of frames after padding.
        :param num_samples: The cut's total number of samples after padding.
        :param pad_feat_value: A float value that's used for padding the features.
            By default we assume a log-energy floor of approx. -23 (1e-10 after exp).
        :param direction: string, 'left', 'right' or 'both'. Determines whether the padding is added before or after
            the cut.
        :param preserve_id: When ``True``, preserves the cut ID from before padding.
            Otherwise, generates a new random ID (default).
        :param pad_value_dict: Optional dict that specifies what value should be used
            for padding arrays in custom attributes.
        :return: a padded MixedCut if duration is greater than this cut's duration, otherwise ``self``.
        """
        # Delegate to the module-level pad() helper shared by all cut types.
        return pad(
            self,
            duration=duration,
            num_frames=num_frames,
            num_samples=num_samples,
            pad_feat_value=pad_feat_value,
            direction=direction,
            preserve_id=preserve_id,
            pad_value_dict=pad_value_dict,
        )
def resample(self, sampling_rate: int, affix_id: bool = False) -> "MixedCut":
"""
Return a new ``MixedCut`` that will lazily resample the audio while reading it.
This operation will drop the feature manifest, if attached.
It does not affect the supervision.
:param sampling_rate: The new sampling rate.
:param affix_id: Should we modify the ID (useful if both versions of the same
cut are going to be present in a single manifest).
:return: a modified copy of the current ``MixedCut``.
"""
assert self.has_recording, "Cannot resample a MixedCut without Recording."
return MixedCut(
id=f"{self.id}_rs{sampling_rate}" if affix_id else self.id,
tracks=[
fastcopy(t, cut=t.cut.resample(sampling_rate)) for t in self.tracks
],
)
def perturb_speed(self, factor: float, affix_id: bool = True) -> "MixedCut":
"""
Return a new ``MixedCut`` that will lazily perturb the speed while loading audio.
The ``num_samples``, ``start`` and ``duration`` fields of the underlying Cuts
(and their Recordings and SupervisionSegments) are updated to reflect
the shrinking/extending effect of speed.
We are also updating the offsets of all underlying tracks.
:param factor: The speed will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
:param affix_id: When true, we will modify the ``MixedCut.id`` field
by affixing it with "_sp{factor}".
:return: a modified copy of the current ``MixedCut``.
"""
# TODO(pzelasko): test most extensively for edge cases
# Pre-conditions
assert (
self.has_recording
), "Cannot perturb speed on a MonoCut without Recording."
if self.has_features:
logging.warning(
"Attempting to perturb speed on a MixedCut that references pre-computed features. "
"The feature manifest(s) will be detached, as we do not support feature-domain "
"speed perturbation."
)
return MixedCut(
id=f"{self.id}_sp{factor}" if affix_id else self.id,
tracks=[
MixTrack(
cut=track.cut.perturb_speed(factor=factor, affix_id=affix_id),
offset=round(
perturb_num_samples(
num_samples=compute_num_samples(
track.offset, self.sampling_rate
),
factor=factor,
)
/ self.sampling_rate,
ndigits=8,
),
snr=track.snr,
)
for track in self.tracks
],
)
def perturb_tempo(self, factor: float, affix_id: bool = True) -> "MixedCut":
"""
Return a new ``MixedCut`` that will lazily perturb the tempo while loading audio.
Compared to speed perturbation, tempo preserves pitch.
The ``num_samples``, ``start`` and ``duration`` fields of the underlying Cuts
(and their Recordings and SupervisionSegments) are updated to reflect
the shrinking/extending effect of tempo.
We are also updating the offsets of all underlying tracks.
:param factor: The tempo will be adjusted this many times (e.g. factor=1.1 means 1.1x faster).
:param affix_id: When true, we will modify the ``MixedCut.id`` field
by affixing it with "_tp{factor}".
:return: a modified copy of the current ``MixedCut``.
"""
# TODO(pzelasko): test most extensively for edge cases
# Pre-conditions
assert (
self.has_recording
), "Cannot perturb tempo on a MonoCut without Recording."
if self.has_features:
logging.warning(
"Attempting to perturb tempo on a MixedCut that references pre-computed features. "
"The feature manifest(s) will be detached, as we do not support feature-domain "
"tempo perturbation."
)
return MixedCut(
id=f"{self.id}_tp{factor}" if affix_id else self.id,
tracks=[
MixTrack(
cut=track.cut.perturb_tempo(factor=factor, affix_id=affix_id),
offset=round(
perturb_num_samples(
num_samples=compute_num_samples(
track.offset, self.sampling_rate
),
factor=factor,
)
/ self.sampling_rate,
ndigits=8,
),
snr=track.snr,
)
for track in self.tracks
],
)
def perturb_volume(self, factor: float, affix_id: bool = True) -> "MixedCut":
"""
Return a new ``MixedCut`` that will lazily perturb the volume while loading audio.
Recordings of the underlying Cuts are updated to reflect volume change.
:param factor: The volume will be adjusted this many times (e.g. factor=1.1 means 1.1x louder).
:param affix_id: When true, we will modify the ``MixedCut.id`` field
by affixing it with "_vp{factor}".
:return: a modified copy of the current ``MixedCut``.
"""
# Pre-conditions
assert (
self.has_recording
), "Cannot perturb volume on a MonoCut without Recording."
if self.has_features:
logging.warning(
"Attempting to perturb volume on a MixedCut that references pre-computed features. "
"The feature manifest(s) will be detached, as we do not support feature-domain "
"volume perturbation."
)
return MixedCut(
id=f"{self.id}_vp{factor}" if affix_id else self.id,
tracks=[
fastcopy(
track,
cut=track.cut.perturb_volume(factor=factor, affix_id=affix_id),
)
for track in self.tracks
],
)
@rich_exception_info
def load_features(self, mixed: bool = True) -> Optional[np.ndarray]:
"""
Loads the features of the source cuts and mixes them on-the-fly.
:param mixed: when True (default), returns a 2D array of features mixed in the feature domain.
Otherwise returns a 3D array with the first dimension equal to the number of tracks.
:return: A numpy ndarray with features and with shape ``(num_frames, num_features)``,
or ``(num_tracks, num_frames, num_features)``
"""
if not self.has_features:
return None
first_cut = self.tracks[0].cut
# First, check for a simple scenario: just a single cut with padding.
# When that is the case, we don't have to instantiate a feature extractor,
# because we are not performing any actual mixing.
# That makes life simpler for the users who have a custom feature extractor,
# but don't actually care about feature-domain mixing; just want to pad.
if mixed and all(isinstance(t.cut, PaddingCut) for t in self.tracks[1:]):
padding_val = self.tracks[1].cut.feat_value
feats = np.ones((self.num_frames, self.num_features)) * padding_val
feats[: first_cut.num_frames, :] = first_cut.load_features()
return feats
# When there is more than one "regular" cut, we will perform an actual mix.
mixer = FeatureMixer(
feature_extractor=create_default_feature_extractor(
self._first_non_padding_cut.features.type
),
base_feats=first_cut.load_features(),
frame_shift=first_cut.frame_shift,
)
for track in self.tracks[1:]:
try:
mixer.add_to_mix(
feats=track.cut.load_features(),
snr=track.snr,
offset=track.offset,
sampling_rate=track.cut.sampling_rate,
)
except NonPositiveEnergyError as e:
logging.warning(
str(e) + f' MonoCut with id "{track.cut.id}" will not be mixed in.'
)
if mixed:
feats = mixer.mixed_feats
# Note: The slicing below is a work-around for an edge-case
# when two cuts have durations that ended with 0.005 (e.g. 10.125 and 5.715)
# - then, the feature extractor "squeezed in" a last extra frame and the simple
# relationship between num_frames and duration we strived for is not true;
# i.e. the duration is 10.125 + 5.715 = 15.84, but the number of frames is
# 1013 + 572 = 1585. If the frame_shift is 0.01, we have gained an extra 0.01s...
if feats.shape[0] - self.num_frames == 1:
feats = feats[: self.num_frames, :]
# TODO(pzelasko): This can sometimes happen in a MixedCut with >= 5 different Cuts,
# with a regular MonoCut at the end, when the mix offsets are floats with a lot of decimals.
# For now we're duplicating the last frame to match the declared "num_frames" of this cut.
if feats.shape[0] - self.num_frames == -1:
feats = np.concatenate((feats, feats[-1:, :]), axis=0)
assert feats.shape[0] == self.num_frames, (
"Inconsistent number of frames in a MixedCut: please report "
"this issue at https://github.com/lhotse-speech/lhotse/issues "
"showing the output of print(cut) or str(cut) on which"
"load_features() was called."
)
return feats
else:
return mixer.unmixed_feats
    @rich_exception_info
    def load_audio(self, mixed: bool = True) -> Optional[np.ndarray]:
        """
        Loads the audios of the source cuts and mix them on-the-fly.
        :param mixed: When True (default), returns a mono mix of the underlying tracks.
            Otherwise returns a numpy array with the number of channels equal to the number of tracks.
        :return: A numpy ndarray with audio samples and with shape ``(num_channels, num_samples)``
        """
        if not self.has_recording:
            return None
        # The first track is the mixing base; the rest are added on top of it.
        mixer = AudioMixer(
            self.tracks[0].cut.load_audio(),
            sampling_rate=self.tracks[0].cut.sampling_rate,
        )
        for track in self.tracks[1:]:
            try:
                mixer.add_to_mix(
                    audio=track.cut.load_audio(),
                    snr=track.snr,
                    offset=track.offset,
                )
            except NonPositiveEnergyError as e:
                # Best-effort: skip zero-energy tracks instead of failing the whole mix.
                logging.warning(
                    str(e) + f' MonoCut with id "{track.cut.id}" will not be mixed in.'
                )
        if mixed:
            # Off-by-one errors can happen during mixing due to imperfect float arithmetic and rounding;
            # we will fix them on-the-fly so that the manifest does not lie about the num_samples.
            audio = mixer.mixed_audio
            if audio.shape[1] - self.num_samples == 1:
                audio = audio[:, : self.num_samples]
            if audio.shape[1] - self.num_samples == -1:
                audio = np.concatenate((audio, audio[:, -1:]), axis=1)
            assert audio.shape[1] == self.num_samples, (
                f"Inconsistent number of samples in a MixedCut: please report "
                f"this issue at https://github.com/lhotse-speech/lhotse/issues "
                f"showing the cut below. MixedCut:\n{self}"
            )
        else:
            audio = mixer.unmixed_audio
        return audio
def plot_tracks_features(self):
"""
Display the feature matrix as an image. Requires matplotlib to be installed.
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(len(self.tracks))
features = self.load_features(mixed=False)
fmin, fmax = features.min(), features.max()
for idx, ax in enumerate(axes):
ax.imshow(np.flip(features[idx].transpose(1, 0), 0), vmin=fmin, vmax=fmax)
return axes
def plot_tracks_audio(self):
"""
Display plots of the individual tracks' waveforms. Requires matplotlib to be installed.
"""
import matplotlib.pyplot as plt
audio = self.load_audio(mixed=False)
fig, axes = plt.subplots(len(self.tracks), sharex=False, sharey=True)
for idx, (track, ax) in enumerate(zip(self.tracks, axes)):
samples = audio[idx, :]
ax.plot(np.linspace(0, self.duration, len(samples)), samples)
for supervision in track.cut.supervisions:
supervision = supervision.trim(track.cut.duration)
ax.axvspan(
track.offset + supervision.start,
track.offset + supervision.end,
color="green",
alpha=0.1,
)
return axes
def drop_features(self) -> "MixedCut":
"""Return a copy of the current :class:`MixedCut`, detached from ``features``."""
assert (
self.has_recording
), f"Cannot detach features from a MixedCut with no Recording (cut ID = {self.id})."
return fastcopy(
self, tracks=[fastcopy(t, cut=t.cut.drop_features()) for t in self.tracks]
)
def drop_recording(self) -> "MixedCut":
"""Return a copy of the current :class:`.MixedCut`, detached from ``recording``."""
assert (
self.has_features
), f"Cannot detach recording from a MixedCut with no Features (cut ID = {self.id})."
return fastcopy(
self, tracks=[fastcopy(t, cut=t.cut.drop_recording()) for t in self.tracks]
)
def drop_supervisions(self) -> "MixedCut":
"""Return a copy of the current :class:`.MixedCut`, detached from ``supervisions``."""
return fastcopy(
self,
tracks=[fastcopy(t, cut=t.cut.drop_supervisions()) for t in self.tracks],
)
    def compute_and_store_features(
        self,
        extractor: FeatureExtractor,
        storage: FeaturesWriter,
        augment_fn: Optional[AugmentFn] = None,
        mix_eagerly: bool = True,
    ) -> Cut:
        """
        Compute the features from this cut, store them on disk, and create a new `MonoCut` object with the
        feature manifest attached. This cut has to be able to load audio.
        :param extractor: a ``FeatureExtractor`` instance used to compute the features.
        :param storage: a ``FeaturesWriter`` instance used to store the features.
        :param augment_fn: an optional callable used for audio augmentation.
        :param mix_eagerly: when False, extract and store the features for each track separately,
            and mix them dynamically when loading the features.
            When True, mix the audio first and store the mixed features, returning a new ``MonoCut`` instance
            with the same ID. The returned ``MonoCut`` will not have a ``Recording`` attached.
        :return: a new ``MonoCut`` instance if ``mix_eagerly`` is True, or returns ``self``
            with each of the tracks containing the ``Features`` manifests.
        """
        if mix_eagerly:
            # Mix the audio in the time domain first, then extract features once.
            features_info = extractor.extract_from_samples_and_store(
                samples=self.load_audio(),
                storage=storage,
                sampling_rate=self.sampling_rate,
                offset=0,
                channel=0,
                augment_fn=augment_fn,
            )
            # The eagerly-mixed result is no longer a mix -- return a MonoCut
            # with the same ID and no Recording attached.
            return MonoCut(
                id=self.id,
                start=0,
                duration=self.duration,
                channel=0,
                supervisions=self.supervisions,
                features=features_info,
                recording=None,
                custom=self.custom if hasattr(self, "custom") else None,
            )
        else:  # mix lazily
            new_tracks = [
                MixTrack(
                    cut=track.cut.compute_and_store_features(
                        extractor=extractor,
                        storage=storage,
                        augment_fn=augment_fn,
                    ),
                    offset=track.offset,
                    snr=track.snr,
                )
                for track in self.tracks
            ]
            return MixedCut(id=self.id, tracks=new_tracks)
def map_supervisions(
self, transform_fn: Callable[[SupervisionSegment], SupervisionSegment]
) -> Cut:
"""
Modify the SupervisionSegments by `transform_fn` of this MixedCut.
:param transform_fn: a function that modifies a supervision as an argument.
:return: a modified MixedCut.
"""
new_mixed_cut = fastcopy(self)
for track in new_mixed_cut.tracks:
track.cut.supervisions = [
segment.map(transform_fn) for segment in track.cut.supervisions
]
return new_mixed_cut
def filter_supervisions(
self, predicate: Callable[[SupervisionSegment], bool]
) -> Cut:
"""
Modify cut to store only supervisions accepted by `predicate`
Example:
>>> cut = cut.filter_supervisions(lambda s: s.id in supervision_ids)
>>> cut = cut.filter_supervisions(lambda s: s.duration < 5.0)
>>> cut = cut.filter_supervisions(lambda s: s.text is not None)
:param predicate: A callable that accepts `SupervisionSegment` and returns bool
:return: a modified MonoCut
"""
new_mixed_cut = fastcopy(
self,
tracks=[
fastcopy(track, cut=track.cut.filter_supervisions(predicate))
for track in self.tracks
],
)
return new_mixed_cut
    @staticmethod
    def from_dict(data: dict) -> "MixedCut":
        # Deserialize the ID directly, and each track via MixTrack.from_dict.
        return MixedCut(
            id=data["id"],
            tracks=[MixTrack.from_dict(track) for track in data["tracks"]],
        )
def with_features_path_prefix(self, path: Pathlike) -> "MixedCut":
if not self.has_features:
return self
return MixedCut(
id=self.id,
tracks=[
fastcopy(t, cut=t.cut.with_features_path_prefix(path))
for t in self.tracks
],
)
def with_recording_path_prefix(self, path: Pathlike) -> "MixedCut":
if not self.has_recording:
return self
return MixedCut(
id=self.id,
tracks=[
fastcopy(t, cut=t.cut.with_recording_path_prefix(path))
for t in self.tracks
],
)
@property
def _first_non_padding_cut(self) -> MonoCut:
return [t.cut for t in self.tracks if not isinstance(t.cut, PaddingCut)][0]
class CutSet(Serializable, Sequence[Cut]):
"""
:class:`~lhotse.cut.CutSet` represents a collection of cuts, indexed by cut IDs.
CutSet ties together all types of data -- audio, features and supervisions, and is suitable to represent
training/dev/test sets.
.. note::
:class:`~lhotse.cut.CutSet` is the basic building block of PyTorch-style Datasets for speech/audio processing tasks.
When coming from Kaldi, there is really no good equivalent -- the closest concept may be Kaldi's "egs" for training
neural networks, which are chunks of feature matrices and corresponding alignments used respectively as inputs and
supervisions. :class:`~lhotse.cut.CutSet` is different because it provides you with all kinds of metadata,
and you can select just the interesting bits to feed them to your models.
:class:`~lhotse.cut.CutSet` can be created from any combination of :class:`~lhotse.audio.RecordingSet`,
:class:`~lhotse.supervision.SupervisionSet`, and :class:`~lhotse.features.base.FeatureSet`
with :meth:`lhotse.cut.CutSet.from_manifests`::
>>> from lhotse import CutSet
>>> cuts = CutSet.from_manifests(recordings=my_recording_set)
>>> cuts2 = CutSet.from_manifests(features=my_feature_set)
>>> cuts3 = CutSet.from_manifests(
... recordings=my_recording_set,
... features=my_feature_set,
... supervisions=my_supervision_set,
... )
When creating a :class:`.CutSet` with :meth:`.CutSet.from_manifests`, the resulting cuts will have the same duration
as the input recordings or features. For long recordings, it is not viable for training.
We provide several methods to transform the cuts into shorter ones.
Consider the following scenario::
Recording
|-------------------------------------------|
"Hey, Matt!" "Yes?" "Oh, nothing"
|----------| |----| |-----------|
.......... CutSet.from_manifests() ..........
Cut1
|-------------------------------------------|
............. Example CutSet A ..............
Cut1 Cut2 Cut3
|----------| |----| |-----------|
............. Example CutSet B ..............
Cut1 Cut2
|---------------------||--------------------|
............. Example CutSet C ..............
Cut1 Cut2
|---| |------|
The CutSet's A, B and C can be created like::
>>> cuts_A = cuts.trim_to_supervisions()
>>> cuts_B = cuts.cut_into_windows(duration=5.0)
>>> cuts_C = cuts.trim_to_unsupervised_segments()
.. note::
Some operations support parallel execution via an optional ``num_jobs`` parameter.
By default, all processing is single-threaded.
.. caution::
Operations on cut sets are not mutating -- they return modified copies of :class:`.CutSet` objects,
leaving the original object unmodified (and all of its cuts are also unmodified).
:class:`~lhotse.cut.CutSet` can be stored and read from JSON, JSONL, etc. and supports optional gzip compression::
>>> cuts.to_file('cuts.jsonl.gz')
>>> cuts4 = CutSet.from_file('cuts.jsonl.gz')
It behaves similarly to a ``dict``::
>>> 'rec1-1-0' in cuts
True
>>> cut = cuts['rec1-1-0']
>>> for cut in cuts:
>>> pass
>>> len(cuts)
127
:class:`~lhotse.cut.CutSet` has some convenience properties and methods to gather information about the dataset::
>>> ids = list(cuts.ids)
>>> speaker_id_set = cuts.speakers
>>> # The following prints a message:
>>> cuts.describe()
Cuts count: 547
Total duration (hours): 326.4
Speech duration (hours): 79.6 (24.4%)
***
Duration statistics (seconds):
mean 2148.0
std 870.9
min 477.0
25% 1523.0
50% 2157.0
75% 2423.0
max 5415.0
dtype: float64
Manipulation examples::
>>> longer_than_5s = cuts.filter(lambda c: c.duration > 5)
>>> first_100 = cuts.subset(first=100)
>>> split_into_4 = cuts.split(num_splits=4)
>>> shuffled = cuts.shuffle()
>>> random_sample = cuts.sample(n_cuts=10)
>>> new_ids = cuts.modify_ids(lambda c: c.id + '-newid')
These operations can be composed to implement more complex operations, e.g.
bucketing by duration:
>>> buckets = cuts.sort_by_duration().split(num_splits=30)
Cuts in a :class:`.CutSet` can be detached from parts of their metadata::
>>> cuts_no_feat = cuts.drop_features()
>>> cuts_no_rec = cuts.drop_recordings()
>>> cuts_no_sup = cuts.drop_supervisions()
Sometimes specific sorting patterns are useful when a small CutSet represents a mini-batch::
>>> cuts = cuts.sort_by_duration(ascending=False)
>>> cuts = cuts.sort_like(other_cuts)
:class:`~lhotse.cut.CutSet` offers some batch processing operations::
>>> cuts = cuts.pad(num_frames=300) # or duration=30.0
>>> cuts = cuts.truncate(max_duration=30.0, offset_type='start') # truncate from start to 30.0s
>>> cuts = cuts.mix(other_cuts, snr=[10, 30], mix_prob=0.5)
:class:`~lhotse.cut.CutSet` supports lazy data augmentation/transformation methods which require adjusting some information
in the manifest (e.g., ``num_samples`` or ``duration``).
Note that in the following examples, the audio is untouched -- the operations are stored in the manifest,
and executed upon reading the audio::
>>> cuts_sp = cuts.perturb_speed(factor=1.1)
>>> cuts_vp = cuts.perturb_volume(factor=2.)
>>> cuts_24k = cuts.resample(24000)
.. caution::
If the :class:`.CutSet` contained :class:`~lhotse.features.base.Features` manifests, they will be
detached after performing audio augmentations such as :meth:`.CutSet.perturb_speed` or :meth:`.CutSet.resample` or :meth:`.CutSet.perturb_volume`.
:class:`~lhotse.cut.CutSet` offers parallel feature extraction capabilities
    (see :meth:`.CutSet.compute_and_store_features` for details),
and can be used to estimate global mean and variance::
>>> from lhotse import Fbank
>>> cuts = CutSet()
>>> cuts = cuts.compute_and_store_features(
... extractor=Fbank(),
... storage_path='/data/feats',
... num_jobs=4
... )
>>> mvn_stats = cuts.compute_global_feature_stats('/data/features/mvn_stats.pkl', max_cuts=10000)
See also:
- :class:`~lhotse.cut.Cut`
"""
def __init__(self, cuts: Optional[Mapping[str, Cut]] = None) -> None:
self.cuts = ifnone(cuts, {})
def __eq__(self, other: "CutSet") -> bool:
return self.cuts == other.cuts
    @property
    def is_lazy(self) -> bool:
        """
        Indicates whether this manifest was opened in lazy (read-on-the-fly) mode or not.
        """
        # Imported locally, presumably to avoid a circular import - TODO confirm.
        from lhotse.serialization import LazyJsonlIterator
        # Lazy mode is detected purely by the storage type of ``self.cuts``.
        return isinstance(self.cuts, LazyJsonlIterator)
@property
def mixed_cuts(self) -> Dict[str, MixedCut]:
return {id_: cut for id_, cut in self.cuts.items() if isinstance(cut, MixedCut)}
@property
def simple_cuts(self) -> Dict[str, MonoCut]:
return {id_: cut for id_, cut in self.cuts.items() if isinstance(cut, MonoCut)}
    @property
    def ids(self) -> Iterable[str]:
        """The IDs of all cuts in this set (a view over the mapping's keys)."""
        return self.cuts.keys()
@property
def speakers(self) -> FrozenSet[str]:
return frozenset(
supervision.speaker for cut in self for supervision in cut.supervisions
)
    @staticmethod
    def from_cuts(cuts: Iterable[Cut]) -> "CutSet":
        """
        Build a :class:`.CutSet` from an iterable of cuts, indexing them by their IDs
        (``index_by_id_and_check`` presumably rejects duplicate IDs - see its definition).
        """
        return CutSet(cuts=index_by_id_and_check(cuts))
    @staticmethod
    def from_manifests(
        recordings: Optional[RecordingSet] = None,
        supervisions: Optional[SupervisionSet] = None,
        features: Optional[FeatureSet] = None,
        random_ids: bool = False,
    ) -> "CutSet":
        """
        Create a CutSet from any combination of supervision, feature and recording manifests.
        At least one of ``recordings`` or ``features`` is required.

        The created cuts will be of type :class:`.MonoCut`, even when the recordings have multiple channels.
        The :class:`.MonoCut` boundaries correspond to those found in the ``features``, when available,
        otherwise to those found in the ``recordings``.

        When ``supervisions`` are provided, we'll be searching them for matching recording IDs
        and attaching to created cuts, assuming they are fully within the cut's time span.

        :param recordings: an optional :class:`~lhotse.audio.RecordingSet` manifest.
        :param supervisions: an optional :class:`~lhotse.supervision.SupervisionSet` manifest.
        :param features: an optional :class:`~lhotse.features.base.FeatureSet` manifest.
        :param random_ids: boolean, should the cut IDs be randomized. By default, use the recording ID
            with a loop index and a channel idx, i.e. "{recording_id}-{idx}-{channel}")
        :return: a new :class:`.CutSet` instance.
        """
        assert (
            features is not None or recordings is not None
        ), "At least one of 'features' or 'recordings' has to be provided."
        # Remember which of the optional manifests were actually provided.
        sup_ok, feat_ok, rec_ok = (
            supervisions is not None,
            features is not None,
            recordings is not None,
        )
        if feat_ok:
            # Case I: Features are provided.
            # Use features to determine the cut boundaries and attach recordings and supervisions as available.
            return CutSet.from_cuts(
                MonoCut(
                    id=str(uuid4())
                    if random_ids
                    else f"{feats.recording_id}-{idx}-{feats.channels}",
                    start=feats.start,
                    duration=feats.duration,
                    channel=feats.channels,
                    features=feats,
                    recording=recordings[feats.recording_id] if rec_ok else None,
                    # The supervisions' start times are adjusted if the features object starts at time other than 0s.
                    supervisions=list(
                        supervisions.find(
                            recording_id=feats.recording_id,
                            channel=feats.channels,
                            start_after=feats.start,
                            end_before=feats.end,
                            adjust_offset=True,
                        )
                    )
                    if sup_ok
                    else [],
                )
                for idx, feats in enumerate(features)
            )
        # Case II: Recordings are provided (and features are not).
        # Use recordings to determine the cut boundaries.
        return CutSet.from_cuts(
            MonoCut(
                id=str(uuid4()) if random_ids else f"{recording.id}-{ridx}-{cidx}",
                start=0,
                duration=recording.duration,
                channel=channel,
                recording=recording,
                supervisions=list(
                    supervisions.find(recording_id=recording.id, channel=channel)
                )
                if sup_ok
                else [],
            )
            for ridx, recording in enumerate(recordings)
            # A single cut always represents a single channel. When a recording has multiple channels,
            # we create a new cut for each channel separately.
            for cidx, channel in enumerate(recording.channel_ids)
        )
@staticmethod
def from_dicts(data: Iterable[dict]) -> "CutSet":
def deserialize_one(raw_cut: dict) -> Cut:
cut_type = raw_cut.pop("type")
if cut_type == "MonoCut":
return MonoCut.from_dict(raw_cut)
if cut_type == "Cut":
warnings.warn(
"Your manifest was created with Lhotse version earlier than v0.8, when MonoCut was called Cut. "
"Please re-generate it with Lhotse v0.8 as it might stop working in a future version "
"(using manifest.from_file() and then manifest.to_file() should be sufficient)."
)
return MonoCut.from_dict(raw_cut)
if cut_type == "MixedCut":
return MixedCut.from_dict(raw_cut)
raise ValueError(
f"Unexpected cut type during deserialization: '{cut_type}'"
)
return CutSet.from_cuts(deserialize_one(cut) for cut in data)
def to_dicts(self) -> Iterable[dict]:
return (cut.to_dict() for cut in self)
def describe(self) -> None:
"""
Print a message describing details about the ``CutSet`` - the number of cuts and the
duration statistics, including the total duration and the percentage of speech segments.
Example output:
Cuts count: 547
Total duration (hours): 326.4
Speech duration (hours): 79.6 (24.4%)
***
Duration statistics (seconds):
mean 2148.0
std 870.9
min 477.0
25% 1523.0
50% 2157.0
75% 2423.0
max 5415.0
dtype: float64
"""
durations = np.array([c.duration for c in self])
speech_durations = np.array(
[s.trim(c.duration).duration for c in self for s in c.supervisions]
)
total_sum = durations.sum()
speech_sum = speech_durations.sum()
print("Cuts count:", len(self))
print(f"Total duration (hours): {total_sum / 3600:.1f}")
print(
f"Speech duration (hours): {speech_sum / 3600:.1f} ({speech_sum / total_sum:.1%})"
)
print("***")
print("Duration statistics (seconds):")
print(f"mean\t{np.mean(durations):.1f}")
print(f"std\t{np.std(durations):.1f}")
print(f"min\t{np.min(durations):.1f}")
print(f"25%\t{np.percentile(durations, 25):.1f}")
print(f"50%\t{np.median(durations):.1f}")
print(f"75%\t{np.percentile(durations, 75):.1f}")
print(f"max\t{np.max(durations):.1f}")
def shuffle(self, rng: Optional[random.Random] = None) -> "CutSet":
"""
Shuffle the cut IDs in the current :class:`.CutSet` and return a shuffled copy of self.
:param rng: an optional instance of ``random.Random`` for precise control of randomness.
:return: a shuffled copy of self.
"""
if rng is None:
rng = random
ids = list(self.ids)
rng.shuffle(ids)
return CutSet(cuts={cid: self[cid] for cid in ids})
def split(
self, num_splits: int, shuffle: bool = False, drop_last: bool = False
) -> List["CutSet"]:
"""
Split the :class:`~lhotse.CutSet` into ``num_splits`` pieces of equal size.
:param num_splits: Requested number of splits.
:param shuffle: Optionally shuffle the recordings order first.
:param drop_last: determines how to handle splitting when ``len(seq)`` is not divisible
by ``num_splits``. When ``False`` (default), the splits might have unequal lengths.
When ``True``, it may discard the last element in some splits to ensure they are
equally long.
:return: A list of :class:`~lhotse.CutSet` pieces.
"""
return [
CutSet.from_cuts(subset)
for subset in split_sequence(
self, num_splits=num_splits, shuffle=shuffle, drop_last=drop_last
)
]
    def subset(
        self,
        *,  # only keyword arguments allowed
        supervision_ids: Optional[Iterable[str]] = None,
        cut_ids: Optional[Iterable[str]] = None,
        first: Optional[int] = None,
        last: Optional[int] = None,
    ) -> "CutSet":
        """
        Return a new ``CutSet`` according to the selected subset criterion.
        Only a single argument to ``subset`` is supported at this time.

        Example:
            >>> cuts = CutSet.from_yaml('path/to/cuts')
            >>> train_set = cuts.subset(supervision_ids=train_ids)
            >>> test_set = cuts.subset(supervision_ids=test_ids)

        :param supervision_ids: List of supervision IDs to keep.
        :param cut_ids: List of cut IDs to keep.
            The returned :class:`.CutSet` preserves the order of `cut_ids`.
        :param first: int, the number of first cuts to keep.
        :param last: int, the number of last cuts to keep.
        :return: a new ``CutSet`` with the subset results.
        """
        assert exactly_one_not_null(
            supervision_ids, cut_ids, first, last
        ), "subset() can handle only one non-None arg."
        # Each branch below handles exactly one of the mutually exclusive criteria.
        if first is not None:
            assert first > 0
            if first > len(self):
                logging.warning(
                    f"CutSet has only {len(self)} items but first {first} required; not doing anything."
                )
                return self
            return CutSet.from_cuts(islice(self, first))
        if last is not None:
            assert last > 0
            if last > len(self):
                logging.warning(
                    f"CutSet has only {len(self)} items but last {last} required; not doing anything."
                )
                return self
            cut_ids = list(self.ids)[-last:]
            return CutSet.from_cuts(self[cid] for cid in cut_ids)
        if supervision_ids is not None:
            # Remove cuts without supervisions
            supervision_ids = set(supervision_ids)
            return CutSet.from_cuts(
                cut.filter_supervisions(lambda s: s.id in supervision_ids)
                for cut in self
                if any(s.id in supervision_ids for s in cut.supervisions)
            )
        if cut_ids is not None:
            cut_ids = list(cut_ids)  # Remember the original order
            id_set = frozenset(cut_ids)  # Make a set for quick lookup
            # Iteration makes it possible to subset lazy manifests
            cuts = CutSet.from_cuts(cut for cut in self if cut.id in id_set)
            if len(cuts) < len(cut_ids):
                logging.warning(
                    f"In CutSet.subset(cut_ids=...): expected {len(cut_ids)} cuts but got {len(cuts)} "
                    f"instead ({len(cut_ids) - len(cuts)} cut IDs were not in the CutSet)."
                )
            # Restore the requested cut_ids order.
            return CutSet.from_cuts(cuts[cid] for cid in cut_ids)
def filter_supervisions(
self, predicate: Callable[[SupervisionSegment], bool]
) -> "CutSet":
"""
Return a new CutSet with Cuts containing only `SupervisionSegments` satisfying `predicate`
Cuts without supervisions are preserved
Example:
>>> cuts = CutSet.from_yaml('path/to/cuts')
>>> at_least_five_second_supervisions = cuts.filter_supervisions(lambda s: s.duration >= 5)
:param predicate: A callable that accepts `SupervisionSegment` and returns bool
:return: a CutSet with filtered supervisions
"""
return CutSet.from_cuts(cut.filter_supervisions(predicate) for cut in self)
def filter(self, predicate: Callable[[Cut], bool]) -> "CutSet":
"""
Return a new CutSet with the Cuts that satisfy the `predicate`.
:param predicate: a function that takes a cut as an argument and returns bool.
:return: a filtered CutSet.
"""
return CutSet.from_cuts(cut for cut in self if predicate(cut))
    def trim_to_supervisions(
        self,
        keep_overlapping: bool = True,
        min_duration: Optional[Seconds] = None,
        context_direction: Literal["center", "left", "right", "random"] = "center",
        num_jobs: int = 1,
    ) -> "CutSet":
        """
        Return a new CutSet with Cuts that have identical spans as their supervisions.

        For example, the following cut::

                    Cut
            |-----------------|
             Sup1
            |----|  Sup2
               |-----------|

        is transformed into two cuts::

             Cut1
            |----|
             Sup1
            |----|
               Sup2
               |-|
                Cut2
               |-----------|
               Sup1
               |-|
                    Sup2
               |-----------|

        :param keep_overlapping: when ``False``, it will discard parts of other supervisions that overlap with the
            main supervision. In the illustration above, it would discard ``Sup2`` in ``Cut1`` and ``Sup1`` in ``Cut2``.
        :param min_duration: An optional duration in seconds; specifying this argument will extend the cuts
            that would have been shorter than ``min_duration`` with actual acoustic context in the recording/features.
            If there are supervisions present in the context, they are kept when ``keep_overlapping`` is true.
            If there is not enough context, the returned cut will be shorter than ``min_duration``.
            If the supervision segment is longer than ``min_duration``, the return cut will be longer.
        :param context_direction: Which direction should the cut be expanded towards to include context.
            The value of "center" implies equal expansion to left and right;
            random uniformly samples a value between "left" and "right".
        :param num_jobs: Number of parallel workers to process the cuts.
        :return: a ``CutSet``.
        """
        # Serial path: each cut may produce several trimmed cuts, hence the flatten.
        if num_jobs == 1:
            return CutSet.from_cuts(
                # chain.from_iterable is a flatten operation: Iterable[Iterable[T]] -> Iterable[T]
                chain.from_iterable(
                    cut.trim_to_supervisions(
                        keep_overlapping=keep_overlapping,
                        min_duration=min_duration,
                        context_direction=context_direction,
                    )
                    for cut in self
                )
            )
        # Parallel path: split the CutSet into chunks, run the serial version of this
        # method on each chunk in a worker, and combine the results.
        from lhotse.manipulation import split_parallelize_combine
        result = split_parallelize_combine(
            num_jobs,
            self,
            CutSet.trim_to_supervisions,
            keep_overlapping=keep_overlapping,
            min_duration=min_duration,
            context_direction=context_direction,
        )
        return result
def trim_to_unsupervised_segments(self) -> "CutSet":
"""
Return a new CutSet with Cuts created from segments that have no supervisions (likely
silence or noise).
:return: a ``CutSet``.
"""
from cytoolz import sliding_window
cuts = []
for cut in self:
segments = []
supervisions = sorted(cut.supervisions, key=lambda s: s.start)
# Check if there is an unsupervised segment at the start of the cut,
# before the first supervision.
if supervisions[0].start > 0:
segments.append((0, supervisions[0].start))
# Check if there are unsupervised segments between the supervisions.
for left, right in sliding_window(2, supervisions):
if overlaps(left, right) or left.end == right.start:
continue
segments.append((left.end, right.start))
# Check if there is an unsupervised segment after the last supervision,
# before the cut ends.
if supervisions[-1].end < cut.duration:
segments.append((supervisions[-1].end, cut.duration))
# Create cuts from all found unsupervised segments.
for start, end in segments:
cuts.append(cut.truncate(offset=start, duration=end - start))
return CutSet.from_cuts(cuts)
def mix_same_recording_channels(self) -> "CutSet":
"""
Find cuts that come from the same recording and have matching start and end times, but
represent different channels. Then, mix them together (in matching groups) and return
a new ``CutSet`` that contains their mixes. This is useful for processing microphone array
recordings.
It is intended to be used as the first operation after creating a new ``CutSet`` (but
might also work in other circumstances, e.g. if it was cut to windows first).
Example:
>>> ami = prepare_ami('path/to/ami')
>>> cut_set = CutSet.from_manifests(recordings=ami['train']['recordings'])
>>> multi_channel_cut_set = cut_set.mix_same_recording_channels()
In the AMI example, the ``multi_channel_cut_set`` will yield MixedCuts that hold all single-channel
Cuts together.
"""
if self.mixed_cuts:
raise ValueError(
"This operation is not applicable to CutSet's containing MixedCut's."
)
from cytoolz.itertoolz import groupby
groups = groupby(lambda cut: (cut.recording.id, cut.start, cut.end), self)
return CutSet.from_cuts(mix_cuts(cuts) for cuts in groups.values())
def sort_by_duration(self, ascending: bool = False) -> "CutSet":
"""
Sort the CutSet according to cuts duration and return the result. Descending by default.
"""
return CutSet.from_cuts(
sorted(self, key=(lambda cut: cut.duration), reverse=not ascending)
)
def sort_like(self, other: "CutSet") -> "CutSet":
"""
Sort the CutSet according to the order of cut IDs in ``other`` and return the result.
"""
assert set(self.ids) == set(
other.ids
), "sort_like() expects both CutSet's to have identical cut IDs."
return CutSet.from_cuts(self[cid] for cid in other.ids)
def index_supervisions(
self, index_mixed_tracks: bool = False, keep_ids: Optional[Set[str]] = None
) -> Dict[str, IntervalTree]:
"""
Create a two-level index of supervision segments. It is a mapping from a Cut's ID to an
interval tree that contains the supervisions of that Cut.
The interval tree can be efficiently queried for overlapping and/or enveloping segments.
It helps speed up some operations on Cuts of very long recordings (1h+) that contain many
supervisions.
:param index_mixed_tracks: Should the tracks of MixedCut's be indexed as additional, separate entries.
:param keep_ids: If specified, we will only index the supervisions with the specified IDs.
:return: a mapping from MonoCut ID to an interval tree of SupervisionSegments.
"""
indexed = {}
for cut in self:
indexed.update(
cut.index_supervisions(
index_mixed_tracks=index_mixed_tracks, keep_ids=keep_ids
)
)
return indexed
    def pad(
        self,
        duration: Seconds = None,
        num_frames: int = None,
        num_samples: int = None,
        pad_feat_value: float = LOG_EPSILON,
        direction: str = "right",
        preserve_id: bool = False,
        pad_value_dict: Optional[Dict[str, Union[int, float]]] = None,
    ) -> "CutSet":
        """
        Return a new CutSet with Cuts padded to ``duration``, ``num_frames`` or ``num_samples``.
        Cuts longer than the specified argument will not be affected.
        By default, cuts will be padded to the right (i.e. after the signal).

        When none of ``duration``, ``num_frames``, or ``num_samples`` is specified,
        we'll try to determine the best way to pad to the longest cut based on
        whether features or recordings are available.

        :param duration: The cuts minimal duration after padding.
            When not specified, we'll choose the duration of the longest cut in the CutSet.
        :param num_frames: The cut's total number of frames after padding.
        :param num_samples: The cut's total number of samples after padding.
        :param pad_feat_value: A float value that's used for padding the features.
            By default we assume a log-energy floor of approx. -23 (1e-10 after exp).
        :param direction: string, 'left', 'right' or 'both'. Determines whether the padding is added
            before or after the cut.
        :param preserve_id: When ``True``, preserves the cut ID from before padding.
            Otherwise, generates a new random ID (default).
        :param pad_value_dict: Optional dict that specifies what value should be used
            for padding arrays in custom attributes.
        :return: A padded CutSet.
        """
        # When the user does not specify explicit padding duration/num_frames/num_samples,
        # we'll try to pad using frames if there are features,
        # otherwise using samples if there are recordings,
        # otherwise duration which is always there.
        if all(arg is None for arg in (duration, num_frames, num_samples)):
            if all(c.has_features for c in self):
                num_frames = max(c.num_frames for c in self)
            elif all(c.has_recording for c in self):
                num_samples = max(c.num_samples for c in self)
            else:
                duration = max(cut.duration for cut in self)
        # Delegate the per-cut padding to Cut.pad with the (possibly auto-detected) target.
        return CutSet.from_cuts(
            cut.pad(
                duration=duration,
                num_frames=num_frames,
                num_samples=num_samples,
                pad_feat_value=pad_feat_value,
                direction=direction,
                preserve_id=preserve_id,
                pad_value_dict=pad_value_dict,
            )
            for cut in self
        )
def truncate(
self,
max_duration: Seconds,
offset_type: str,
keep_excessive_supervisions: bool = True,
preserve_id: bool = False,
) -> "CutSet":
"""
Return a new CutSet with the Cuts truncated so that their durations are at most `max_duration`.
Cuts shorter than `max_duration` will not be changed.
:param max_duration: float, the maximum duration in seconds of a cut in the resulting manifest.
:param offset_type: str, can be:
- 'start' => cuts are truncated from their start;
- 'end' => cuts are truncated from their end minus max_duration;
- 'random' => cuts are truncated randomly between their start and their end minus max_duration
:param keep_excessive_supervisions: bool. When a cut is truncated in the middle of a supervision segment,
should the supervision be kept.
:param preserve_id: bool. Should the truncated cut keep the same ID or get a new, random one.
:return: a new CutSet instance with truncated cuts.
"""
truncated_cuts = []
for cut in self:
if cut.duration <= max_duration:
truncated_cuts.append(cut)
continue
def compute_offset():
if offset_type == "start":
return 0.0
last_offset = cut.duration - max_duration
if offset_type == "end":
return last_offset
if offset_type == "random":
return random.uniform(0.0, last_offset)
raise ValueError(f"Unknown 'offset_type' option: {offset_type}")
truncated_cuts.append(
cut.truncate(
offset=compute_offset(),
duration=max_duration,
keep_excessive_supervisions=keep_excessive_supervisions,
preserve_id=preserve_id,
)
)
return CutSet.from_cuts(truncated_cuts)
def cut_into_windows(
self,
duration: Seconds,
keep_excessive_supervisions: bool = True,
num_jobs: int = 1,
) -> "CutSet":
"""
Return a new ``CutSet``, made by traversing each ``MonoCut`` in windows of ``duration`` seconds and
creating new ``MonoCut`` out of them.
The last window might have a shorter duration if there was not enough audio, so you might want to
use either ``.filter()`` or ``.pad()`` afterwards to obtain a uniform duration ``CutSet``.
:param duration: Desired duration of the new cuts in seconds.
:param keep_excessive_supervisions: bool. When a cut is truncated in the middle of a supervision segment,
should the supervision be kept.
:param num_jobs: The number of parallel workers.
:return: a new CutSet with cuts made from shorter duration windows.
"""
if num_jobs == 1:
new_cuts = []
for cut in self:
n_windows = ceil(cut.duration / duration)
for i in range(n_windows):
new_cuts.append(
cut.truncate(
offset=duration * i,
duration=duration,
keep_excessive_supervisions=keep_excessive_supervisions,
)
)
return CutSet(cuts={c.id: c for c in new_cuts})
from lhotse.manipulation import split_parallelize_combine
result = split_parallelize_combine(
num_jobs,
self,
CutSet.cut_into_windows,
duration=duration,
keep_excessive_supervisions=keep_excessive_supervisions,
)
return result
def sample(self, n_cuts: int = 1) -> Union[Cut, "CutSet"]:
"""
Randomly sample this ``CutSet`` and return ``n_cuts`` cuts.
When ``n_cuts`` is 1, will return a single cut instance; otherwise will return a ``CutSet``.
"""
assert n_cuts > 0
# TODO: We might want to make this more efficient in the future
# by holding a cached list of cut ids as a member of CutSet...
cut_indices = [random.randint(0, len(self) - 1) for _ in range(n_cuts)]
cuts = [self[idx] for idx in cut_indices]
if n_cuts == 1:
return cuts[0]
return CutSet.from_cuts(cuts)
def resample(self, sampling_rate: int, affix_id: bool = False) -> "CutSet":
"""
Return a new :class:`~lhotse.cut.CutSet` that contains cuts resampled to the new
``sampling_rate``. All cuts in the manifest must contain recording information.
If the feature manifests are attached, they are dropped.
:param sampling_rate: The new sampling rate.
:param affix_id: Should we modify the ID (useful if both versions of the same
cut are going to be present in a single manifest).
:return: a modified copy of the ``CutSet``.
"""
return self.map(lambda cut: cut.resample(sampling_rate, affix_id=affix_id))
def perturb_speed(self, factor: float, affix_id: bool = True) -> "CutSet":
"""
Return a new :class:`~lhotse.cut.CutSet` that contains speed perturbed cuts
with a factor of ``factor``. It requires the recording manifests to be present.
If the feature manifests are attached, they are dropped.
The supervision manifests are modified to reflect the speed perturbed
start times and durations.
:param factor: The resulting playback speed is ``factor`` times the original one.
:param affix_id: Should we modify the ID (useful if both versions of the same
cut are going to be present in a single manifest).
:return: a modified copy of the ``CutSet``.
"""
return self.map(lambda cut: cut.perturb_speed(factor=factor, affix_id=affix_id))
def perturb_tempo(self, factor: float, affix_id: bool = True) -> "CutSet":
"""
Return a new :class:`~lhotse.cut.CutSet` that contains tempo perturbed cuts
with a factor of ``factor``.
Compared to speed perturbation, tempo preserves pitch.
It requires the recording manifests to be present.
If the feature manifests are attached, they are dropped.
The supervision manifests are modified to reflect the tempo perturbed
start times and durations.
:param factor: The resulting playback tempo is ``factor`` times the original one.
:param affix_id: Should we modify the ID (useful if both versions of the same
cut are going to be present in a single manifest).
:return: a modified copy of the ``CutSet``.
"""
return self.map(lambda cut: cut.perturb_tempo(factor=factor, affix_id=affix_id))
def perturb_volume(self, factor: float, affix_id: bool = True) -> "CutSet":
"""
Return a new :class:`~lhotse.cut.CutSet` that contains volume perturbed cuts
with a factor of ``factor``. It requires the recording manifests to be present.
If the feature manifests are attached, they are dropped.
The supervision manifests are remaining the same.
:param factor: The resulting playback volume is ``factor`` times the original one.
:param affix_id: Should we modify the ID (useful if both versions of the same
cut are going to be present in a single manifest).
:return: a modified copy of the ``CutSet``.
"""
return self.map(
lambda cut: cut.perturb_volume(factor=factor, affix_id=affix_id)
)
def mix(
self,
cuts: "CutSet",
duration: Optional[Seconds] = None,
snr: Optional[Union[Decibels, Sequence[Decibels]]] = 20,
preserve_id: Optional[str] = None,
mix_prob: float = 1.0,
) -> "CutSet":
"""
Mix cuts in this ``CutSet`` with randomly sampled cuts from another ``CutSet``.
A typical application would be data augmentation with noise, music, babble, etc.
:param cuts: a ``CutSet`` containing cuts to be mixed into this ``CutSet``.
:param duration: an optional float in seconds.
When ``None``, we will preserve the duration of the cuts in ``self``
(i.e. we'll truncate the mix if it exceeded the original duration).
Otherwise, we will keep sampling cuts to mix in until we reach the specified
``duration`` (and truncate to that value, should it be exceeded).
:param snr: an optional float, or pair (range) of floats, in decibels.
When it's a single float, we will mix all cuts with this SNR level
(where cuts in ``self`` are treated as signals, and cuts in ``cuts`` are treated as noise).
When it's a pair of floats, we will uniformly sample SNR values from that range.
When ``None``, we will mix the cuts without any level adjustment
(could be too noisy for data augmentation).
:param preserve_id: optional string ("left", "right"). when specified, append will preserve the cut id
of the left- or right-hand side argument. otherwise, a new random id is generated.
:param mix_prob: an optional float in range [0, 1].
Specifies the probability of performing a mix.
Values lower than 1.0 mean that some cuts in the output will be unchanged.
:return: a new ``CutSet`` with mixed cuts.
"""
assert 0.0 <= mix_prob <= 1.0
assert duration is None or duration > 0
if isinstance(snr, (tuple, list)):
assert (
len(snr) == 2
), f"SNR range must be a list or tuple with exactly two values (got: {snr})"
else:
assert isinstance(snr, (type(None), int, float))
mixed_cuts = []
for cut in self:
# Check whether we're going to mix something into the current cut
# or pass it through unchanged.
if random.uniform(0.0, 1.0) > mix_prob:
mixed_cuts.append(cut)
continue
# Randomly sample a new cut from "cuts" to mix in.
to_mix = cuts.sample()
# Determine the SNR - either it's specified or we need to sample one.
snr = random.uniform(*snr) if isinstance(snr, (list, tuple)) else snr
# Actual mixing
mixed = cut.mix(other=to_mix, snr=snr, preserve_id=preserve_id)
# Did the user specify a duration?
# If yes, we will ensure that shorter cuts have more noise mixed in
# to "pad" them with at the end.
if duration is not None:
mixed_in_duration = to_mix.duration
# Keep sampling until we mixed in a "duration" amount of noise.
while mixed_in_duration < duration:
to_mix = cuts.sample()
# Keep the SNR constant for each cut from "self".
mixed = mixed.mix(
other=to_mix,
snr=snr,
offset_other_by=mixed_in_duration,
preserve_id=preserve_id,
)
# Since we're adding floats, we can be off by an epsilon and trigger
# some assertions for exceeding duration; do precautionary rounding here.
mixed_in_duration = round(
mixed_in_duration + to_mix.duration, ndigits=8
)
# We truncate the mixed to either the original duration or the requested duration.
mixed = mixed.truncate(
duration=cut.duration if duration is None else duration,
preserve_id=preserve_id is not None,
)
mixed_cuts.append(mixed)
return CutSet.from_cuts(mixed_cuts)
def drop_features(self) -> "CutSet":
"""
Return a new :class:`.CutSet`, where each :class:`.Cut` is copied and detached from its extracted features.
"""
return CutSet.from_cuts(c.drop_features() for c in self)
def drop_recordings(self) -> "CutSet":
"""
Return a new :class:`.CutSet`, where each :class:`.Cut` is copied and detached from its recordings.
"""
return CutSet.from_cuts(c.drop_recording() for c in self)
def drop_supervisions(self) -> "CutSet":
"""
Return a new :class:`.CutSet`, where each :class:`.Cut` is copied and detached from its supervisions.
"""
return CutSet.from_cuts(c.drop_supervisions() for c in self)
def compute_and_store_features(
self,
extractor: FeatureExtractor,
storage_path: Pathlike,
num_jobs: Optional[int] = None,
augment_fn: Optional[AugmentFn] = None,
storage_type: Type[FW] = LilcomHdf5Writer,
executor: Optional[Executor] = None,
mix_eagerly: bool = True,
progress_bar: bool = True,
) -> "CutSet":
"""
Extract features for all cuts, possibly in parallel,
and store them using the specified storage object.
Examples:
Extract fbank features on one machine using 8 processes,
store arrays partitioned in 8 HDF5 files with lilcom compression:
>>> cuts = CutSet(...)
... cuts.compute_and_store_features(
... extractor=Fbank(),
... storage_path='feats',
... num_jobs=8,
... )
Extract fbank features on one machine using 8 processes,
store each array in a separate file with lilcom compression:
>>> cuts = CutSet(...)
... cuts.compute_and_store_features(
... extractor=Fbank(),
... storage_path='feats',
... num_jobs=8,
... storage_type=LilcomFilesWriter
... )
Extract fbank features on multiple machines using a Dask cluster
with 80 jobs,
store arrays partitioned in 80 HDF5 files with lilcom compression:
>>> from distributed import Client
... cuts = CutSet(...)
... cuts.compute_and_store_features(
... extractor=Fbank(),
... storage_path='feats',
... num_jobs=80,
... executor=Client(...)
... )
Extract fbank features on one machine using 8 processes,
store each array in an S3 bucket (requires ``smart_open``):
>>> cuts = CutSet(...)
... cuts.compute_and_store_features(
... extractor=Fbank(),
... storage_path='s3://my-feature-bucket/my-corpus-features',
... num_jobs=8,
... storage_type=LilcomURLWriter
... )
:param extractor: A ``FeatureExtractor`` instance
(either Lhotse's built-in or a custom implementation).
:param storage_path: The path to location where we will store the features.
The exact type and layout of stored files will be dictated by the
``storage_type`` argument.
:param num_jobs: The number of parallel processes used to extract the features.
We will internally split the CutSet into this many chunks
and process each chunk in parallel.
:param augment_fn: an optional callable used for audio augmentation.
Be careful with the types of augmentations used: if they modify
the start/end/duration times of the cut and its supervisions,
you will end up with incorrect supervision information when using this API.
E.g. for speed perturbation, use ``CutSet.perturb_speed()`` instead.
:param storage_type: a ``FeaturesWriter`` subclass type.
It determines how the features are stored to disk,
e.g. separate file per array, HDF5 files with multiple arrays, etc.
:param executor: when provided, will be used to parallelize the feature extraction process.
By default, we will instantiate a ProcessPoolExecutor.
Learn more about the ``Executor`` API at
https://lhotse.readthedocs.io/en/latest/parallelism.html
:param mix_eagerly: Related to how the features are extracted for ``MixedCut``
instances, if any are present.
When False, extract and store the features for each track separately,
and mix them dynamically when loading the features.
When True, mix the audio first and store the mixed features,
returning a new ``MonoCut`` instance with the same ID.
The returned ``MonoCut`` will not have a ``Recording`` attached.
:param progress_bar: Should a progress bar be displayed (automatically turned off
for parallel computation).
:return: Returns a new ``CutSet`` with ``Features`` manifests attached to the cuts.
"""
from lhotse.manipulation import combine
from cytoolz import identity
# Pre-conditions and args setup
progress = (
identity # does nothing, unless we overwrite it with an actual prog bar
)
if num_jobs is None:
num_jobs = 1
if num_jobs == 1 and executor is not None:
logging.warning(
"Executor argument was passed but num_jobs set to 1: "
"we will ignore the executor and use non-parallel execution."
)
executor = None
# Non-parallel execution
if executor is None and num_jobs == 1:
if progress_bar:
progress = partial(
tqdm, desc="Extracting and storing features", total=len(self)
)
with storage_type(storage_path) as storage:
return CutSet.from_cuts(
progress(
cut.compute_and_store_features(
extractor=extractor,
storage=storage,
augment_fn=augment_fn,
mix_eagerly=mix_eagerly,
)
for cut in self
)
)
# HACK: support URL storage for writing
if "://" in str(storage_path):
# "storage_path" is actually an URL
def sub_storage_path(idx: int) -> str:
return f"{storage_path}/feats-{idx}"
else:
# We are now sure that "storage_path" will be the root for
# multiple feature storages - we can create it as a directory
storage_path = Path(storage_path)
storage_path.mkdir(parents=True, exist_ok=True)
def sub_storage_path(idx: int) -> str:
return storage_path / f"feats-{idx}"
# Parallel execution: prepare the CutSet splits
cut_sets = self.split(num_jobs, shuffle=True)
# Initialize the default executor if None was given
if executor is None:
executor = ProcessPoolExecutor(num_jobs)
# Submit the chunked tasks to parallel workers.
# Each worker runs the non-parallel version of this function inside.
futures = [
executor.submit(
CutSet.compute_and_store_features,
cs,
extractor=extractor,
storage_path=sub_storage_path(i),
augment_fn=augment_fn,
storage_type=storage_type,
mix_eagerly=mix_eagerly,
# Disable individual workers progress bars for readability
progress_bar=False,
)
for i, cs in enumerate(cut_sets)
]
if progress_bar:
progress = partial(
tqdm,
desc="Extracting and storing features (chunks progress)",
total=len(futures),
)
cuts_with_feats = combine(progress(f.result() for f in futures))
return cuts_with_feats
    def compute_and_store_features_batch(
        self,
        extractor: FeatureExtractor,
        storage_path: Pathlike,
        batch_duration: Seconds = 600.0,
        num_workers: int = 4,
        augment_fn: Optional[AugmentFn] = None,
        storage_type: Type[FW] = LilcomHdf5Writer,
    ) -> "CutSet":
        """
        Extract features for all cuts in batches.
        This method is intended for use with compatible feature extractors that
        implement an accelerated :meth:`~lhotse.FeatureExtractor.extract_batch` method.
        For example, ``kaldifeat`` extractors can be used this way (see, e.g.,
        :class:`~lhotse.KaldifeatFbank` or :class:`~lhotse.KaldifeatMfcc`).
        When a CUDA GPU is available and enabled for the feature extractor, this can
        be much faster than :meth:`.CutSet.compute_and_store_features`.
        Otherwise, the speed will be comparable to single-threaded extraction.
        Example: extract fbank features on one GPU, using 4 dataloading workers
        for reading audio, and store the arrays in an HDF5 file with
        lilcom compression::
            >>> from lhotse import KaldifeatFbank, KaldifeatFbankConfig
            >>> extractor = KaldifeatFbank(KaldifeatFbankConfig(device='cuda'))
            >>> cuts = CutSet(...)
            ... cuts = cuts.compute_and_store_features_batch(
            ...     extractor=extractor,
            ...     storage_path='feats',
            ...     batch_duration=500,
            ...     num_workers=4,
            ... )
        :param extractor: A :class:`~lhotse.features.base.FeatureExtractor` instance,
            which should implement an accelerated ``extract_batch`` method.
        :param storage_path: The path to location where we will store the features.
            The exact type and layout of stored files will be dictated by the
            ``storage_type`` argument.
        :param batch_duration: The maximum number of audio seconds in a batch.
            Determines batch size dynamically.
        :param num_workers: How many background dataloading workers should be used
            for reading the audio.
        :param augment_fn: an optional callable used for audio augmentation.
            Be careful with the types of augmentations used: if they modify
            the start/end/duration times of the cut and its supervisions,
            you will end up with incorrect supervision information when using this API.
            E.g. for speed perturbation, use ``CutSet.perturb_speed()`` instead.
        :param storage_type: a ``FeaturesWriter`` subclass type.
            It determines how the features are stored to disk,
            e.g. separate file per array, HDF5 files with multiple arrays, etc.
        :return: Returns a new ``CutSet`` with ``Features`` manifests attached to the cuts.
        """
        import torch
        from torch.utils.data import DataLoader
        from lhotse.dataset import SingleCutSampler, UnsupervisedWaveformDataset
        from lhotse.qa import validate_features
        frame_shift = extractor.frame_shift
        # Background workers read the audio; collate=False keeps the variable-length
        # waveforms as a list instead of padding them into one tensor.
        dataset = UnsupervisedWaveformDataset(collate=False)
        # Dynamic batching: each batch holds at most ``batch_duration`` seconds of audio.
        sampler = SingleCutSampler(self, max_duration=batch_duration)
        dloader = DataLoader(
            dataset, batch_size=None, sampler=sampler, num_workers=num_workers
        )
        cuts_with_feats = []
        with storage_type(storage_path) as writer, tqdm(
            desc="Computing features in batches", total=sampler.num_cuts
        ) as progress:
            for batch in dloader:
                cuts = batch["cuts"]
                waves = batch["audio"]
                # extract_batch is invoked once per batch, so every cut in the
                # batch must share a single sampling rate.
                assert all(c.sampling_rate == cuts[0].sampling_rate for c in cuts)
                # Optionally apply the augment_fn
                if augment_fn is not None:
                    waves = [
                        augment_fn(w, c.sampling_rate) for c, w in zip(cuts, waves)
                    ]
                # Move the audio data to the right device.
                waves = [w.to(extractor.device) for w in waves]
                # The actual extraction is here.
                with torch.no_grad():
                    # Note: chunk_size option limits the memory consumption
                    # for very long cuts.
                    features = extractor.extract_batch(
                        waves, sampling_rate=cuts[0].sampling_rate
                    )
                for cut, feat_mtx in zip(cuts, features):
                    if isinstance(cut, PaddingCut):
                        # For padding cuts, just fill out the fields in the manifest
                        # and don't store anything.
                        cuts_with_feats.append(
                            fastcopy(
                                cut,
                                num_frames=feat_mtx.shape[0],
                                num_features=feat_mtx.shape[1],
                                frame_shift=frame_shift,
                            )
                        )
                        continue
                    # Store the computed features and describe them in a manifest.
                    if isinstance(feat_mtx, torch.Tensor):
                        feat_mtx = feat_mtx.cpu().numpy()
                    storage_key = writer.write(cut.id, feat_mtx)
                    feat_manifest = Features(
                        start=cut.start,
                        duration=cut.duration,
                        type=extractor.name,
                        num_frames=feat_mtx.shape[0],
                        num_features=feat_mtx.shape[1],
                        frame_shift=frame_shift,
                        sampling_rate=cut.sampling_rate,
                        channels=0,
                        storage_type=writer.name,
                        storage_path=str(writer.storage_path),
                        storage_key=storage_key,
                    )
                    validate_features(feat_manifest, feats_data=feat_mtx)
                    # Update the cut manifest.
                    if isinstance(cut, MonoCut):
                        cut = fastcopy(cut, features=feat_manifest)
                    if isinstance(cut, MixedCut):
                        # If this was a mixed cut, we will just discard its
                        # recordings and create a new mono cut that has just
                        # the features attached.
                        cut = MonoCut(
                            id=cut.id,
                            start=0,
                            duration=cut.duration,
                            channel=0,
                            supervisions=cut.supervisions,
                            features=feat_manifest,
                            recording=None,
                        )
                    cuts_with_feats.append(cut)
                # Progress is tracked per batch, not per cut.
                progress.update(len(cuts))
        return CutSet.from_cuts(cuts_with_feats)
def compute_and_store_recordings(
self,
storage_path: Pathlike,
num_jobs: Optional[int] = None,
executor: Optional[Executor] = None,
augment_fn: Optional[AugmentFn] = None,
progress_bar: bool = True,
) -> "CutSet":
"""
Store waveforms of all cuts as audio recordings to disk.
:param storage_path: The path to location where we will store the audio recordings.
For each cut, a sub-directory will be created that starts with the first 3
characters of the cut's ID. The audio recording is then stored in the sub-directory
using the cut ID as filename and '.flac' as suffix.
:param num_jobs: The number of parallel processes used to store the audio recordings.
We will internally split the CutSet into this many chunks
and process each chunk in parallel.
:param augment_fn: an optional callable used for audio augmentation.
Be careful with the types of augmentations used: if they modify
the start/end/duration times of the cut and its supervisions,
you will end up with incorrect supervision information when using this API.
E.g. for speed perturbation, use ``CutSet.perturb_speed()`` instead.
:param executor: when provided, will be used to parallelize the process.
By default, we will instantiate a ProcessPoolExecutor.
Learn more about the ``Executor`` API at
https://lhotse.readthedocs.io/en/latest/parallelism.html
:param progress_bar: Should a progress bar be displayed (automatically turned off
for parallel computation).
:return: Returns a new ``CutSet``.
"""
from lhotse.manipulation import combine
from cytoolz import identity
# Pre-conditions and args setup
progress = (
identity # does nothing, unless we overwrite it with an actual prog bar
)
if num_jobs is None:
num_jobs = 1
if num_jobs == 1 and executor is not None:
logging.warning(
"Executor argument was passed but num_jobs set to 1: "
"we will ignore the executor and use non-parallel execution."
)
executor = None
def file_storage_path(cut: Cut, storage_path: Pathlike) -> Path:
# Introduce a sub-directory that starts with the first 3 characters of the cut's ID.
# This allows to avoid filesystem performance problems related to storing
# too many files in a single directory.
subdir = Path(storage_path) / cut.id[:3]
subdir.mkdir(exist_ok=True, parents=True)
return (subdir / cut.id).with_suffix(".flac")
# Non-parallel execution
if executor is None and num_jobs == 1:
if progress_bar:
progress = partial(
tqdm, desc="Storing audio recordings", total=len(self)
)
return CutSet.from_cuts(
progress(
cut.compute_and_store_recording(
storage_path=file_storage_path(cut, storage_path),
augment_fn=augment_fn,
)
for cut in self
)
)
# Parallel execution: prepare the CutSet splits
cut_sets = self.split(num_jobs, shuffle=True)
# Initialize the default executor if None was given
if executor is None:
executor = ProcessPoolExecutor(num_jobs)
# Submit the chunked tasks to parallel workers.
# Each worker runs the non-parallel version of this function inside.
futures = [
executor.submit(
CutSet.compute_and_store_recordings,
cs,
storage_path=storage_path,
augment_fn=augment_fn,
# Disable individual workers progress bars for readability
progress_bar=False,
)
for i, cs in enumerate(cut_sets)
]
if progress_bar:
progress = partial(
tqdm,
desc="Storing audio recordings (chunks progress)",
total=len(futures),
)
cuts = combine(progress(f.result() for f in futures))
return cuts
def compute_global_feature_stats(
self, storage_path: Optional[Pathlike] = None, max_cuts: Optional[int] = None
) -> Dict[str, np.ndarray]:
"""
Compute the global means and standard deviations for each feature bin in the manifest.
It follows the implementation in scikit-learn:
https://github.com/scikit-learn/scikit-learn/blob/0fb307bf39bbdacd6ed713c00724f8f871d60370/sklearn/utils/extmath.py#L715
which follows the paper:
"Algorithms for computing the sample variance: analysis and recommendations", by Chan, Golub, and LeVeque.
:param storage_path: an optional path to a file where the stats will be stored with pickle.
:param max_cuts: optionally, limit the number of cuts used for stats estimation. The cuts will be
selected randomly in that case.
:return a dict of ``{'norm_means': np.ndarray, 'norm_stds': np.ndarray}`` with the
shape of the arrays equal to the number of feature bins in this manifest.
"""
have_features = [cut.has_features for cut in self]
if not any(have_features):
raise ValueError(
"Could not find any features in this CutSet; did you forget to extract them?"
)
if not all(have_features):
logging.warning(
f"Computing global stats: only {sum(have_features)}/{len(have_features)} cuts have features."
)
return compute_global_stats(
# islice(X, 50) is like X[:50] except it works with lazy iterables
feature_manifests=islice(
(cut.features for cut in self if cut.has_features),
max_cuts if max_cuts is not None else len(self),
),
storage_path=storage_path,
)
def with_features_path_prefix(self, path: Pathlike) -> "CutSet":
return CutSet.from_cuts(c.with_features_path_prefix(path) for c in self)
def with_recording_path_prefix(self, path: Pathlike) -> "CutSet":
return CutSet.from_cuts(c.with_recording_path_prefix(path) for c in self)
def map(self, transform_fn: Callable[[Cut], Cut]) -> "CutSet":
"""
Apply `transform_fn` to the cuts in this :class:`.CutSet` and return a new :class:`.CutSet`.
:param transform_fn: A callable (function) that accepts a single cut instance
and returns a single cut instance.
:return: a new ``CutSet`` with transformed cuts.
"""
def verified(mapped: Any) -> Cut:
assert isinstance(
mapped, (MonoCut, MixedCut, PaddingCut)
), "The callable passed to CutSet.map() must return a Cut class instance."
return mapped
return CutSet.from_cuts(verified(transform_fn(c)) for c in self)
def modify_ids(self, transform_fn: Callable[[str], str]) -> "CutSet":
"""
Modify the IDs of cuts in this ``CutSet``.
Useful when combining multiple ``CutSet``s that were created from a single source,
but contain features with different data augmentations techniques.
:param transform_fn: A callable (function) that accepts a string (cut ID) and returns
a new string (new cut ID).
:return: a new ``CutSet`` with cuts with modified IDs.
"""
return CutSet.from_cuts(c.with_id(transform_fn(c.id)) for c in self)
def map_supervisions(
self, transform_fn: Callable[[SupervisionSegment], SupervisionSegment]
) -> "CutSet":
"""
Modify the SupervisionSegments by `transform_fn` in this CutSet.
:param transform_fn: a function that modifies a supervision as an argument.
:return: a new, modified CutSet.
"""
return CutSet.from_cuts(cut.map_supervisions(transform_fn) for cut in self)
def transform_text(self, transform_fn: Callable[[str], str]) -> "CutSet":
"""
Return a copy of this ``CutSet`` with all ``SupervisionSegments`` text transformed with ``transform_fn``.
Useful for text normalization, phonetic transcription, etc.
:param transform_fn: a function that accepts a string and returns a string.
:return: a new, modified CutSet.
"""
return self.map_supervisions(lambda s: s.transform_text(transform_fn))
def __repr__(self) -> str:
return f"CutSet(len={len(self)})"
def __contains__(self, item: Union[str, Cut]) -> bool:
if isinstance(item, str):
return item in self.cuts
else:
return item.id in self.cuts
def __getitem__(self, cut_id_or_index: Union[int, str]) -> "Cut":
if isinstance(cut_id_or_index, str):
return self.cuts[cut_id_or_index]
# ~100x faster than list(dict.values())[index] for 100k elements
return next(
val for idx, val in enumerate(self.cuts.values()) if idx == cut_id_or_index
)
    def __len__(self) -> int:
        # Number of cuts, backed by the underlying ID -> Cut dict.
        return len(self.cuts)
    def __iter__(self) -> Iterable[Cut]:
        # Iterate over the Cut objects; dict preserves insertion order.
        return iter(self.cuts.values())
def __add__(self, other: "CutSet") -> "CutSet":
merged_cuts = {**self.cuts, **other.cuts}
assert len(merged_cuts) == len(self.cuts) + len(other.cuts), (
f"Conflicting IDs when concatenating CutSets! "
f"Failed check: {len(merged_cuts)} == {len(self.cuts)} + {len(other.cuts)}"
)
return CutSet(cuts={**self.cuts, **other.cuts})
def make_windowed_cuts_from_features(
    feature_set: FeatureSet,
    cut_duration: Seconds,
    cut_shift: Optional[Seconds] = None,
    keep_shorter_windows: bool = False,
) -> CutSet:
    """
    Turn a FeatureSet into a CutSet by sliding a (possibly overlapping) window over each
    Features object and emitting a MonoCut per window position. Unless
    ``keep_shorter_windows`` is set, a trailing window that cannot cover the full
    ``cut_duration`` is dropped.
    :param feature_set: a FeatureSet object.
    :param cut_duration: float, duration of created Cuts in seconds.
    :param cut_shift: optional float, the gap in seconds between the starts of
        consecutive windows; defaults to ``cut_duration`` (i.e. no overlap).
    :param keep_shorter_windows: bool, when True, also emit the final window as a
        MonoCut even when it is shorter than ``cut_duration``.
    :return: a CutSet object.
    """
    if cut_shift is None:
        cut_shift = cut_duration
    round_fn = ceil if keep_shorter_windows else floor
    cuts = []
    for features in feature_set:
        # Rounding up includes the residual window in the output;
        # rounding down yields only full-duration windows.
        num_windows = round_fn(features.duration / cut_shift)
        last_window_end = (num_windows - 1) * cut_shift + cut_duration
        if last_window_end > features.duration and not keep_shorter_windows:
            num_windows -= 1
        for window_idx in range(num_windows):
            window_start = features.start + window_idx * cut_shift
            cuts.append(
                MonoCut(
                    id=str(uuid4()),
                    start=window_start,
                    duration=min(cut_duration, features.end - window_start),
                    channel=features.channels,
                    features=features,
                    supervisions=[],
                )
            )
    return CutSet.from_cuts(cuts)
def mix(
    reference_cut: Cut,
    mixed_in_cut: Cut,
    offset: Seconds = 0,
    snr: Optional[Decibels] = None,
    preserve_id: Optional[str] = None,
) -> MixedCut:
    """
    Overlay, or mix, two cuts. Optionally the ``mixed_in_cut`` may be shifted by ``offset`` seconds
    and scaled down (positive SNR) or scaled up (negative SNR).
    Returns a MixedCut, which contains both cuts and the mix information.
    The actual feature mixing is performed during the call to :meth:`~MixedCut.load_features`.
    :param reference_cut: The reference cut for the mix - offset and snr are specified w.r.t this cut.
    :param mixed_in_cut: The mixed-in cut - it will be offset and rescaled to match the offset and snr parameters.
    :param offset: How many seconds to shift the ``mixed_in_cut`` w.r.t. the ``reference_cut``.
    :param snr: Desired SNR of the ``right_cut`` w.r.t. the ``left_cut`` in the mix.
    :param preserve_id: optional string ("left", "right"). when specified, append will preserve the cut id
        of the left- or right-hand side argument. otherwise, a new random id is generated.
    :return: A :class:`~MixedCut` instance.
    """
    if (
        any(isinstance(cut, PaddingCut) for cut in (reference_cut, mixed_in_cut))
        and snr is not None
    ):
        warnings.warn(
            "You are mixing cuts to a padding cut with a specified SNR - "
            "the resulting energies would be extremely low or high. "
            "We are setting snr to None, so that the original signal energies will be retained instead."
        )
        snr = None
    if reference_cut.num_features is not None:
        assert reference_cut.num_features == mixed_in_cut.num_features, (
            "Cannot mix cuts with different feature " "dimensions. "
        )
    assert offset <= reference_cut.duration, (
        f"Cannot mix cut '{mixed_in_cut.id}' with offset {offset},"
        f" which is greater than cuts {reference_cut.id} duration"
        f" of {reference_cut.duration}"
    )
    assert reference_cut.sampling_rate == mixed_in_cut.sampling_rate, (
        f"Cannot mix cuts with different sampling rates "
        f"({reference_cut.sampling_rate} vs. "
        f"{mixed_in_cut.sampling_rate}). "
        f"Please resample the recordings first."
    )
    # Determine the ID of the result.
    if preserve_id is None:
        mixed_cut_id = str(uuid4())
    elif preserve_id == "left":
        mixed_cut_id = reference_cut.id
    elif preserve_id == "right":
        mixed_cut_id = mixed_in_cut.id
    else:
        raise ValueError(
            "Unexpected value for 'preserve_id' argument: "
            f"got '{preserve_id}', expected one of (None, 'left', 'right')."
        )

    def _combined_snr(track_snr: Optional[Decibels]) -> Optional[Decibels]:
        # Track SNRs are defined w.r.t. the first track's energy, so when both
        # the existing track and this mix specify an SNR, they compose additively.
        # (The previous chained-conditional version contained an always-true
        # ``snr is not None and track is not None`` clause and an unreachable
        # ``else None`` branch; this helper is the equivalent, explicit form.)
        if snr is None:
            # No new SNR specified - retain whatever the track had (possibly None).
            return track_snr
        if track_snr is None:
            # New SNR specified, none before - adopt the new value.
            return snr
        return track_snr + snr

    # When the left_cut is a MixedCut, take its existing tracks, otherwise create a new track.
    old_tracks = (
        reference_cut.tracks
        if isinstance(reference_cut, MixedCut)
        else [MixTrack(cut=reference_cut)]
    )
    # When the right_cut is a MixedCut, adapt its existing tracks with the new offset and snr,
    # otherwise create a new track.
    new_tracks = (
        [
            MixTrack(
                cut=track.cut,
                offset=round(track.offset + offset, ndigits=8),
                snr=_combined_snr(track.snr),
            )
            for track in mixed_in_cut.tracks
        ]
        if isinstance(mixed_in_cut, MixedCut)
        else [MixTrack(cut=mixed_in_cut, offset=offset, snr=snr)]
    )
    return MixedCut(id=mixed_cut_id, tracks=old_tracks + new_tracks)
def pad(
    cut: Cut,
    duration: Seconds = None,
    num_frames: int = None,
    num_samples: int = None,
    pad_feat_value: float = LOG_EPSILON,
    direction: str = "right",
    preserve_id: bool = False,
    pad_value_dict: Optional[Dict[str, Union[int, float]]] = None,
) -> Cut:
    """
    Return a new MixedCut, padded with zeros in the recording, and ``pad_feat_value`` in each feature bin.
    The user can choose to pad either to a specific `duration`; a specific number of frames `max_frames`;
    or a specific number of samples `num_samples`. The three arguments are mutually exclusive.
    :param cut: MonoCut to be padded.
    :param duration: The cut's minimal duration after padding.
    :param num_frames: The cut's total number of frames after padding.
    :param num_samples: The cut's total number of samples after padding.
    :param pad_feat_value: A float value that's used for padding the features.
        By default we assume a log-energy floor of approx. -23 (1e-10 after exp).
    :param direction: string, 'left', 'right' or 'both'. Determines whether the padding is added before or after
        the cut.
    :param preserve_id: When ``True``, preserves the cut ID before padding.
        Otherwise, a new random ID is generated for the padded cut (default).
    :param pad_value_dict: Optional dict that specifies what value should be used
        for padding arrays in custom attributes.
    :return: a padded MixedCut if duration is greater than this cut's duration, otherwise ``self``.
    """
    assert exactly_one_not_null(duration, num_frames, num_samples), (
        f"Expected only one of (duration, num_frames, num_samples) to be set: "
        f"got ({duration}, {num_frames}, {num_samples})"
    )
    if hasattr(cut, "custom") and isinstance(cut.custom, dict):
        from lhotse.array import TemporalArray
        arr_keys = [k for k, v in cut.custom.items() if isinstance(v, TemporalArray)]
        if len(arr_keys) > 0:
            # Bugfix: this expression used to end with a trailing comma, turning it
            # into a one-element tuple that is always truthy - so the warning below
            # could never fire. Now it is a proper boolean.
            padding_values_specified = pad_value_dict is not None and all(
                k in pad_value_dict for k in arr_keys
            )
            if not padding_values_specified:
                warnings.warn(
                    f"Cut being padded has custom TemporalArray attributes: {arr_keys}. "
                    f"We expected a 'pad_value_dict' argument with padding values for these attributes. "
                    f"We will proceed and use the default padding value (={DEFAULT_PADDING_VALUE})."
                )
    if duration is not None:
        if duration <= cut.duration:
            return cut
        total_num_frames = (
            compute_num_frames(
                duration=duration,
                frame_shift=cut.frame_shift,
                sampling_rate=cut.sampling_rate,
            )
            if cut.has_features
            else None
        )
        total_num_samples = (
            compute_num_samples(duration=duration, sampling_rate=cut.sampling_rate)
            if cut.has_recording
            else None
        )
    if num_frames is not None:
        assert cut.has_features, (
            "Cannot pad a cut using num_frames when it is missing pre-computed features "
            "(did you run cut.compute_and_store_features(...)?)."
        )
        total_num_frames = num_frames
        duration = total_num_frames * cut.frame_shift
        total_num_samples = (
            compute_num_samples(duration=duration, sampling_rate=cut.sampling_rate)
            if cut.has_recording
            else None
        )
        # It is possible that two cuts have the same number of frames,
        # but they differ in the number of samples.
        # In that case, we need to pad them anyway so that they have truly equal durations.
        if (
            total_num_frames <= cut.num_frames
            and duration <= cut.duration
            and (total_num_samples is None or total_num_samples <= cut.num_samples)
        ):
            return cut
    if num_samples is not None:
        assert cut.has_recording, (
            "Cannot pad a cut using num_samples when it is missing a Recording object "
            "(did you attach recording/recording set when creating the cut/cut set?)"
        )
        if num_samples <= cut.num_samples:
            return cut
        total_num_samples = num_samples
        duration = total_num_samples / cut.sampling_rate
        total_num_frames = (
            compute_num_frames(
                duration=duration,
                frame_shift=cut.frame_shift,
                sampling_rate=cut.sampling_rate,
            )
            if cut.has_features
            else None
        )
    padding_cut = PaddingCut(
        id=str(uuid4()),
        duration=round(duration - cut.duration, ndigits=8),
        feat_value=pad_feat_value,
        num_features=cut.num_features,
        # The num_frames and sampling_rate fields are tricky, because it is possible to create a MixedCut
        # from Cuts that have different sampling rates and frame shifts. In that case, we are assuming
        # that we should use the values from the reference cut, i.e. the first one in the mix.
        num_frames=(total_num_frames - cut.num_frames if cut.has_features else None),
        num_samples=(
            total_num_samples - cut.num_samples if cut.has_recording else None
        ),
        frame_shift=cut.frame_shift,
        sampling_rate=cut.sampling_rate,
        custom=pad_value_dict,
    )
    if direction == "right":
        padded = cut.append(padding_cut, preserve_id="left" if preserve_id else None)
    elif direction == "left":
        padded = padding_cut.append(cut, preserve_id="right" if preserve_id else None)
    elif direction == "both":
        # Split the padding in half and attach one half on each side of the cut.
        padded = (
            padding_cut.truncate(duration=padding_cut.duration / 2)
            .append(cut, preserve_id="right" if preserve_id else None)
            .append(
                padding_cut.truncate(duration=padding_cut.duration / 2),
                preserve_id="left" if preserve_id else None,
            )
        )
    else:
        raise ValueError(f"Unknown type of padding: {direction}")
    return padded
def append(
    left_cut: Cut,
    right_cut: Cut,
    snr: Optional[Decibels] = None,
    preserve_id: Optional[str] = None,
) -> MixedCut:
    """Helper method for functional-style appending of Cuts."""
    # Thin wrapper: delegates to Cut.append (right_cut starts where left_cut ends),
    # forwarding the optional SNR and the ID-preservation policy.
    return left_cut.append(right_cut, snr=snr, preserve_id=preserve_id)
def mix_cuts(cuts: Iterable[Cut]) -> MixedCut:
    """Return a MixedCut that consists of the input Cuts mixed with each other as-is."""
    # The following is a fold (accumulate/aggregate) operation; it starts with cuts[0], and mixes it with cuts[1];
    # then takes their mix and mixes it with cuts[2]; and so on.
    # Note: like functools.reduce, this raises TypeError for an empty iterable.
    return reduce(mix, cuts)
def append_cuts(cuts: Iterable[Cut]) -> Cut:
    """Return a MixedCut that consists of the input Cuts appended to each other as-is."""
    # The following is a fold (accumulate/aggregate) operation; it starts with cuts[0], and appends cuts[1] to it;
    # then takes their concatenation and appends cuts[2] to it; and so on.
    return reduce(append, cuts)
def compute_supervisions_frame_mask(
    cut: Cut,
    frame_shift: Optional[Seconds] = None,
    use_alignment_if_exists: Optional[str] = None,
):
    """
    Compute a mask that indicates which frames in a cut are covered by supervisions.
    :param cut: a cut object.
    :param frame_shift: optional frame shift in seconds; required when the cut does not have
        pre-computed features, otherwise ignored.
    :param use_alignment_if_exists: optional str (key from alignment dict); use the specified
        alignment type for generating the mask
    :returns a 1D numpy array with value 1 for **frames** covered by at least one supervision,
        and 0 for **frames** not covered by any supervision.
    """
    assert cut.has_features or frame_shift is not None, (
        f"No features available. "
        f"Either pre-compute features or provide frame_shift."
    )
    if cut.has_features:
        frame_shift = cut.frame_shift
        num_frames = cut.num_frames
    else:
        num_frames = compute_num_frames(
            duration=cut.duration,
            frame_shift=frame_shift,
            sampling_rate=cut.sampling_rate,
        )

    def _to_frame_span(start, end):
        # Convert a time span (seconds) to [start_frame, end_frame), clamping to
        # the cut boundaries: spans starting at/before 0 begin at frame 0, and
        # spans reaching the cut's end extend to the last frame. Extracted here
        # because the same computation used to be duplicated in both branches below.
        st = round(start / frame_shift) if start > 0 else 0
        et = round(end / frame_shift) if end < cut.duration else num_frames
        return st, et

    mask = np.zeros(num_frames, dtype=np.float32)
    for supervision in cut.supervisions:
        if (
            use_alignment_if_exists
            and supervision.alignment
            and use_alignment_if_exists in supervision.alignment
        ):
            # Finer granularity: mark each aligned item's span separately.
            for ali in supervision.alignment[use_alignment_if_exists]:
                st, et = _to_frame_span(ali.start, ali.end)
                mask[st:et] = 1.0
        else:
            # Mark the whole supervision segment.
            st, et = _to_frame_span(supervision.start, supervision.end)
            mask[st:et] = 1.0
    return mask
| 42.63588 | 154 | 0.603398 |
d5b4599697f43af6a9505c3c5d0eeedfa310efed | 407 | py | Python | aula6/sudp.py | MarciovsRocha/conectividade-sistemas-cyberfisicos | d76b8a540b55eb8a54ae99067b625010e85a2eb8 | [
"MIT"
] | null | null | null | aula6/sudp.py | MarciovsRocha/conectividade-sistemas-cyberfisicos | d76b8a540b55eb8a54ae99067b625010e85a2eb8 | [
"MIT"
] | null | null | null | aula6/sudp.py | MarciovsRocha/conectividade-sistemas-cyberfisicos | d76b8a540b55eb8a54ae99067b625010e85a2eb8 | [
"MIT"
] | null | null | null | import socket
import sys
porta = int(input('Digite a porta em que sera hospedado: '))
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
try:
s.bind(('',porta))
except:
print('Erro de bind.')
sys.exit()
while True:
input('Aperte [Enter] para continuar...')
data,addr = s.recvfrom(1024)
print(f'o IP: ({addr}) enviou a mensagem: "{data}"')
print('O servidor encerrou')
s.close() | 20.35 | 60 | 0.653563 |
1a7e8a14d89cee5cb5e502341802538b07f66821 | 510 | py | Python | tests/test_testing.py | Kludex/fastapi-authorization | 67765b28bd350bfef83288c4e4c36c55725f255a | [
"MIT"
] | 16 | 2021-11-30T20:34:43.000Z | 2022-03-11T18:41:19.000Z | tests/test_testing.py | Kludex/fastapi-authorization | 67765b28bd350bfef83288c4e4c36c55725f255a | [
"MIT"
] | null | null | null | tests/test_testing.py | Kludex/fastapi-authorization | 67765b28bd350bfef83288c4e4c36c55725f255a | [
"MIT"
] | 1 | 2021-12-12T00:07:08.000Z | 2021-12-12T00:07:08.000Z | from fastapi import FastAPI
from fastapi_authorization.rbac import RBAC
from fastapi_authorization.testing import auto_test_protected_endpoints
auth = RBAC(lambda: "admin")
auth.add_role("admin", permissions=["user:create", "user:read"])
auth.add_role("superadmin", permissions=["admin:create", "admin:read"])
auth.add_role("user", permissions=["user:read"])
app = FastAPI()
@app.get("/", dependencies=[auth.Permission("user:read")])
def get_endpoint():
...
auto_test_protected_endpoints(app, auth)
| 25.5 | 71 | 0.752941 |
f9356ae00423de6cd2b377079f76def3f4e36f41 | 2,863 | py | Python | ping.py | JackShak/ping | 3fe6477a869050b5fcde9110d9d30611c4f00ead | [
"CC0-1.0"
] | null | null | null | ping.py | JackShak/ping | 3fe6477a869050b5fcde9110d9d30611c4f00ead | [
"CC0-1.0"
] | null | null | null | ping.py | JackShak/ping | 3fe6477a869050b5fcde9110d9d30611c4f00ead | [
"CC0-1.0"
] | null | null | null | #Создай собственный Шутер!
from pygame import *
from random import randint
finish=False
font.init()
font = font.Font(None, 36)
win=font.render('YOU WIN!', True, (255, 215, 0))
p=0
u=0
bullets = sprite.Group()
class GameSprite(sprite.Sprite):
    # Base sprite: loads an image scaled to the given size and tracks its position and speed.
    def __init__(self, player_image, player_x, player_y, player_speed, player_width, player_height):
        super().__init__()
        self.image = transform.scale(image.load(player_image),(player_width, player_height))
        # NOTE(review): `player_speed` duplicates `speed` below; only `speed` is read elsewhere.
        self.player_speed = player_speed
        self.rect = self.image.get_rect()
        self.rect.x = player_x
        self.rect.y = player_y
        self.speed = player_speed
        self.width= player_width
        self.height=player_height
    def reset(self):
        # Draw the sprite at its current position onto the global window surface.
        window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
    """Keyboard-controlled paddle: left player uses W/S, right player uses the arrow keys."""
    def _move(self, up_key, down_key):
        # Shared movement logic: gol/gor used to duplicate this body and only
        # differed in which keys they read. Bounds 5/750 keep the paddle on screen.
        keys = key.get_pressed()
        if keys[up_key] and self.rect.y > 5:
            self.rect.y -= self.speed
        if keys[down_key] and self.rect.y < 750:
            self.rect.y += self.speed
    def gol(self):
        # Left paddle: W = up, S = down.
        self._move(K_w, K_s)
    def gor(self):
        # Right paddle: arrow up / arrow down.
        self._move(K_UP, K_DOWN)
ball = GameSprite("Daco_4417029.png", 500, 700, 10, 100, 100)
Rocket1 = Player("Tower_Dire_model.png", 10, 10, 10, 125, 300)
Rocket2 = Player("Tower_Radiant_model.png", 1400, 10, 10, 125, 300)
# create the game window
window = display.set_mode((1500, 900))
display.set_caption("ping")
background = transform.scale(
    image.load("Minimap_7.29.png"),
    (1500, 900)
)
speed_x = 10
speed_y = 10
o1=0
o2=0
events = event.get()  # NOTE(review): unused; events are polled inside the loop below
# set the scene background
clock= time.Clock()
FPS=300
clock.tick(FPS)
game = True
while game:
    # Window close button ends the game loop.
    for e in event.get():
        if e.type == QUIT:
            game = False
    clock.tick(FPS)
    if finish != True:
        window.blit(background, (0, 0))
        Rocket1.reset()
        Rocket1.gol()
        Rocket2.reset()
        Rocket2.gor()
        ball.reset()
        score1=font.render('Очки:' + str(o1), 1, (255, 215, 0))
        score2=font.render('Очки:' + str(o2), 1, (255, 215, 0))
        window.blit(score1, (1400, 50))
        window.blit(score2, (0, 50))
        display.update()
        ball.rect.x += speed_x
        ball.rect.y += speed_y
        # Bounce horizontally off paddles, vertically off the top/bottom walls.
        if sprite.collide_rect(Rocket1, ball) or sprite.collide_rect(Rocket2, ball):
            speed_x*=-1
        if ball.rect.y<0 or ball.rect.y>800:
            speed_y*=-1
        # A ball leaving through the left/right edge scores a point and respawns the ball.
        if ball.rect.x<0:
            o1+=1
            ball.kill()
            ball = GameSprite("Daco_4417029.png", 500, 700, 10, 100, 100)
        elif ball.rect.x>1500:
            o2+=1
            ball.kill()
            ball = GameSprite("Daco_4417029.png", 500, 700, 10, 100, 100)
92eabe98236677c31f4a7a279416a369036835c7 | 9,196 | py | Python | c7n/filters/vpc.py | CliffJumper/cloud-custodian | 47d2f0aa990d2179c8f6764ac53c12720069ddcb | [
"Apache-2.0"
] | 1 | 2018-06-27T14:51:12.000Z | 2018-06-27T14:51:12.000Z | c7n/filters/vpc.py | CliffJumper/cloud-custodian | 47d2f0aa990d2179c8f6764ac53c12720069ddcb | [
"Apache-2.0"
] | 2 | 2018-05-08T23:39:01.000Z | 2018-05-14T16:53:43.000Z | c7n/filters/vpc.py | CliffJumper/cloud-custodian | 47d2f0aa990d2179c8f6764ac53c12720069ddcb | [
"Apache-2.0"
] | 2 | 2019-11-22T14:54:28.000Z | 2021-06-18T13:49:15.000Z | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.exceptions import PolicyValidationError
from c7n.utils import local_session, type_schema
from .core import Filter, ValueFilter
from .related import RelatedResourceFilter
import jmespath
class SecurityGroupFilter(RelatedResourceFilter):
    """Filter a resource by its associated security groups."""

    # JSON schema: inherit the generic value-filter schema and add
    # 'match-resource' (compare against the resource itself) and
    # 'operator' (how multiple group matches combine) controls.
    schema = type_schema(
        'security-group', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})

    # Related resource type resolved for the join, and the annotation key
    # under which matching security-group ids are recorded on each resource.
    RelatedResource = "c7n.resources.vpc.SecurityGroup"
    AnnotationKey = "matched-security-groups"
class SubnetFilter(RelatedResourceFilter):
    """Filter a resource by its associated subnets."""

    # Same shape as SecurityGroupFilter's schema, keyed on 'subnet'.
    schema = type_schema(
        'subnet', rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})

    # Related resource type for the join; matched subnet ids are annotated
    # onto each resource under this key.
    RelatedResource = "c7n.resources.vpc.Subnet"
    AnnotationKey = "matched-subnets"
class DefaultVpcBase(Filter):
    """Filter to resources in a default vpc.

    The account's default vpc id is looked up lazily via the EC2 API on
    the first ``match`` call and cached on the instance afterwards.
    """

    vpcs = None
    default_vpc = None
    permissions = ('ec2:DescribeVpcs',)

    def match(self, vpc_id):
        """Return True if ``vpc_id`` is the account's default vpc.

        :param vpc_id: vpc identifier string to test.
        :returns: bool
        """
        if self.default_vpc is None:
            self.log.debug("querying default vpc %s" % vpc_id)
            client = local_session(self.manager.session_factory).client('ec2')
            vpcs = [v['VpcId'] for v
                    in client.describe_vpcs()['Vpcs']
                    if v['IsDefault']]
            if vpcs:
                self.default_vpc = vpcs.pop()
        # '==' already yields a bool; the former `... and True or False`
        # idiom was redundant.
        return vpc_id == self.default_vpc
class NetworkLocation(Filter):
    """On a network attached resource, determine intersection of
    security-group attributes, subnet attributes, and resource attributes.

    The use case is a bit specialized, for most use cases using `subnet`
    and `security-group` filters suffice. but say for example you wanted to
    verify that an ec2 instance was only using subnets and security groups
    with a given tag value, and that tag was not present on the resource.
    """

    schema = type_schema(
        'network-location',
        **{'missing-ok': {
            'type': 'boolean',
            'default': False,
            'description': (
                "How to handle missing keys on elements, by default this causes"
                "resources to be considered not-equal")},
           # NOTE(review): the default 'non-equal' is not a member of the
           # enum ['equal', 'not-equal'] — looks like a typo; confirm
           # against upstream before relying on the schema default.
           'match': {'type': 'string', 'enum': ['equal', 'not-equal'],
                     'default': 'non-equal'},
           'compare': {
               'type': 'array',
               'description': (
                   'Which elements of network location should be considered when'
                   ' matching.'),
               'default': ['resource', 'subnet', 'security-group'],
               'items': {
                   'enum': ['resource', 'subnet', 'security-group']}},
           'key': {
               'type': 'string',
               'description': 'The attribute expression that should be matched on'},
           'max-cardinality': {
               'type': 'integer', 'default': 1,
               'title': ''},
           'ignore': {'type': 'array', 'items': {'type': 'object'}},
           'required': ['key'],
           })
    permissions = ('ec2:DescribeSecurityGroups', 'ec2:DescribeSubnets')

    def validate(self):
        # This filter composes the resource's own subnet/security-group
        # filters, so both must be registered for the resource type.
        rfilters = self.manager.filter_registry.keys()
        if 'subnet' not in rfilters:
            raise PolicyValidationError(
                "network-location requires resource subnet filter availability on %s" % (
                    self.manager.data))
        if 'security-group' not in rfilters:
            raise PolicyValidationError(
                "network-location requires resource security-group filter availability on %s" % (
                    self.manager.data))
        return self

    def process(self, resources, event=None):
        """Evaluate each resource's network location; return the matches."""
        # Instantiate the resource type's related filters with empty data —
        # they are used only for id resolution and value extraction.
        self.sg = self.manager.filter_registry.get('security-group')({}, self.manager)
        related_sg = self.sg.get_related(resources)
        self.subnet = self.manager.filter_registry.get('subnet')({}, self.manager)
        related_subnet = self.subnet.get_related(resources)
        self.sg_model = self.manager.get_resource_manager('security-group').get_model()
        self.subnet_model = self.manager.get_resource_manager('subnet').get_model()
        self.vf = self.manager.filter_registry.get('value')({}, self.manager)

        # filter options
        key = self.data.get('key')
        self.compare = self.data.get('compare', ['subnet', 'security-group', 'resource'])
        self.max_cardinality = self.data.get('max-cardinality', 1)
        self.match = self.data.get('match', 'not-equal')
        self.missing_ok = self.data.get('missing-ok', False)

        results = []
        for r in resources:
            # Resolve this resource's groups/subnets, dropping any that
            # match an 'ignore' clause.
            resource_sgs = self.filter_ignored(
                [related_sg[sid] for sid in self.sg.get_related_ids([r])])
            resource_subnets = self.filter_ignored([
                related_subnet[sid] for sid in self.subnet.get_related_ids([r])])
            found = self.process_resource(r, resource_sgs, resource_subnets, key)
            if found:
                results.append(found)

        return results

    def filter_ignored(self, resources):
        """Drop resources matching any 'ignore' clause (all k/v of a clause
        are jmespath expressions that must equal the given value)."""
        ignores = self.data.get('ignore', ())
        results = []
        for r in resources:
            found = False
            for i in ignores:
                for k, v in i.items():
                    if jmespath.search(k, r) == v:
                        found = True
                if found is True:
                    break
            if found is True:
                continue
            results.append(r)
        return results

    def process_resource(self, r, resource_sgs, resource_subnets, key):
        """Collect location-mismatch evidence for one resource.

        Returns the resource (annotated under 'c7n:NetworkLocation' in
        not-equal mode) when it matches, else None.
        """
        evaluation = []

        if 'subnet' in self.compare and resource_subnets:
            # Map subnet id -> extracted attribute value for `key`.
            subnet_values = {
                rsub[self.subnet_model.id]: self.subnet.get_resource_value(key, rsub)
                for rsub in resource_subnets}
            if not self.missing_ok and None in subnet_values.values():
                evaluation.append({
                    'reason': 'SubnetLocationAbsent',
                    'subnets': subnet_values})
            # Distinct non-None values; more than max-cardinality distinct
            # locations is itself a violation.
            subnet_space = set(filter(None, subnet_values.values()))
            if len(subnet_space) > self.max_cardinality:
                evaluation.append({
                    'reason': 'SubnetLocationCardinality',
                    'subnets': subnet_values})

        if 'security-group' in self.compare and resource_sgs:
            sg_values = {
                rsg[self.sg_model.id]: self.sg.get_resource_value(key, rsg)
                for rsg in resource_sgs}
            if not self.missing_ok and None in sg_values.values():
                evaluation.append({
                    'reason': 'SecurityGroupLocationAbsent',
                    'security-groups': sg_values})
            sg_space = set(filter(None, sg_values.values()))
            if len(sg_space) > self.max_cardinality:
                evaluation.append({
                    'reason': 'SecurityGroupLocationCardinality',
                    'security-groups': sg_values})

        # NOTE(review): if both 'subnet' and 'security-group' are compared
        # but either resource_sgs or resource_subnets is empty, sg_space /
        # subnet_values are never assigned and this raises NameError —
        # confirm upstream whether empty relations are possible here.
        if ('subnet' in self.compare and
                'security-group' in self.compare and
                sg_space != subnet_space):
            evaluation.append({
                'reason': 'LocationMismatch',
                'subnets': subnet_values,
                'security-groups': sg_values})

        if 'resource' in self.compare:
            r_value = self.vf.get_resource_value(key, r)
            if not self.missing_ok and r_value is None:
                evaluation.append({
                    'reason': 'ResourceLocationAbsent',
                    'resource': r_value})
            elif 'security-group' in self.compare and resource_sgs and r_value not in sg_space:
                evaluation.append({
                    'reason': 'ResourceLocationMismatch',
                    'resource': r_value,
                    'security-groups': sg_values})
            elif 'subnet' in self.compare and resource_subnets and r_value not in subnet_space:
                evaluation.append({
                    'reason': 'ResourceLocationMismatch',
                    'resource': r_value,
                    'subnet': subnet_values})

        # not-equal mode: any evidence means the resource matches (and the
        # evidence is annotated); equal mode: no evidence means a match.
        if evaluation and self.match == 'not-equal':
            r['c7n:NetworkLocation'] = evaluation
            return r
        elif not evaluation and self.match == 'equal':
            return r
| 39.982609 | 97 | 0.58906 |
db19ca2687a1dd4b7175bd4842689dc373bb4234 | 21,924 | py | Python | functionaltests/api/v1/functional/test_acls.py | dmend/barbican | 5ff7b4ca1474225acabc36acedcf70a41946e6d0 | [
"Apache-2.0"
] | 177 | 2015-01-02T09:35:53.000Z | 2022-02-26T01:43:55.000Z | functionaltests/api/v1/functional/test_acls.py | dmend/barbican | 5ff7b4ca1474225acabc36acedcf70a41946e6d0 | [
"Apache-2.0"
] | 3 | 2015-06-23T19:07:31.000Z | 2017-08-19T04:38:11.000Z | functionaltests/api/v1/functional/test_acls.py | dmend/barbican | 5ff7b4ca1474225acabc36acedcf70a41946e6d0 | [
"Apache-2.0"
] | 87 | 2015-01-13T17:33:40.000Z | 2021-11-09T05:30:36.000Z | # Copyright (c) 2015 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testtools import testcase
from barbican.tests import utils
from functionaltests.api import base
from functionaltests.api.v1.behaviors import acl_behaviors
from functionaltests.api.v1.behaviors import consumer_behaviors
from functionaltests.api.v1.behaviors import container_behaviors
from functionaltests.api.v1.behaviors import secret_behaviors
from functionaltests.api.v1.models import acl_models
from functionaltests.api.v1.models import consumer_model
from functionaltests.api.v1.models import container_models
from functionaltests.api.v1.models import secret_models
from functionaltests.common import config
# Functional-test configuration plus the rbac personas used by the acl
# test matrix below: two projects (A and B), each with per-role users.
CONF = config.get_config()

admin_a = CONF.rbac_users.admin_a
creator_a = CONF.rbac_users.creator_a
observer_a = CONF.rbac_users.observer_a
auditor_a = CONF.rbac_users.auditor_a

admin_b = CONF.rbac_users.admin_b
observer_b = CONF.rbac_users.observer_b
def get_rbac_only():
    """Read ACL that leaves standard project rbac access enabled."""
    read_settings = {'project-access': True}
    return {'read': read_settings}
# A private secret/container is readable only by its creator or an admin.
def get_private():
    """Read ACL that disables project-wide access entirely."""
    read_settings = {'project-access': False}
    return {'read': read_settings}
def get_acl_only(reader_id):
    """Read ACL granting access to a single user, with project rbac off."""
    return {'read': {'project-access': False, 'users': [reader_id]}}
def get_rbac_plus_acl(reader_id):
    """Read ACL granting a single user access in addition to project rbac."""
    return {'read': {'project-access': True, 'users': [reader_id]}}
# ----------------------------------------------------------------------
# Parameterized datasets: expected HTTP status per persona for each ACL
# configuration. Project-A roles vs project-B roles exercise both the
# rbac path and the per-user acl path.
# ----------------------------------------------------------------------

# Secret read with rbac only: project-A admin/creator/observer may read,
# auditors and project-B users may not.
test_data_read_secret_rbac_only = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 200},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 403},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 403},
}

# Private secret: only creator and project admin may read.
test_data_read_secret_private = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 403},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 403},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 403},
}

# ACL-only (observer_b granted): the acl user can read despite being in
# project B; project rbac is disabled.
test_data_read_secret_acl_only = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 403},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 403},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 200},
}

# rbac plus acl: project-A rbac readers AND the acl-granted user succeed.
test_data_read_secret_rbac_plus_acl = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 200},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 403},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 200},
}

# Container variants: note auditors may read container metadata (200)
# where they may not read secret payloads.
test_data_read_container_rbac_only = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 200},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 200},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 403},
}

test_data_read_container_private = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 403},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 403},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 403},
}

test_data_read_container_acl_only = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 403},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 403},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 200},
}

test_data_read_container_rbac_plus_acl = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 200},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 200},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 200},
}

# Consumer operations: acl read access lets every persona list consumers...
test_data_read_container_consumer_acl_only = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 200},
    'with_observer_a': {'user': observer_a, 'expected_return': 200},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 200},
    'with_admin_b': {'user': admin_b, 'expected_return': 200},
    'with_observer_b': {'user': observer_b, 'expected_return': 200},
}

# ...but only the owning project's admin may delete a consumer...
test_data_delete_container_consumer_acl_only = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 403},
    'with_observer_a': {'user': observer_a, 'expected_return': 403},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 403},
    'with_admin_b': {'user': admin_b, 'expected_return': 403},
    'with_observer_b': {'user': observer_b, 'expected_return': 403},
}

# ...while either project's admin may create one.
test_data_create_container_consumer_acl_only = {
    'with_admin_a': {'user': admin_a, 'expected_return': 200},
    'with_creator_a': {'user': creator_a, 'expected_return': 403},
    'with_observer_a': {'user': observer_a, 'expected_return': 403},
    'with_auditor_a': {'user': auditor_a, 'expected_return': 403},
    'with_admin_b': {'user': admin_b, 'expected_return': 200},
    'with_observer_b': {'user': observer_b, 'expected_return': 403},
}
@utils.parameterized_test_case
class AclTestCase(base.TestCase):
    """Functional tests exercising ACL Features"""

    def setUp(self):
        """Create the behavior helpers that drive the Barbican REST API."""
        super(AclTestCase, self).setUp()
        self.secret_behaviors = secret_behaviors.SecretBehaviors(self.client)
        self.container_behaviors = container_behaviors.ContainerBehaviors(
            self.client)
        self.acl_behaviors = acl_behaviors.AclBehaviors(self.client)
        self.consumer_behaviors = consumer_behaviors.ConsumerBehaviors(
            self.client)

    def tearDown(self):
        """Delete everything created during the test, acls first."""
        self.acl_behaviors.delete_all_created_acls()
        self.secret_behaviors.delete_all_created_secrets()
        self.container_behaviors.delete_all_created_containers()
        self.consumer_behaviors.delete_all_created_consumers()
        super(AclTestCase, self).tearDown()

    @utils.parameterized_dataset(test_data_read_secret_rbac_only)
    def test_secret_read_default(self, user, expected_return):
        """Read a secret with no explicit acl (default rbac applies)."""
        secret_ref = self.store_secret()
        status = self.get_secret(secret_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_secret_rbac_only)
    def test_secret_read_rbac_only(self, user, expected_return):
        """Read a secret whose acl explicitly keeps project access on."""
        secret_ref = self.store_secret()
        self.set_secret_acl(secret_ref, get_rbac_only())
        status = self.get_secret(secret_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_secret_private)
    def test_secret_read_private(self, user, expected_return):
        """Read a secret marked private (creator/admin only)."""
        secret_ref = self.store_secret()
        self.set_secret_acl(secret_ref, get_private())
        status = self.get_secret(secret_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_secret_acl_only)
    def test_secret_read_acl_only(self, user, expected_return):
        """Read a secret where only observer_b is granted via acl."""
        secret_ref = self.store_secret()
        user_id = self.secret_behaviors.get_user_id_from_name(observer_b)
        self.set_secret_acl(secret_ref, get_acl_only(user_id))
        status = self.get_secret(secret_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_secret_rbac_plus_acl)
    def test_secret_read_rbac_plus_acl(self, user, expected_return):
        """Read a secret with both project rbac and an acl grant."""
        secret_ref = self.store_secret()
        user_id = self.secret_behaviors.get_user_id_from_name(observer_b)
        self.set_secret_acl(secret_ref, get_rbac_plus_acl(user_id))
        status = self.get_secret(secret_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_container_rbac_only)
    def test_container_read_default(self, user, expected_return):
        """Read a container with no explicit acl."""
        container_ref = self.store_container()
        status = self.get_container(container_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_container_rbac_only)
    def test_container_read_rbac_only(self, user, expected_return):
        """Read a container whose acl keeps project access enabled."""
        container_ref = self.store_container()
        self.set_container_acl(container_ref, get_rbac_only())
        status = self.get_container(container_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_container_private)
    def test_container_read_private(self, user, expected_return):
        """Read a container marked private."""
        container_ref = self.store_container()
        self.set_container_acl(container_ref, get_private())
        status = self.get_container(container_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_container_acl_only)
    def test_container_read_acl_only(self, user, expected_return):
        """Read a container where only observer_b is granted via acl."""
        container_ref = self.store_container()
        user_id = self.container_behaviors.get_user_id_from_name(observer_b)
        self.set_container_acl(container_ref, get_acl_only(user_id))
        status = self.get_container(container_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_container_rbac_plus_acl)
    def test_container_read_rbac_plus_acl(self, user, expected_return):
        """Read a container with both project rbac and an acl grant."""
        container_ref = self.store_container()
        user_id = self.container_behaviors.get_user_id_from_name(observer_b)
        self.set_container_acl(container_ref, get_rbac_plus_acl(user_id))
        status = self.get_container(container_ref, user_name=user)
        self.assertEqual(expected_return, status)

    @utils.parameterized_dataset(test_data_read_container_consumer_acl_only)
    def test_container_acl_read_consumers(self, user, expected_return):
        """Acl access will not allow you to see the list of consumers"""
        container_ref = self.store_container(user_name=creator_a,
                                             admin=admin_a)
        consumer_model = get_consumer_model()
        resp, consumer_data = self.consumer_behaviors.create_consumer(
            model=consumer_model,
            container_ref=container_ref,
            user_name=admin_a)
        self.assertEqual(200, resp.status_code)

        user_id = self.container_behaviors.get_user_id_from_name(user)
        self.set_container_acl(container_ref, get_acl_only(user_id))

        # Verify all users granted acl access can read the container
        status_code = self.get_container(container_ref, user_name=user)
        self.assertEqual(200, status_code)

        resp, consumers, next_ref, prev_ref = \
            self.consumer_behaviors.get_consumers(container_ref,
                                                  user_name=user)
        self.assertEqual(expected_return, resp.status_code)

    @utils.parameterized_dataset(test_data_delete_container_consumer_acl_only)
    def test_container_acl_remove_consumer(self, user, expected_return):
        """Acl access will not allow you to delete a consumer"""
        container_ref = self.store_container(user_name=creator_a,
                                             admin=admin_a)
        consumer_model = get_consumer_model()
        resp, consumer_data = self.consumer_behaviors.create_consumer(
            model=consumer_model,
            container_ref=container_ref,
            user_name=admin_a)
        self.assertEqual(200, resp.status_code)

        user_id = self.container_behaviors.get_user_id_from_name(user)
        self.set_container_acl(container_ref, get_acl_only(user_id))

        # Verify all users granted acl access can read the container
        status_code = self.get_container(container_ref, user_name=user)
        self.assertEqual(200, status_code)

        resp, consumer_data = self.consumer_behaviors.delete_consumer(
            model=consumer_model,
            container_ref=container_ref,
            user_name=user)
        self.assertEqual(expected_return, resp.status_code)

    @utils.parameterized_dataset(test_data_create_container_consumer_acl_only)
    def test_container_acl_create_consumer(self, user, expected_return):
        """Acl access will not allow you to add a consumer"""
        container_ref = self.store_container(user_name=creator_a,
                                             admin=admin_a)

        user_id = self.container_behaviors.get_user_id_from_name(user)
        self.set_container_acl(container_ref, get_acl_only(user_id))

        # Verify all users granted acl access can read the container
        status_code = self.get_container(container_ref, user_name=user)
        self.assertEqual(200, status_code)

        consumer_model = get_consumer_model()
        resp, consumer_data = self.consumer_behaviors.create_consumer(
            model=consumer_model,
            container_ref=container_ref,
            user_name=user)
        self.assertEqual(expected_return, resp.status_code)

    @testcase.attr('negative')
    def test_secret_acl_auditor_with_acl_cannot_read(self):
        """Auditor granted access to a secret cannot read that secret"""
        secret_ref = self.store_secret()
        self.set_secret_acl(secret_ref, get_rbac_plus_acl(auditor_a))
        status_code = self.get_secret(secret_ref=secret_ref,
                                      user_name=auditor_a)
        self.assertEqual(403, status_code)

    @testcase.attr('negative')
    def test_secret_acl_put_as_observer(self):
        """Observer can not put to a secret when granted access via acl"""
        secret_no_payload = {
            "name": "AES key",
            "expiration": "2030-02-28T19:14:44.180394",
            "algorithm": "aes",
            "bit_length": 256,
            "mode": "cbc",
        }
        secret_model = secret_models.SecretModel(**secret_no_payload)
        resp, secret_ref = self.secret_behaviors.create_secret(
            model=secret_model,
            user_name=creator_a)
        self.set_secret_acl(secret_ref, get_rbac_plus_acl(observer_a))

        # Update
        payload = "gF6+lLoF3ohA9aPRpt+6bQ=="
        payload_content_type = "application/octet-stream"
        payload_content_encoding = "base64"

        update_resp = self.secret_behaviors.update_secret_payload(
            secret_ref,
            user_name=observer_a,
            payload=payload,
            payload_content_type=payload_content_type,
            payload_content_encoding=payload_content_encoding)
        self.assertEqual(403, update_resp.status_code)

    # ----------------------- Secret ACL Tests ---------------------------

    @testcase.attr('negative', 'security')
    def test_secret_read_acl_no_token(self):
        """Reading a secret acl without a token yields 401."""
        secret_ref = self.store_secret()
        acl_ref = '{0}/acl'.format(secret_ref)
        resp = self.acl_behaviors.get_acl(acl_ref, use_auth=False)
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_set_acl_no_token(self):
        """Setting a secret acl without a token yields 401."""
        secret_ref = self.store_secret()
        resp = self.set_secret_acl(secret_ref, get_rbac_only(), use_auth=False)
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_delete_acl_no_token(self):
        """Deleting a secret acl without a token yields 401."""
        secret_ref = self.store_secret()
        acl_ref = '{0}/acl'.format(secret_ref)
        resp = self.acl_behaviors.delete_acl(
            acl_ref, expected_fail=True, use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_secret_update_acl_no_token(self):
        """Updating a secret acl without a token yields 401."""
        secret_ref = self.store_secret()
        acl_ref = '{0}/acl'.format(secret_ref)
        resp = self.set_secret_acl(secret_ref, get_rbac_only())
        self.assertEqual(200, resp.status_code)
        resp = self.acl_behaviors.update_acl(acl_ref, {}, use_auth=False)
        self.assertEqual(401, resp.status_code)

    # ----------------------- Container ACL Tests ---------------------------

    @testcase.attr('negative', 'security')
    def test_container_read_acl_no_token(self):
        """Reading a container acl without a token yields 401."""
        container_ref = self.store_container()
        acl_ref = '{0}/acl'.format(container_ref)
        resp = self.acl_behaviors.get_acl(acl_ref, use_auth=False)
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_container_set_acl_no_token(self):
        """Setting a container acl without a token yields 401."""
        container_ref = self.store_container()
        resp = self.set_container_acl(
            container_ref, get_rbac_only(), use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_container_delete_acl_no_token(self):
        """Deleting a container acl without a token yields 401."""
        container_ref = self.store_container()
        acl_ref = '{0}/acl'.format(container_ref)
        resp = self.acl_behaviors.delete_acl(
            acl_ref, expected_fail=True, use_auth=False
        )
        self.assertEqual(401, resp.status_code)

    @testcase.attr('negative', 'security')
    def test_container_update_acl_no_token(self):
        """Updating a container acl without a token yields 401."""
        container_ref = self.store_container()
        acl_ref = '{0}/acl'.format(container_ref)
        resp = self.set_container_acl(container_ref, get_rbac_only())
        self.assertEqual(200, resp.status_code)
        resp = self.acl_behaviors.update_acl(acl_ref, {}, use_auth=False)
        self.assertEqual(401, resp.status_code)

    # ----------------------- Helper Functions ---------------------------

    def store_secret(self, user_name=creator_a, admin=admin_a):
        """Create a default secret; assert 201 and return its ref."""
        test_model = secret_models.SecretModel(
            **get_default_secret_data())
        resp, secret_ref = self.secret_behaviors.create_secret(
            test_model, user_name=user_name, admin=admin)
        self.assertEqual(201, resp.status_code)
        return secret_ref

    def get_secret(self, secret_ref, user_name=creator_a):
        """Fetch the secret payload as the given user; return the status."""
        resp = self.secret_behaviors.get_secret(
            secret_ref, 'application/octet-stream',
            user_name=user_name)
        return resp.status_code

    def set_secret_acl(self, secret_ref, acl, use_auth=True,
                       user_name=creator_a):
        """Apply an acl dict to a secret; assert 200 on authed calls."""
        test_model = acl_models.AclModel(**acl)
        resp = self.acl_behaviors.create_acl(
            secret_ref, test_model, use_auth=use_auth, user_name=user_name)
        if use_auth:
            self.assertEqual(200, resp.status_code)
        return resp

    def store_container(self, user_name=creator_a, admin=admin_a):
        """Create a container wrapping a fresh secret; return its ref."""
        secret_ref = self.store_secret(user_name=user_name, admin=admin)

        test_model = container_models.ContainerModel(
            **get_container_req(secret_ref))
        resp, container_ref = self.container_behaviors.create_container(
            test_model, user_name=user_name, admin=admin)
        self.assertEqual(201, resp.status_code)
        return container_ref

    def get_container(self, container_ref, user_name=creator_a):
        """Fetch container metadata as the given user; return the status."""
        resp = self.container_behaviors.get_container(
            container_ref, user_name=user_name)
        return resp.status_code

    def set_container_acl(self, container_ref, acl, use_auth=True,
                          user_name=creator_a):
        """Apply an acl dict to a container; assert 200 on authed calls."""
        test_model = acl_models.AclModel(**acl)
        resp = self.acl_behaviors.create_acl(
            container_ref, test_model, use_auth=use_auth, user_name=user_name)
        if use_auth:
            self.assertEqual(200, resp.status_code)
        return resp
# ----------------------- Support Functions ---------------------------

def get_default_secret_data():
    """Default request body for storing a test secret (base64 AES key)."""
    return {
        "name": "AES key",
        "expiration": "2050-02-28T19:14:44.180394",
        "algorithm": "aes",
        "bit_length": 256,
        "mode": "cbc",
        "payload": get_default_payload(),
        "payload_content_type": "application/octet-stream",
        "payload_content_encoding": "base64",
    }
def get_default_payload():
    """Base64-encoded payload used by the default secret fixture."""
    payload_b64 = 'Z0Y2K2xMb0Yzb2hBOWFQUnB0KzZiUT09'
    return payload_b64
def get_container_req(secret_ref):
    """Build a generic-container request wrapping the given secret ref."""
    secret_entry = {'name': 'secret1', 'secret_ref': secret_ref}
    return {
        "name": "testcontainer",
        "type": "generic",
        "secret_refs": [secret_entry],
    }
def get_consumer_model():
    """Build the consumer fixture registered against test containers."""
    test_consumer_model = consumer_model.ConsumerModel(
        name="consumername",
        URL="consumerURL"
    )
    return test_consumer_model
| 42.820313 | 79 | 0.693441 |
5388e013f7cda73b5341e5701c6155f6cb8997ce | 976 | py | Python | isi_sdk_8_2_2/test/test_network_groupnets_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_2/test/test_network_groupnets_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_2/test/test_network_groupnets_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.models.network_groupnets_extended import NetworkGroupnetsExtended # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestNetworkGroupnetsExtended(unittest.TestCase):
    """NetworkGroupnetsExtended unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures required.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testNetworkGroupnetsExtended(self):
        """Test NetworkGroupnetsExtended"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_2_2.models.network_groupnets_extended.NetworkGroupnetsExtended()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this module directly with the unittest CLI runner.
    unittest.main()
| 23.804878 | 106 | 0.727459 |
91f4c5f4f4df9373d41f8c7438375bb4ccc1fafc | 731 | py | Python | src/problem5.py | aitc-h/euler | 6fc07c741c31a632ce6f11f65c11007cd6c7eb29 | [
"MIT"
] | null | null | null | src/problem5.py | aitc-h/euler | 6fc07c741c31a632ce6f11f65c11007cd6c7eb29 | [
"MIT"
] | null | null | null | src/problem5.py | aitc-h/euler | 6fc07c741c31a632ce6f11f65c11007cd6c7eb29 | [
"MIT"
] | null | null | null | """
Problem 5
Smallest multiple
"""
from utility.decorators import timeit, printit
from utility import math_f
def convert(l):
    """Count occurrences of each element of *l*.

    :param l: iterable of hashable values (here, prime divisors).
    :returns: dict mapping element -> frequency.
    """
    out = {}
    for item in l:
        # dict.get with a default collapses the membership test + branch.
        out[item] = out.get(item, 0) + 1
    return out
def redu(d):
    """Multiply together base ** exponent over every (base, exponent) pair."""
    product = 1
    for base, exponent in d.items():
        product *= base ** exponent
    return product
@printit
@timeit
def run(n):
    """Project Euler 5: smallest positive number evenly divisible by 1..n.

    For every prime <= n we track the highest power of that prime that
    appears in the factorization of any number in 2..n; the answer is the
    product of those maximal prime powers.

    :param n: upper bound of the divisor range (inclusive).
    :returns: the smallest multiple.
    """
    primes = math_f.sieve_of_eratosthenes(n)
    freq = {p: 0 for p in primes}
    # Bug fix: the range must include n itself. With range(2, n) the factor
    # 3**2 for n == 9 (contributed only by 9) would be missed, yielding a
    # wrong answer for such n.
    for i in range(2, n + 1):
        counts = convert(math_f.get_prime_divisors(i))
        for prime, count in counts.items():
            # .get guards against a prime missing from the initial table.
            if freq.get(prime, 0) < count:
                freq[prime] = count
    ans = redu(freq)
    return ans
if __name__ == "__main__":
n = 20
run(n)
| 16.244444 | 46 | 0.515732 |
c0dce64f3d30b2d7bdbb3505630148d3037860bf | 1,940 | py | Python | src/sc_studio/ccd_image_view.py | nkming2/sc-studio | 0261e8ac19ade6fe48ca8f1184321d0b89fb9a7f | [
"MIT"
] | null | null | null | src/sc_studio/ccd_image_view.py | nkming2/sc-studio | 0261e8ac19ade6fe48ca8f1184321d0b89fb9a7f | [
"MIT"
] | null | null | null | src/sc_studio/ccd_image_view.py | nkming2/sc-studio | 0261e8ac19ade6fe48ca8f1184321d0b89fb9a7f | [
"MIT"
] | null | null | null | '''
sc_studio.ccd_image_view
Author: Ming Tsang
Copyright (c) 2014-2015 HKUST SmartCar Team
Refer to LICENSE for details
'''
import binascii
import logging
import time
import tkinter
from tkinter import Tk, Text
from sc_studio import config
from sc_studio.view import View
class CcdImageView(View):
    """Tk window rendering linear-CCD scan lines as ASCII art.

    Each incoming hex-encoded sample line is thresholded into '#'/' '
    characters, appended to a Text widget, and mirrored to a log file.
    """

    def __init__(self, params):
        # params: dict with optional "ccd_id" (int, default 0) and
        # "threshold" (pixel cutoff, default 128).
        super(CcdImageView, self).__init__(params)
        self._ccd_id = int(params["ccd_id"]) if "ccd_id" in params else 0
        self._threshold = params["threshold"] if "threshold" in params else 128

        self._tk = Tk()
        self._text = Text(self._tk, width = 128, bg = config.COL_GREY_900,
                fg = config.COL_GREY_100, font = (config.FONT, 5))
        self._tk.title("CCD image view [" + str(self._ccd_id) + ']')
        self._text.pack(side = tkinter.LEFT, fill = tkinter.Y)
        self._tk.protocol("WM_DELETE_WINDOW", self.on_press_close)

        # Timestamped dump file so successive runs do not overwrite each other.
        self._file = open("ccd_image_" + str(self._ccd_id) + '_' \
                + str(int(time.time() * 1000)) + ".txt", "w")

    def run(self):
        """Start the IO machinery (base class) and enter the Tk loop."""
        super(CcdImageView, self).run()
        self._tk.mainloop()

    def on_new_input(self):
        """Consume one hex line from the IO side, then display and log it."""
        try:
            hex_str = self.get_input()
            # Leading byte identifies the source CCD; ignore other sensors.
            if int(hex_str[0:2], 16) != self._ccd_id:
                return
            line = self._get_line(hex_str[2:])
        except Exception as e:
            # Malformed input is logged and dropped, never fatal.
            logging.debug(str(e))
            return

        string = line.decode("UTF-8")
        self._text.insert(tkinter.END, string)
        self._text.insert(tkinter.END, '\n')
        # Trim oldest lines until the newest line is visible at the bottom.
        while self._text.yview()[1] != 1.0:
            self._text.delete(1.0, 2.0)

        self._file.write(time.strftime("[%x %X] "))
        self._file.write(string)
        self._file.write('\n')

    def on_dismiss(self):
        # Defer teardown onto the Tk event loop thread.
        self._tk.after_idle(self.on_press_close)

    def on_press_close(self):
        self._tk.destroy()
        self.join_io_thread()

    def _get_line(self, hex_str):
        """Decode 128 hex-encoded pixels into a b'#'/b' ' ASCII row.

        Returns None when the hex string cannot be decoded.
        """
        try:
            hex_data = binascii.unhexlify(hex_str)
        except TypeError as e:
            logging.debug(str(e))
            return

        line = bytearray(128)
        for i in range(128):
            # Threshold each pixel: bright -> '#', dark -> ' '.
            line[i] = ord('#') if hex_data[i] > self._threshold else ord(' ')
        return line
| 24.871795 | 73 | 0.687113 |
db9ac49929e44948ee73a990762848f6de2d681e | 26,556 | py | Python | recipes/arrow/all/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | null | null | null | recipes/arrow/all/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | 1 | 2021-11-22T13:54:48.000Z | 2021-11-22T14:09:45.000Z | recipes/arrow/all/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | null | null | null | from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class ArrowConan(ConanFile):
name = "arrow"
description = "Apache Arrow is a cross-language development platform for in-memory data"
topics = ("arrow", "memory")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://arrow.apache.org/"
license = ("Apache-2.0",)
generators = "cmake", "cmake_find_package_multi"
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"gandiva": [True, False],
"parquet": [True, False],
"plasma": [True, False],
"cli": [True, False],
"compute": ["auto", True, False],
"dataset_modules": [True, False],
"deprecated": [True, False],
"encryption": [True, False],
"filesystem_layer": [True, False],
"hdfs_bridgs": [True, False],
"simd_level": [None, "default", "sse4_2", "avx2", "avx512", "neon", ],
"runtime_simd_level": [None, "sse4_2", "avx2", "avx512", "max"],
"with_backtrace": [True, False],
"with_boost": ["auto", True, False],
"with_csv": [True, False],
"with_cuda": [True, False],
"with_flight_rpc": [True, False],
"with_gflags": ["auto", True, False],
"with_glog": ["auto", True, False],
"with_grpc": ["auto", True, False],
"with_hiveserver2": [True, False],
"with_jemalloc": ["auto", True, False],
"with_json": [True, False],
"with_llvm": ["auto", True, False],
"with_openssl": ["auto", True, False],
"with_orc": [True, False],
"with_protobuf": ["auto", True, False],
"with_re2": ["auto", True, False],
"with_s3": [True, False],
"with_utf8proc": ["auto", True, False],
"with_brotli": [True, False],
"with_bz2": [True, False],
"with_lz4": [True, False],
"with_snappy": [True, False],
"with_zlib": [True, False],
"with_zstd": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"gandiva": False,
"parquet": False,
"plasma": False,
"cli": False,
"compute": "auto",
"dataset_modules": False,
"deprecated": True,
"encryption": False,
"filesystem_layer": False,
"hdfs_bridgs": False,
"simd_level": "default",
"runtime_simd_level": "max",
"with_backtrace": False,
"with_boost": "auto",
"with_brotli": False,
"with_bz2": False,
"with_csv": False,
"with_cuda": False,
"with_flight_rpc": False,
"with_gflags": "auto",
"with_jemalloc": "auto",
"with_glog": "auto",
"with_grpc": "auto",
"with_hiveserver2": False,
"with_json": False,
"with_llvm": "auto",
"with_openssl": "auto",
"with_orc": False,
"with_protobuf": "auto",
"with_re2": "auto",
"with_s3": False,
"with_utf8proc": "auto",
"with_lz4": False,
"with_snappy": False,
"with_zlib": False,
"with_zstd": False,
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def export_sources(self):
    """Ship the wrapper CMakeLists plus every patch registered for this version."""
    self.copy("CMakeLists.txt")
    version_patches = self.conan_data.get("patches", {}).get(self.version, [])
    for patch_entry in version_patches:
        self.copy(patch_entry["patch_file"])
def config_options(self):
# fPIC is meaningless on Windows.
if self.settings.os == "Windows":
del self.options.fPIC
# SIMD options only exist from arrow 2.0.0; between 2.0.0 and 6.0.0 the
# "default" level is not accepted upstream, so pin sse4_2 (enforced again
# in validate()).
if tools.Version(self.version) < "2.0.0":
del self.options.simd_level
del self.options.runtime_simd_level
elif tools.Version(self.version) < "6.0.0":
self.options.simd_level = "sse4_2"
def validate(self):
    """Reject configurations that cannot be built.

    "auto" options are resolved by the _with_*/_compute helpers; explicitly
    disabling an option that another enabled feature requires is an error.
    """
    if self.settings.compiler == "clang" and self.settings.compiler.version <= tools.Version("3.9"):
        raise ConanInvalidConfiguration("This recipe does not support this compiler version")
    # NOTE(review): deleting fPIC for shared builds conventionally belongs in
    # configure(); kept here to preserve existing behavior.
    if self.options.shared:
        del self.options.fPIC
    # BUG FIX: the condition was inverted ("and not self._compute(True)"),
    # which rejected compute=False exactly when compute was NOT required and
    # accepted it when dataset_modules actually needs it.  Now it matches
    # every sibling check below.
    if self.options.compute == False and self._compute(True):
        raise ConanInvalidConfiguration("compute options is required (or choose auto)")
    if self.options.with_jemalloc == False and self._with_jemalloc(True):
        raise ConanInvalidConfiguration("with_jemalloc option is required (or choose auto)")
    if self.options.with_re2 == False and self._with_re2(True):
        raise ConanInvalidConfiguration("with_re2 option is required (or choose auto)")
    if self.options.with_protobuf == False and self._with_protobuf(True):
        raise ConanInvalidConfiguration("with_protobuf option is required (or choose auto)")
    if self.options.with_gflags == False and self._with_gflags(True):
        raise ConanInvalidConfiguration("with_gflags options is required (or choose auto)")
    if self.options.with_grpc == False and self._with_grpc(True):
        raise ConanInvalidConfiguration("with_grpc options is required (or choose auto)")
    if self.options.with_boost == False and self._with_boost(True):
        raise ConanInvalidConfiguration("with_boost options is required (or choose auto)")
    if self.options.with_openssl == False and self._with_openssl(True):
        raise ConanInvalidConfiguration("with_openssl options is required (or choose auto)")
    # BUG FIX: the message said "with_openssl" (copy-paste from the check
    # above) although this check is about with_llvm.
    if self.options.with_llvm == False and self._with_llvm(True):
        raise ConanInvalidConfiguration("with_llvm options is required (or choose auto)")
    if self.options.with_cuda:
        raise ConanInvalidConfiguration("CCI has no cuda recipe (yet)")
    if self.options.with_hiveserver2:
        raise ConanInvalidConfiguration("CCI has no hiveserver2 recipe (yet)")
    if self.options.with_orc:
        raise ConanInvalidConfiguration("CCI has no orc recipe (yet)")
    if self.options.shared and self._with_jemalloc():
        if self.options["jemalloc"].enable_cxx:
            # BUG FIX: message typo "jemmalloc" corrected.
            raise ConanInvalidConfiguration("jemalloc.enable_cxx of a static jemalloc must be disabled")
    if tools.Version(self.version) < "6.0.0" and self.options.get_safe("simd_level") == "default":
        raise ConanInvalidConfiguration("In {}/{}, simd_level options is not supported `default` value.".format(self.name, self.version))
def _compute(self, required=False):
if required or self.options.compute == "auto":
return bool(self.options.dataset_modules)
else:
return bool(self.options.compute)
def _with_jemalloc(self, required=False):
if required or self.options.with_jemalloc == "auto":
return bool("BSD" in str(self.settings.os))
else:
return bool(self.options.with_jemalloc)
def _with_re2(self, required=False):
if required or self.options.with_re2 == "auto":
return bool(self.options.gandiva)
else:
return bool(self.options.with_re2)
def _with_protobuf(self, required=False):
if required or self.options.with_protobuf == "auto":
return bool(self.options.gandiva or self.options.with_flight_rpc or self.options.with_orc)
else:
return bool(self.options.with_protobuf)
def _with_gflags(self, required=False):
if required or self.options.with_gflags == "auto":
return bool(self.options.plasma or self._with_glog() or self._with_grpc())
else:
return bool(self.options.with_gflags)
def _with_glog(self, required=False):
if required or self.options.with_glog == "auto":
return False
else:
return bool(self.options.with_glog)
def _with_grpc(self, required=False):
if required or self.options.with_grpc == "auto":
return bool(self.options.with_flight_rpc)
else:
return bool(self.options.with_grpc)
def _with_boost(self, required=False):
# Resolve with_boost. "auto" turns boost on for gandiva, for parquet on
# old gcc (< 4.9) with arrow 1.x, and for MSVC with arrow >= 2.x.
if required or self.options.with_boost == "auto":
if self.options.gandiva:
return True
version = tools.Version(self.version)
# NOTE(review): version.major is compared as a *string* here; "10" < "2"
# lexicographically, so an arrow >= 10.x version would take the wrong
# branch -- confirm tools.Version semantics before adding such versions.
if version.major == "1":
if self.options.parquet and self.settings.compiler == "gcc" and self.settings.compiler.version < tools.Version("4.9"):
return True
elif version.major >= "2":
if self.settings.compiler == "Visual Studio":
return True
return False
else:
return bool(self.options.with_boost)
def _with_thrift(self, required=False):
# No self.options.with_thift exists
return bool(required or self.options.with_hiveserver2 or self.options.parquet)
def _with_utf8proc(self, required=False):
if required or self.options.with_utf8proc == "auto":
return False
else:
return bool(self.options.with_utf8proc)
def _with_llvm(self, required=False):
if required or self.options.with_llvm == "auto":
return bool(self.options.gandiva)
else:
return bool(self.options.with_openssl)
def _with_openssl(self, required=False):
if required or self.options.with_openssl == "auto":
return bool(self.options.encryption or self.options.with_flight_rpc or self.options.with_s3)
else:
return bool(self.options.with_openssl)
def requirements(self):
    """Declare a third-party requirement for every enabled feature."""
    if self._with_thrift():
        self.requires("thrift/0.15.0")
    if self._with_protobuf():
        self.requires("protobuf/3.19.2")
    if self._with_jemalloc():
        self.requires("jemalloc/5.2.1")
    if self._with_boost():
        self.requires("boost/1.78.0")
    if self._with_gflags():
        self.requires("gflags/2.2.2")
    if self._with_glog():
        self.requires("glog/0.5.0")
    if self._with_grpc():
        self.requires("grpc/1.44.0")
    if self.options.with_json:
        self.requires("rapidjson/1.1.0")
    if self._with_llvm():
        self.requires("llvm-core/13.0.0")
    if self._with_openssl():
        self.requires("openssl/1.1.1m")
    if self.options.with_s3:
        self.requires("aws-sdk-cpp/1.9.100")
    if self.options.with_brotli:
        self.requires("brotli/1.0.9")
    if self.options.with_bz2:
        self.requires("bzip2/1.0.8")
    if self.options.with_lz4:
        self.requires("lz4/1.9.3")
    if self.options.with_snappy:
        self.requires("snappy/1.1.9")
    # BUG FIX: "and" binds tighter than "or", so the old condition required
    # xsimd whenever runtime_simd_level was set, for *any* version.  The
    # ">= 6.0.0" gate is meant to apply to both SIMD options.
    if tools.Version(self.version) >= "6.0.0" and \
            (self.options.get_safe("simd_level") != None or
             self.options.get_safe("runtime_simd_level") != None):
        self.requires("xsimd/8.0.3")
    if self.options.with_zlib:
        self.requires("zlib/1.2.11")
    if self.options.with_zstd:
        self.requires("zstd/1.5.2")
    if self._with_re2():
        self.requires("re2/20211101")
    if self._with_utf8proc():
        self.requires("utf8proc/2.7.0")
    if self.options.with_backtrace:
        self.requires("libbacktrace/cci.20210118")
def source(self):
# Download and unpack the upstream tarball into the source subfolder,
# dropping the archive's top-level directory.
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
    """Create (and cache) the CMake helper with all feature toggles mapped.

    Every third-party component is forced to the SYSTEM dependency source so
    the conan packages above are used instead of arrow's bundled builds.
    """
    if self._cmake:
        return self._cmake
    self._cmake = CMake(self)
    if tools.cross_building(self.settings):
        cmake_system_processor = {
            "armv8": "aarch64",
            "armv8.3": "aarch64",
        }.get(str(self.settings.arch), str(self.settings.arch))
        self._cmake.definitions["CMAKE_SYSTEM_PROCESSOR"] = cmake_system_processor
    if self.settings.compiler == "Visual Studio":
        self._cmake.definitions["ARROW_USE_STATIC_CRT"] = "MT" in str(self.settings.compiler.runtime)
    self._cmake.definitions["ARROW_DEPENDENCY_SOURCE"] = "SYSTEM"
    self._cmake.definitions["ARROW_GANDIVA"] = self.options.gandiva
    self._cmake.definitions["ARROW_PARQUET"] = self.options.parquet
    self._cmake.definitions["ARROW_PLASMA"] = self.options.plasma
    self._cmake.definitions["ARROW_DATASET"] = self.options.dataset_modules
    self._cmake.definitions["ARROW_FILESYSTEM"] = self.options.filesystem_layer
    self._cmake.definitions["PARQUET_REQUIRE_ENCRYPTION"] = self.options.encryption
    self._cmake.definitions["ARROW_HDFS"] = self.options.hdfs_bridgs
    self._cmake.definitions["ARROW_VERBOSE_THIRDPARTY_BUILD"] = True
    self._cmake.definitions["ARROW_BUILD_SHARED"] = self.options.shared
    self._cmake.definitions["ARROW_BUILD_STATIC"] = not self.options.shared
    self._cmake.definitions["ARROW_NO_DEPRECATED_API"] = not self.options.deprecated
    self._cmake.definitions["ARROW_FLIGHT"] = self.options.with_flight_rpc
    self._cmake.definitions["ARROW_HIVESERVER2"] = self.options.with_hiveserver2
    self._cmake.definitions["ARROW_COMPUTE"] = self._compute()
    self._cmake.definitions["ARROW_CSV"] = self.options.with_csv
    self._cmake.definitions["ARROW_CUDA"] = self.options.with_cuda
    self._cmake.definitions["ARROW_JEMALLOC"] = self._with_jemalloc()
    self._cmake.definitions["ARROW_JSON"] = self.options.with_json
    self._cmake.definitions["BOOST_SOURCE"] = "SYSTEM"
    self._cmake.definitions["Protobuf_SOURCE"] = "SYSTEM"
    if self._with_protobuf():
        self._cmake.definitions["ARROW_PROTOBUF_USE_SHARED"] = self.options["protobuf"].shared
    self._cmake.definitions["gRPC_SOURCE"] = "SYSTEM"
    if self._with_grpc():
        self._cmake.definitions["ARROW_GRPC_USE_SHARED"] = self.options["grpc"].shared
    # (a duplicate ARROW_HDFS assignment with the same value was removed here)
    self._cmake.definitions["ARROW_USE_GLOG"] = self._with_glog()
    self._cmake.definitions["GLOG_SOURCE"] = "SYSTEM"
    self._cmake.definitions["ARROW_WITH_BACKTRACE"] = self.options.with_backtrace
    self._cmake.definitions["ARROW_WITH_BROTLI"] = self.options.with_brotli
    self._cmake.definitions["Brotli_SOURCE"] = "SYSTEM"
    if self.options.with_brotli:
        self._cmake.definitions["ARROW_BROTLI_USE_SHARED"] = self.options["brotli"].shared
    self._cmake.definitions["gflags_SOURCE"] = "SYSTEM"
    if self._with_gflags():
        # BUG FIX: this block used to overwrite ARROW_BROTLI_USE_SHARED with
        # the gflags setting (copy-paste error); the gflags-specific key is
        # ARROW_GFLAGS_USE_SHARED.
        self._cmake.definitions["ARROW_GFLAGS_USE_SHARED"] = self.options["gflags"].shared
    self._cmake.definitions["ARROW_WITH_BZ2"] = self.options.with_bz2
    self._cmake.definitions["BZip2_SOURCE"] = "SYSTEM"
    if self.options.with_bz2:
        self._cmake.definitions["ARROW_BZ2_USE_SHARED"] = self.options["bzip2"].shared
    self._cmake.definitions["ARROW_WITH_LZ4"] = self.options.with_lz4
    self._cmake.definitions["Lz4_SOURCE"] = "SYSTEM"
    if self.options.with_lz4:
        self._cmake.definitions["ARROW_LZ4_USE_SHARED"] = self.options["lz4"].shared
    self._cmake.definitions["ARROW_WITH_SNAPPY"] = self.options.with_snappy
    self._cmake.definitions["Snappy_SOURCE"] = "SYSTEM"
    if self.options.with_snappy:
        self._cmake.definitions["ARROW_SNAPPY_USE_SHARED"] = self.options["snappy"].shared
    self._cmake.definitions["ARROW_WITH_ZLIB"] = self.options.with_zlib
    self._cmake.definitions["RE2_SOURCE"] = "SYSTEM"
    self._cmake.definitions["ZLIB_SOURCE"] = "SYSTEM"
    self._cmake.definitions["ARROW_WITH_ZSTD"] = self.options.with_zstd
    # The zstd source key changed case in arrow 2.0; SIMD levels also only
    # exist from 2.0 on (see config_options()).
    if tools.Version(self.version) >= "2.0":
        self._cmake.definitions["zstd_SOURCE"] = "SYSTEM"
        self._cmake.definitions["ARROW_SIMD_LEVEL"] = str(self.options.simd_level).upper()
        self._cmake.definitions["ARROW_RUNTIME_SIMD_LEVEL"] = str(self.options.runtime_simd_level).upper()
    else:
        self._cmake.definitions["ZSTD_SOURCE"] = "SYSTEM"
    if self.options.with_zstd:
        self._cmake.definitions["ARROW_ZSTD_USE_SHARED"] = self.options["zstd"].shared
    self._cmake.definitions["ORC_SOURCE"] = "SYSTEM"
    self._cmake.definitions["ARROW_WITH_THRIFT"] = self._with_thrift()
    self._cmake.definitions["Thrift_SOURCE"] = "SYSTEM"
    self._cmake.definitions["THRIFT_VERSION"] = "1.0"  # a recent thrift does not require boost
    if self._with_thrift():
        self._cmake.definitions["ARROW_THRIFT_USE_SHARED"] = self.options["thrift"].shared
    self._cmake.definitions["ARROW_USE_OPENSSL"] = self._with_openssl()
    if self._with_openssl():
        self._cmake.definitions["OPENSSL_ROOT_DIR"] = self.deps_cpp_info["openssl"].rootpath.replace("\\", "/")
        self._cmake.definitions["ARROW_OPENSSL_USE_SHARED"] = self.options["openssl"].shared
    if self._with_boost():
        self._cmake.definitions["ARROW_BOOST_USE_SHARED"] = self.options["boost"].shared
    self._cmake.definitions["ARROW_S3"] = self.options.with_s3
    self._cmake.definitions["AWSSDK_SOURCE"] = "SYSTEM"
    self._cmake.definitions["ARROW_BUILD_UTILITIES"] = self.options.cli
    self._cmake.definitions["ARROW_BUILD_INTEGRATION"] = False
    self._cmake.definitions["ARROW_INSTALL_NAME_RPATH"] = False
    self._cmake.definitions["ARROW_BUILD_EXAMPLES"] = False
    self._cmake.definitions["ARROW_BUILD_TESTS"] = False
    self._cmake.definitions["ARROW_ENABLE_TIMING_TESTS"] = False
    self._cmake.definitions["ARROW_BUILD_BENCHMARKS"] = False
    self._cmake.definitions["LLVM_SOURCE"] = "SYSTEM"
    self._cmake.definitions["ARROW_WITH_UTF8PROC"] = self._with_utf8proc()
    self._cmake.definitions["utf8proc_SOURCE"] = "SYSTEM"
    if self._with_utf8proc():
        self._cmake.definitions["ARROW_UTF8PROC_USE_SHARED"] = self.options["utf8proc"].shared
    self._cmake.definitions["BUILD_WARNING_LEVEL"] = "PRODUCTION"
    # (a duplicate ARROW_USE_STATIC_CRT assignment was removed here; the flag
    # is set once near the top of this method)
    if self._with_llvm():
        self._cmake.definitions["LLVM_DIR"] = self.deps_cpp_info["llvm-core"].rootpath.replace("\\", "/")
    self._cmake.configure()
    return self._cmake
def _patch_sources(self):
    """Apply every conandata patch registered for this version."""
    version_patches = self.conan_data.get("patches", {}).get(self.version, [])
    for patch_kwargs in version_patches:
        tools.patch(**patch_kwargs)
def build(self):
# Apply conandata patches, then drive the CMake build.
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
# Copy license files, run the CMake install step, then strip the
# cmake/pkg-config metadata that conan regenerates itself.
self.copy("LICENSE.txt", src=self._source_subfolder, dst="licenses")
self.copy("NOTICE.txt", src=self._source_subfolder, dst="licenses")
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def _lib_name(self, name):
if self.settings.compiler == "Visual Studio" and not self.options.shared:
return "{}_static".format(name)
else:
return "{}".format(name)
def package_id(self):
    """Resolve all "auto" option values so the package id is deterministic."""
    # BUG FIX: with_jemalloc was assigned twice; the duplicate was dropped.
    self.info.options.with_jemalloc = self._with_jemalloc()
    self.info.options.with_gflags = self._with_gflags()
    self.info.options.with_protobuf = self._with_protobuf()
    self.info.options.with_re2 = self._with_re2()
    self.info.options.with_openssl = self._with_openssl()
    self.info.options.with_boost = self._with_boost()
    self.info.options.with_glog = self._with_glog()
    self.info.options.with_grpc = self._with_grpc()
    # NOTE(review): with_llvm and with_utf8proc are also "auto" options but
    # are not resolved here -- confirm whether they should be added (doing so
    # would change existing package ids).
def package_info(self):
    """Describe the installed components, their libraries and requirements."""
    self.cpp_info.filenames["cmake_find_package"] = "Arrow"
    self.cpp_info.filenames["cmake_find_package_multi"] = "Arrow"
    self.cpp_info.components["libarrow"].libs = [self._lib_name("arrow")]
    self.cpp_info.components["libarrow"].names["cmake_find_package"] = "arrow"
    self.cpp_info.components["libarrow"].names["cmake_find_package_multi"] = "arrow"
    self.cpp_info.components["libarrow"].names["pkg_config"] = "arrow"
    if not self.options.shared:
        self.cpp_info.components["libarrow"].defines = ["ARROW_STATIC"]
        if self.settings.os == "Linux":
            self.cpp_info.components["libarrow"].system_libs = ["pthread"]
    if self.options.parquet:
        self.cpp_info.components["libparquet"].libs = [self._lib_name("parquet")]
        self.cpp_info.components["libparquet"].names["cmake_find_package"] = "parquet"
        self.cpp_info.components["libparquet"].names["cmake_find_package_multi"] = "parquet"
        self.cpp_info.components["libparquet"].names["pkg_config"] = "parquet"
        self.cpp_info.components["libparquet"].requires = ["libarrow"]
    if self.options.plasma:
        self.cpp_info.components["libplasma"].libs = [self._lib_name("plasma")]
        self.cpp_info.components["libplasma"].names["cmake_find_package"] = "plasma"
        self.cpp_info.components["libplasma"].names["cmake_find_package_multi"] = "plasma"
        self.cpp_info.components["libplasma"].names["pkg_config"] = "plasma"
        self.cpp_info.components["libplasma"].requires = ["libarrow"]
    if self.options.gandiva:
        self.cpp_info.components["libgandiva"].libs = [self._lib_name("gandiva")]
        self.cpp_info.components["libgandiva"].names["cmake_find_package"] = "gandiva"
        self.cpp_info.components["libgandiva"].names["cmake_find_package_multi"] = "gandiva"
        self.cpp_info.components["libgandiva"].names["pkg_config"] = "gandiva"
        self.cpp_info.components["libgandiva"].requires = ["libarrow"]
    if self.options.with_flight_rpc:
        self.cpp_info.components["libarrow_flight"].libs = [self._lib_name("arrow_flight")]
        self.cpp_info.components["libarrow_flight"].names["cmake_find_package"] = "flight_rpc"
        self.cpp_info.components["libarrow_flight"].names["cmake_find_package_multi"] = "flight_rpc"
        self.cpp_info.components["libarrow_flight"].names["pkg_config"] = "flight_rpc"
        self.cpp_info.components["libarrow_flight"].requires = ["libarrow"]
    if self.options.dataset_modules:
        self.cpp_info.components["dataset"].libs = ["arrow_dataset"]
    if self.options.cli:
        binpath = os.path.join(self.package_folder, "bin")
        self.output.info("Appending PATH env var: {}".format(binpath))
        self.env_info.PATH.append(binpath)
    if self._with_boost():
        if self.options.gandiva:
            # FIXME: only filesystem component is used
            self.cpp_info.components["libgandiva"].requires.append("boost::boost")
        if self.options.parquet and self.settings.compiler == "gcc" and self.settings.compiler.version < tools.Version("4.9"):
            self.cpp_info.components["libparquet"].requires.append("boost::boost")
        if tools.Version(self.version) >= "2.0":
            # FIXME: only headers components is used
            self.cpp_info.components["libarrow"].requires.append("boost::boost")
    if self._with_openssl():
        self.cpp_info.components["libarrow"].requires.append("openssl::openssl")
    if self._with_gflags():
        self.cpp_info.components["libarrow"].requires.append("gflags::gflags")
    if self._with_glog():
        self.cpp_info.components["libarrow"].requires.append("glog::glog")
    if self._with_jemalloc():
        self.cpp_info.components["libarrow"].requires.append("jemalloc::jemalloc")
    if self._with_re2():
        self.cpp_info.components["libgandiva"].requires.append("re2::re2")
    if self._with_llvm():
        self.cpp_info.components["libgandiva"].requires.append("llvm-core::llvm-core")
    if self._with_protobuf():
        self.cpp_info.components["libarrow"].requires.append("protobuf::protobuf")
    if self._with_utf8proc():
        # BUG FIX: this was misspelled "uff8proc::uff8proc", which fails at
        # generation time for consumers since no such package exists.
        self.cpp_info.components["libarrow"].requires.append("utf8proc::utf8proc")
    if self._with_thrift():
        self.cpp_info.components["libarrow"].requires.append("thrift::thrift")
    if self.options.with_backtrace:
        self.cpp_info.components["libarrow"].requires.append("libbacktrace::libbacktrace")
    if self.options.with_cuda:
        self.cpp_info.components["libarrow"].requires.append("cuda::cuda")
    if self.options.with_hiveserver2:
        self.cpp_info.components["libarrow"].requires.append("hiveserver2::hiveserver2")
    if self.options.with_json:
        self.cpp_info.components["libarrow"].requires.append("rapidjson::rapidjson")
    if self.options.with_s3:
        self.cpp_info.components["libarrow"].requires.append("aws-sdk-cpp::filesystem")
    if self.options.with_orc:
        self.cpp_info.components["libarrow"].requires.append("orc::orc")
    if self.options.with_brotli:
        self.cpp_info.components["libarrow"].requires.append("brotli::brotli")
    if self.options.with_bz2:
        self.cpp_info.components["libarrow"].requires.append("bzip2::bzip2")
    if self.options.with_lz4:
        self.cpp_info.components["libarrow"].requires.append("lz4::lz4")
    if self.options.with_snappy:
        self.cpp_info.components["libarrow"].requires.append("snappy::snappy")
    if self.options.get_safe("simd_level") != None or self.options.get_safe("runtime_simd_level") != None:
        self.cpp_info.components["libarrow"].requires.append("xsimd::xsimd")
    if self.options.with_zlib:
        self.cpp_info.components["libarrow"].requires.append("zlib::zlib")
    if self.options.with_zstd:
        self.cpp_info.components["libarrow"].requires.append("zstd::zstd")
    if self.options.with_flight_rpc:
        self.cpp_info.components["libarrow_flight"].requires.append("grpc::grpc")
        self.cpp_info.components["libarrow_flight"].requires.append("protobuf::protobuf")
| 49.452514 | 141 | 0.641136 |
14a2cf8d7f855787a7c9f9c6ce3167e6c0abf44d | 8,030 | py | Python | bpython/line.py | ocurero/bpython | 32363dc8bea1fd12997f314cb424bf72cbe1e599 | [
"PSF-2.0"
] | null | null | null | bpython/line.py | ocurero/bpython | 32363dc8bea1fd12997f314cb424bf72cbe1e599 | [
"PSF-2.0"
] | null | null | null | bpython/line.py | ocurero/bpython | 32363dc8bea1fd12997f314cb424bf72cbe1e599 | [
"PSF-2.0"
] | null | null | null | """Extracting and changing portions of the current line
All functions take cursor offset from the beginning of the line and the line of
Python code, and return None, or a tuple of the start index, end index, and the
word."""
from itertools import chain
from collections import namedtuple
from typing import Optional
from .lazyre import LazyReCompile
LinePart = namedtuple("LinePart", ["start", "stop", "word"])
current_word_re = LazyReCompile(r"(?<![)\]\w_.])" r"([\w_][\w0-9._]*[(]?)")
def current_word(cursor_offset: int, line: str) -> Optional[LinePart]:
    """the object.attribute.attribute just before or under the cursor"""
    # Keep the *last* match whose span covers the cursor (exclusive on the
    # left, inclusive on the right), mirroring the original scan order.
    found: Optional[LinePart] = None
    for match in current_word_re.finditer(line):
        if match.start(1) < cursor_offset <= match.end(1):
            found = LinePart(match.start(1), match.end(1), match.group(1))
    return found
current_dict_key_re = LazyReCompile(r"""[\w_][\w0-9._]*\[([\w0-9._(), '"]*)""")
def current_dict_key(cursor_offset: int, line: str) -> Optional[LinePart]:
    """If in dictionary completion, return the current key"""
    return next(
        (
            LinePart(m.start(1), m.end(1), m.group(1))
            for m in current_dict_key_re.finditer(line)
            if m.start(1) <= cursor_offset <= m.end(1)
        ),
        None,
    )
current_dict_re = LazyReCompile(r"""([\w_][\w0-9._]*)\[([\w0-9._(), '"]*)""")
def current_dict(cursor_offset: int, line: str) -> Optional[LinePart]:
    """If in dictionary completion, return the dict that should be used"""
    # The cursor must sit inside the key (group 2); the returned span is the
    # dict name (group 1).
    return next(
        (
            LinePart(m.start(1), m.end(1), m.group(1))
            for m in current_dict_re.finditer(line)
            if m.start(2) <= cursor_offset <= m.end(2)
        ),
        None,
    )
current_string_re = LazyReCompile(
'''(?P<open>(?:""")|"|(?:''\')|')(?:((?P<closed>.+?)(?P=open))|'''
"""(?P<unclosed>.+))"""
)
def current_string(cursor_offset: int, line: str) -> Optional[LinePart]:
"""If inside a string of nonzero length, return the string (excluding
quotes)
Weaker than bpython.Repl's current_string, because that checks that a
string is a string based on previous lines in the buffer."""
for m in current_string_re.finditer(line):
# Group 3 ("closed") matched means a terminated string; otherwise fall
# back to group 4 ("unclosed") -- exactly one participates per match.
i = 3 if m.group(3) else 4
if m.start(i) <= cursor_offset and m.end(i) >= cursor_offset:
return LinePart(m.start(i), m.end(i), m.group(i))
return None
current_object_re = LazyReCompile(r"([\w_][\w0-9_]*)[.]")
def current_object(cursor_offset: int, line: str) -> Optional[LinePart]:
"""If in attribute completion, the object on which attribute should be
looked up."""
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
s = ""
# Rebuild the dotted prefix (e.g. "a.b" out of "a.b.attr") from the name
# components that end strictly before the cursor; the trailing component
# under the cursor is the attribute being completed, not the object.
for m in current_object_re.finditer(word):
if m.end(1) + start < cursor_offset:
if s:
s += "."
s += m.group(1)
if not s:
return None
return LinePart(start, start + len(s), s)
current_object_attribute_re = LazyReCompile(r"([\w_][\w0-9_]*)[.]?")
def current_object_attribute(
cursor_offset: int, line: str
) -> Optional[LinePart]:
"""If in attribute completion, the attribute being completed"""
# TODO replace with more general current_expression_attribute
match = current_word(cursor_offset, line)
if match is None:
return None
start, end, word = match
matches = current_object_attribute_re.finditer(word)
# The first component is the base object, not an attribute: skip it.
next(matches)
for m in matches:
# Match offsets are relative to `word`; shift by `start` to compare
# against the line-wide cursor offset.
if (
m.start(1) + start <= cursor_offset
and m.end(1) + start >= cursor_offset
):
return LinePart(m.start(1) + start, m.end(1) + start, m.group(1))
return None
current_from_import_from_re = LazyReCompile(
r"from +([\w0-9_.]*)(?:\s+import\s+([\w0-9_]+[,]?\s*)+)*"
)
def current_from_import_from(
cursor_offset: int, line: str
) -> Optional[LinePart]:
"""If in from import completion, the word after from

returns None if cursor not in or just after one of the two interesting
parts of an import: from (module) import (name1, name2)
"""
# TODO allow for as's
for m in current_from_import_from_re.finditer(line):
# Group 1 is the module after "from", group 2 the names after "import".
# Whichever one the cursor touches, the *module* span is returned.
if (m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or (
m.start(2) < cursor_offset and m.end(2) >= cursor_offset
):
return LinePart(m.start(1), m.end(1), m.group(1))
return None
current_from_import_import_re_1 = LazyReCompile(r"from\s+([\w0-9_.]*)\s+import")
current_from_import_import_re_2 = LazyReCompile(r"([\w0-9_]+)")
current_from_import_import_re_3 = LazyReCompile(r", *([\w0-9_]*)")
def current_from_import_import(
cursor_offset: int, line: str
) -> Optional[LinePart]:
"""If in from import completion, the word after import being completed

returns None if cursor not in or just after one of these words
"""
# Anchor on the "from <module> import" prefix, then walk the first name
# and every ", name" continuation after it.
baseline = current_from_import_import_re_1.search(line)
if baseline is None:
return None
match1 = current_from_import_import_re_2.search(line[baseline.end() :])
if match1 is None:
return None
for m in chain(
(match1,),
current_from_import_import_re_3.finditer(line[baseline.end() :]),
):
# Match offsets are relative to the end of the prefix; shift them back
# into line coordinates before comparing with the cursor.
start = baseline.end() + m.start(1)
end = baseline.end() + m.end(1)
if start < cursor_offset and end >= cursor_offset:
return LinePart(start, end, m.group(1))
return None
current_import_re_1 = LazyReCompile(r"import")
current_import_re_2 = LazyReCompile(r"([\w0-9_.]+)")
current_import_re_3 = LazyReCompile(r"[,][ ]*([\w0-9_.]*)")
def current_import(cursor_offset: int, line: str) -> Optional[LinePart]:
"""The module name under the cursor in an "import a, b" statement."""
# TODO allow for multiple as's
# Anchor on the "import" keyword, then walk the first dotted name and
# every ", name" continuation after it.
baseline = current_import_re_1.search(line)
if baseline is None:
return None
match1 = current_import_re_2.search(line[baseline.end() :])
if match1 is None:
return None
for m in chain(
(match1,), current_import_re_3.finditer(line[baseline.end() :])
):
# Offsets are relative to the end of "import"; shift into line coords.
start = baseline.end() + m.start(1)
end = baseline.end() + m.end(1)
if start < cursor_offset and end >= cursor_offset:
return LinePart(start, end, m.group(1))
return None
current_method_definition_name_re = LazyReCompile(r"def\s+([a-zA-Z_][\w]*)")
def current_method_definition_name(
    cursor_offset: int, line: str
) -> Optional[LinePart]:
    """The name of a method being defined"""
    for candidate in current_method_definition_name_re.finditer(line):
        lo, hi = candidate.span(1)
        if lo <= cursor_offset <= hi:
            return LinePart(lo, hi, candidate.group(1))
    return None
current_single_word_re = LazyReCompile(r"(?<![.])\b([a-zA-Z_][\w]*)")
def current_single_word(cursor_offset: int, line: str) -> Optional[LinePart]:
    """the un-dotted word just before or under the cursor"""
    return next(
        (
            LinePart(m.start(1), m.end(1), m.group(1))
            for m in current_single_word_re.finditer(line)
            if m.start(1) <= cursor_offset <= m.end(1)
        ),
        None,
    )
def current_dotted_attribute(
    cursor_offset: int, line: str
) -> Optional[LinePart]:
    """The dotted attribute-object pair before the cursor"""
    found = current_word(cursor_offset, line)
    # Only qualifies when the word contains a dot past its first character.
    if found is not None and "." in found.word[1:]:
        return LinePart(found.start, found.stop, found.word)
    return None
current_expression_attribute_re = LazyReCompile(
r"[.]\s*((?:[\w_][\w0-9_]*)|(?:))"
)
def current_expression_attribute(
    cursor_offset: int, line: str
) -> Optional[LinePart]:
    """If after a dot, the attribute being completed"""
    # TODO replace with more general current_expression_attribute
    for candidate in current_expression_attribute_re.finditer(line):
        lo, hi = candidate.span(1)
        if lo <= cursor_offset <= hi:
            return LinePart(lo, hi, candidate.group(1))
    return None
| 32.909836 | 80 | 0.645081 |
8d154763bf810dc9f668988f05f53dd32a354a31 | 164 | py | Python | configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 426 | 2020-10-16T08:09:27.000Z | 2022-03-30T03:36:04.000Z | configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 170 | 2020-09-08T12:29:06.000Z | 2022-03-31T18:28:09.000Z | configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py | evgps/mmdetection_trashcan | aaf4237c2c0d473425cdc7b741d3009177b79751 | [
"Apache-2.0"
] | 84 | 2021-05-29T06:58:14.000Z | 2022-03-31T07:44:10.000Z | _base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py'
# model settings
model = dict(
pretrained='open-mmlab://detectron2/resnet101_caffe',
backbone=dict(depth=101))
| 27.333333 | 57 | 0.743902 |
3777a2aaf0cd76aca78ecafa8935c65e7b9a47df | 80,840 | py | Python | Packs/Mimecast/Integrations/MimecastV2/MimecastV2.py | asiadeepinstinct/content | dcb4a87a55d052e0189b6ed1059fb8116e7304ab | [
"MIT"
] | null | null | null | Packs/Mimecast/Integrations/MimecastV2/MimecastV2.py | asiadeepinstinct/content | dcb4a87a55d052e0189b6ed1059fb8116e7304ab | [
"MIT"
] | null | null | null | Packs/Mimecast/Integrations/MimecastV2/MimecastV2.py | asiadeepinstinct/content | dcb4a87a55d052e0189b6ed1059fb8116e7304ab | [
"MIT"
] | null | null | null | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import hmac
import uuid
import json
import base64
import hashlib
import requests
from datetime import timedelta
from urllib2 import HTTPError
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
# Mimecast regional API base URL and the four credentials used to sign requests.
BASE_URL = demisto.params().get('baseUrl')
ACCESS_KEY = demisto.params().get('accessKey')
SECRET_KEY = demisto.params().get('secretKey')
APP_ID = demisto.params().get('appId')
APP_KEY = demisto.params().get('appKey')
USE_SSL = None  # assigned in determine_ssl_usage
PROXY = True if demisto.params().get('proxy') else False
# Flags to control which type of incidents are being fetched
FETCH_URL = demisto.params().get('fetchURL')
FETCH_ATTACHMENTS = demisto.params().get('fetchAttachments')
FETCH_IMPERSONATIONS = demisto.params().get('fetchImpersonations')
# Used to refresh token / discover available auth types / login
EMAIL_ADDRESS = demisto.params().get('email')
PASSWORD = demisto.params().get('password')
# How far back (hours) each fetch-incidents run looks; defaults to 24.
FETCH_DELTA = int(demisto.params().get('fetchDelta', 24))
LOG("command is {}".format(demisto.command()))
# default query xml template for test module
# Archive-search XML skeleton; parse_query_args() patches attributes/elements
# in this string (page-size, startrow, <text>, <date>, <sent>, <docs>, ...).
default_query_xml = "<?xml version=\"1.0\"?> \n\
<xmlquery trace=\"iql,muse\">\n\
<metadata query-type=\"emailarchive\" archive=\"true\" active=\"false\" page-size=\"25\" startrow=\"0\">\n\
<smartfolders/>\n\
<return-fields>\n\
<return-field>attachmentcount</return-field>\n\
<return-field>status</return-field>\n\
<return-field>subject</return-field>\n\
<return-field>size</return-field>\n\
<return-field>receiveddate</return-field>\n\
<return-field>displayfrom</return-field>\n\
<return-field>id</return-field>\n\
<return-field>displayto</return-field>\n\
<return-field>smash</return-field>\n\
</return-fields>\n\
</metadata>\n\
<muse>\n\
<text></text>\n\
<date select=\"last_year\"/>\n\
<sent></sent>\n\
<docs select=\"optional\"></docs>\n\
<route/>\n\
</muse>\n\
</xmlquery>"
''' HELPER FUNCTIONS '''
def determine_ssl_usage():
    """Set the global USE_SSL flag, honoring the legacy 'insecure' param first."""
    global USE_SSL
    legacy_insecure = demisto.params().get('insecure', None)
    if legacy_insecure:
        # Legacy parameter present and truthy: mirror it and stop.
        USE_SSL = bool(legacy_insecure)
        return
    # New-style parameter: 'new_insecure' set means SSL verification is off.
    USE_SSL = not demisto.params().get('new_insecure')
def epoch_seconds(d=None):
    """
    Return the whole number of seconds between the Unix epoch and *d*
    (current UTC time when *d* is omitted).
    """
    target = d if d else datetime.utcnow()
    delta = target - datetime.utcfromtimestamp(0)
    return int(delta.total_seconds())
def auto_refresh_token():
    """
    Renew the access/secret token binding when it is about to expire.
    Only possible when App ID + email + password are all configured; the
    refresh fires when less than 30 minutes remain of the 3-day validity
    window (or when no refresh timestamp was ever recorded).
    """
    if not (APP_ID and EMAIL_ADDRESS and PASSWORD):
        return
    last_update = demisto.getIntegrationContext().get('token_last_update')
    now = epoch_seconds()
    lifetime_minus_buffer = 60 * 60 * 24 * 3 - 1800
    if last_update is None or (last_update and now - last_update > lifetime_minus_buffer):
        refresh_token_request()
        demisto.setIntegrationContext({'token_last_update': epoch_seconds()})
def http_request(method, api_endpoint, payload=None, params=None, user_auth=True, is_file=False):
    """
    Issue a request against the Mimecast API.

    Args:
        method (str): HTTP verb, e.g. 'POST'.
        api_endpoint (str): API path (e.g. '/api/ttp/url/get-logs'); for user
            auth it is also part of the signed Authorization header.
        payload: Pre-serialized request body (caller serializes).
        params (dict): Optional query-string parameters.
        user_auth (bool): True for signed user auth (the normal case); False for
            Basic-Cloud auth, used only by login/discover/refresh-token.
        is_file (bool): When True, return the raw response object instead of JSON.

    Returns:
        Parsed JSON response, or the raw requests response when is_file is True.
    """
    # Fix: avoid a shared mutable default argument (was `params={}`).
    if params is None:
        params = {}
    is_user_auth = True
    url = BASE_URL + api_endpoint
    # 2 types of auth, user and non user, mostly user is needed
    if user_auth:
        # Generate request header values
        request_id = str(uuid.uuid4())
        hdr_date = datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S") + " UTC"
        # Create the HMAC SHA1 of the Base64 decoded secret key for the Authorization header
        hmac_sha1 = hmac.new(SECRET_KEY.decode("base64"), ':'.join([hdr_date, request_id, api_endpoint, APP_KEY]),  # type: ignore
                             digestmod=hashlib.sha1).digest()
        # Use the HMAC SHA1 value to sign the hdrDate + ":" requestId + ":" + URI + ":" + appkey
        signature = base64.encodestring(hmac_sha1).rstrip()
        # Create request headers
        headers = {
            'Authorization': 'MC ' + ACCESS_KEY + ':' + signature,
            'x-mc-app-id': APP_ID,
            'x-mc-date': hdr_date,
            'x-mc-req-id': request_id,
            'Content-Type': 'application/json'
        }
    else:
        # This type of auth is only supported for basic commands: login/discover/refresh-token
        is_user_auth = False
        auth = base64.b64encode(EMAIL_ADDRESS + ':' + PASSWORD)
        auth_type = 'Basic-Cloud'
        auth_header = auth_type + ' ' + auth
        headers = {
            'x-mc-app-id': APP_ID,
            'Content-Type': 'application/json',
            'Authorization': auth_header
        }
    LOG('running %s request with url=%s\tparams=%s\tdata=%s\tis user auth=%s' % (
        method, url, json.dumps(params), json.dumps(payload), is_user_auth))
    try:
        res = requests.request(
            method,
            url,
            verify=USE_SSL,
            params=params,
            headers=headers,
            data=payload
        )
        res.raise_for_status()
        if is_file:
            return res
        return res.json()
    except HTTPError as e:
        LOG(e)
        # 418 is Mimecast's "token expired" signal.
        if e.response.status_code == 418:  # type: ignore # pylint: disable=no-member
            if not APP_ID or not EMAIL_ADDRESS or not PASSWORD:
                return_error(
                    'Credentials provided are expired, could not automatically refresh tokens.'
                    ' App ID + Email Address '
                    '+ Password are required.')
        # NOTE(review): a 418 with full credentials falls through and returns
        # None; presumably the refresh is handled elsewhere — confirm.
        else:
            raise
    except Exception as e:
        LOG(e)
        raise
def parse_query_args(args):
    """
    Build the archive-search XML by patching default_query_xml with the
    provided Demisto arguments. The order of the string replacements matters:
    body/subject/text all target the same '<text></text>' placeholder, so only
    the first one present wins; the date replacements consume the single
    '<date select="last_year"/>' placeholder.
    """
    query_xml = default_query_xml
    if args.get('pageSize'):
        query_xml = query_xml.replace('page-size=\"25\"', 'page-size=\"' + args.get('pageSize') + '\"')
    if args.get('startRow'):
        query_xml = query_xml.replace('startrow=\"0\"', 'startrow=\"' + args.get('startRow') + '\"')
    if args.get('active') == 'true':
        query_xml = query_xml.replace('active=\"false\"', 'active=\"true\"')
    if args.get('body'):
        query_xml = query_xml.replace('<text></text>', '<text>(body: ' + args.get('body') + ')</text>')
    if args.get('subject'):
        query_xml = query_xml.replace('<text></text>', '<text>(subject: ' + args.get('subject') + ')</text>')
    if args.get('text'):
        query_xml = query_xml.replace('<text></text>', '<text>' + args.get('text') + '</text>')
    if args.get('date'):
        query_xml = query_xml.replace('<date select=\"last_year\"/>', '<date select=\"' + args.get('date') + '\"/>')
        # 'date' is mutually exclusive with the explicit dateFrom/dateTo range.
        if args.get('dateTo') or args.get('dateFrom'):
            return_error('Cannot use both date and dateFrom/dateTo arguments')
    date_to = ""
    date_from = ""
    if args.get('dateTo'):
        date_to = args.get('dateTo')
    if args.get('dateFrom'):
        date_from = args.get('dateFrom')
    # Open-ended ranges are allowed: only 'from', only 'to', or both.
    if date_to and date_from:
        query_xml = query_xml.replace('<date select=\"last_year\"/>',
                                      '<date select=\"between\" from=\"' + date_from + '\" to=\"' + date_to + '\" />')
    elif date_from:
        query_xml = query_xml.replace('<date select=\"last_year\"/>',
                                      '<date select=\"between\" from=\"' + date_from + '\" />')
    elif date_to:
        query_xml = query_xml.replace('<date select=\"last_year\"/>',
                                      '<date select=\"between\" to=\"' + date_to + '\" />')
    if args.get('sentFrom'):
        query_xml = query_xml.replace('<sent></sent>', '<sent select=\"from\" >' + args.get('sentFrom') + '</sent>')
    if args.get('sentTo'):
        query_xml = query_xml.replace('<sent></sent>', '<sent select=\"to\" >' + args.get('sentTo') + '</sent>')
    query_xml = query_xml.replace('<sent></sent>', '')  # no empty tag
    if args.get('attachmentText'):
        query_xml = query_xml.replace('</docs>', args.get('attachmentText') + '</docs>')
    if args.get('attachmentType'):
        query_xml = query_xml.replace('<docs select=\"optional\">',
                                      '<docs select=\"' + args.get('attachmentType') + '\">')
    return query_xml
'''COMMANDS '''
def test_module():
    """
    Connectivity check for the integration 'Test' button.
    Requires the Access Key parameter; performs a harmless managed-URL listing.
    """
    if not ACCESS_KEY:
        return_error('Cannot test valid connection without the Access Key parameter.')
    list_managed_url()
def query():
    """
    Search the Mimecast message archive.

    A literal queryXml argument is used verbatim; otherwise the search XML is
    built from the individual arguments. With dryRun=true the generated XML is
    returned instead of being executed.
    """
    args = demisto.args()
    headers = ['Subject', 'Display From', 'Display To', 'Received Date', 'Size', 'Attachment Count', 'Status', 'ID']
    query_xml = args.get('queryXml') or parse_query_args(args)
    if args.get('dryRun') == 'true':
        return query_xml
    contents = []
    messages_context = []
    for message in query_request(query_xml):
        contents.append({
            'Subject': message.get('subject'),
            'From': message.get('displayfrom'),
            'To': message.get('displayto'),
            'Received Date': message.get('receiveddate'),
            'Size': message.get('size'),
            'Attachment Count': message.get('attachmentcount'),
            'Status': message.get('status'),
            'ID': message.get('id')
        })
        messages_context.append({
            'Subject': message.get('subject'),
            'Sender': message.get('displayfrom'),
            'Recipient': message.get('displayto'),
            'ReceivedDate': message.get('receiveddate'),
            'Size': message.get('size'),
            'AttachmentCount': message.get('attachmentcount'),
            'Status': message.get('status'),
            'ID': message.get('id')
        })
    context = {'Mimecast.Message(val.ID && val.ID == obj.ID)': messages_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Mimecast archived emails', contents, headers),
        'EntryContext': context
    }
def query_request(query_xml):
    """POST the archive-search XML and return the matched items."""
    api_endpoint = '/api/archive/search'
    # The endpoint requires the admin flag; only admin search is supported here.
    payload = {'data': [{'admin': True, 'query': query_xml}]}
    response = http_request('POST', api_endpoint, json.dumps(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0].get('items')
def url_decode():
    """Decode a Mimecast-rewritten (protected) URL back to its original form."""
    protected_url = demisto.args().get('url').encode('utf-8')
    decoded_url = url_decode_request(protected_url)
    contents = {'Decoded URL': decoded_url}
    context = {
        outputPaths['url']: {
            'Data': protected_url,
            'Mimecast': {
                'DecodedURL': decoded_url
            }
        }
    }
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Mimecast Decoded URL:', contents, []),
        'EntryContext': context
    }
def url_decode_request(url):
    """Call the TTP decode-url endpoint; error out if no URL comes back."""
    api_endpoint = '/api/ttp/url/decode-url'
    payload = {'data': [{'url': url}]}
    response = http_request('POST', api_endpoint, str(payload))
    decoded = response.get('data')[0].get('url')
    if not decoded:
        return_error('No URL has been returned from the service')
    return decoded
def get_policy():
    """
    List all blocked-sender policies, or fetch a single one when a policyID
    argument is supplied.
    """
    headers = ['Policy ID', 'Sender', 'Reciever', 'Bidirectional', 'Start', 'End']
    title = 'Mimecast list blocked sender policies: \n These are the existing Blocked Sender Policies:'
    policy_id = demisto.args().get('policyID')
    if policy_id:
        policy_id = policy_id.encode('utf-8')
        title = 'Mimecast Get Policy'
    contents = []
    policies_context = []
    for policy_list in get_policy_request(policy_id):
        policy = policy_list.get('policy')
        sender = policy.get('from')
        reciever = policy.get('to')
        contents.append({
            'Policy ID': policy_list['id'],
            'Sender': {
                'Group': sender.get('groupId'),
                'Email Address': sender.get('emailAddress'),
                'Domain': sender.get('emailDomain'),
                'Type': sender.get('type')
            },
            'Reciever': {
                'Group': reciever.get('groupId'),
                'Email Address': reciever.get('emailAddress'),
                'Domain': reciever.get('emailDomain'),
                'Type': reciever.get('type')
            },
            'Bidirectional': policy.get('bidirectional'),
            'Start': policy.get('fromDate'),
            'End': policy.get('toDate')
        })
        policies_context.append({
            'ID': policy_list['id'],
            'Sender': {
                'Group': sender.get('groupId'),
                'Address': sender.get('emailAddress'),
                'Domain': sender.get('emailDomain'),
                'Type': sender.get('type')
            },
            'Reciever': {
                'Group': reciever.get('groupId'),
                'Address': reciever.get('emailAddress'),
                'Domain': reciever.get('emailDomain'),
                'Type': reciever.get('type')
            },
            'Bidirectional': policy.get('bidirectional'),
            'FromDate': policy.get('fromDate'),
            'ToDate': policy.get('toDate')
        })
    context = {'Mimecast.Policy(val.ID && val.ID == obj.ID)': policies_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, contents, headers),
        'EntryContext': context
    }
def get_policy_request(policy_id=None):
    """POST to get-policy; a policy_id narrows the result to one policy."""
    api_endpoint = '/api/policy/blockedsenders/get-policy'
    data = [{'id': policy_id}] if policy_id else []
    payload = {'data': data}
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')
def get_arguments_for_policy_command(args):
    # type: (dict) -> Tuple[dict, str]
    """
    Extract the blocked-senders policy fields from Demisto arguments.

    Args:
        args: Demisto arguments

    Returns:
        tuple. policy arguments, and option to choose from the policy configuration.
    """
    field_names = ('description', 'fromPart', 'fromType', 'fromValue', 'toType', 'toValue')
    policy_obj = {name: args.get(name, '').encode('utf-8') for name in field_names}
    option = str(args.get('option', '').encode('utf-8'))
    return policy_obj, option
def create_policy():
    """Create a blocked-senders policy from the command arguments."""
    headers = ['Policy ID', 'Description', 'Sender', 'Receiver', 'Bidirectional', 'Start', 'End']
    policy_obj, option = get_arguments_for_policy_command(demisto.args())
    policy_list = create_or_update_policy_request(policy_obj, option)
    policy = policy_list.get('policy')
    policy_id = policy_list.get('id')
    sender = policy.get('from')
    receiver = policy.get('to')
    description = policy.get('description')
    title = 'Mimecast Create Policy: \n Policy Was Created Successfully!'
    # 'Reciever' (sic) is kept alongside 'Receiver' for backwards compatibility.
    content = {
        'Policy ID': policy_id,
        'Description': description,
        'Sender': {
            'Group': sender.get('groupId'),
            'Email Address': sender.get('emailAddress'),
            'Domain': sender.get('emailDomain'),
            'Type': sender.get('type')
        },
        'Receiver': {
            'Group': receiver.get('groupId'),
            'Email Address': receiver.get('emailAddress'),
            'Domain': receiver.get('emailDomain'),
            'Type': receiver.get('type')
        },
        'Reciever': {
            'Group': receiver.get('groupId'),
            'Email Address': receiver.get('emailAddress'),
            'Domain': receiver.get('emailDomain'),
            'Type': receiver.get('type')
        },
        'Bidirectional': policy.get('bidirectional'),
        'Start': policy.get('fromDate'),
        'End': policy.get('toDate')
    }  # type: Dict[Any, Any]
    policies_context = {
        'ID': policy_id,
        'Description': description,
        'Sender': {
            'Group': sender.get('groupId'),
            'Address': sender.get('emailAddress'),
            'Domain': sender.get('emailDomain'),
            'Type': sender.get('type')
        },
        'Receiver': {
            'Group': receiver.get('groupId'),
            'Address': receiver.get('emailAddress'),
            'Domain': receiver.get('emailDomain'),
            'Type': receiver.get('type')
        },
        'Reciever': {
            'Group': receiver.get('groupId'),
            'Email Address': receiver.get('emailAddress'),
            'Domain': receiver.get('emailDomain'),
            'Type': receiver.get('type')
        },
        'Bidirectional': policy.get('bidirectional'),
        'FromDate': policy.get('fromDate'),
        'ToDate': policy.get('toDate')
    }  # type: Dict[Any, Any]
    context = {'Mimecast.Policy(val.ID && val.ID == obj.ID)': policies_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': policy_list,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, content, headers),
        'EntryContext': context
    }
def set_empty_value_args_policy_update(policy_obj, option, policy_id):
    """
    Fill any empty policy fields (and option) from the existing policy.

    Args:
        policy_obj (Dict): Dict of policy details
        option: (str) Policy option
        policy_id: (str) Policy ID

    Returns:
        Tuple. Policy object, the option to configure on the policy, policy id.
    """
    missing = [arg for arg, value in policy_obj.items() if value == '']
    if option == '':
        missing.append("option")
    # Only hit the API when something actually needs back-filling.
    if missing:
        current = get_policy_request(policy_id)[0]
        for arg in missing:
            if arg == "option":
                option = current["option"].encode("utf-8")
            else:
                policy_obj[arg] = current["policy"][arg].encode("utf-8")
    return policy_obj, option, policy_id
def update_policy():
    """
    Update a blocked-senders policy by ID; arguments that were left empty
    keep their current values (back-filled from the existing policy).
    """
    headers = ['Policy ID', 'Description', 'Sender', 'Receiver', 'Bidirectional', 'Start', 'End']
    args = demisto.args()
    policy_obj, option = get_arguments_for_policy_command(args)
    policy_id = str(args.get('policy_id', '').encode('utf-8'))
    if not policy_id:
        return_error("You need to enter policy ID")
    policy_obj, option, policy_id = set_empty_value_args_policy_update(policy_obj, option, policy_id)
    response = create_or_update_policy_request(policy_obj, option, policy_id=policy_id)
    policy = response.get('policy')
    sender = policy.get('from')
    receiver = policy.get('to')
    description = policy.get('description')
    title = 'Mimecast Update Policy: \n Policy Was Updated Successfully!'
    contents = {
        'Policy ID': policy_id,
        'Description': description,
        'Sender': {
            'Group': sender.get('groupId'),
            'Email Address': sender.get('emailAddress'),
            'Domain': sender.get('emailDomain'),
            'Type': sender.get('type')
        },
        'Receiver': {
            'Group': receiver.get('groupId'),
            'Email Address': receiver.get('emailAddress'),
            'Domain': receiver.get('emailDomain'),
            'Type': receiver.get('type')
        },
        'Bidirectional': policy.get('bidirectional'),
        'Start': policy.get('fromDate'),
        'End': policy.get('toDate')
    }  # type: Dict[Any, Any]
    policies_context = {
        'ID': policy_id,
        'Description': description,
        'Sender': {
            'Group': sender.get('groupId'),
            'Address': sender.get('emailAddress'),
            'Domain': sender.get('emailDomain'),
            'Type': sender.get('type')
        },
        'Receiver': {
            'Group': receiver.get('groupId'),
            'Address': receiver.get('emailAddress'),
            'Domain': receiver.get('emailDomain'),
            'Type': receiver.get('type')
        },
        'Bidirectional': policy.get('bidirectional'),
        'FromDate': policy.get('fromDate'),
        'ToDate': policy.get('toDate')
    }  # type: Dict[Any, Any]
    context = {'Mimecast.Policy(val.ID && val.ID == obj.ID)': policies_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, contents, headers),
        'EntryContext': context
    }
def create_or_update_policy_request(policy, option, policy_id=None):
    """
    Create a blocked-senders policy — or update one when policy_id is given
    (the same endpoint handles both; the id field selects update mode).
    """
    api_endpoint = '/api/policy/blockedsenders/create-policy'
    request_body = {'policy': policy, 'option': option}
    if policy_id:
        request_body['id'] = policy_id
    payload = {'data': [request_body]}
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0]
def delete_policy():
    """Delete a blocked-senders policy by its ID and mark it deleted in context."""
    policy_id = demisto.args().get('policyID').encode('utf-8')
    delete_policy_request(policy_id)
    context = {
        'Mimecast.Policy(val.ID && val.ID == obj.ID)': {
            'ID': policy_id,
            'Deleted': True
        }
    }
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': [],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': 'Mimecast Policy {} deleted successfully!'.format(policy_id),
        'EntryContext': context
    }
def delete_policy_request(policy_id=None):
    """POST to delete-policy and verify the service deleted the requested ID."""
    api_endpoint = '/api/policy/blockedsenders/delete-policy'
    payload = {'data': [{'id': policy_id}]}
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    result = response.get('data')[0]
    # The echoed id must match the one we asked to delete.
    if result.get('id') != policy_id:
        return_error('Policy was not deleted.')
    return result
def manage_sender():
    """Permit or block a sender/recipient pair on the managed-senders list."""
    sender = demisto.args().get('sender').encode('utf-8')
    recipient = demisto.args().get('recipient').encode('utf-8')
    action = demisto.args().get('action').encode('utf-8')
    title_action = 'permitted' if action == 'permit' else 'blocked'
    title = 'Mimecast messages from {} to {} will now be {}!'.format(sender, recipient, title_action)
    managed_sender = manage_sender_request({
        'sender': sender,
        'to': recipient,
        'action': action
    })
    contents = {
        'Sender': managed_sender.get('sender'),
        'Recipient': managed_sender.get('to'),
        'Action': managed_sender.get('type'),
        'ID': managed_sender.get('id')
    }
    context = {'Mimecast.Managed(val.ID && val.ID == obj.ID)': dict(contents)}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, contents, []),
        'EntryContext': context
    }
def manage_sender_request(req_obj):
    """POST a permit-or-block request for a single sender/recipient pair."""
    api_endpoint = '/api/managedsender/permit-or-block-sender'
    payload = {'data': [req_obj]}
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0]
def list_managed_url():
    """
    List managed URLs; when a url argument is given, only entries whose
    reconstructed full URL contains it are returned.
    """
    headers = ['URL', 'Action', 'Match Type', 'User Awareness', 'URL Rewriting', 'Comment']
    url_filter = demisto.args().get('url')
    if url_filter:
        url_filter = url_filter.encode('utf-8')
    contents = []
    managed_urls_context = []
    for managed_url in list_managed_url_request():
        # Rebuild the full URL out of its stored parts (scheme/domain/path/query).
        query_string = '?' + managed_url.get('queryString') if managed_url.get('queryString') else ''
        scheme = managed_url.get('scheme') + '://' if managed_url.get('scheme') else ''
        full_url_response = scheme + managed_url.get('domain', '') + managed_url.get('path', '') + query_string
        if (url_filter and url_filter in full_url_response) or not url_filter:
            contents.append({
                'URL': full_url_response,
                'Match Type': managed_url.get('matchType'),
                'Comment': managed_url.get('comment'),
                'Action': managed_url.get('action'),
                'URL Rewriting': managed_url.get('disableRewrite'),
                'User Awareness': managed_url.get('disableUserAwareness')
            })
            managed_urls_context.append({
                'Domain': managed_url.get('domain'),
                'disableLogClick': managed_url.get('disableLogClick'),
                'Action': managed_url.get('action'),
                'Path': managed_url.get('path'),
                'matchType': managed_url.get('matchType'),
                'ID': managed_url.get('id'),
                'disableRewrite': managed_url.get('disableRewrite')
            })
    context = {'Mimecast.URL(val.ID && val.ID == obj.ID)': managed_urls_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Mimecast Managed URLs: ', contents, headers),
        'EntryContext': context
    }
def list_managed_url_request():
    """POST to get-all-managed-urls and return the list of managed URL entries."""
    api_endpoint = '/api/ttp/url/get-all-managed-urls'
    payload = {'data': []}  # type: Dict[str, list]
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')
def create_managed_url():
    """Create a managed URL entry from the command arguments."""
    args = demisto.args()
    url = args.get('url').encode('utf-8')
    comment = args.get('comment')
    if comment:
        comment = comment.encode('utf-8')
    url_req_obj = {
        'comment': comment,
        'disableRewrite': args.get('disableRewrite').encode('utf-8'),
        'url': url,
        'disableUserAwareness': args.get('disableUserAwareness').encode('utf-8'),
        'disableLogClick': args.get('disableLogClick').encode('utf-8'),
        'action': args.get('action').encode('utf-8'),
        'matchType': args.get('matchType').encode('utf-8')
    }
    managed_url = create_managed_url_request(url_req_obj)
    managed_urls_context = [{
        'Domain': managed_url.get('domain'),
        'disableLogClick': managed_url.get('disableLogClick'),
        'Action': managed_url.get('action'),
        'Path': managed_url.get('path'),
        'matchType': managed_url.get('matchType'),
        'ID': managed_url.get('id'),
        'disableRewrite': managed_url.get('disableRewrite')
    }]
    context = {'Mimecast.URL(val.ID && val.ID == obj.ID)': managed_urls_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': {},
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': 'Managed URL {} created successfully!'.format(url),
        'EntryContext': context
    }
def create_managed_url_request(url_obj):
    """POST to create-managed-url and return the created entry."""
    api_endpoint = '/api/ttp/url/create-managed-url'
    payload = {'data': [url_obj]}
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0]
def list_messages():
    """List archived messages in a mailbox, optionally filtered by subject."""
    headers = ['Subject', 'Size', 'Recieved Date', 'From', 'Attachment Count', 'Message ID']
    args = demisto.args()
    search_params = {}
    # The API can't take null values for keys, so only send what was provided.
    for arg_name, param_name in (('mailbox', 'mailbox'), ('view', 'view'),
                                 ('endTime', 'end'), ('startTime', 'start')):
        value = args.get(arg_name, '').encode('utf-8')
        if value:
            search_params[param_name] = value
    subject = args.get('subject')
    contents = []
    messages_context = []
    for message in list_messages_request(search_params):
        if subject == message.get('subject') or not subject:
            contents.append({
                'Message ID': message.get('id'),
                'Subject': message.get('subject'),
                'Size': message.get('size'),
                'Recieved Date': message.get('received'),
                'From': message.get('from').get('emailAddress'),
                'Attachment Count': message.get('attachmentCount')
            })
            messages_context.append({
                'Subject': message.get('subject'),
                'ID': message.get('id'),
                'Size': message.get('size'),
                'RecievedDate': message.get('received'),
                'From': message.get('from').get('emailAddress'),
                'AttachmentCount': message.get('attachmentCount')
            })
    context = {'Mimecast.Message(val.ID && val.ID == obj.ID)': messages_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Mimecast messages list', contents, headers),
        'EntryContext': context
    }
def list_messages_request(search_params):
    """POST to archive/get-message-list with the provided search filters."""
    api_endpoint = '/api/archive/get-message-list'
    payload = {
        'meta': {'pagination': {}},
        'data': [search_params]
    }
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')
def get_url_logs():
    """Fetch TTP URL click logs, optionally filtered by date range and scan result."""
    args = demisto.args()
    result_number = args.get('resultsNumber', '').encode('utf-8')
    limit = int(args.get('limit', 100))
    search_params = {}
    for arg_name, param_name in (('fromDate', 'from'), ('toDate', 'to'),
                                 ('resultType', 'scanResult')):
        value = args.get(arg_name, '').encode('utf-8')
        if value:
            search_params[param_name] = value
    url_logs = get_url_logs_request(search_params, result_number)
    if limit:
        url_logs = url_logs[:limit]
    contents = []
    url_logs_context = []
    for url_log in url_logs:
        contents.append({
            'Action': url_log.get('action'),
            'Admin Override': url_log.get('adminOverride'),
            'Category': url_log.get('category'),
            'Date': url_log.get('date'),
            'Route': url_log.get('route'),
            'Scan Result': url_log.get('scanResult'),
            'URL': url_log.get('url'),
            'User Awareness Action': url_log.get('userAwarenessAction'),
            'User Email Address': url_log.get('userEmailAddress'),
            'User Override': url_log.get('userOverride')
        })
        url_logs_context.append({
            'Action': url_log.get('action'),
            'AdminOverride': url_log.get('adminOverride'),
            'Category': url_log.get('category'),
            'Date': url_log.get('date'),
            'Route': url_log.get('route'),
            'Result': url_log.get('scanResult'),
            'URL': url_log.get('url'),
            'Awareness': url_log.get('userAwarenessAction'),
            'Address': url_log.get('userEmailAddress'),
            'UserOverride': url_log.get('userOverride')
        })
    context = {'Mimecast.UrlLog': url_logs_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Mimecast URL logs: ', contents, []),
        'EntryContext': context
    }
def get_url_logs_request(search_params, result_number=None):
    """POST to ttp/url/get-logs; result_number (when given) sets the page size."""
    api_endpoint = '/api/ttp/url/get-logs'
    pagination = {'page_size': result_number} if result_number else {}  # type: Dict[Any, Any]
    payload = {
        'meta': {'pagination': pagination},
        'data': [search_params]
    }
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0].get('clickLogs')
def get_attachment_logs():
    """Fetch TTP attachment-protection logs, optionally filtered by date/result."""
    args = demisto.args()
    result_number = args.get('resultsNumber', '').encode('utf-8')
    limit = int(args.get('limit', 100))
    search_params = {}
    for arg_name, param_name in (('fromDate', 'from'), ('toDate', 'to'),
                                 ('resultType', 'result')):
        value = args.get(arg_name, '').encode('utf-8')
        if value:
            search_params[param_name] = value
    attachment_logs = get_attachment_logs_request(search_params, result_number)
    if limit:
        attachment_logs = attachment_logs[:limit]
    contents = []
    attachment_logs_context = []
    for attachment_log in attachment_logs:
        contents.append({
            'Result': attachment_log.get('result'),
            'Date': attachment_log.get('date'),
            'Sender Address': attachment_log.get('senderAddress'),
            'File Name': attachment_log.get('fileName'),
            'Action': attachment_log.get('actionTriggered'),
            'Route': attachment_log.get('route'),
            'Details': attachment_log.get('details'),
            'Recipient Address': attachment_log.get('recipientAddress'),
            'File Type': attachment_log.get('fileType')
        })
        attachment_logs_context.append({
            'Result': attachment_log.get('result'),
            'Date': attachment_log.get('date'),
            'Sender': attachment_log.get('senderAddress'),
            'FileName': attachment_log.get('fileName'),
            'Action': attachment_log.get('actionTriggered'),
            'Route': attachment_log.get('route'),
            'Details': attachment_log.get('details'),
            'Recipient': attachment_log.get('recipientAddress'),
            'FileType': attachment_log.get('fileType')
        })
    context = {'Mimecast.AttachmentLog': attachment_logs_context}
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Mimecast attachment logs: ', contents, []),
        'EntryContext': context
    }
def get_attachment_logs_request(search_params, result_number=None):
    """POST to ttp/attachment/get-logs; result_number (when given) sets the page size."""
    api_endpoint = '/api/ttp/attachment/get-logs'
    pagination = {'page_size': result_number} if result_number else {}  # type: Dict[Any, Any]
    payload = {
        'meta': {'pagination': pagination},
        'data': [search_params]
    }
    response = http_request('POST', api_endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0].get('attachmentLogs')
def get_impersonation_logs():
    """Demisto command: list Mimecast TTP impersonation-protect logs.

    Reads filter arguments from ``demisto.args()``, queries the API via
    get_impersonation_logs_request, and returns a Demisto entry with a
    markdown table plus ``Mimecast.Impersonation`` context.
    """
    headers = []  # type: List[Any]
    contents = []
    context = {}
    impersonation_logs_context = []
    search_params = {}
    # All string args are utf-8 encoded (Python 2 convention used file-wide).
    result_number = demisto.args().get('resultsNumber', '').encode('utf-8')
    from_date = demisto.args().get('fromDate', '').encode('utf-8')
    to_date = demisto.args().get('toDate', '').encode('utf-8')
    tagged_malicious = demisto.args().get('taggedMalicious', '').encode('utf-8')
    search_field = demisto.args().get('searchField', '').encode('utf-8')
    query = demisto.args().get('query', '').encode('utf-8')
    identifiers = argToList(demisto.args().get('identifiers', '').encode('utf-8'))
    actions = argToList(demisto.args().get('actions', '').encode('utf-8'))
    limit = int(demisto.args().get('limit', 100))
    # Only forward filters the caller actually provided.
    if from_date:
        search_params['from'] = from_date
    if to_date:
        search_params['to'] = to_date
    if tagged_malicious:
        search_params['taggedMalicious'] = tagged_malicious
    if search_field:
        search_params['searchField'] = search_field
    if query:
        search_params['query'] = query
    if identifiers:
        search_params['identifiers'] = identifiers
    if actions:
        search_params['actions'] = actions
    impersonation_logs, result_count = get_impersonation_logs_request(search_params, result_number)
    # Client-side cap on the number of rows rendered/returned.
    if limit:
        impersonation_logs = impersonation_logs[:limit]
    # Build two parallel views of each log: human-readable (spaced keys)
    # and machine context (CamelCase keys).
    for impersonation_log in impersonation_logs:
        contents.append({
            'Result Count': result_count,
            'Hits': impersonation_log.get('hits'),
            'Malicious': impersonation_log.get('taggedMalicious'),
            'Sender IP': impersonation_log.get('senderIpAddress'),
            'Sender Address': impersonation_log.get('senderAddress'),
            'Subject': impersonation_log.get('subject'),
            'Identifiers': impersonation_log.get('identifiers'),
            'Date': impersonation_log.get('eventTime'),
            'Action': impersonation_log.get('action'),
            'Policy': impersonation_log.get('definition'),
            'ID': impersonation_log.get('id'),
            'Recipient Address': impersonation_log.get('recipientAddress'),
            'External': impersonation_log.get('taggedExternal')
        })
        impersonation_logs_context.append({
            'ResultCount': result_count,
            'Hits': impersonation_log.get('hits'),
            'Malicious': impersonation_log.get('taggedMalicious'),
            'SenderIP': impersonation_log.get('senderIpAddress'),
            'SenderAddress': impersonation_log.get('senderAddress'),
            'Subject': impersonation_log.get('subject'),
            'Identifiers': impersonation_log.get('identifiers'),
            'Date': impersonation_log.get('eventTime'),
            'Action': impersonation_log.get('action'),
            'Policy': impersonation_log.get('definition'),
            'ID': impersonation_log.get('id'),
            'RecipientAddress': impersonation_log.get('recipientAddress'),
            'External': impersonation_log.get('taggedExternal')
        })
    context['Mimecast.Impersonation'] = impersonation_logs_context
    results = {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Mimecast impersonation logs: ', contents, headers),
        'EntryContext': context
    }
    return results
def get_impersonation_logs_request(search_params, result_number=None):
    """POST the TTP impersonation-log query to Mimecast.

    Args:
        search_params (dict): Filter fields for the query.
        result_number: Optional page size; pagination is empty when falsy.

    Returns:
        tuple: (``impersonationLogs`` list, ``resultCount``) from the first
        data element of the response.
    """
    endpoint = '/api/ttp/impersonation/get-logs'
    page_meta = {'page_size': result_number} if result_number else {}  # type: Dict[Any, Any]
    payload = {
        'meta': {'pagination': page_meta},
        'data': [search_params],
    }
    response = http_request('POST', endpoint, str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    first = response.get('data')[0]
    return first.get('impersonationLogs'), first.get('resultCount')
def fetch_incidents():
    """Pull malicious URL / attachment / impersonation logs as Demisto incidents.

    Keeps a 'time' watermark in demisto.getLastRun(); on first run the window
    starts FETCH_DELTA hours back. Each enabled source (FETCH_URL,
    FETCH_ATTACHMENTS, FETCH_IMPERSONATIONS) is queried from the watermark
    forward, and the watermark is advanced past the newest event seen.
    """
    last_run = demisto.getLastRun()
    last_fetch = last_run.get('time')
    # handle first time fetch
    if last_fetch is None:
        last_fetch = datetime.now() - timedelta(hours=FETCH_DELTA)
        last_fetch_date_time = last_fetch.strftime("%Y-%m-%dT%H:%M:%S") + '+0000'
    else:
        last_fetch = datetime.strptime(last_fetch, '%Y-%m-%dT%H:%M:%SZ')
        last_fetch_date_time = last_fetch.strftime("%Y-%m-%dT%H:%M:%S") + '+0000'
    # current_fetch stays fixed at the window start; last_fetch advances as
    # newer events are seen so the next run starts after them.
    current_fetch = last_fetch
    incidents = []  # type: List[Any]
    if FETCH_URL:
        search_params = {
            'from': last_fetch_date_time,
            'scanResult': 'malicious'
        }
        url_logs = get_url_logs_request(search_params)
        for url_log in url_logs:
            incident = url_to_incident(url_log)
            temp_date = datetime.strptime(incident['occurred'], '%Y-%m-%dT%H:%M:%SZ')
            # update last run
            if temp_date > last_fetch:
                last_fetch = temp_date + timedelta(seconds=1)
            # avoid duplication due to weak time query
            if temp_date > current_fetch:
                incidents.append(incident)
    if FETCH_ATTACHMENTS:
        search_params = {
            'from': last_fetch_date_time,
            'result': 'malicious'
        }
        attachment_logs = get_attachment_logs_request(search_params)
        for attachment_log in attachment_logs:
            incident = attachment_to_incident(attachment_log)
            temp_date = datetime.strptime(incident['occurred'], '%Y-%m-%dT%H:%M:%SZ')
            # update last run
            if temp_date > last_fetch:
                last_fetch = temp_date + timedelta(seconds=1)
            # avoid duplication due to weak time query
            if temp_date > current_fetch:
                incidents.append(incident)
    if FETCH_IMPERSONATIONS:
        search_params = {
            'from': last_fetch_date_time,
            'taggedMalicious': True
        }
        impersonation_logs, _ = get_impersonation_logs_request(search_params)
        for impersonation_log in impersonation_logs:
            incident = impersonation_to_incident(impersonation_log)
            temp_date = datetime.strptime(incident['occurred'], '%Y-%m-%dT%H:%M:%SZ')
            # update last run
            if temp_date > last_fetch:
                last_fetch = temp_date + timedelta(seconds=1)
            # avoid duplication due to weak time query
            if temp_date > current_fetch:
                incidents.append(incident)
    # Persist the advanced watermark (second precision, trailing 'Z').
    demisto.setLastRun({'time': last_fetch.isoformat().split('.')[0] + 'Z'})
    demisto.incidents(incidents)
def url_to_incident(url_log):
    """Convert a malicious-URL log entry into a Demisto incident dict."""
    return {
        'name': 'Mimecast malicious URL: ' + url_log.get('url'),
        'occurred': url_log.get('date').replace('+0000', 'Z'),
        'rawJSON': json.dumps(url_log),
    }
def attachment_to_incident(attachment_log):
    """Convert a malicious-attachment log entry into a Demisto incident dict."""
    return {
        'name': 'Mimecast malicious attachment: ' + attachment_log.get('fileName'),
        'occurred': attachment_log.get('date').replace('+0000', 'Z'),
        'rawJSON': json.dumps(attachment_log),
    }
def impersonation_to_incident(impersonation_log):
    """Convert a malicious-impersonation log entry into a Demisto incident dict."""
    return {
        'name': 'Mimecast malicious impersonation: ' + impersonation_log.get('subject'),
        'occurred': impersonation_log.get('eventTime').replace('+0000', 'Z'),
        'rawJSON': json.dumps(impersonation_log),
    }
def discover():
    """Demisto command: report the authentication options for the account."""
    response = discover_request()
    contents = [{
        'Authentication Types': response.get('authenticate'),
        'Email Address': response.get('emailAddress'),
        'Email Token': response.get('emailToken')
    }]
    auth_entry = {
        'AuthenticationTypes': response.get('authenticate'),
        'EmailAddress': response.get('emailAddress'),
        'EmailToken': response.get('emailToken')
    }
    entry_context = {
        'Mimecast.Authentication(val.EmailAddress && val.EmailAddress === obj.EmailAddress)': auth_entry
    }
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Mimecast Authentication Information', contents, []),
        'EntryContext': entry_context
    }
def discover_request():
    """Call the unauthenticated discover-authentication endpoint.

    Errors out when EMAIL_ADDRESS is not configured or when the API reports
    a failure; otherwise returns the first data element of the response.
    """
    if not EMAIL_ADDRESS:
        return_error('In order to discover account\'s auth types, account\'s email is required.')
    payload = {
        'data': [{
            'emailAddress': EMAIL_ADDRESS.encode('utf-8')
        }]
    }
    response = http_request('POST', '/api/login/discover-authentication', str(payload), {}, user_auth=False)
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0]
def refresh_token():
    """Demisto command: extend the current access token's validity window.

    Returns a Demisto entry whose Contents is the raw API response.
    """
    contents = refresh_token_request()
    results = {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        # Typo fix in user-facing message: 'succesfully' -> 'successfully'.
        'HumanReadable': 'Token has been refreshed successfully and is valid for the next 3 days'
    }
    return results
def refresh_token_request():
    """POST /api/login/login with email + access key to refresh token validity.

    Errors out (via return_error) when EMAIL_ADDRESS or ACCESS_KEY is unset,
    or when the API reports a failure; returns the first data element.
    """
    # Typo fixes in user-facing messages: 'validty' -> 'validity'.
    if not EMAIL_ADDRESS:
        return_error('In order to refresh a token validity duration, account\'s email is required.')
    if not ACCESS_KEY:
        return_error('In order to refresh a token validity duration, account\'s access key is required.')
    email = EMAIL_ADDRESS.encode('utf-8')
    access_key = ACCESS_KEY.encode('utf-8')
    # Setup required variables
    api_endpoint = '/api/login/login'
    payload = {
        'data': [{
            'userName': email,
            'accessKey': access_key
        }]
    }
    response = http_request('POST', api_endpoint, str(payload), {}, user_auth=False)
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0]
def login():
    """Demisto command: obtain new access/secret binding keys for the account."""
    response = login_request()
    contents = [{
        'Access Key': response.get('accessKey'),
        'Secret Key': response.get('secretKey')
    }]
    table = tableToMarkdown('Mimecast authentication details \n Tokens are valid for 3 days', contents,
                            ['Access Key', 'Secret Key'])
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': contents,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': table
    }
def login_request():
    """POST /api/login/login with the account email to issue new keys.

    Errors out when EMAIL_ADDRESS is unset or when the API reports a
    failure; returns the first data element of the response.
    """
    if not EMAIL_ADDRESS:
        # Message fix: this is the login flow, not token refresh (the old
        # text was copy-pasted from refresh_token_request and had a typo).
        return_error('In order to login, account\'s email address is required.')
    email = EMAIL_ADDRESS.encode('utf-8')
    # Setup required variables
    api_endpoint = '/api/login/login'
    payload = {
        'data': [{
            'userName': email
        }]
    }
    response = http_request('POST', api_endpoint, str(payload), {}, user_auth=False)
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0]
def get_message():
    """Demisto command: fetch a message's metadata and/or raw content.

    The 'part' argument selects metadata, message body, or both ('all');
    each selected part becomes its own result entry.
    """
    results = []
    args = demisto.args()
    message_id = args.get('messageID').encode('utf-8')
    message_context = args.get('context').encode('utf-8')
    message_type = args.get('type').encode('utf-8')
    message_part = args.get('part')
    if message_part in ('all', 'metadata'):
        contents, metadata_context = get_message_metadata(message_id)
        results.append({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': contents,
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown('Mimecast message details', contents, removeNull=True),
            'EntryContext': {'Mimecast.Message(val.ID && val.ID === obj.ID)': metadata_context}
        })
    if message_part in ('all', 'message'):
        email_file = get_message_body_content_request(message_id, message_context, message_type)
        results.append(fileResult(message_id, email_file))
    return results
def get_message_body_content_request(message_id, message_context, message_type):
    """Download a raw message part from the Mimecast archive.

    Returns the raw response body (``response.content``).
    """
    payload = {
        'data': [{
            'id': message_id,
            'type': message_type,
            'context': message_context
        }]
    }
    response = http_request('POST', '/api/archive/get-message-part', str(payload), is_file=True)
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response.content
def get_message_metadata(message_id):
    """Fetch archived message details and shape them for display and context.

    Returns:
        tuple(dict, dict): (human-readable contents with spaced keys,
        machine context with CamelCase keys) built from the same response.
    """
    contents = {}  # type: Dict[Any, Any]
    context = {}  # type: Dict[Any, Any]
    message = get_message_metadata_request(message_id)
    # Flatten recipients into parallel context/contents lists.
    receivers = message.get('to', [])
    to_context = []
    to_contents = []
    for receiver in receivers:
        to_context.append({
            'EmailAddress': receiver.get('emailAddress')
        })
        to_contents.append(
            receiver.get('emailAddress')
        )
    # Same flattening for CC recipients.
    copies = message.get('cc', [])
    cc_context = []
    cc_contents = []
    for copy in copies:
        cc_context.append({
            'EmailAddress': copy.get('emailAddress')
        })
        cc_contents.append(
            copy.get('emailAddress')
        )
    # Headers: context keeps name/values pairs; contents is one string per header.
    response_headers = message.get('headers', [])
    headers_contents = []
    headers_context = []
    for header in response_headers:
        values = header.get('values')
        values = [value.encode('utf-8') for value in values]
        headers_context.append({
            'Name': header.get('name'),
            'Values': values
        })
        headers_contents.append(
            'Name: {}, Values: {}'.format(str(header.get('name')), str(values))
        )
    # Attachments: structured entries for context, summary strings for contents.
    attachments = message.get('attachments', [])
    attachments_context = []
    attachments_contents = []
    for attachment in attachments:
        attachments_context.append({
            'FileName': attachment.get('filename'),
            'SHA256': attachment.get('sha256'),
            'ID': attachment.get('id'),
            'Size': attachment.get('size')
        })
        attachments_contents.append(
            'FileName: {}, SHA256: {}, ID: {}, Size: {}'.format(str(attachment.get('filename')),
                                                                str(attachment.get('sha256')),
                                                                str(attachment.get('id')), str(attachment.get('size')))
        )
    contents = {
        'Subject': message.get('subject'),
        'Header Date': message.get('headerDate'),
        'Size': message.get('size'),
        'From': message.get('from', {}).get('emailAddress'),
        'To': to_contents,
        'Reply To': message.get('replyTo', {}).get('emailAddress'),
        'CC': cc_contents,
        'Envelope From': message.get('envelopeFrom', {}).get('emailAddress'),
        'Headers': headers_contents,
        'Attachments': attachments_contents,
        'Processed': message.get('processed'),
        'Has Html Body': message.get('hasHtmlBody'),
        'ID': message.get('id')
    }
    context = {
        'Subject': message.get('subject'),
        'HeaderDate': message.get('headerDate'),
        'Size': message.get('size'),
        'From': message.get('from', {}).get('emailAddress'),
        'To': to_context,
        'ReplyTo': message.get('replyTo', {}).get('emailAddress'),
        'CC': cc_context,
        'EnvelopeFrom': message.get('envelopeFrom', {}).get('emailAddress'),
        'Headers': headers_context,
        'Attachments': attachments_context,
        'Processed': message.get('processed'),
        'HasHtmlBody': message.get('hasHtmlBody'),
        'ID': message.get('id')
    }
    return contents, context
def get_message_metadata_request(message_id):
    """Fetch archive message details for ``message_id`` (first data element)."""
    payload = {
        'data': [{
            'id': message_id
        }]
    }
    response = http_request('POST', '/api/archive/get-message-detail', str(payload))
    if response.get('fail'):
        return_error(json.dumps(response.get('fail')[0].get('errors')))
    return response.get('data')[0]
def download_attachment():
    """Demisto command: download an archived attachment as a war-room file."""
    attachment_id = demisto.args().get('attachmentID').encode('utf-8')
    return fileResult(attachment_id, download_attachment_request(attachment_id))
def download_attachment_request(attachment_id):
    """Fetch the raw bytes of an archived attachment by its ID."""
    payload = {
        'data': [{
            'id': attachment_id
        }]
    }
    response = http_request('POST', '/api/archive/get-file', str(payload), is_file=True)
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response.content
def find_groups():
    """Demisto command: search directory groups and output table + context."""
    api_response = create_find_groups_request()
    return_outputs(
        find_groups_api_response_to_markdown(api_response),
        find_groups_api_response_to_context(api_response),
        api_response,
    )
def create_find_groups_request():
    """Build and send the find-groups query from the command arguments.

    Only filters that were actually supplied (query_string, query_source,
    limit) are included in the request body.
    """
    args = demisto.args()
    query_string = args.get('query_string', '').encode('utf-8')
    query_source = args.get('query_source', '').encode('utf-8')
    limit = args.get('limit')
    meta = dict()  # type: Dict[str, Dict[str, int]]
    data = dict()  # type: Dict[str, Dict[str, str]]
    if limit:
        meta['pagination'] = {'pageSize': int(limit)}
    if query_string:
        data['query'] = query_string
    if query_source:
        data['source'] = query_source
    payload = {
        'meta': meta,
        'data': [data]
    }
    response = http_request('POST', '/api/directory/find-groups', str(payload))
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response
def find_groups_api_response_to_markdown(api_response):
    """Render the find-groups response as markdown (or a zero-hit note)."""
    found = api_response.get('meta', {}).get('pagination', {}).get('pageSize', 0)
    query_string = demisto.args().get('query_string', '')
    query_source = demisto.args().get('query_source', '')
    if not found:
        md = '### Found 0 groups'
        if query_string:
            md += '\n#### query: ' + query_string
        if query_source:
            md += '\n#### source: ' + query_source
        return md
    title = 'Found ' + str(found) + ' groups:'
    metadata_parts = []
    if query_string:
        metadata_parts.append('#### query: ' + query_string)
    if query_source:
        metadata_parts.append('#### source: ' + query_source)
    groups_list = [{
        'Name': group['description'],
        'Source': group['source'],
        'Group ID': group['id'],
        'Number of users': group['userCount'],
        'Parent ID': group['parentId'],
        'Number of child groups': group['folderCount']
    } for group in api_response.get('data', [])[0]['folders']]
    return tableToMarkdown(title, groups_list,
                           ['Name', 'Source', 'Group ID', 'Number of users', 'Parent ID', 'Number of child groups'],
                           metadata='\n'.join(metadata_parts))
def find_groups_api_response_to_context(api_response):
    """Map the find-groups response folders into Mimecast.Group context."""
    groups = [{
        'Name': folder['description'],
        'Source': folder['source'],
        'ID': folder['id'],
        'NumberOfUsers': folder['userCount'],
        'ParentID': folder['parentId'],
        'NumberOfChildGroups': folder['folderCount']
    } for folder in api_response['data'][0]['folders']]
    return {'Mimecast.Group(val.ID && val.ID == obj.ID)': groups}
def get_group_members():
    """Demisto command: list the members of a directory group."""
    api_response = create_get_group_members_request()
    return_outputs(
        group_members_api_response_to_markdown(api_response),
        group_members_api_response_to_context(api_response),
        api_response,
    )
def create_get_group_members_request(group_id=-1, limit=100):
    """Query get-group-members; command args may override the defaults.

    Args:
        group_id: Fallback group id when not present in demisto.args().
        limit: Fallback page size when not present in demisto.args().
    """
    group_id = demisto.args().get('group_id', group_id).encode('utf-8')
    limit = demisto.args().get('limit', limit)
    meta = dict()  # type: Dict[str, Dict[str, int]]
    data = dict()  # type: Dict[str, Dict[str, str]]
    if limit:
        meta['pagination'] = {'pageSize': int(limit)}
    data['id'] = group_id
    payload = {
        'meta': meta,
        'data': [data]
    }
    response = http_request('POST', '/api/directory/get-group-members', str(payload))
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response
def group_members_api_response_to_markdown(api_response):
    """Render group members as a markdown table (or a zero-hit note).

    Drops a dead ``+ ''`` concatenation from the zero-result branch;
    output is unchanged.
    """
    num_users_found = api_response.get('meta', {}).get('pagination', {}).get('pageSize', 0)
    group_id = demisto.args().get('group_id', '')
    if not num_users_found:
        return 'Found 0 users for group ID: ' + group_id
    title = 'Found ' + str(num_users_found) + ' users for group ID: ' + group_id
    users_list = [{
        'Name': user['name'],
        'Email address': user['emailAddress'],
        'Domain': user['domain'],
        'Type': user['type'],
        'Internal user': user['internal']
    } for user in api_response['data'][0]['groupMembers']]
    return tableToMarkdown(title, users_list,
                           ['Name', 'Email address', 'Domain', 'Type', 'Internal user'])
def add_users_under_group_in_context_dict(users_list, group_id):
    """Attach ``users_list`` to the matching Mimecast.Group context entry.

    When the group already exists in the investigation context its 'Users'
    list is replaced in place; otherwise a minimal new entry is returned.
    """
    cached = demisto.context()
    if cached and 'Mimecast' in cached:
        if 'Group' in cached['Mimecast']:
            groups = cached['Mimecast']['Group']
            for group in groups:
                if group['ID'] == group_id:
                    group['Users'] = users_list
            return groups
    return [{
        'ID': group_id,
        'Users': users_list
    }]
def group_members_api_response_to_context(api_response, group_id=-1):
    """Convert group members to context entries nested under their group."""
    group_id = demisto.args().get('group_id', group_id)
    users_list = [{
        'Name': user['name'],
        'EmailAddress': user['emailAddress'],
        'Domain': user['domain'],
        'Type': user['type'],
        'InternalUser': user['internal'],
        'IsRemoved': False
    } for user in api_response['data'][0]['groupMembers']]
    groups_after_update = add_users_under_group_in_context_dict(users_list, group_id)
    return {'Mimecast.Group(val.ID && val.ID == obj.ID)': groups_after_update}
def add_remove_member_to_group(action_type):
    """Demisto command: add ('add') or remove (anything else) a group member."""
    if action_type == 'add':
        endpoint = '/api/directory/add-group-member'
    else:
        endpoint = '/api/directory/remove-group-member'
    api_response = create_add_remove_group_member_request(endpoint)
    return_outputs(
        add_remove_api_response_to_markdown(api_response, action_type),
        add_remove_api_response_to_context(api_response, action_type),
        api_response,
    )
def create_add_remove_group_member_request(api_endpoint):
    """POST a group-membership change built from the command arguments.

    The member may be identified by email address, domain, or both.
    """
    args = demisto.args()
    email = args.get('email_address', '').encode('utf-8')
    domain = args.get('domain_address', '').encode('utf-8')
    data = {
        'id': args.get('group_id', '').encode('utf-8'),
    }
    if email:
        data['emailAddress'] = email
    if domain:
        data['domain'] = domain
    payload = {
        'data': [data]
    }
    response = http_request('POST', api_endpoint, str(payload))
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response
def add_remove_api_response_to_markdown(api_response, action_type):
    """Build the human-readable line for an add/remove group-member call.

    Fixes the 'had been added' grammar so both branches read consistently
    ('has been ...').
    """
    address_modified = api_response['data'][0]['emailAddress']
    group_id = api_response['data'][0]['folderId']
    if action_type == 'add':
        return address_modified + ' has been added to group ID ' + group_id
    return address_modified + ' has been removed from group ID ' + group_id
def change_user_status_removed_in_context(user_info, group_id):
    """Flag ``user_info`` as removed inside the cached group context.

    NOTE(review): assumes every cached group entry carries a 'Users' list --
    confirm against how group entries are written elsewhere in this file.
    """
    cached = demisto.context()
    if cached and 'Mimecast' in cached:
        if 'Group' in cached['Mimecast']:
            groups = cached['Mimecast']['Group']
            for group in groups:
                if group['ID'] == group_id:
                    for user in group['Users']:
                        if user['EmailAddress'] == user_info['EmailAddress']:
                            user['IsRemoved'] = True
            return groups
    return [{
        'ID': group_id,
        'Users': [user_info]
    }]
def add_remove_api_response_to_context(api_response, action_type):
    """Build Mimecast.Group context after an add/remove member operation."""
    group_id = api_response['data'][0]['folderId']
    if action_type == 'add':
        # The add response lacks full member details, so re-query the member
        # list to populate the context accurately.
        members_response = create_get_group_members_request(group_id=group_id)
        return group_members_api_response_to_context(members_response, group_id=group_id)
    removed_user = {
        'EmailAddress': api_response['data'][0]['emailAddress'],
        'IsRemoved': True
    }
    groups_after_update = change_user_status_removed_in_context(removed_user, group_id)
    return {'Mimecast.Group(val.ID && val.ID == obj.ID)': groups_after_update}
def create_group():
    """Demisto command: create a new directory group."""
    api_response = create_group_request()
    return_outputs(
        create_group_api_response_to_markdown(api_response),
        create_group_api_response_to_context(api_response),
        api_response,
    )
def create_group_request():
    """POST create-group with the requested name and optional parent."""
    group_name = demisto.args().get('group_name', '').encode('utf-8')
    parent_id = demisto.args().get('parent_id', '-1').encode('utf-8')
    data = {
        'description': group_name,
    }
    # '-1' is the sentinel for "no parent"; only send parentId otherwise.
    if parent_id != '-1'.encode('utf-8'):
        data['parentId'] = parent_id
    payload = {
        'data': [data]
    }
    response = http_request('POST', '/api/directory/create-group', str(payload))
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response
def create_group_api_response_to_markdown(api_response):
    """Summarize a freshly created group as a small markdown table."""
    created = api_response['data'][0]
    title = created['description'] + ' has been created'
    group_info = {
        'Group Source': created['source'],
        'Group ID': created['id']
    }
    return tableToMarkdown(title, group_info, ['Group Source', 'Group ID'])
def create_group_api_response_to_context(api_response):
    """Map a newly created group into a Mimecast.Group context entry.

    User/child counts are zero because a fresh group is always empty.
    """
    created = api_response['data'][0]
    group_entry = {
        'Name': created['description'],
        'Source': created['source'],
        'ID': created['id'],
        'NumberOfUsers': 0,
        'ParentID': created['parentId'],
        'NumberOfChildGroups': 0
    }
    return {'Mimecast.Group(val.Name && val.Name == obj.Name)': group_entry}
def update_group():
    """Demisto command: rename a group and/or move it under a new parent."""
    api_response = create_update_group_request()
    return_outputs(
        update_group_api_response_to_markdown(api_response),
        update_group_api_response_to_context(api_response),
        api_response,
    )
def create_update_group_request():
    """POST update-group; name/parent are only sent when provided."""
    args = demisto.args()
    group_name = args.get('group_name', '').encode('utf-8')
    parent_id = args.get('parent_id', '').encode('utf-8')
    data = {
        'id': args.get('group_id', '').encode('utf-8')
    }
    if group_name:
        data['description'] = group_name
    if parent_id:
        data['parentId'] = parent_id
    payload = {
        'data': [data]
    }
    response = http_request('POST', '/api/directory/update-group', str(payload))
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response
def update_group_api_response_to_markdown(api_response):
    """Return the confirmation line for an updated group."""
    return api_response['data'][0]['description'] + ' has been updated'
def update_group_api_response_to_context(api_response):
    """Map the updated group fields into a Mimecast.Group context entry."""
    updated = api_response['data'][0]
    return {'Mimecast.Group(val.ID && val.ID == obj.ID)': {
        'ID': updated['id'],
        'Name': updated['description'],
        'ParentID': updated['parentId']
    }}
def create_mimecast_incident():
    """Demisto command: open a Mimecast remediation incident."""
    api_response = create_mimecast_incident_request()
    return_outputs(
        mimecast_incident_api_response_to_markdown(api_response, 'create'),
        mimecast_incident_api_response_to_context(api_response),
        api_response,
    )
def create_mimecast_incident_request():
    """POST a remediation-incident creation request from command arguments.

    When searching by message id, the id is wrapped in angle brackets if
    missing; when searching by hash, the hash is validated first.
    """
    args = demisto.args()
    reason = args.get('reason', '').encode('utf-8')
    start_date = args.get('start_date', '').encode('utf-8')
    end_date = args.get('end_date', '').encode('utf-8')
    search_by = args.get('search_by', 'hash').encode('utf-8')
    hash_or_message_id = args.get('hash_message_id', '').encode('utf-8')
    if search_by == 'hash':
        # Validates the provided hash value.
        get_hash_type(hash_or_message_id)
    else:
        if not hash_or_message_id.startswith('<'):
            hash_or_message_id = '<{}'.format(hash_or_message_id)
        if not hash_or_message_id.endswith('>'):
            hash_or_message_id = '{}>'.format(hash_or_message_id)
    data = {
        'reason': reason,
        'hashOrMessageId': hash_or_message_id,
        'searchBy': search_by
    }
    if start_date:
        data['start'] = start_date
    if end_date:
        data['end'] = end_date
    payload = {
        'data': [data]
    }
    response = http_request('POST', '/api/ttp/remediation/create', str(payload))
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response
def get_mimecast_incident():
    """Demisto command: fetch an existing remediation incident by ID."""
    api_response = get_mimecast_incident_request()
    return_outputs(
        mimecast_incident_api_response_to_markdown(api_response, 'get'),
        mimecast_incident_api_response_to_context(api_response),
        api_response,
    )
def get_mimecast_incident_request():
    """POST get-incident for the 'incident_id' command argument."""
    incident_id = demisto.args().get('incident_id', '').encode('utf-8')
    payload = {
        'data': [{
            'id': incident_id
        }]
    }
    response = http_request('POST', '/api/ttp/remediation/get-incident', str(payload))
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response
def mimecast_incident_api_response_to_markdown(api_response, action_type):
    """Render a remediation-incident API response as a markdown summary.

    Args:
        api_response (dict): Raw response of the create/get incident endpoints.
        action_type (str): 'create' for a newly created incident; anything
            else renders the 'found' headline.

    Returns:
        str: Markdown with incident metadata and a search-criteria table.
    """
    incident = api_response['data'][0]
    incident_id = incident['id']
    if action_type == 'create':
        md = 'Incident ' + incident_id + ' has been created\n'
    else:
        md = 'Incident ' + incident_id + ' has been found\n'
    # The sibling context builder reads these counters from lowercase keys
    # ('identified'/'successful'/'failed'/'restored') on the same response,
    # so the previous capitalized keys would KeyError; the counters are also
    # numbers, so stringify before concatenating.
    md_metadata = '####Code: ' + str(incident['code'])
    md_metadata += '\n####Type: ' + str(incident['type'])
    md_metadata += '\n####Reason: ' + str(incident['reason'])
    md_metadata += '\n####The number of messages identified based on the search criteria: ' + str(incident['identified'])
    md_metadata += '\n####The number successfully remediated messages: ' + str(incident['successful'])
    md_metadata += '\n####The number of messages that failed to remediate: ' + str(incident['failed'])
    md_metadata += '\n####The number of messages that were restored from the incident: ' + str(incident['restored'])
    messages_table_list = list()
    for message in incident['searchCriteria']:
        message_entry = {
            'From': message['from'],
            'To': message['to'],
            'Start date': datetime.strptime(message['start'], '%Y-%m-%dT%H:%M:%SZ'),
            'End date': datetime.strptime(message['end'], '%Y-%m-%dT%H:%M:%SZ'),
            'Message ID': message['messageId'],
            'File hash': message['fileHash']
        }
        messages_table_list.append(message_entry)
    # Header fix: 'Start date' matches the entry key (was 'Start', which
    # produced an empty column).
    md = tableToMarkdown(md, messages_table_list,
                         ['From', 'To', 'Start date', 'End date', 'Message ID', 'File hash'],
                         metadata=md_metadata)
    return md
def mimecast_incident_api_response_to_context(api_response):
    """Map a remediation-incident response into Mimecast.Incident context."""
    incident = api_response['data'][0]
    criteria = [{
        'From': message['from'],
        'To': message['to'],
        'MessageID': message['messageId'],
        'FileHash': message['fileHash'],
        'StartDate': datetime.strptime(message['start'], '%Y-%m-%dT%H:%M:%SZ'),
        'EndDate': datetime.strptime(message['end'], '%Y-%m-%dT%H:%M:%SZ')
    } for message in incident['searchCriteria']]
    incident_entry = {
        'ID': incident['id'],
        'Code': incident['code'],
        'Type': incident['type'],
        'Reason': incident['reason'],
        'IdentifiedMessages': incident['identified'],
        'SuccessfullyRemediatedMessages': incident['successful'],
        'FailedRemediatedMessages': incident['failed'],
        'MessagesRestored': incident['restored'],
        'LastModified': datetime.strptime(incident['modified'], '%Y-%m-%dT%H:%M:%SZ'),
        'SearchCriteria': criteria
    }
    return {'Mimecast.Incident(val.ID && val.ID == obj.ID)': incident_entry}
def search_file_hash():
    """Demisto command: check whether given file hashes were seen in the account."""
    api_response = create_search_file_hash_request()
    return_outputs(
        search_file_hash_api_response_to_markdown(api_response),
        search_file_hash_api_response_to_context(api_response),
        api_response,
    )
def create_search_file_hash_request():
    """POST search-hash with the list parsed from 'hashes_to_search'."""
    hashes = argToList(demisto.args().get('hashes_to_search').encode('utf-8'))
    payload = {
        'data': [{
            'hashes': hashes
        }]
    }
    response = http_request('POST', '/api/ttp/remediation/search-hash', str(payload))
    if isinstance(response, dict) and response.get('fail'):
        return_error(json.dumps(response.get('fail', [{}])[0].get('errors')))
    return response
def search_file_hash_api_response_to_markdown(api_response):
    """Render detected hashes as a table plus a failed-verification list."""
    detected_rows = [{
        'Hash': status['hash'],
        'Found within the account': status['detected']
    } for status in api_response['data'][0]['hashStatus']]
    md = tableToMarkdown('Hashes detected:\n', detected_rows, ['Hash', 'Found within the account'])
    md += '### Hashes that failed verification:\n'
    failed_hash_list = [str(failed_hash) for failed_hash in api_response['data'][0]['failedHashes']]
    md += str(failed_hash_list)[1:-1] + '\n'
    return md
def search_file_hash_api_response_to_context(api_response):
    """Map detected hashes into Mimecast.Hash context; None when empty."""
    detected = [{
        'HashValue': status['hash'],
        'Detected': status['detected']
    } for status in api_response['data'][0]['hashStatus']]
    if not detected:
        return None
    return {'Mimecast.Hash(val.HashValue && val.HashValue == obj.HashValue)': detected}
def main():
    """COMMANDS MANAGER / SWITCH PANEL.

    Routes the invoked Demisto command to its implementation.  Handlers in
    ``commands_returning_results`` return an entry which is passed to
    ``demisto.results``; handlers in ``commands_with_side_effects`` emit
    their own output (or none) and are simply invoked.
    """
    # Handlers whose return value must be forwarded to demisto.results().
    commands_returning_results = {
        'mimecast-query': query,
        'mimecast-list-blocked-sender-policies': get_policy,
        'mimecast-get-policy': get_policy,
        'mimecast-create-policy': create_policy,
        'mimecast-update-policy': update_policy,
        'mimecast-delete-policy': delete_policy,
        'mimecast-manage-sender': manage_sender,
        'mimecast-list-managed-url': list_managed_url,
        'mimecast-create-managed-url': create_managed_url,
        'mimecast-list-messages': list_messages,
        'mimecast-get-attachment-logs': get_attachment_logs,
        'mimecast-get-url-logs': get_url_logs,
        'mimecast-get-impersonation-logs': get_impersonation_logs,
        'mimecast-url-decode': url_decode,
        'mimecast-discover': discover,
        'mimecast-login': login,
        'mimecast-refresh-token': refresh_token,
        'mimecast-get-message': get_message,
        'mimecast-download-attachments': download_attachment,
    }
    # Handlers that produce their own output (or none) when called.
    commands_with_side_effects = {
        'fetch-incidents': fetch_incidents,
        'mimecast-find-groups': find_groups,
        'mimecast-get-group-members': get_group_members,
        'mimecast-add-group-member': lambda: add_remove_member_to_group('add'),
        'mimecast-remove-group-member': lambda: add_remove_member_to_group('remove'),
        'mimecast-create-group': create_group,
        'mimecast-update-group': update_group,
        'mimecast-create-remediation-incident': create_mimecast_incident,
        'mimecast-get-remediation-incident': get_mimecast_incident,
        'mimecast-search-file-hash': search_file_hash,
    }
    try:
        handle_proxy()
        determine_ssl_usage()
        # Check if token needs to be refreshed; if relevant params are set, refresh.
        if ACCESS_KEY:
            auto_refresh_token()
        command = demisto.command()
        if command == 'test-module':
            # This is the call made when pressing the integration test button.
            test_module()
            demisto.results('ok')
        elif command in commands_returning_results:
            demisto.results(commands_returning_results[command]())
        elif command in commands_with_side_effects:
            commands_with_side_effects[command]()
    except Exception as e:
        # BUGFIX: `e.message` exists only on Python 2 exceptions and raises
        # AttributeError on Python 3 (which the entry guard below supports);
        # str(e) works on both.
        LOG(str(e))
        LOG.print_log()
        return_error(str(e))
# Demisto executes integration code via exec(), so __name__ is the builtins
# module name: '__builtin__' on Python 2, 'builtins' on Python 3.
if __name__ in ('__builtin__', 'builtins'):
    main()
| 34.935177 | 130 | 0.609525 |
be909172491eb801eabd9f9e5fc9232c025a2b1b | 3,348 | py | Python | beerpy/units/gravity.py | MrLeeh/beerpy | 1bbec29a39b01a9d8e54c475de29c768dfd27597 | [
"MIT"
] | null | null | null | beerpy/units/gravity.py | MrLeeh/beerpy | 1bbec29a39b01a9d8e54c475de29c768dfd27597 | [
"MIT"
] | null | null | null | beerpy/units/gravity.py | MrLeeh/beerpy | 1bbec29a39b01a9d8e54c475de29c768dfd27597 | [
"MIT"
] | null | null | null | """
Functions for converting gravity units. Source:
http://www.brewersfriend.com/plato-to-sg-conversion-chart
"""
import os
import pandas as pd
from scipy.interpolate import interp1d
from ..utilities import datadir
FCT_DATA = "data"
FCT_POLY = "poly"
PLATO = "°P"
SPECIFIC_GRAVITY = "kg/m³"
_units = (PLATO, SPECIFIC_GRAVITY)
_f_gravity = os.path.join(datadir(), "gravity.csv")
_df_gravity = pd.read_csv(_f_gravity, sep=',', decimal='.')
_plato = list(_df_gravity.Plato)
_sg = list(_df_gravity.SG)
# polynomical functions
def _poly_pl_to_sg(pl):
"""
Polynom for calculating sg from pl.
"""
return 1.0 + (pl / (258.6 - ((pl / 258.2) * 227.1)))
def _poly_sg_to_pl(sg):
"""
Polynom for calculating pl from sg.
"""
return -616.868 + 1111.14 * sg - 630.272 * sg**2 + 135.997 * sg ** 3
def _data_pl_to_sg(pl):
    """
    Linear interpolation of specific gravity from the Plato lookup table.
    """
    return float(interp1d(_plato, _sg)(pl))
def _data_sg_to_pl(sg):
    """
    Linear interpolation of degrees Plato from the specific-gravity lookup table.
    """
    return float(interp1d(_sg, _plato)(sg))
def _interpolate(xdata, ydata, xval):
"""
Interpolate between given points.
"""
f = interp1d(xdata, ydata)
return f(xval)
def _pl_to_sg(pl, fct=FCT_DATA):
    """
    Calculate specific gravity from °Pl.

    :param pl: gravity in °Pl
    :param fct: backend — FCT_DATA (table lookup) or FCT_POLY (polynomial)
    :returns: specific gravity (SPECIFIC_GRAVITY)
    :raises ValueError: for an unknown fct
    """
    if fct == FCT_DATA:
        return _data_pl_to_sg(pl)
    if fct == FCT_POLY:
        return _poly_pl_to_sg(pl)
    raise ValueError("value for parameter fct is not valid.")
def _sg_to_pl(sg, fct=FCT_DATA):
    """
    Calculate °Pl from specific gravity.

    :param sg: specific gravity (SPECIFIC_GRAVITY)
    :param fct: backend — FCT_DATA (table lookup) or FCT_POLY (polynomial)
    :returns: gravity in °Pl
    :raises ValueError: for an unknown fct
    """
    if fct == FCT_DATA:
        return _data_sg_to_pl(sg)
    if fct == FCT_POLY:
        return _poly_sg_to_pl(sg)
    raise ValueError("value for parameter fct is not valid.")
class Gravity:
    """A wort gravity stored in one unit (°Plato or kg/m³ specific gravity)
    and readable/writable in either via the conversion properties."""

    def __init__(self, value, unit=PLATO):
        self.value = value
        self._unit = unit
        assert unit in _units, "unit parameter not in {}".format(_units)

    def __repr__(self):
        return "Gravity: {}{}".format(self.value, self.unit)

    @property
    def unit(self):
        """Unit the stored value is expressed in (read-only)."""
        return self._unit

    @property
    def plato(self):
        """
        get / set value of the gravity in °Pl
        """
        if self._unit == PLATO:
            return self.value
        if self._unit == SPECIFIC_GRAVITY:
            return _sg_to_pl(self.value)

    @plato.setter
    def plato(self, value):
        # Store natively when the unit matches, otherwise convert °Pl -> SG.
        if self._unit == PLATO:
            self.value = value
        elif self._unit == SPECIFIC_GRAVITY:
            self.value = _pl_to_sg(value)

    @property
    def specific_gravity(self):
        """
        get / set value of the gravity in kg/m³
        """
        if self._unit == PLATO:
            return _pl_to_sg(self.value)
        if self._unit == SPECIFIC_GRAVITY:
            return self.value

    @specific_gravity.setter
    def specific_gravity(self, value):
        # Store natively when the unit matches, otherwise convert SG -> °Pl.
        if self._unit == PLATO:
            self.value = _sg_to_pl(value)
        elif self._unit == SPECIFIC_GRAVITY:
            self.value = value
| 21.324841 | 72 | 0.611111 |
c859559e50d1e070a71d595248e1a816a6121a24 | 3,710 | py | Python | Benchmarking/Util/dataset_processing.py | cristi161/eecvf | 519c488bd47f697ef51e88823f7a751a52677b88 | [
"MIT"
] | 1 | 2021-04-02T15:33:12.000Z | 2021-04-02T15:33:12.000Z | Benchmarking/Util/dataset_processing.py | cristi161/eecvf | 519c488bd47f697ef51e88823f7a751a52677b88 | [
"MIT"
] | null | null | null | Benchmarking/Util/dataset_processing.py | cristi161/eecvf | 519c488bd47f697ef51e88823f7a751a52677b88 | [
"MIT"
] | 1 | 2021-08-14T09:07:22.000Z | 2021-08-14T09:07:22.000Z | import os
import shutil
from scipy.io import loadmat, savemat
# noinspection PyPackageRequirements
import cv2
import numpy as np
def find_gt_from_files_copy(img_location, gt_folder, gt_new_folder):
    """
    Function finds the equivalent ground truth images from a file in the gt folder
    :param img_location: folder with input images
    :param gt_folder: gt images/mat folder
    :param gt_new_folder: folder where to copy
    :return: None
    """
    # Collect bare file names of all input images (recursively).
    image_names = []
    for dirpath, _, names in os.walk(img_location):
        image_names.extend(names)
    print('files = ', image_names)
    print('len(files) = ', len(image_names))

    # Collect full paths of all ground-truth files (recursively).
    gt_paths = []
    for dirpath, _, names in os.walk(gt_folder):
        gt_paths.extend(dirpath + '/' + name for name in names)
    print('gt_files = ', gt_paths)
    print('len(gt_files) = ', len(gt_paths))

    # Copy every ground-truth file whose stem matches an image stem.
    copied = 0
    for image_name in image_names:
        stem = image_name.split('.')[0]
        for gt_path in gt_paths:
            gt_stem = (gt_path.split('.'))[0].split('/')[-1]
            if stem == gt_stem:
                shutil.copy2(gt_path, gt_new_folder)
                print(gt_path)
                copied += 1
    print('count = ', copied)
def show_mat_picture(location, save_pict, show_image):
    """Render the boundary annotations stored in a BSDS-style .mat file.

    :param location: path to a .mat file holding a 'groundTruth' cell array
    :param save_pict: if True, write each annotator's boundary map (and the
        combined map) as PNGs under ../../Logs/gt_images relative to the CWD
    :param show_image: if True, display each map in an OpenCV window
    """
    # Each element of the cell array is one human annotation; index [0][0][1]
    # presumably selects the 'Boundaries' field — TODO confirm mat layout.
    images = loadmat(location, variable_names='groundTruth', appendmat=True).get('groundTruth')[0]
    h = len(images[0][0][0][0])
    w = len(images[0][0][0][0][0])
    full = np.zeros((h, w), np.float32)
    for i in range(len(images)):
        # Scale the binary boundary mask to 0/255 for visualisation.
        border_image_tmp = np.zeros((h, w), np.float32) + np.array(images[i][0][0][1])*255
        border_image_tmp = np.array(border_image_tmp, np.uint8)
        # Accumulate all annotators into one map (uint8 added into float32).
        full += border_image_tmp
        if save_pict:
            # NOTE(review): splitting on '\\' assumes Windows-style paths.
            image_name = str(location.split('\\')[-1]).split('.')[0] + '_' + str(i) + '.png'
            path = os.path.join(os.getcwd(), '../../', 'Logs', 'gt_images', image_name)
            cv2.imwrite(path, border_image_tmp)
        if show_image:
            cv2.imshow(str(i), border_image_tmp)
    # Normalise the accumulated map back into displayable 0-255 uint8 range.
    full = cv2.normalize(full, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
    if save_pict:
        image_name = str(location.split('\\')[-1]).split('.')[0] + '_' + 'all' + '.png'
        path = os.path.join(os.getcwd(), '../../', 'Logs', 'gt_images', image_name)
        cv2.imwrite(path, full)
    if show_image:
        cv2.imshow('all', full)
    if show_image:
        cv2.waitKey(500)
def save_gt_images_in_logs(folder):
    """Render every ground-truth .mat file in *folder* to PNGs under Logs/gt_images."""
    mat_names = []
    for dirpath, _, filenames in os.walk(folder):
        mat_names.extend(filenames)
    print(mat_names)
    for name in mat_names:
        # save_pict=True, show_image=False: write PNGs without opening windows.
        show_mat_picture(os.path.join(folder, name), True, False)
def create_mat_gt_bsds500_from_png(input_location, save_location):
    """Convert binary PNG ground-truth masks into BSDS500-style .mat files."""
    png_names = []
    for dirpath, _, filenames in os.walk(input_location):
        png_names.extend(filenames)
    for name in png_names:
        # Greyscale 0/255 mask -> 0/1 boundary map.
        boundaries = cv2.cvtColor(cv2.imread(os.path.join(input_location, name)), cv2.COLOR_BGR2GRAY)//255
        ground_truth = [{'Segmentation': np.zeros(boundaries.shape, dtype=np.dtype('H')),
                         'Boundaries': boundaries}]
        savemat(os.path.join(save_location, name.split('.')[0] + '.mat'),
                mdict={'groundTruth': ground_truth})
if __name__ == "__main__":
    # Example invocation: convert PNG ground-truth masks to BSDS500 .mat files.
    # NOTE(review): hard-coded local Windows paths — adjust before running.
    create_mat_gt_bsds500_from_png(
        input_location='C:/repos/eecvf/TestData/CM_Dataset/gt_png',
        save_location='C:/repos/eecvf/TestData/CM_Dataset/gt_mat',
    )
| 33.423423 | 107 | 0.597844 |
7a6484a29f75a1d2a3315b5d3fab8a8e9b33a110 | 158 | py | Python | rainbowchar/__init__.py | roderiano/rainbow-char-lib | 5acc579ec4cf5d073bf087eebac6809e1912c966 | [
"MIT"
] | null | null | null | rainbowchar/__init__.py | roderiano/rainbow-char-lib | 5acc579ec4cf5d073bf087eebac6809e1912c966 | [
"MIT"
] | null | null | null | rainbowchar/__init__.py | roderiano/rainbow-char-lib | 5acc579ec4cf5d073bf087eebac6809e1912c966 | [
"MIT"
] | null | null | null | from rainbowchar.rainbow import paint
from rainbowchar.utils import convert_hex_color_to_rgb
from rainbowchar.exceptions import InvalidRainbowHexadecimalColor | 52.666667 | 65 | 0.911392 |
526df34048fce2159a21be3cd21a7a3440dec711 | 8,009 | py | Python | pytorch_wavelets/dwt/transform2d.py | hologerry/pytorch_wavelets | b1418c87f8151f94c2fecc5b31ae2fb7ea31cc03 | [
"MIT"
] | null | null | null | pytorch_wavelets/dwt/transform2d.py | hologerry/pytorch_wavelets | b1418c87f8151f94c2fecc5b31ae2fb7ea31cc03 | [
"MIT"
] | null | null | null | pytorch_wavelets/dwt/transform2d.py | hologerry/pytorch_wavelets | b1418c87f8151f94c2fecc5b31ae2fb7ea31cc03 | [
"MIT"
] | null | null | null | import torch.nn as nn
import pywt
import pytorch_wavelets.dwt.lowlevel as lowlevel
import torch
# NOTE(review): enabling autograd anomaly detection globally at import time adds
# substantial overhead to every backward pass; this looks like leftover
# debugging — confirm it is intentional before shipping.
torch.autograd.set_detect_anomaly(True)
class DWTForward(nn.Module):
    """ Performs a 2d DWT Forward decomposition of an image

    Args:
        J (int): Number of levels of decomposition
        wave (str or pywt.Wavelet): Which wavelet to use. Can be a string to
            pass to pywt.Wavelet constructor, can also be a pywt.Wavelet class,
            or can be a two tuple of array-like objects for the analysis low and
            high pass filters.
        mode (str): 'zero', 'symmetric', 'reflect' or 'periodization'. The
            padding scheme
        separable (bool): whether to do the filtering separably or not (the
            naive implementation can be faster on a gpu).
    """
    def __init__(self, J=1, wave='db1', mode='zero'):
        super().__init__()
        if isinstance(wave, str):
            wave = pywt.Wavelet(wave)
        # Resolve the four analysis filters (column/row, low/high pass).
        if isinstance(wave, pywt.Wavelet):
            h0_col, h1_col = wave.dec_lo, wave.dec_hi
            h0_row, h1_row = h0_col, h1_col
        elif len(wave) == 2:
            h0_col, h1_col = wave[0], wave[1]
            h0_row, h1_row = h0_col, h1_col
        elif len(wave) == 4:
            h0_col, h1_col = wave[0], wave[1]
            h0_row, h1_row = wave[2], wave[3]

        # Prepare the filters as buffers so they move with the module.
        prepped = lowlevel.prep_filt_afb2d(h0_col, h1_col, h0_row, h1_row)
        self.register_buffer('h0_col', prepped[0])
        self.register_buffer('h1_col', prepped[1])
        self.register_buffer('h0_row', prepped[2])
        self.register_buffer('h1_row', prepped[3])
        self.J = J
        self.mode = mode

    def forward(self, x):
        """ Forward pass of the DWT.

        Args:
            x (tensor): Input of shape :math:`(N, C_{in}, H_{in}, W_{in})`

        Returns:
            (yl, yh): lowpass coefficients of shape
                :math:`(N, C_{in}, H_{in}', W_{in}')` and a list (finest scale
                first, length J) of bandpass tensors of shape
                :math:`(N, C_{in}, 3, H_{in}'', W_{in}'')`, where the extra
                dimension iterates over the LH, HL and HH subbands and the
                primed sizes are the correctly downsampled pyramid shapes.
        """
        highpasses = []
        lowpass = x
        mode_int = lowlevel.mode_to_int(self.mode)

        # Apply one analysis filter bank per decomposition level.
        for _ in range(self.J):
            lowpass, high = lowlevel.AFB2D.apply(
                lowpass, self.h0_col.clone(), self.h1_col.clone(),
                self.h0_row.clone(), self.h1_row.clone(), mode_int)
            highpasses.append(high)

        return lowpass, highpasses
class DWTInverse(nn.Module):
    """ Performs a 2d DWT Inverse reconstruction of an image

    Args:
        wave (str or pywt.Wavelet): Which wavelet to use
        C: deprecated, will be removed in future
    """
    def __init__(self, wave='db1', mode='zero'):
        super().__init__()
        if isinstance(wave, str):
            wave = pywt.Wavelet(wave)
        # Resolve the four synthesis (reconstruction) filters.
        if isinstance(wave, pywt.Wavelet):
            g0_col, g1_col = wave.rec_lo, wave.rec_hi
            g0_row, g1_row = g0_col, g1_col
        else:
            if len(wave) == 2:
                g0_col, g1_col = wave[0], wave[1]
                g0_row, g1_row = g0_col, g1_col
            elif len(wave) == 4:
                g0_col, g1_col = wave[0], wave[1]
                g0_row, g1_row = wave[2], wave[3]
        # Prepare the filters; buffers follow the module across devices/dtypes.
        filts = lowlevel.prep_filt_sfb2d(g0_col, g1_col, g0_row, g1_row)
        self.register_buffer('g0_col', filts[0])
        self.register_buffer('g1_col', filts[1])
        self.register_buffer('g0_row', filts[2])
        self.register_buffer('g1_row', filts[3])
        self.mode = mode

    def forward(self, coeffs):
        """
        Args:
            coeffs (yl, yh): tuple of lowpass and bandpass coefficients, where:
              yl is a lowpass tensor of shape :math:`(N, C_{in}, H_{in}',
              W_{in}')` and yh is a list of bandpass tensors of shape
              :math:`list(N, C_{in}, 3, H_{in}'', W_{in}'')`. I.e. should match
              the format returned by DWTForward

        Returns:
            Reconstructed input of shape :math:`(N, C_{in}, H_{in}, W_{in})`

        Note:
            :math:`H_{in}', W_{in}', H_{in}'', W_{in}''` denote the correctly
            downsampled shapes of the DWT pyramid.

        Note:
            Can have None for any of the highpass scales and will treat the
            values as zeros (not in an efficient way though).
        """
        yl, yh = coeffs
        ll_prev = yl
        mode = lowlevel.mode_to_int(self.mode)

        # Do a multilevel inverse transform, coarsest scale first.
        for h in yh[::-1]:
            if h is None:
                # Missing scale: substitute zero bandpass coefficients.
                h = torch.zeros(ll_prev.shape[0], ll_prev.shape[1], 3, ll_prev.shape[-2],
                                ll_prev.shape[-1], device=ll_prev.device)

            # 'Unpad' added dimensions: odd-sized inputs gain one row/col during
            # the forward transform, so trim the lowpass to match the bandpass.
            if ll_prev.shape[-2] > h.shape[-2]:
                ll_prev = ll_prev[..., :-1, :].clone()
            if ll_prev.shape[-1] > h.shape[-1]:
                ll_prev = ll_prev[..., :-1].clone()
            ll_cur = lowlevel.SFB2D.apply(ll_prev, h, self.g0_col.clone(), self.g1_col.clone(), self.g0_row.clone(), self.g1_row.clone(), mode)
            ll_prev = ll_cur
        return ll_prev
class SWTForward(nn.Module):
    """ Performs a 2d Stationary wavelet transform (or undecimated wavelet
    transform) of an image

    Args:
        J (int): Number of levels of decomposition
        wave (str or pywt.Wavelet): Which wavelet to use. Can be a string to
            pass to pywt.Wavelet constructor, can also be a pywt.Wavelet class,
            or can be a two tuple of array-like objects for the analysis low and
            high pass filters.
        mode (str): 'zero', 'symmetric', 'reflect' or 'periodization'. The
            padding scheme. PyWavelets uses only periodization so we use this
            as our default scheme.
    """
    def __init__(self, J=1, wave='db1', mode='periodization'):
        super().__init__()
        if isinstance(wave, str):
            wave = pywt.Wavelet(wave)
        # Resolve the four analysis filters (column/row, low/high pass).
        if isinstance(wave, pywt.Wavelet):
            h0_col, h1_col = wave.dec_lo, wave.dec_hi
            h0_row, h1_row = h0_col, h1_col
        elif len(wave) == 2:
            h0_col, h1_col = wave[0], wave[1]
            h0_row, h1_row = h0_col, h1_col
        elif len(wave) == 4:
            h0_col, h1_col = wave[0], wave[1]
            h0_row, h1_row = wave[2], wave[3]

        # Prepare the filters as buffers so they move with the module.
        prepped = lowlevel.prep_filt_afb2d(h0_col, h1_col, h0_row, h1_row)
        self.register_buffer('h0_col', prepped[0])
        self.register_buffer('h1_col', prepped[1])
        self.register_buffer('h0_row', prepped[2])
        self.register_buffer('h1_row', prepped[3])
        self.J = J
        self.mode = mode

    def forward(self, x):
        """ Forward pass of the SWT.

        Args:
            x (tensor): Input of shape :math:`(N, C_{in}, H_{in}, W_{in})`

        Returns:
            List of coefficients for each scale. Each coefficient has
            shape :math:`(N, C_{in}, 4, H_{in}, W_{in})` where the extra
            dimension stores the 4 subbands for each scale. The ordering in
            these 4 coefficients is: (A, H, V, D) or (ll, lh, hl, hh).
        """
        approx = x
        coeffs = []
        filts = (self.h0_col, self.h1_col, self.h0_row, self.h1_row)
        for level in range(self.J):
            # Undecimated (à trous) filtering: dilation doubles per level.
            y = lowlevel.afb2d_atrous(approx, filts, self.mode, 2 ** level)
            coeffs.append(y)
            # The next level filters the approximation (ll) subband.
            approx = y[:, :, 0].clone()

        return coeffs
| 38.320574 | 143 | 0.556249 |
0e6e1ae2206a9fe448cd86fce7c89eada6924a38 | 85,081 | py | Python | fragbuilder/bio_pdb/Seq.py | larsbratholm/fragbuilder | e16cbcb190403b5fef49811abd11d16d7ef7fb30 | [
"BSD-2-Clause"
] | null | null | null | fragbuilder/bio_pdb/Seq.py | larsbratholm/fragbuilder | e16cbcb190403b5fef49811abd11d16d7ef7fb30 | [
"BSD-2-Clause"
] | null | null | null | fragbuilder/bio_pdb/Seq.py | larsbratholm/fragbuilder | e16cbcb190403b5fef49811abd11d16d7ef7fb30 | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2000-2002 Brad Chapman.
# Copyright 2004-2005 by M de Hoon.
# Copyright 2007-2010 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Provides objects to represent biological sequences with alphabets.
See also U{http://biopython.org/wiki/Seq} and the chapter in our tutorial:
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.html}
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.pdf}
"""
from __future__ import print_function
__docformat__ ="epytext en" #Don't just use plain text in epydoc API pages!
import string #for maketrans only
import array
import sys
from . import Alphabet
from .Alphabet import IUPAC
from .Alphabet.IUPACData import ambiguous_dna_complement, ambiguous_rna_complement
from .Alphabet import CodonTable
def _maketrans(complement_mapping):
"""Makes a python string translation table (PRIVATE).
Arguments:
- complement_mapping - a dictionary such as ambiguous_dna_complement
and ambiguous_rna_complement from Data.IUPACData.
Returns a translation table (a string of length 256) for use with the
python string's translate method to use in a (reverse) complement.
Compatible with lower case and upper case sequences.
For internal use only.
"""
before = ''.join(complement_mapping.keys())
after = ''.join(complement_mapping.values())
before = before + before.lower()
after = after + after.lower()
if sys.version_info[0] == 3 :
return str.maketrans(before, after)
else:
return string.maketrans(before, after)
_dna_complement_table = _maketrans(ambiguous_dna_complement)
_rna_complement_table = _maketrans(ambiguous_rna_complement)
class Seq(object):
"""A read-only sequence object (essentially a string with an alphabet).
Like normal python strings, our basic sequence object is immutable.
This prevents you from doing my_seq[5] = "A" for example, but does allow
Seq objects to be used as dictionary keys.
The Seq object provides a number of string like methods (such as count,
find, split and strip), which are alphabet aware where appropriate.
In addition to the string like sequence, the Seq object has an alphabet
property. This is an instance of an Alphabet class from Bio.Alphabet,
for example generic DNA, or IUPAC DNA. This describes the type of molecule
(e.g. RNA, DNA, protein) and may also indicate the expected symbols
(letters).
The Seq object also provides some biological methods, such as complement,
reverse_complement, transcribe, back_transcribe and translate (which are
not applicable to sequences with a protein alphabet).
"""
    def __init__(self, data, alphabet = Alphabet.generic_alphabet):
        """Create a Seq object.

        Arguments:
         - seq - Sequence, required (string)
         - alphabet - Optional argument, an Alphabet object from Bio.Alphabet

        You will typically use Bio.SeqIO to read in sequences from files as
        SeqRecord objects, whose sequence will be exposed as a Seq object via
        the seq property.

        However, will often want to create your own Seq objects directly:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import IUPAC
        >>> my_seq = Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
        ...              IUPAC.protein)
        >>> my_seq
        Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
        >>> print(my_seq)
        MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
        >>> my_seq.alphabet
        IUPACProtein()
        """
        # Enforce string storage
        # NOTE(review): `basestring` exists only on Python 2; this module was
        # historically run through 2to3 for Python 3 — confirm build process.
        if not isinstance(data, basestring):
            raise TypeError("The sequence data given to a Seq object should "
                            "be a string (not another Seq object etc)")
        self._data = data
        self.alphabet = alphabet  # Seq API requirement
    # A data property is/was a Seq API requirement
    # Note this is read only since the Seq object is meant to be immutable
    @property
    def data(self) :
        """Sequence as a string (DEPRECATED).

        This is a read only property provided for backwards compatility with
        older versions of Biopython (as is the tostring() method). We now
        encourage you to use str(my_seq) instead of my_seq.data or the method
        my_seq.tostring().

        In recent releases of Biopython it was possible to change a Seq object
        by updating its data property, but this triggered a deprecation warning.
        Now the data property is read only, since Seq objects are meant to be
        immutable:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import generic_dna
        >>> my_seq = Seq("ACGT", generic_dna)
        >>> str(my_seq) == my_seq.tostring() == "ACGT"
        True
        >>> my_seq.data = "AAAA"
        Traceback (most recent call last):
           ...
        AttributeError: can't set attribute
        """
        # Deferred imports: warnings/Bio are only needed on this legacy path.
        import warnings
        import Bio
        warnings.warn("Accessing the .data attribute is deprecated. Please "
                      "use str(my_seq) or my_seq.tostring() instead of "
                      "my_seq.data.", Bio.BiopythonDeprecationWarning)
        return str(self)
def __repr__(self):
"""Returns a (truncated) representation of the sequence for debugging."""
if len(self) > 60:
#Shows the last three letters as it is often useful to see if there
#is a stop codon at the end of a sequence.
#Note total length is 54+3+3=60
return "%s('%s...%s', %s)" % (self.__class__.__name__,
str(self)[:54], str(self)[-3:],
repr(self.alphabet))
else:
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._data),
repr(self.alphabet))
    def __str__(self):
        """Returns the full sequence as a python string, use str(my_seq).

        Note that Biopython 1.44 and earlier would give a truncated
        version of repr(my_seq) for str(my_seq). If you are writing code
        which need to be backwards compatible with old Biopython, you
        should continue to use my_seq.tostring() rather than str(my_seq).
        """
        # The sequence is stored internally as a plain python string.
        return self._data
    def __hash__(self):
        """Hash for comparison.

        See the __cmp__ documentation - we plan to change this!
        """
        # Identity-based hashing matches the identity-based __cmp__ below;
        # note that two Seq objects with equal content hash differently.
        return id(self) #Currently use object identity for equality testing
    def __cmp__(self, other):
        """Compare the sequence to another sequence or a string (README).

        Historically comparing Seq objects has done Python object comparison.
        After considerable discussion (keeping in mind constraints of the
        Python language, hashes and dictionary support) a future release of
        Biopython will change this to use simple string comparison. The plan is
        that comparing incompatible alphabets (e.g. DNA to RNA) will trigger a
        warning.

        This version of Biopython still does Python object comparison, but with
        a warning about this future change. During this transition period,
        please just do explicit comparisons:

        >>> seq1 = Seq("ACGT")
        >>> seq2 = Seq("ACGT")
        >>> id(seq1) == id(seq2)
        False
        >>> str(seq1) == str(seq2)
        True

        Note - This method indirectly supports ==, < , etc.
        """
        # NOTE(review): __cmp__ is only honoured by Python 2; on Python 3 this
        # method is ignored entirely.
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            import warnings
            warnings.warn("In future comparing Seq objects will use string "
                          "comparison (not object comparison). Incompatible "
                          "alphabets will trigger a warning (not an exception). "
                          "In the interim please use id(seq1)==id(seq2) or "
                          "str(seq1)==str(seq2) to make your code explicit "
                          "and to avoid this warning.", FutureWarning)
        # Compare by object identity (see docstring for the planned change).
        return cmp(id(self), id(other))
    def __len__(self):
        """Returns the length of the sequence, use len(my_seq)."""
        return len(self._data) # Seq API requirement
def __getitem__(self, index) : # Seq API requirement
"""Returns a subsequence of single letter, use my_seq[index]."""
#Note since Python 2.0, __getslice__ is deprecated
#and __getitem__ is used instead.
#See http://docs.python.org/ref/sequence-methods.html
if isinstance(index, int):
#Return a single letter as a string
return self._data[index]
else:
#Return the (sub)sequence as another Seq object
return Seq(self._data[index], self.alphabet)
    def __add__(self, other):
        """Add another sequence or string to this sequence.

        If adding a string to a Seq, the alphabet is preserved:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import generic_protein
        >>> Seq("MELKI", generic_protein) + "LV"
        Seq('MELKILV', ProteinAlphabet())

        When adding two Seq (like) objects, the alphabets are important.
        Consider this example:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet.IUPAC import unambiguous_dna, ambiguous_dna
        >>> unamb_dna_seq = Seq("ACGT", unambiguous_dna)
        >>> ambig_dna_seq = Seq("ACRGT", ambiguous_dna)
        >>> unamb_dna_seq
        Seq('ACGT', IUPACUnambiguousDNA())
        >>> ambig_dna_seq
        Seq('ACRGT', IUPACAmbiguousDNA())

        If we add the ambiguous and unambiguous IUPAC DNA alphabets, we get
        the more general ambiguous IUPAC DNA alphabet:

        >>> unamb_dna_seq + ambig_dna_seq
        Seq('ACGTACRGT', IUPACAmbiguousDNA())

        However, if the default generic alphabet is included, the result is
        a generic alphabet:

        >>> Seq("") + ambig_dna_seq
        Seq('ACRGT', Alphabet())

        You can't add RNA and DNA sequences:

        >>> from Bio.Alphabet import generic_dna, generic_rna
        >>> Seq("ACGT", generic_dna) + Seq("ACGU", generic_rna)
        Traceback (most recent call last):
           ...
        TypeError: Incompatible alphabets DNAAlphabet() and RNAAlphabet()

        You can't add nucleotide and protein sequences:

        >>> from Bio.Alphabet import generic_dna, generic_protein
        >>> Seq("ACGT", generic_dna) + Seq("MELKI", generic_protein)
        Traceback (most recent call last):
           ...
        TypeError: Incompatible alphabets DNAAlphabet() and ProteinAlphabet()
        """
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatible alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            a = Alphabet._consensus_alphabet([self.alphabet, other.alphabet])
            return self.__class__(str(self) + str(other), a)
        elif isinstance(other, basestring):
            #other is a plain string - use the current alphabet
            return self.__class__(str(self) + other, self.alphabet)
        from Bio.SeqRecord import SeqRecord #Lazy to avoid circular imports
        if isinstance(other, SeqRecord):
            #Get the SeqRecord's __radd__ to handle this
            return NotImplemented
        else :
            # NOTE(review): bare TypeError with no message — presumably
            # intentional minimalism; a message would aid debugging.
            raise TypeError
def __radd__(self, other):
"""Adding a sequence on the left.
If adding a string to a Seq, the alphabet is preserved:
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_protein
>>> "LV" + Seq("MELKI", generic_protein)
Seq('LVMELKI', ProteinAlphabet())
Adding two Seq (like) objects is handled via the __add__ method.
"""
if hasattr(other, "alphabet"):
#other should be a Seq or a MutableSeq
if not Alphabet._check_type_compatible([self.alphabet,
other.alphabet]):
raise TypeError("Incompatable alphabets %s and %s" \
% (repr(self.alphabet), repr(other.alphabet)))
#They should be the same sequence type (or one of them is generic)
a = Alphabet._consensus_alphabet([self.alphabet, other.alphabet])
return self.__class__(str(other) + str(self), a)
elif isinstance(other, basestring):
#other is a plain string - use the current alphabet
return self.__class__(other + str(self), self.alphabet)
else:
raise TypeError
    def tostring(self): # Seq API requirement
        """Returns the full sequence as a python string (semi-obsolete).

        Although not formally deprecated, you are now encouraged to use
        str(my_seq) instead of my_seq.tostring()."""
        #TODO - Fix all places elsewhere in Biopython using this method,
        #then start deprecation process?
        #import warnings
        #warnings.warn("This method is obsolete; please use str(my_seq) "
        #              "instead of my_seq.tostring().",
        #              PendingDeprecationWarning)
        return str(self)
    def tomutable(self):   # Needed? Or use a function?
        """Returns the full sequence as a MutableSeq object.

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import IUPAC
        >>> my_seq = Seq("MKQHKAMIVALIVICITAVVAAL",
        ...              IUPAC.protein)
        >>> my_seq
        Seq('MKQHKAMIVALIVICITAVVAAL', IUPACProtein())
        >>> my_seq.tomutable()
        MutableSeq('MKQHKAMIVALIVICITAVVAAL', IUPACProtein())

        Note that the alphabet is preserved.
        """
        # Copies the string data into a new MutableSeq; self is unchanged.
        return MutableSeq(str(self), self.alphabet)
    def _get_seq_str_and_check_alphabet(self, other_sequence):
        """string/Seq/MutableSeq to string, checking alphabet (PRIVATE).

        For a string argument, returns the string.

        For a Seq or MutableSeq, it checks the alphabet is compatible
        (raising an exception if it isn't), and then returns a string.
        """
        try:
            other_alpha = other_sequence.alphabet
        except AttributeError:
            #Assume other_sequence is a string
            return other_sequence

        #Other should be a Seq or a MutableSeq
        if not Alphabet._check_type_compatible([self.alphabet, other_alpha]):
            # NOTE(review): the "Incompatable" misspelling is pinned by doctests
            # (e.g. in __contains__) — do not fix here without updating those.
            raise TypeError("Incompatable alphabets %s and %s" \
                            % (repr(self.alphabet), repr(other_alpha)))
        #Return as a string
        return str(other_sequence)
    def count(self, sub, start=0, end=sys.maxsize):
        """Non-overlapping count method, like that of a python string.

        This behaves like the python string method of the same name,
        which does a non-overlapping count!

        Returns an integer, the number of occurrences of substring
        argument sub in the (sub)sequence given by [start:end].
        Optional arguments start and end are interpreted as in slice
        notation.

        Arguments:
         - sub - a string or another Seq object to look for
         - start - optional integer, slice start
         - end - optional integer, slice end

        e.g.

        >>> from Bio.Seq import Seq
        >>> my_seq = Seq("AAAATGA")
        >>> print(my_seq.count("A"))
        5
        >>> print(my_seq.count("ATG"))
        1
        >>> print(my_seq.count(Seq("AT")))
        1
        >>> print(my_seq.count("AT", 2, -1))
        1

        HOWEVER, please note because python strings and Seq objects (and
        MutableSeq objects) do a non-overlapping search, this may not give
        the answer you expect:

        >>> "AAAA".count("AA")
        2
        >>> print(Seq("AAAA").count("AA"))
        2

        An overlapping search would give the answer as three!
        """
        #If it has one, check the alphabet:
        sub_str = self._get_seq_str_and_check_alphabet(sub)
        return str(self).count(sub_str, start, end)
    def __contains__(self, char):
        """Implements the 'in' keyword, like a python string.

        e.g.

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import generic_dna, generic_rna, generic_protein
        >>> my_dna = Seq("ATATGAAATTTGAAAA", generic_dna)
        >>> "AAA" in my_dna
        True
        >>> Seq("AAA") in my_dna
        True
        >>> Seq("AAA", generic_dna) in my_dna
        True

        Like other Seq methods, this will raise a type error if another Seq
        (or Seq like) object with an incompatible alphabet is used:

        >>> Seq("AAA", generic_rna) in my_dna
        Traceback (most recent call last):
           ...
        TypeError: Incompatable alphabets DNAAlphabet() and RNAAlphabet()
        >>> Seq("AAA", generic_protein) in my_dna
        Traceback (most recent call last):
           ...
        TypeError: Incompatable alphabets DNAAlphabet() and ProteinAlphabet()
        """
        # NOTE: the doctest above pins the (misspelled) "Incompatable" message
        # raised by _get_seq_str_and_check_alphabet.
        #If it has one, check the alphabet:
        sub_str = self._get_seq_str_and_check_alphabet(char)
        return sub_str in str(self)
def find(self, sub, start=0, end=sys.maxsize):
"""Find method, like that of a python string.
This behaves like the python string method of the same name.
Returns an integer, the index of the first occurrence of substring
argument sub in the (sub)sequence given by [start:end].
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
Returns -1 if the subsequence is NOT found.
e.g. Locating the first typical start codon, AUG, in an RNA sequence:
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.find("AUG")
3
"""
#If it has one, check the alphabet:
sub_str = self._get_seq_str_and_check_alphabet(sub)
return str(self).find(sub_str, start, end)
def rfind(self, sub, start=0, end=sys.maxsize):
"""Find from right method, like that of a python string.
This behaves like the python string method of the same name.
Returns an integer, the index of the last (right most) occurrence of
substring argument sub in the (sub)sequence given by [start:end].
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
Returns -1 if the subsequence is NOT found.
e.g. Locating the last typical start codon, AUG, in an RNA sequence:
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.rfind("AUG")
15
"""
#If it has one, check the alphabet:
sub_str = self._get_seq_str_and_check_alphabet(sub)
return str(self).rfind(sub_str, start, end)
def startswith(self, prefix, start=0, end=sys.maxsize):
"""Does the Seq start with the given prefix? Returns True/False.
This behaves like the python string method of the same name.
Return True if the sequence starts with the specified prefix
(a string or another Seq object), False otherwise.
With optional start, test sequence beginning at that position.
With optional end, stop comparing sequence at that position.
prefix can also be a tuple of strings to try. e.g.
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.startswith("GUC")
True
>>> my_rna.startswith("AUG")
False
>>> my_rna.startswith("AUG", 3)
True
>>> my_rna.startswith(("UCC","UCA","UCG"),1)
True
"""
#If it has one, check the alphabet:
if isinstance(prefix, tuple):
#TODO - Once we drop support for Python 2.4, instead of this
#loop offload to the string method (requires Python 2.5+).
#Check all the alphabets first...
prefix_strings = [self._get_seq_str_and_check_alphabet(p) \
for p in prefix]
for prefix_str in prefix_strings:
if str(self).startswith(prefix_str, start, end):
return True
return False
else:
prefix_str = self._get_seq_str_and_check_alphabet(prefix)
return str(self).startswith(prefix_str, start, end)
def endswith(self, suffix, start=0, end=sys.maxsize):
"""Does the Seq end with the given suffix? Returns True/False.
This behaves like the python string method of the same name.
Return True if the sequence ends with the specified suffix
(a string or another Seq object), False otherwise.
With optional start, test sequence beginning at that position.
With optional end, stop comparing sequence at that position.
suffix can also be a tuple of strings to try. e.g.
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.endswith("UUG")
True
>>> my_rna.endswith("AUG")
False
>>> my_rna.endswith("AUG", 0, 18)
True
>>> my_rna.endswith(("UCC","UCA","UUG"))
True
"""
#If it has one, check the alphabet:
if isinstance(suffix, tuple):
#TODO - Once we drop support for Python 2.4, instead of this
#loop offload to the string method (requires Python 2.5+).
#Check all the alphabets first...
suffix_strings = [self._get_seq_str_and_check_alphabet(p) \
for p in suffix]
for suffix_str in suffix_strings:
if str(self).endswith(suffix_str, start, end):
return True
return False
else:
suffix_str = self._get_seq_str_and_check_alphabet(suffix)
return str(self).endswith(suffix_str, start, end)
def split(self, sep=None, maxsplit=-1):
"""Split method, like that of a python string.
This behaves like the python string method of the same name.
Return a list of the 'words' in the string (as Seq objects),
using sep as the delimiter string. If maxsplit is given, at
most maxsplit splits are done. If maxsplit is ommited, all
splits are made.
Following the python string method, sep will by default be any
white space (tabs, spaces, newlines) but this is unlikely to
apply to biological sequences.
e.g.
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_aa = my_rna.translate()
>>> my_aa
Seq('VMAIVMGR*KGAR*L', HasStopCodon(ExtendedIUPACProtein(), '*'))
>>> my_aa.split("*")
[Seq('VMAIVMGR', HasStopCodon(ExtendedIUPACProtein(), '*')), Seq('KGAR', HasStopCodon(ExtendedIUPACProtein(), '*')), Seq('L', HasStopCodon(ExtendedIUPACProtein(), '*'))]
>>> my_aa.split("*",1)
[Seq('VMAIVMGR', HasStopCodon(ExtendedIUPACProtein(), '*')), Seq('KGAR*L', HasStopCodon(ExtendedIUPACProtein(), '*'))]
See also the rsplit method:
>>> my_aa.rsplit("*",1)
[Seq('VMAIVMGR*KGAR', HasStopCodon(ExtendedIUPACProtein(), '*')), Seq('L', HasStopCodon(ExtendedIUPACProtein(), '*'))]
"""
#If it has one, check the alphabet:
sep_str = self._get_seq_str_and_check_alphabet(sep)
#TODO - If the sep is the defined stop symbol, or gap char,
#should we adjust the alphabet?
return [Seq(part, self.alphabet) \
for part in str(self).split(sep_str, maxsplit)]
def rsplit(self, sep=None, maxsplit=-1):
"""Right split method, like that of a python string.
This behaves like the python string method of the same name.
Return a list of the 'words' in the string (as Seq objects),
using sep as the delimiter string. If maxsplit is given, at
most maxsplit splits are done COUNTING FROM THE RIGHT.
If maxsplit is ommited, all splits are made.
Following the python string method, sep will by default be any
white space (tabs, spaces, newlines) but this is unlikely to
apply to biological sequences.
e.g. print(my_seq.rsplit("*",1))
See also the split method.
"""
#If it has one, check the alphabet:
sep_str = self._get_seq_str_and_check_alphabet(sep)
return [Seq(part, self.alphabet) \
for part in str(self).rsplit(sep_str, maxsplit)]
def strip(self, chars=None):
"""Returns a new Seq object with leading and trailing ends stripped.
This behaves like the python string method of the same name.
Optional argument chars defines which characters to remove. If
ommitted or None (default) then as for the python string method,
this defaults to removing any white space.
e.g. print(my_seq.strip("-"))
See also the lstrip and rstrip methods.
"""
#If it has one, check the alphabet:
strip_str = self._get_seq_str_and_check_alphabet(chars)
return Seq(str(self).strip(strip_str), self.alphabet)
def lstrip(self, chars=None):
"""Returns a new Seq object with leading (left) end stripped.
This behaves like the python string method of the same name.
Optional argument chars defines which characters to remove. If
ommitted or None (default) then as for the python string method,
this defaults to removing any white space.
e.g. print(my_seq.lstrip("-"))
See also the strip and rstrip methods.
"""
#If it has one, check the alphabet:
strip_str = self._get_seq_str_and_check_alphabet(chars)
return Seq(str(self).lstrip(strip_str), self.alphabet)
def rstrip(self, chars=None):
"""Returns a new Seq object with trailing (right) end stripped.
This behaves like the python string method of the same name.
Optional argument chars defines which characters to remove. If
ommitted or None (default) then as for the python string method,
this defaults to removing any white space.
e.g. Removing a nucleotide sequence's polyadenylation (poly-A tail):
>>> from Bio.Alphabet import IUPAC
>>> from Bio.Seq import Seq
>>> my_seq = Seq("CGGTACGCTTATGTCACGTAGAAAAAA", IUPAC.unambiguous_dna)
>>> my_seq
Seq('CGGTACGCTTATGTCACGTAGAAAAAA', IUPACUnambiguousDNA())
>>> my_seq.rstrip("A")
Seq('CGGTACGCTTATGTCACGTAG', IUPACUnambiguousDNA())
See also the strip and lstrip methods.
"""
#If it has one, check the alphabet:
strip_str = self._get_seq_str_and_check_alphabet(chars)
return Seq(str(self).rstrip(strip_str), self.alphabet)
def upper(self):
"""Returns an upper case copy of the sequence.
>>> from Bio.Alphabet import HasStopCodon, generic_protein
>>> from Bio.Seq import Seq
>>> my_seq = Seq("VHLTPeeK*", HasStopCodon(generic_protein))
>>> my_seq
Seq('VHLTPeeK*', HasStopCodon(ProteinAlphabet(), '*'))
>>> my_seq.lower()
Seq('vhltpeek*', HasStopCodon(ProteinAlphabet(), '*'))
>>> my_seq.upper()
Seq('VHLTPEEK*', HasStopCodon(ProteinAlphabet(), '*'))
This will adjust the alphabet if required. See also the lower method.
"""
return Seq(str(self).upper(), self.alphabet._upper())
def lower(self):
"""Returns a lower case copy of the sequence.
This will adjust the alphabet if required. Note that the IUPAC alphabets
are upper case only, and thus a generic alphabet must be substituted.
>>> from Bio.Alphabet import Gapped, generic_dna
>>> from Bio.Alphabet import IUPAC
>>> from Bio.Seq import Seq
>>> my_seq = Seq("CGGTACGCTTATGTCACGTAG*AAAAAA", Gapped(IUPAC.unambiguous_dna, "*"))
>>> my_seq
Seq('CGGTACGCTTATGTCACGTAG*AAAAAA', Gapped(IUPACUnambiguousDNA(), '*'))
>>> my_seq.lower()
Seq('cggtacgcttatgtcacgtag*aaaaaa', Gapped(DNAAlphabet(), '*'))
See also the upper method.
"""
return Seq(str(self).lower(), self.alphabet._lower())
    def complement(self):
        """Returns the complement sequence. New Seq object.

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import IUPAC
        >>> my_dna = Seq("CCCCCGATAG", IUPAC.unambiguous_dna)
        >>> my_dna
        Seq('CCCCCGATAG', IUPACUnambiguousDNA())
        >>> my_dna.complement()
        Seq('GGGGGCTATC', IUPACUnambiguousDNA())

        You can of course used mixed case sequences,

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import generic_dna
        >>> my_dna = Seq("CCCCCgatA-GD", generic_dna)
        >>> my_dna
        Seq('CCCCCgatA-GD', DNAAlphabet())
        >>> my_dna.complement()
        Seq('GGGGGctaT-CH', DNAAlphabet())

        Note in the above example, ambiguous character D denotes
        G, A or T so its complement is H (for C, T or A).

        Trying to complement a protein sequence raises an exception.

        >>> my_protein = Seq("MAIVMGR", IUPAC.protein)
        >>> my_protein.complement()
        Traceback (most recent call last):
        ...
        ValueError: Proteins do not have complements!
        """
        base = Alphabet._get_base_alphabet(self.alphabet)
        if isinstance(base, Alphabet.ProteinAlphabet):
            raise ValueError("Proteins do not have complements!")
        #Pick the translation table: trust an explicit DNA/RNA alphabet
        #first; otherwise sniff the sequence letters (branch order matters,
        #the mixed U+T check must precede the U-only check).
        if isinstance(base, Alphabet.DNAAlphabet):
            ttable = _dna_complement_table
        elif isinstance(base, Alphabet.RNAAlphabet):
            ttable = _rna_complement_table
        elif ('U' in self._data or 'u' in self._data) \
            and ('T' in self._data or 't' in self._data):
            #TODO - Handle this cleanly?
            raise ValueError("Mixed RNA/DNA found")
        elif 'U' in self._data or 'u' in self._data:
            ttable = _rna_complement_table
        else:
            #No U present, so default to DNA (also covers empty sequences).
            ttable = _dna_complement_table
        #Much faster on really long sequences than the previous loop based one.
        #thx to Michael Palmer, University of Waterloo
        return Seq(str(self).translate(ttable), self.alphabet)
def reverse_complement(self):
"""Returns the reverse complement sequence. New Seq object.
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import IUPAC
>>> my_dna = Seq("CCCCCGATAGNR", IUPAC.ambiguous_dna)
>>> my_dna
Seq('CCCCCGATAGNR', IUPACAmbiguousDNA())
>>> my_dna.reverse_complement()
Seq('YNCTATCGGGGG', IUPACAmbiguousDNA())
Note in the above example, since R = G or A, its complement
is Y (which denotes C or T).
You can of course used mixed case sequences,
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_dna
>>> my_dna = Seq("CCCCCgatA-G", generic_dna)
>>> my_dna
Seq('CCCCCgatA-G', DNAAlphabet())
>>> my_dna.reverse_complement()
Seq('C-TatcGGGGG', DNAAlphabet())
Trying to complement a protein sequence raises an exception:
>>> my_protein = Seq("MAIVMGR", IUPAC.protein)
>>> my_protein.reverse_complement()
Traceback (most recent call last):
...
ValueError: Proteins do not have complements!
"""
#Use -1 stride/step to reverse the complement
return self.complement()[::-1]
def transcribe(self):
"""Returns the RNA sequence from a DNA sequence. New Seq object.
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import IUPAC
>>> coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG",
... IUPAC.unambiguous_dna)
>>> coding_dna
Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG', IUPACUnambiguousDNA())
>>> coding_dna.transcribe()
Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG', IUPACUnambiguousRNA())
Trying to transcribe a protein or RNA sequence raises an exception:
>>> my_protein = Seq("MAIVMGR", IUPAC.protein)
>>> my_protein.transcribe()
Traceback (most recent call last):
...
ValueError: Proteins cannot be transcribed!
"""
base = Alphabet._get_base_alphabet(self.alphabet)
if isinstance(base, Alphabet.ProteinAlphabet):
raise ValueError("Proteins cannot be transcribed!")
if isinstance(base, Alphabet.RNAAlphabet):
raise ValueError("RNA cannot be transcribed!")
if self.alphabet==IUPAC.unambiguous_dna:
alphabet = IUPAC.unambiguous_rna
elif self.alphabet==IUPAC.ambiguous_dna:
alphabet = IUPAC.ambiguous_rna
else:
alphabet = Alphabet.generic_rna
return Seq(str(self).replace('T','U').replace('t','u'), alphabet)
def back_transcribe(self):
"""Returns the DNA sequence from an RNA sequence. New Seq object.
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import IUPAC
>>> messenger_rna = Seq("AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG",
... IUPAC.unambiguous_rna)
>>> messenger_rna
Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG', IUPACUnambiguousRNA())
>>> messenger_rna.back_transcribe()
Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG', IUPACUnambiguousDNA())
Trying to back-transcribe a protein or DNA sequence raises an
exception:
>>> my_protein = Seq("MAIVMGR", IUPAC.protein)
>>> my_protein.back_transcribe()
Traceback (most recent call last):
...
ValueError: Proteins cannot be back transcribed!
"""
base = Alphabet._get_base_alphabet(self.alphabet)
if isinstance(base, Alphabet.ProteinAlphabet):
raise ValueError("Proteins cannot be back transcribed!")
if isinstance(base, Alphabet.DNAAlphabet):
raise ValueError("DNA cannot be back transcribed!")
if self.alphabet==IUPAC.unambiguous_rna:
alphabet = IUPAC.unambiguous_dna
elif self.alphabet==IUPAC.ambiguous_rna:
alphabet = IUPAC.ambiguous_dna
else:
alphabet = Alphabet.generic_dna
return Seq(str(self).replace("U", "T").replace("u", "t"), alphabet)
    def translate(self, table="Standard", stop_symbol="*", to_stop=False,
                  cds=False):
        """Turns a nucleotide sequence into a protein sequence. New Seq object.

        This method will translate DNA or RNA sequences, and those with a
        nucleotide or generic alphabet.  Trying to translate a protein
        sequence raises an exception.

        Arguments:
         - table - Which codon table to use?  This can be either a name
                   (string), an NCBI identifier (integer), or a CodonTable
                   object (useful for non-standard genetic codes).  This
                   defaults to the "Standard" table.
         - stop_symbol - Single character string, what to use for terminators.
                   This defaults to the asterisk, "*".
         - to_stop - Boolean, defaults to False meaning do a full translation
                   continuing on past any stop codons (translated as the
                   specified stop_symbol).  If True, translation is
                   terminated at the first in frame stop codon (and the
                   stop_symbol is not appended to the returned protein
                   sequence).
         - cds - Boolean, indicates this is a complete CDS.  If True,
                   this checks the sequence starts with a valid alternative start
                   codon (which will be translated as methionine, M), that the
                   sequence length is a multiple of three, and that there is a
                   single in frame stop codon at the end (this will be excluded
                   from the protein sequence, regardless of the to_stop option).
                   If these tests fail, an exception is raised.

        e.g. Using the standard table:

        >>> coding_dna = Seq("GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG")
        >>> coding_dna.translate()
        Seq('VAIVMGR*KGAR*', HasStopCodon(ExtendedIUPACProtein(), '*'))
        >>> coding_dna.translate(stop_symbol="@")
        Seq('VAIVMGR@KGAR@', HasStopCodon(ExtendedIUPACProtein(), '@'))
        >>> coding_dna.translate(to_stop=True)
        Seq('VAIVMGR', ExtendedIUPACProtein())

        Now using NCBI table 2, where TGA is not a stop codon:

        >>> coding_dna.translate(table=2)
        Seq('VAIVMGRWKGAR*', HasStopCodon(ExtendedIUPACProtein(), '*'))
        >>> coding_dna.translate(table=2, to_stop=True)
        Seq('VAIVMGRWKGAR', ExtendedIUPACProtein())

        In fact, GTG is an alternative start codon under NCBI table 2, meaning
        this sequence could be a complete CDS:

        >>> coding_dna.translate(table=2, cds=True)
        Seq('MAIVMGRWKGAR', ExtendedIUPACProtein())

        It isn't a valid CDS under NCBI table 1, due to both the start codon and
        also the in frame stop codons:

        >>> coding_dna.translate(table=1, cds=True)
        Traceback (most recent call last):
        ...
        TranslationError: First codon 'GTG' is not a start codon

        If the sequence has no in-frame stop codon, then the to_stop argument
        has no effect:

        >>> coding_dna2 = Seq("TTGGCCATTGTAATGGGCCGC")
        >>> coding_dna2.translate()
        Seq('LAIVMGR', ExtendedIUPACProtein())
        >>> coding_dna2.translate(to_stop=True)
        Seq('LAIVMGR', ExtendedIUPACProtein())

        NOTE - Ambiguous codons like "TAN" or "NNN" could be an amino acid
        or a stop codon.  These are translated as "X".  Any invalid codon
        (e.g. "TA?" or "T-A") will throw a TranslationError.

        NOTE - Does NOT support gapped sequences.

        NOTE - This does NOT behave like the python string's translate
        method.  For that use str(my_seq).translate(...) instead.
        """
        #Guard against callers expecting str.translate() semantics:
        if isinstance(table, str) and len(table)==256:
            raise ValueError("The Seq object translate method DOES NOT take " \
                             + "a 256 character string mapping table like " \
                             + "the python string object's translate method. " \
                             + "Use str(my_seq).translate(...) instead.")
        if isinstance(Alphabet._get_base_alphabet(self.alphabet),
                      Alphabet.ProteinAlphabet):
            raise ValueError("Proteins cannot be translated!")
        #Dispatch on the table argument: an int (or int-like) is an NCBI
        #table ID (else branch), a string is a table name (ValueError
        #branch), anything else should be a CodonTable object.
        try:
            table_id = int(table)
        except ValueError:
            #Assume its a table name
            if self.alphabet==IUPAC.unambiguous_dna:
                #Will use standard IUPAC protein alphabet, no need for X
                codon_table = CodonTable.unambiguous_dna_by_name[table]
            elif self.alphabet==IUPAC.unambiguous_rna:
                #Will use standard IUPAC protein alphabet, no need for X
                codon_table = CodonTable.unambiguous_rna_by_name[table]
            else:
                #This will use the extended IUPAC protein alphabet with X etc.
                #The same table can be used for RNA or DNA (we use this for
                #translating strings).
                codon_table = CodonTable.ambiguous_generic_by_name[table]
        except (AttributeError, TypeError):
            #Assume its a CodonTable object
            if isinstance(table, CodonTable.CodonTable):
                codon_table = table
            else:
                raise ValueError('Bad table argument')
        else:
            #Assume its a table ID
            if self.alphabet==IUPAC.unambiguous_dna:
                #Will use standard IUPAC protein alphabet, no need for X
                codon_table = CodonTable.unambiguous_dna_by_id[table_id]
            elif self.alphabet==IUPAC.unambiguous_rna:
                #Will use standard IUPAC protein alphabet, no need for X
                codon_table = CodonTable.unambiguous_rna_by_id[table_id]
            else:
                #This will use the extended IUPAC protein alphabet with X etc.
                #The same table can be used for RNA or DNA (we use this for
                #translating strings).
                codon_table = CodonTable.ambiguous_generic_by_id[table_id]
        protein = _translate_str(str(self), codon_table, \
                                 stop_symbol, to_stop, cds)
        #Only wrap the protein alphabet with HasStopCodon if a stop
        #symbol actually appears in the translation:
        if stop_symbol in protein:
            alphabet = Alphabet.HasStopCodon(codon_table.protein_alphabet,
                                             stop_symbol = stop_symbol)
        else:
            alphabet = codon_table.protein_alphabet
        return Seq(protein, alphabet)
def ungap(self, gap=None):
"""Return a copy of the sequence without the gap character(s).
The gap character can be specified in two ways - either as an explicit
argument, or via the sequence's alphabet. For example:
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_dna
>>> my_dna = Seq("-ATA--TGAAAT-TTGAAAA", generic_dna)
>>> my_dna
Seq('-ATA--TGAAAT-TTGAAAA', DNAAlphabet())
>>> my_dna.ungap("-")
Seq('ATATGAAATTTGAAAA', DNAAlphabet())
If the gap character is not given as an argument, it will be taken from
the sequence's alphabet (if defined). Notice that the returned sequence's
alphabet is adjusted since it no longer requires a gapped alphabet:
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import IUPAC, Gapped, HasStopCodon
>>> my_pro = Seq("MVVLE=AD*", HasStopCodon(Gapped(IUPAC.protein, "=")))
>>> my_pro
Seq('MVVLE=AD*', HasStopCodon(Gapped(IUPACProtein(), '='), '*'))
>>> my_pro.ungap()
Seq('MVVLEAD*', HasStopCodon(IUPACProtein(), '*'))
Or, with a simpler gapped DNA example:
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import IUPAC, Gapped
>>> my_seq = Seq("CGGGTAG=AAAAAA", Gapped(IUPAC.unambiguous_dna, "="))
>>> my_seq
Seq('CGGGTAG=AAAAAA', Gapped(IUPACUnambiguousDNA(), '='))
>>> my_seq.ungap()
Seq('CGGGTAGAAAAAA', IUPACUnambiguousDNA())
As long as it is consistent with the alphabet, although it is redundant,
you can still supply the gap character as an argument to this method:
>>> my_seq
Seq('CGGGTAG=AAAAAA', Gapped(IUPACUnambiguousDNA(), '='))
>>> my_seq.ungap("=")
Seq('CGGGTAGAAAAAA', IUPACUnambiguousDNA())
However, if the gap character given as the argument disagrees with that
declared in the alphabet, an exception is raised:
>>> my_seq
Seq('CGGGTAG=AAAAAA', Gapped(IUPACUnambiguousDNA(), '='))
>>> my_seq.ungap("-")
Traceback (most recent call last):
...
ValueError: Gap '-' does not match '=' from alphabet
Finally, if a gap character is not supplied, and the alphabet does not
define one, an exception is raised:
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_dna
>>> my_dna = Seq("ATA--TGAAAT-TTGAAAA", generic_dna)
>>> my_dna
Seq('ATA--TGAAAT-TTGAAAA', DNAAlphabet())
>>> my_dna.ungap()
Traceback (most recent call last):
...
ValueError: Gap character not given and not defined in alphabet
"""
if hasattr(self.alphabet, "gap_char"):
if not gap:
gap = self.alphabet.gap_char
elif gap != self.alphabet.gap_char:
raise ValueError("Gap %s does not match %s from alphabet" \
% (repr(gap), repr(self.alphabet.gap_char)))
alpha = Alphabet._ungap(self.alphabet)
elif not gap:
raise ValueError("Gap character not given and not defined in alphabet")
else:
alpha = self.alphabet #modify!
if len(gap)!=1 or not isinstance(gap, str):
raise ValueError("Unexpected gap character, %s" % repr(gap))
return Seq(str(self).replace(gap, ""), alpha)
class UnknownSeq(Seq):
    """A read-only sequence object of known length but unknown contents.

    If you have an unknown sequence, you can represent this with a normal
    Seq object, for example:

    >>> my_seq = Seq("N"*5)
    >>> my_seq
    Seq('NNNNN', Alphabet())
    >>> len(my_seq)
    5
    >>> print(my_seq)
    NNNNN

    However, this is rather wasteful of memory (especially for large
    sequences), which is where this class is most useful:

    >>> unk_five = UnknownSeq(5)
    >>> unk_five
    UnknownSeq(5, alphabet = Alphabet(), character = '?')
    >>> len(unk_five)
    5
    >>> print(unk_five)
    ?????

    You can add unknown sequence together, provided their alphabets and
    characters are compatible, and get another memory saving UnknownSeq:

    >>> unk_four = UnknownSeq(4)
    >>> unk_four
    UnknownSeq(4, alphabet = Alphabet(), character = '?')
    >>> unk_four + unk_five
    UnknownSeq(9, alphabet = Alphabet(), character = '?')

    If the alphabet or characters don't match up, the addition gives an
    ordinary Seq object:

    >>> unk_nnnn = UnknownSeq(4, character = "N")
    >>> unk_nnnn
    UnknownSeq(4, alphabet = Alphabet(), character = 'N')
    >>> unk_nnnn + unk_four
    Seq('NNNN????', Alphabet())

    Combining with a real Seq gives a new Seq object:

    >>> known_seq = Seq("ACGT")
    >>> unk_four + known_seq
    Seq('????ACGT', Alphabet())
    >>> known_seq + unk_four
    Seq('ACGT????', Alphabet())
    """
    def __init__(self, length, alphabet = Alphabet.generic_alphabet, character = None):
        """Create a new UnknownSeq object.

        If character is omitted, it is determined from the alphabet, "N" for
        nucleotides, "X" for proteins, and "?" otherwise.
        """
        #Only the length and the single placeholder character are stored;
        #the full string is generated on demand by __str__.
        self._length = int(length)
        if self._length < 0:
            #TODO - Block zero length UnknownSeq?  You can just use a Seq!
            raise ValueError("Length must not be negative.")
        self.alphabet = alphabet
        if character:
            if len(character) != 1:
                raise ValueError("character argument should be a single letter string.")
            self._character = character
        else:
            base = Alphabet._get_base_alphabet(alphabet)
            #TODO? Check the case of the letters in the alphabet?
            #We may have to use "n" instead of "N" etc.
            if isinstance(base, Alphabet.NucleotideAlphabet):
                self._character = "N"
            elif isinstance(base, Alphabet.ProteinAlphabet):
                self._character = "X"
            else:
                self._character = "?"
    def __len__(self):
        """Returns the stated length of the unknown sequence."""
        return self._length
    def __str__(self):
        """Returns the unknown sequence as full string of the given length."""
        return self._character * self._length
    def __repr__(self):
        """Returns an unambiguous representation of the object."""
        return "UnknownSeq(%i, alphabet = %s, character = %s)" \
               % (self._length, repr(self.alphabet), repr(self._character))
    def __add__(self, other):
        """Add another sequence or string to this sequence.

        Adding two UnknownSeq objects returns another UnknownSeq object
        provided the character is the same and the alphabets are compatible.

        >>> from Bio.Seq import UnknownSeq
        >>> from Bio.Alphabet import generic_protein
        >>> UnknownSeq(10, generic_protein) + UnknownSeq(5, generic_protein)
        UnknownSeq(15, alphabet = ProteinAlphabet(), character = 'X')

        If the characters differ, an UnknownSeq object cannot be used, so a
        Seq object is returned:

        >>> from Bio.Seq import UnknownSeq
        >>> from Bio.Alphabet import generic_protein
        >>> UnknownSeq(10, generic_protein) + UnknownSeq(5, generic_protein,
        ...                                              character="x")
        Seq('XXXXXXXXXXxxxxx', ProteinAlphabet())

        If adding a string to an UnknownSeq, a new Seq is returned with the
        same alphabet:

        >>> from Bio.Seq import UnknownSeq
        >>> from Bio.Alphabet import generic_protein
        >>> UnknownSeq(5, generic_protein) + "LV"
        Seq('XXXXXLV', ProteinAlphabet())
        """
        if isinstance(other, UnknownSeq) \
        and other._character == self._character:
            #TODO - Check the alphabets match
            #Same placeholder character, so the result is still "unknown":
            return UnknownSeq(len(self)+len(other),
                              self.alphabet, self._character)
        #Offload to the base class...
        return Seq(str(self), self.alphabet) + other
    def __radd__(self, other):
        """Add this sequence to the right of another sequence or string."""
        #If other is an UnknownSeq, then __add__ would be called.
        #Offload to the base class...
        return other + Seq(str(self), self.alphabet)
    def __getitem__(self, index):
        """Get a subsequence from the UnknownSeq object.

        >>> unk = UnknownSeq(8, character="N")
        >>> print(unk[:])
        NNNNNNNN
        >>> print(unk[5:3])
        <BLANKLINE>
        >>> print(unk[1:-1])
        NNNNNN
        >>> print(unk[1:-1:2])
        NNN
        """
        if isinstance(index, int):
            #TODO - Check the bounds without wasting memory
            return str(self)[index]
        #index is a slice; work out the resulting length without
        #materialising the full string where we can:
        old_length = self._length
        step = index.step
        if step is None or step == 1:
            #This calculates the length you'd get from ("N"*old_length)[index]
            start = index.start
            end = index.stop
            if start is None:
                start = 0
            elif start < 0:
                start = max(0, old_length + start)
            elif start > old_length:
                start = old_length
            if end is None:
                end = old_length
            elif end < 0:
                end = max(0, old_length + end)
            elif end > old_length:
                end = old_length
            new_length = max(0, end-start)
        elif step == 0:
            raise ValueError("slice step cannot be zero")
        else:
            #TODO - handle step efficiently
            new_length = len(("X"*old_length)[index])
        #assert new_length == len(("X"*old_length)[index]), \
        #       (index, start, end, step, old_length,
        #        new_length, len(("X"*old_length)[index]))
        return UnknownSeq(new_length, self.alphabet, self._character)
    def count(self, sub, start=0, end=sys.maxsize):
        """Non-overlapping count method, like that of a python string.

        This behaves like the python string (and Seq object) method of the
        same name, which does a non-overlapping count!

        Returns an integer, the number of occurrences of substring
        argument sub in the (sub)sequence given by [start:end].
        Optional arguments start and end are interpreted as in slice
        notation.

        Arguments:
         - sub - a string or another Seq object to look for
         - start - optional integer, slice start
         - end - optional integer, slice end

        >>> "NNNN".count("N")
        4
        >>> Seq("NNNN").count("N")
        4
        >>> UnknownSeq(4, character="N").count("N")
        4
        >>> UnknownSeq(4, character="N").count("A")
        0
        >>> UnknownSeq(4, character="N").count("AA")
        0

        HOWEVER, please note because python strings and Seq objects (and
        MutableSeq objects) do a non-overlapping search, this may not give
        the answer you expect:

        >>> UnknownSeq(4, character="N").count("NN")
        2
        >>> UnknownSeq(4, character="N").count("NNN")
        1
        """
        sub_str = self._get_seq_str_and_check_alphabet(sub)
        if len(sub_str) == 1:
            if str(sub_str) == self._character:
                if start==0 and end >= self._length:
                    #Counting over the whole sequence: every position matches.
                    return self._length
                else:
                    #This could be done more cleverly...
                    return str(self).count(sub_str, start, end)
            else:
                #A different character can never occur in this sequence.
                return 0
        else:
            if set(sub_str) == set(self._character):
                if start==0 and end >= self._length:
                    #Non-overlapping count of a homogeneous substring:
                    return self._length // len(sub_str)
                else:
                    #This could be done more cleverly...
                    return str(self).count(sub_str, start, end)
            else:
                return 0
    def complement(self):
        """The complement of an unknown nucleotide equals itself.

        >>> my_nuc = UnknownSeq(8)
        >>> my_nuc
        UnknownSeq(8, alphabet = Alphabet(), character = '?')
        >>> print(my_nuc)
        ????????
        >>> my_nuc.complement()
        UnknownSeq(8, alphabet = Alphabet(), character = '?')
        >>> print(my_nuc.complement())
        ????????
        """
        if isinstance(Alphabet._get_base_alphabet(self.alphabet),
                      Alphabet.ProteinAlphabet):
            raise ValueError("Proteins do not have complements!")
        return self
    def reverse_complement(self):
        """The reverse complement of an unknown nucleotide equals itself.

        >>> my_nuc = UnknownSeq(10)
        >>> my_nuc
        UnknownSeq(10, alphabet = Alphabet(), character = '?')
        >>> print(my_nuc)
        ??????????
        >>> my_nuc.reverse_complement()
        UnknownSeq(10, alphabet = Alphabet(), character = '?')
        >>> print(my_nuc.reverse_complement())
        ??????????
        """
        if isinstance(Alphabet._get_base_alphabet(self.alphabet),
                      Alphabet.ProteinAlphabet):
            raise ValueError("Proteins do not have complements!")
        return self
    def transcribe(self):
        """Returns unknown RNA sequence from an unknown DNA sequence.

        >>> my_dna = UnknownSeq(10, character="N")
        >>> my_dna
        UnknownSeq(10, alphabet = Alphabet(), character = 'N')
        >>> print(my_dna)
        NNNNNNNNNN
        >>> my_rna = my_dna.transcribe()
        >>> my_rna
        UnknownSeq(10, alphabet = RNAAlphabet(), character = 'N')
        >>> print(my_rna)
        NNNNNNNNNN
        """
        #Offload the alphabet stuff (transcribe a one-character Seq and
        #reuse its resulting alphabet for the full-length UnknownSeq):
        s = Seq(self._character, self.alphabet).transcribe()
        return UnknownSeq(self._length, s.alphabet, self._character)
    def back_transcribe(self):
        """Returns unknown DNA sequence from an unknown RNA sequence.

        >>> my_rna = UnknownSeq(20, character="N")
        >>> my_rna
        UnknownSeq(20, alphabet = Alphabet(), character = 'N')
        >>> print(my_rna)
        NNNNNNNNNNNNNNNNNNNN
        >>> my_dna = my_rna.back_transcribe()
        >>> my_dna
        UnknownSeq(20, alphabet = DNAAlphabet(), character = 'N')
        >>> print(my_dna)
        NNNNNNNNNNNNNNNNNNNN
        """
        #Offload the alphabet stuff (back-transcribe a one-character Seq
        #and reuse its resulting alphabet for the full-length UnknownSeq):
        s = Seq(self._character, self.alphabet).back_transcribe()
        return UnknownSeq(self._length, s.alphabet, self._character)
    def upper(self):
        """Returns an upper case copy of the sequence.

        >>> from Bio.Alphabet import generic_dna
        >>> from Bio.Seq import UnknownSeq
        >>> my_seq = UnknownSeq(20, generic_dna, character="n")
        >>> my_seq
        UnknownSeq(20, alphabet = DNAAlphabet(), character = 'n')
        >>> print(my_seq)
        nnnnnnnnnnnnnnnnnnnn
        >>> my_seq.upper()
        UnknownSeq(20, alphabet = DNAAlphabet(), character = 'N')
        >>> print(my_seq.upper())
        NNNNNNNNNNNNNNNNNNNN

        This will adjust the alphabet if required.  See also the lower method.
        """
        return UnknownSeq(self._length, self.alphabet._upper(), self._character.upper())
    def lower(self):
        """Returns a lower case copy of the sequence.

        This will adjust the alphabet if required:

        >>> from Bio.Alphabet import IUPAC
        >>> from Bio.Seq import UnknownSeq
        >>> my_seq = UnknownSeq(20, IUPAC.extended_protein)
        >>> my_seq
        UnknownSeq(20, alphabet = ExtendedIUPACProtein(), character = 'X')
        >>> print(my_seq)
        XXXXXXXXXXXXXXXXXXXX
        >>> my_seq.lower()
        UnknownSeq(20, alphabet = ProteinAlphabet(), character = 'x')
        >>> print(my_seq.lower())
        xxxxxxxxxxxxxxxxxxxx

        See also the upper method.
        """
        return UnknownSeq(self._length, self.alphabet._lower(), self._character.lower())
    def translate(self, **kwargs):
        """Translate an unknown nucleotide sequence into an unknown protein.

        e.g.

        >>> my_seq = UnknownSeq(11, character="N")
        >>> print(my_seq)
        NNNNNNNNNNN
        >>> my_protein = my_seq.translate()
        >>> my_protein
        UnknownSeq(3, alphabet = ProteinAlphabet(), character = 'X')
        >>> print(my_protein)
        XXX

        In comparison, using a normal Seq object:

        >>> my_seq = Seq("NNNNNNNNNNN")
        >>> print(my_seq)
        NNNNNNNNNNN
        >>> my_protein = my_seq.translate()
        >>> my_protein
        Seq('XXX', ExtendedIUPACProtein())
        >>> print(my_protein)
        XXX
        """
        if isinstance(Alphabet._get_base_alphabet(self.alphabet),
                      Alphabet.ProteinAlphabet):
            raise ValueError("Proteins cannot be translated!")
        #One amino acid per codon; any trailing partial codon is dropped.
        return UnknownSeq(self._length//3, Alphabet.generic_protein, "X")
    def ungap(self, gap=None):
        """Return a copy of the sequence without the gap character(s).

        The gap character can be specified in two ways - either as an explicit
        argument, or via the sequence's alphabet.  For example:

        >>> from Bio.Seq import UnknownSeq
        >>> from Bio.Alphabet import Gapped, generic_dna
        >>> my_dna = UnknownSeq(20, Gapped(generic_dna,"-"))
        >>> my_dna
        UnknownSeq(20, alphabet = Gapped(DNAAlphabet(), '-'), character = 'N')
        >>> my_dna.ungap()
        UnknownSeq(20, alphabet = DNAAlphabet(), character = 'N')
        >>> my_dna.ungap("-")
        UnknownSeq(20, alphabet = DNAAlphabet(), character = 'N')

        If the UnknownSeq is using the gap character, then an empty Seq is
        returned:

        >>> my_gap = UnknownSeq(20, Gapped(generic_dna,"-"), character="-")
        >>> my_gap
        UnknownSeq(20, alphabet = Gapped(DNAAlphabet(), '-'), character = '-')
        >>> my_gap.ungap()
        Seq('', DNAAlphabet())
        >>> my_gap.ungap("-")
        Seq('', DNAAlphabet())

        Notice that the returned sequence's alphabet is adjusted to remove any
        explicit gap character declaration.
        """
        #Offload the alphabet stuff (ungap a one-character Seq; if the
        #placeholder character IS the gap, the result is empty):
        s = Seq(self._character, self.alphabet).ungap()
        if s :
            return UnknownSeq(self._length, s.alphabet, self._character)
        else :
            return Seq("", s.alphabet)
class MutableSeq(object):
    """An editable sequence object (with an alphabet).

    Unlike normal python strings and our basic sequence object (the Seq class)
    which are immutable, the MutableSeq lets you edit the sequence in place.
    However, this means you cannot use a MutableSeq object as a dictionary key.

    >>> from Bio.Seq import MutableSeq
    >>> from Bio.Alphabet import generic_dna
    >>> my_seq = MutableSeq("ACTCGTCGTCG", generic_dna)
    >>> my_seq
    MutableSeq('ACTCGTCGTCG', DNAAlphabet())
    >>> my_seq[5]
    'T'
    >>> my_seq[5] = "A"
    >>> my_seq
    MutableSeq('ACTCGACGTCG', DNAAlphabet())
    >>> my_seq[5]
    'A'
    >>> my_seq[5:8] = "NNN"
    >>> my_seq
    MutableSeq('ACTCGNNNTCG', DNAAlphabet())
    >>> len(my_seq)
    11

    Note that the MutableSeq object does not support as many string-like
    or biological methods as the Seq object.
    """

    def __init__(self, data, alphabet = Alphabet.generic_alphabet):
        # The sequence is stored as an array of characters so it can be
        # edited in place; the array type code differs between Python 3
        # (unicode, "u") and Python 2 (char, "c").
        if sys.version_info[0] == 3:
            self.array_indicator = "u"
        else:
            self.array_indicator = "c"
        if isinstance(data, str): #TODO - What about unicode?
            self.data = array.array(self.array_indicator, data)
        else:
            self.data = data # assumes the input is an array
        self.alphabet = alphabet

    def __repr__(self):
        """Returns a (truncated) representation of the sequence for debugging."""
        if len(self) > 60:
            #Shows the last three letters as it is often useful to see if there
            #is a stop codon at the end of a sequence.
            #Note total length is 54+3+3=60
            return "%s('%s...%s', %s)" % (self.__class__.__name__,
                                          str(self[:54]), str(self[-3:]),
                                          repr(self.alphabet))
        else:
            return "%s('%s', %s)" % (self.__class__.__name__,
                                     str(self),
                                     repr(self.alphabet))

    def __str__(self):
        """Returns the full sequence as a python string.

        Note that Biopython 1.44 and earlier would give a truncated
        version of repr(my_seq) for str(my_seq). If you are writing code
        which needs to be backwards compatible with old Biopython, you
        should continue to use my_seq.tostring() rather than str(my_seq).
        """
        #See test_GAQueens.py for an historic usage of a non-string alphabet!
        return "".join(self.data)

    def __cmp__(self, other):
        """Compare the sequence to another sequence or a string (README).

        Currently if compared to another sequence the alphabets must be
        compatible. Comparing DNA to RNA, or Nucleotide to Protein will raise
        an exception. Otherwise only the sequence itself is compared, not the
        precise alphabet.

        A future release of Biopython will change this (and the Seq object etc)
        to use simple string comparison. The plan is that comparing sequences
        with incompatible alphabets (e.g. DNA to RNA) will trigger a warning
        but not an exception.

        During this transition period, please just do explicit comparisons:

        >>> seq1 = MutableSeq("ACGT")
        >>> seq2 = MutableSeq("ACGT")
        >>> id(seq1) == id(seq2)
        False
        >>> str(seq1) == str(seq2)
        True

        This method indirectly supports ==, < , etc.
        """
        # NOTE: __cmp__, cmp() and basestring only exist on Python 2;
        # this method is never invoked on Python 3.
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            import warnings
            warnings.warn("In future comparing incompatible alphabets will "
                          "only trigger a warning (not an exception). In "
                          "the interim please use id(seq1)==id(seq2) or "
                          "str(seq1)==str(seq2) to make your code explicit "
                          "and to avoid this warning.", FutureWarning)
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatable alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            if isinstance(other, MutableSeq):
                #See test_GAQueens.py for an historic usage of a non-string
                #alphabet! Comparing the arrays supports this.
                return cmp(self.data, other.data)
            else:
                return cmp(str(self), str(other))
        elif isinstance(other, basestring):
            return cmp(str(self), other)
        else:
            raise TypeError

    def __len__(self): return len(self.data)

    def __getitem__(self, index):
        #Note since Python 2.0, __getslice__ is deprecated
        #and __getitem__ is used instead.
        #See http://docs.python.org/ref/sequence-methods.html
        if isinstance(index, int):
            #Return a single letter as a string
            return self.data[index]
        else:
            #Return the (sub)sequence as another Seq object
            return MutableSeq(self.data[index], self.alphabet)

    def __setitem__(self, index, value):
        #Note since Python 2.0, __setslice__ is deprecated
        #and __setitem__ is used instead.
        #See http://docs.python.org/ref/sequence-methods.html
        if isinstance(index, int):
            #Replacing a single letter with a new string
            self.data[index] = value
        else:
            #Replacing a sub-sequence
            if isinstance(value, MutableSeq):
                self.data[index] = value.data
            elif isinstance(value, type(self.data)):
                self.data[index] = value
            else:
                # Fall back to converting the value to an array of characters.
                self.data[index] = array.array(self.array_indicator,
                                               str(value))

    def __delitem__(self, index):
        #Note since Python 2.0, __delslice__ is deprecated
        #and __delitem__ is used instead.
        #See http://docs.python.org/ref/sequence-methods.html
        #Could be deleting a single letter, or a slice
        del self.data[index]

    def __add__(self, other):
        """Add another sequence or string to this sequence.

        Returns a new MutableSeq object."""
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatable alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            a = Alphabet._consensus_alphabet([self.alphabet, other.alphabet])
            if isinstance(other, MutableSeq):
                #See test_GAQueens.py for an historic usage of a non-string
                #alphabet! Adding the arrays should support this.
                return self.__class__(self.data + other.data, a)
            else:
                return self.__class__(str(self) + str(other), a)
        elif isinstance(other, basestring):
            #other is a plain string - use the current alphabet
            return self.__class__(str(self) + str(other), self.alphabet)
        else:
            raise TypeError

    def __radd__(self, other):
        # Reflected addition (e.g. plain string + MutableSeq); mirrors
        # __add__ with the operand order swapped.
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatable alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            a = Alphabet._consensus_alphabet([self.alphabet, other.alphabet])
            if isinstance(other, MutableSeq):
                #See test_GAQueens.py for an historic usage of a non-string
                #alphabet! Adding the arrays should support this.
                return self.__class__(other.data + self.data, a)
            else:
                return self.__class__(str(other) + str(self), a)
        elif isinstance(other, basestring):
            #other is a plain string - use the current alphabet
            return self.__class__(str(other) + str(self), self.alphabet)
        else:
            raise TypeError

    def append(self, c):
        # Append a single letter to the end of the sequence (in place).
        self.data.append(c)

    def insert(self, i, c):
        # Insert a single letter before position i (in place).
        self.data.insert(i, c)

    def pop(self, i = (-1)):
        # Remove and return the letter at position i (default: the last one).
        c = self.data[i]
        del self.data[i]
        return c

    def remove(self, item):
        # Delete the first occurrence of the given letter (in place).
        for i in range(len(self.data)):
            if self.data[i] == item:
                del self.data[i]
                return
        raise ValueError("MutableSeq.remove(x): x not in list")

    def count(self, sub, start=0, end=sys.maxsize):
        """Non-overlapping count method, like that of a python string.

        This behaves like the python string method of the same name,
        which does a non-overlapping count!

        Returns an integer, the number of occurrences of substring
        argument sub in the (sub)sequence given by [start:end].
        Optional arguments start and end are interpreted as in slice
        notation.

        Arguments:
         - sub - a string or another Seq object to look for
         - start - optional integer, slice start
         - end - optional integer, slice end

        e.g.

        >>> from Bio.Seq import MutableSeq
        >>> my_mseq = MutableSeq("AAAATGA")
        >>> print(my_mseq.count("A"))
        5
        >>> print(my_mseq.count("ATG"))
        1
        >>> print(my_mseq.count(Seq("AT")))
        1
        >>> print(my_mseq.count("AT", 2, -1))
        1

        HOWEVER, please note that because python strings, Seq objects and
        MutableSeq objects do a non-overlapping search, this may not give
        the answer you expect:

        >>> "AAAA".count("AA")
        2
        >>> print(MutableSeq("AAAA").count("AA"))
        2

        An overlapping search would give the answer as three!
        """
        try:
            #TODO - Should we check the alphabet?
            search = sub.tostring()
        except AttributeError:
            search = sub
        if not isinstance(search, basestring):
            raise TypeError("expected a string, Seq or MutableSeq")
        if len(search) == 1:
            #Try and be efficient and work directly from the array.
            count = 0
            for c in self.data[start:end]:
                if c == search: count += 1
            return count
        else:
            #TODO - Can we do this more efficiently?
            return self.tostring().count(search, start, end)

    def index(self, item):
        # Return the position of the first occurrence of the given letter.
        for i in range(len(self.data)):
            if self.data[i] == item:
                return i
        raise ValueError("MutableSeq.index(x): x not in list")

    def reverse(self):
        """Modify the mutable sequence to reverse itself.

        No return value.
        """
        self.data.reverse()

    def complement(self):
        """Modify the mutable sequence to take on its complement.

        Trying to complement a protein sequence raises an exception.

        No return value.
        """
        if isinstance(Alphabet._get_base_alphabet(self.alphabet),
                      Alphabet.ProteinAlphabet):
            raise ValueError("Proteins do not have complements!")
        # Pick the DNA or RNA complement table, preferring the declared
        # alphabet and falling back to inspecting the letters present.
        if self.alphabet in (IUPAC.ambiguous_dna, IUPAC.unambiguous_dna):
            d = ambiguous_dna_complement
        elif self.alphabet in (IUPAC.ambiguous_rna, IUPAC.unambiguous_rna):
            d = ambiguous_rna_complement
        elif 'U' in self.data and 'T' in self.data:
            #TODO - Handle this cleanly?
            raise ValueError("Mixed RNA/DNA found")
        elif 'U' in self.data:
            d = ambiguous_rna_complement
        else:
            d = ambiguous_dna_complement
        # NOTE(review): d.update(c) mutates the shared module-level
        # complement table in place (adding lowercase entries) - confirm
        # this side effect is intended.
        c = dict([(x.lower(), y.lower()) for x,y in d.items()])
        d.update(c)
        self.data = map(lambda c: d[c], self.data)
        self.data = array.array(self.array_indicator, self.data)

    def reverse_complement(self):
        """Modify the mutable sequence to take on its reverse complement.

        Trying to reverse complement a protein sequence raises an exception.

        No return value.
        """
        # Complement in place, then reverse in place.
        self.complement()
        self.data.reverse()

    ## Sorting a sequence makes no sense.
    # def sort(self, *args): self.data.sort(*args)

    def extend(self, other):
        # Append every letter of the other sequence (a MutableSeq or any
        # iterable of letters) onto the end of this one (in place).
        if isinstance(other, MutableSeq):
            for c in other.data:
                self.data.append(c)
        else:
            for c in other:
                self.data.append(c)

    def tostring(self):
        """Returns the full sequence as a python string (semi-obsolete).

        Although not formally deprecated, you are now encouraged to use
        str(my_seq) instead of my_seq.tostring().

        Because str(my_seq) will give you the full sequence as a python string,
        there is often no need to make an explicit conversion. For example,

        print("ID={%s}, sequence={%s}" % (my_name, my_seq))

        On Biopython 1.44 or older you would have to have done this:

        print("ID={%s}, sequence={%s}" % (my_name, my_seq.tostring()))
        """
        return "".join(self.data)

    def toseq(self):
        """Returns the full sequence as a new immutable Seq object.

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import IUPAC
        >>> my_mseq = MutableSeq("MKQHKAMIVALIVICITAVVAAL",
        ...                      IUPAC.protein)
        >>> my_mseq
        MutableSeq('MKQHKAMIVALIVICITAVVAAL', IUPACProtein())
        >>> my_mseq.toseq()
        Seq('MKQHKAMIVALIVICITAVVAAL', IUPACProtein())

        Note that the alphabet is preserved.
        """
        return Seq("".join(self.data), self.alphabet)
# The transcribe, backward_transcribe, and translate functions are
# user-friendly versions of the corresponding functions in Bio.Transcribe
# and Bio.Translate. The functions work both on Seq objects, and on strings.
def transcribe(dna):
    """Transcribes a DNA sequence into RNA.

    If given a string, returns a new string object.

    Given a Seq or MutableSeq, returns a new Seq object with an RNA alphabet.

    Trying to transcribe a protein or RNA sequence raises an exception.

    e.g.
    >>> transcribe("ACTGN")
    'ACUGN'
    """
    # Dispatch on the input type; sequence objects delegate to their own
    # transcribe method, plain strings use a simple character replacement.
    if isinstance(dna, Seq):
        return dna.transcribe()
    if isinstance(dna, MutableSeq):
        return dna.toseq().transcribe()
    return dna.replace('T','U').replace('t','u')
def back_transcribe(rna):
    """Back-transcribes an RNA sequence into DNA.

    If given a string, returns a new string object.

    Given a Seq or MutableSeq, returns a new Seq object with an RNA alphabet.

    Trying to transcribe a protein or DNA sequence raises an exception.

    e.g.
    >>> back_transcribe("ACUGN")
    'ACTGN'
    """
    # Dispatch on the input type; sequence objects delegate to their own
    # back_transcribe method, plain strings use a character replacement.
    if isinstance(rna, Seq):
        return rna.back_transcribe()
    if isinstance(rna, MutableSeq):
        return rna.toseq().back_transcribe()
    return rna.replace('U','T').replace('u','t')
def _translate_str(sequence, table, stop_symbol="*", to_stop=False,
                   cds=False, pos_stop="X"):
    """Helper function to translate a nucleotide string (PRIVATE).

    Arguments:
     - sequence - a string
     - table - a CodonTable object (NOT a table name or id number)
     - stop_symbol - a single character string, what to use for terminators.
     - to_stop - boolean, should translation terminate at the first
                 in frame stop codon? If there is no in-frame stop codon
                 then translation continues to the end.
     - pos_stop - a single character string for a possible stop codon
                  (e.g. TAN or NNN)
     - cds - Boolean, indicates this is a complete CDS. If True, this
             checks the sequence starts with a valid alternative start
             codon (which will be translated as methionine, M), that the
             sequence length is a multiple of three, and that there is a
             single in frame stop codon at the end (this will be excluded
             from the protein sequence, regardless of the to_stop option).
             If these tests fail, an exception is raised.

    Returns a string.

    e.g.

    >>> from Bio.Data import CodonTable
    >>> table = CodonTable.ambiguous_dna_by_id[1]
    >>> _translate_str("AAA", table)
    'K'
    >>> _translate_str("TAR", table)
    '*'
    >>> _translate_str("TAN", table)
    'X'
    >>> _translate_str("TAN", table, pos_stop="@")
    '@'
    >>> _translate_str("TA?", table)
    Traceback (most recent call last):
       ...
    TranslationError: Codon 'TA?' is invalid
    >>> _translate_str("ATGCCCTAG", table, cds=True)
    'MP'
    >>> _translate_str("AAACCCTAG", table, cds=True)
    Traceback (most recent call last):
       ...
    TranslationError: First codon 'AAA' is not a start codon
    >>> _translate_str("ATGCCCTAGCCCTAG", table, cds=True)
    Traceback (most recent call last):
       ...
    TranslationError: Extra in frame stop codon found.
    """
    sequence = sequence.upper()
    amino_acids = []
    forward_table = table.forward_table
    stop_codons = table.stop_codons
    if table.nucleotide_alphabet.letters is not None:
        valid_letters = set(table.nucleotide_alphabet.letters.upper())
    else:
        #Assume the worst case, ambiguous DNA or RNA:
        valid_letters = set(IUPAC.ambiguous_dna.letters.upper() + \
                            IUPAC.ambiguous_rna.letters.upper())
    if cds:
        # Validate the complete-CDS contract: valid start codon, length a
        # multiple of three, and a stop codon at the very end.
        if str(sequence[:3]).upper() not in table.start_codons:
            raise CodonTable.TranslationError(\
                "First codon '%s' is not a start codon" % sequence[:3])
        if len(sequence) % 3 != 0:
            raise CodonTable.TranslationError(\
                "Sequence length %i is not a multiple of three" % len(sequence))
        if str(sequence[-3:]).upper() not in stop_codons:
            raise CodonTable.TranslationError(\
                "Final codon '%s' is not a stop codon" % sequence[-3:])
        #Don't translate the stop symbol, and manually translate the M
        sequence = sequence[3:-3]
        amino_acids = ["M"]
    # Translate codon by codon; any incomplete trailing codon (n % 3
    # leftover bases) is silently ignored.
    n = len(sequence)
    for i in xrange(0,n-n%3,3):
        codon = sequence[i:i+3]
        try:
            amino_acids.append(forward_table[codon])
        except (KeyError, CodonTable.TranslationError):
            #Todo? Treat "---" as a special case (gapped translation)
            if codon in table.stop_codons:
                if cds:
                    # A stop codon before the final one violates the CDS.
                    raise CodonTable.TranslationError(\
                        "Extra in frame stop codon found.")
                if to_stop : break
                amino_acids.append(stop_symbol)
            elif valid_letters.issuperset(set(codon)):
                #Possible stop codon (e.g. NNN or TAN)
                amino_acids.append(pos_stop)
            else:
                raise CodonTable.TranslationError(\
                    "Codon '%s' is invalid" % codon)
    return "".join(amino_acids)
def translate(sequence, table="Standard", stop_symbol="*", to_stop=False,
              cds=False):
    """Translate a nucleotide sequence into amino acids.

    If given a string, returns a new string object. Given a Seq or
    MutableSeq, returns a Seq object with a protein alphabet.

    Arguments:
     - table - Which codon table to use? This can be either a name (string),
               an NCBI identifier (integer), or a CodonTable object (useful
               for non-standard genetic codes). Defaults to the "Standard"
               table.
     - stop_symbol - Single character string, what to use for any
                     terminators, defaults to the asterisk, "*".
     - to_stop - Boolean, defaults to False meaning do a full
                 translation continuing on past any stop codons
                 (translated as the specified stop_symbol). If
                 True, translation is terminated at the first in
                 frame stop codon (and the stop_symbol is not
                 appended to the returned protein sequence).
     - cds - Boolean, indicates this is a complete CDS. If True, this
             checks the sequence starts with a valid alternative start
             codon (which will be translated as methionine, M), that the
             sequence length is a multiple of three, and that there is a
             single in frame stop codon at the end (this will be excluded
             from the protein sequence, regardless of the to_stop option).
             If these tests fail, an exception is raised.

    A simple string example using the default (standard) genetic code:

    >>> coding_dna = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"
    >>> translate(coding_dna)
    'VAIVMGR*KGAR*'
    >>> translate(coding_dna, stop_symbol="@")
    'VAIVMGR@KGAR@'
    >>> translate(coding_dna, to_stop=True)
    'VAIVMGR'

    Now using NCBI table 2, where TGA is not a stop codon:

    >>> translate(coding_dna, table=2)
    'VAIVMGRWKGAR*'
    >>> translate(coding_dna, table=2, to_stop=True)
    'VAIVMGRWKGAR'

    In fact this example uses an alternative start codon valid under NCBI table 2,
    GTG, which means this example is a complete valid CDS which when translated
    should really start with methionine (not valine):

    >>> translate(coding_dna, table=2, cds=True)
    'MAIVMGRWKGAR'

    Note that if the sequence has no in-frame stop codon, then the to_stop
    argument has no effect:

    >>> coding_dna2 = "GTGGCCATTGTAATGGGCCGC"
    >>> translate(coding_dna2)
    'VAIVMGR'
    >>> translate(coding_dna2, to_stop=True)
    'VAIVMGR'

    NOTE - Ambiguous codons like "TAN" or "NNN" could be an amino acid
    or a stop codon. These are translated as "X". Any invalid codon
    (e.g. "TA?" or "T-A") will throw a TranslationError.

    NOTE - Does NOT support gapped sequences.

    It will however translate either DNA or RNA.
    """
    if isinstance(sequence, Seq):
        return sequence.translate(table, stop_symbol, to_stop, cds)
    elif isinstance(sequence, MutableSeq):
        #Return a Seq object
        return sequence.toseq().translate(table, stop_symbol, to_stop, cds)
    else:
        #Assume its a string, return a string
        # Resolve the table argument: int(table) succeeds for an NCBI id
        # (looked up by id), raises ValueError for a table name string
        # (looked up by name), and raises TypeError for a CodonTable object
        # (used directly).
        try:
            codon_table = CodonTable.ambiguous_generic_by_id[int(table)]
        except ValueError:
            codon_table = CodonTable.ambiguous_generic_by_name[table]
        except (AttributeError, TypeError):
            if isinstance(table, CodonTable.CodonTable):
                codon_table = table
            else:
                raise ValueError('Bad table argument')
        return _translate_str(sequence, codon_table, stop_symbol, to_stop, cds)
def reverse_complement(sequence):
    """Returns the reverse complement sequence of a nucleotide string.

    If given a string, returns a new string object.
    Given a Seq or a MutableSeq, returns a new Seq object with the same alphabet.

    Supports unambiguous and ambiguous nucleotide sequences.

    e.g.
    >>> reverse_complement("ACTG-NH")
    'DN-CAGT'
    """
    if isinstance(sequence, Seq):
        #Return a Seq
        return sequence.reverse_complement()
    elif isinstance(sequence, MutableSeq):
        #Return a Seq
        #Don't use the MutableSeq reverse_complement method as it is 'in place'.
        return sequence.toseq().reverse_complement()
    #Assume its a string.
    #In order to avoid some code duplication, the old code would turn the string
    #into a Seq, use the reverse_complement method, and convert back to a string.
    #This worked, but is over five times slower on short sequences!
    # Choose the RNA or DNA translation table by inspecting the letters;
    # a sequence containing both U/u and T/t is ambiguous and rejected.
    if ('U' in sequence or 'u' in sequence) \
    and ('T' in sequence or 't' in sequence):
        raise ValueError("Mixed RNA/DNA found")
    elif 'U' in sequence or 'u' in sequence:
        ttable = _rna_complement_table
    else:
        ttable = _dna_complement_table
    # Complement every base via the translation table, then reverse with
    # a [::-1] slice.
    return sequence.translate(ttable)[::-1]
def _test():
    """Run the Bio.Seq module's doctests (PRIVATE)."""
    # Doctests are skipped on Python 3.1 due to a doctest/ELLIPSIS bug,
    # see http://bugs.python.org/issue7490
    if sys.version_info[0:2] == (3,1):
        print("Not running Bio.Seq doctest on Python 3.1")
        print("See http://bugs.python.org/issue7490")
    else:
        # BUGFIX: corrected the typo "Runing" in the user-facing message.
        print("Running doctests...")
        import doctest
        doctest.testmod(optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
        print("Done")

if __name__ == "__main__":
    _test()
| 39.794668 | 177 | 0.604013 |
b78c2cf2b2693b031c0ed27bfb0e8f51b15acaad | 4,919 | py | Python | src/fair-preprocessing/adult/AC10.py | sumonbis/FairPreprocessing | c644dd38615f34dba39320397fb00d5509602864 | [
"MIT"
] | 3 | 2021-09-01T10:42:46.000Z | 2022-01-24T06:44:36.000Z | src/fair-preprocessing/adult/AC10.py | sumonbis/FairPreprocessing | c644dd38615f34dba39320397fb00d5509602864 | [
"MIT"
] | null | null | null | src/fair-preprocessing/adult/AC10.py | sumonbis/FairPreprocessing | c644dd38615f34dba39320397fb00d5509602864 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
sys.path.append('../../../')
from utils.packages import *
from utils.ml_fairness import *
from utils.standard_data import *
dir = 'res/adult10-'
d_fields = ['Pipeline', 'Stage', 'SF_SPD', 'SF_EOD', 'SF_AOD', 'SD_ERD', 'Acc', 'F1']
diff_file = dir + 'fairness' + '.csv'
if(not os.path.isfile(diff_file)):
with open(diff_file, 'a') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(d_fields)
# In[2]:
train_path = '../../../data/adult/adult.data'
test_path = '../../../data/adult/adult.test'
column_names = ['age', 'workclass', 'fnlwgt', 'education',
'education-num', 'marital-status', 'occupation', 'relationship',
'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week',
'native-country', 'income-per-year']
na_values=['?']
train = pd.read_csv(train_path, header=None, names=column_names,
skipinitialspace=True, na_values=na_values)
test = pd.read_csv(test_path, header=0, names=column_names,
skipinitialspace=True, na_values=na_values)
df = pd.concat([test, train], ignore_index=True)
# In[3]:
##### Drop na values
# dropped = df.dropna()
# count = df.shape[0] - dropped.shape[0]
# print("Missing Data: {} rows removed.".format(count))
# df = dropped
y1_df = df.copy()
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
df['workclass'] = imputer.fit_transform(df[['workclass']]).ravel()
df['occupation'] = imputer.fit_transform(df[['occupation']]).ravel()
df['native-country'] = imputer.fit_transform(df[['native-country']]).ravel()
y1_df["workclass"] = y1_df["workclass"].fillna("X")
y1_df["occupation"] = y1_df["occupation"].fillna("X")
y1_df["native-country"] = y1_df["native-country"].fillna("x")
# nested_categorical_feature_transformation = Pipeline(steps=[
# ('impute', SimpleImputer(missing_values=np.nan, strategy='most_frequent')),
# # ('encode', OneHotEncoder(handle_unknown='ignore'))
# ])
# In[4]:
# Create a one-hot encoding of the categorical variables.
cat_feat = ['sex', 'workclass', 'education', 'marital-status', 'occupation', 'relationship', 'native-country']
df = pd.get_dummies(df, columns=cat_feat, prefix_sep='=')
y1_df = pd.get_dummies(y1_df, columns=cat_feat, prefix_sep='=')
# for feature in cat_feat:
# le = LabelEncoder()
# y2_df[feature] = le.fit_transform(y2_df[feature])
# for feature in cat_feat:
# le = LabelEncoder()
# y1_df[feature] = le.fit_transform(y1_df[feature])
# In[5]:
seed = randrange(100)
y2_train, y2_test = train_test_split(df, test_size = 0.3, random_state = seed, stratify=df['income-per-year'])
y1_train, y1_test = train_test_split(y1_df, test_size = 0.3, random_state = seed) # stratify = df['income-per-year']
pro_att_name = ['race'] # ['race', 'sex']
priv_class = ['White'] # ['White', 'Male']
reamining_cat_feat = []
y2_data_orig_train, y2_X_train, y2_y_train = load_adult_data(y2_train, pro_att_name, priv_class, reamining_cat_feat)
y2_data_orig_test, y2_X_test, y2_y_test = load_adult_data(y2_test, pro_att_name, priv_class, reamining_cat_feat)
y1_data_orig_train, y1_X_train, y1_y_train = load_adult_data(y1_train, pro_att_name, priv_class, reamining_cat_feat)
y1_data_orig_test, y1_X_test, y1_y_test = load_adult_data(y1_test, pro_att_name, priv_class, reamining_cat_feat)
# In[6]:
y2_model = DecisionTreeClassifier()
y2_mdl = y2_model.fit(y2_X_train, y2_y_train)
y1_model = DecisionTreeClassifier()
y1_mdl = y1_model.fit(y1_X_train, y1_y_train)
# In[7]:
# plot_model_performance(y2_mdl, y2_X_test, y2_y_test)
y1_pred, y1_fair = get_fair_metrics_and_plot('filename', y1_data_orig_test, y1_mdl)
y2_pred, y2_fair = get_fair_metrics_and_plot('filename', y2_data_orig_test, y2_mdl)
y1_fair = y1_fair.drop(['DI', 'CNT', 'TI'], axis=1)
y2_fair = y2_fair.drop(['DI', 'CNT', 'TI'], axis=1)
CVR, CVD, AVR_EOD, AVD_EOD, AVR_SPD, AVD_SPD, AVD_AOD, AV_ERD = compute_new_metrics(y2_data_orig_test, y1_pred, y2_pred)
row_y1 = y1_fair.iloc[[0]].values[0].tolist()
row_y2 = y2_fair.iloc[[0]].values[0].tolist()
diff = []
# diff.append(CVR)
# diff.append(CVD)
diff.append(AVD_SPD)
diff.append(AVD_EOD)
diff.append(AVD_AOD)
diff.append(AV_ERD)
for i in range(len(row_y2)):
if(i < 2):
change = row_y2[i] - row_y1[i]
else:
break;
diff.append(change)
stage = 'Imputation'
model_name = 'adult10'
# diff = diff_df.iloc[0].values.tolist()
diff.insert(0, stage)
diff.insert(0, model_name)
cols = ['Pipeline', 'Stage', 'SF_SPD', 'SF_EOD', 'SF_AOD', 'SD_ERD', 'Acc', 'F1']
# metrics = pd.DataFrame(data=obj_fairness, index=['y1'], columns=cols)
diff_df = pd.DataFrame(data=[diff], columns = cols, index = ['Diff']).round(3)
with open(diff_file, 'a') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(diff)
| 30.937107 | 120 | 0.69445 |
015ea9cc8d85fa4923e6f8382f8dc1786d68306b | 10,378 | py | Python | algo_visual _JRMP.py | Sun-Zhaohong/JRMP | f35b3c371f94b7064e788525fa81100510cd876a | [
"MIT"
] | null | null | null | algo_visual _JRMP.py | Sun-Zhaohong/JRMP | f35b3c371f94b7064e788525fa81100510cd876a | [
"MIT"
] | null | null | null | algo_visual _JRMP.py | Sun-Zhaohong/JRMP | f35b3c371f94b7064e788525fa81100510cd876a | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
#######################################################################
# setup for figure
#######################################################################
fig = plt.figure(dpi=300)
# labels for x-axis / y-axis
x_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
y_labels = ['0', '40', '80', '120', '160', '200']
# location for x labels
x_ticks = np.arange(len(x_labels))
y_ticks = np.linspace(0.0, 200.0, 6)
# the width of the bars
width = 0.25
f_size = 6
legend_size = 10
#######################################################################
# sub-figure (3, 3, 1)
#######################################################################
y11 = [34.9, 59.45, 83.6, 107.9, 132.05, 156.0, 178.5, 196.65, 200.0, 200.0]
y12 = [25.15, 50.35, 73.9, 99.4, 126.5, 152.25, 178.2, 196.65, 200.0, 200.0]
y13 = [35.1, 58.05, 81.4, 104.75, 127.6, 151.15, 173.25, 191.9, 200.0, 200.0]
ax1 = plt.subplot(3, 3, 1)
# set_xticks
ax1.set_xticks(x_ticks)
ax1.set_yticks(y_ticks)
ax1.set_xticklabels(x_labels, fontsize=f_size)
ax1.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 1)
ax1.set_title("ratio=1.2 \u03B8=0.2", fontsize=f_size)
# draw a dummy point without color
plt.scatter(1.0, 200.0, marker='o', s=200, facecolors='none')
# draw bars for each algorithm
rects1 = ax1.bar(x_ticks - width, y11, width, label='GDA-RH')
rects2 = ax1.bar(x_ticks, y12, width, label='GDA-RO')
rects3 = ax1.bar(x_ticks + width, y13, width, label='ACDA')
# legend of figures
ax1.legend((rects1, rects2, rects3), # The line objects
('GDA-RH', 'GDA-RO', 'ACDA'), # The labels for each line
loc="upper left", # Position of legend
borderaxespad=0.1, # Small spacing around legend box
# title="Legend Title", # Title for the legend
fontsize= 5
)
# x-axis / y-axis of figures
fig.text(0.5, 0.04, 'Rank of hospitals', ha='center', va='center', fontsize=legend_size)
fig.text(0.03, 0.5, 'Number of matched doctors', ha='center', va='center', rotation='vertical', fontsize=legend_size)
#######################################################################
# sub-figure (3, 3, 2)
#######################################################################
y21 = [67.25, 98.85, 123.5, 147.0, 167.0, 184.6, 196.3, 199.95, 200.0, 200.0]
y22 = [40.85, 76.35, 108.75, 140.95, 169.8, 187.45, 197.35, 199.95, 200.0, 200.0]
y23 = [69.9, 98.4, 122.0, 144.0, 163.2, 180.85, 192.85, 198.95, 200.0, 200.0]
ax2 = plt.subplot(3, 3, 2)
# set_xticks
ax2.set_xticks(x_ticks)
ax2.set_yticks(y_ticks)
ax2.set_xticklabels(x_labels, fontsize=f_size)
ax2.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 2)
ax2.set_title("ratio=1.2 \u03B8=0.5", fontsize=f_size)
# draw bars for each algorithm
rects1 = ax2.bar(x_ticks - width, y21, width, label='GDA-RH')
rects2 = ax2.bar(x_ticks, y22, width, label='GDA-RO')
rects3 = ax2.bar(x_ticks + width, y23, width, label='ACDA')
# draw a dummy point without color
plt.scatter(1.0, 1.0, marker='o', s=200, facecolors='none')
#######################################################################
# sub-figure (3, 3, 3)
#######################################################################
y31 = [150.65, 177.7, 190.6, 195.75, 198.5, 199.5, 199.95, 200.0, 200.0, 200.0]
y32 = [128.7, 178.7, 195.05, 199.25, 199.95, 200.0, 200.0, 200.0, 200.0, 200.0]
y33 = [146.0, 173.6, 187.15, 193.95, 197.45, 199.05, 199.85, 200.0, 200.0, 200.0]
ax3 = plt.subplot(3, 3, 3)
# set_xticks
ax3.set_xticks(x_ticks)
ax3.set_yticks(y_ticks)
ax3.set_xticklabels(x_labels, fontsize=f_size)
ax3.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 3)
ax3.set_title("ratio=1.2 \u03B8=0.8", fontsize=f_size)
# draw bars for each algorithm
rects1 = ax3.bar(x_ticks - width, y31, width, label='GDA-RH')
rects2 = ax3.bar(x_ticks, y32, width, label='GDA-RO')
rects3 = ax3.bar(x_ticks + width, y33, width, label='ACDA')
# draw a dummy point without color
plt.scatter(1.0, 1.0, marker='o', s=200, facecolors='none')
#######################################################################
# sub-figure (3, 3, 4)
#######################################################################
y41 = [41.55, 73.2, 102.45, 132.25, 160.4, 186.95, 199.4, 200.0, 200.0, 200.0]
y42 = [32.85, 64.6, 96.15, 129.85, 161.15, 187.45, 199.35, 200.0, 200.0, 200.0]
y43 = [34.65, 58.85, 80.8, 103.05, 126.85, 150.6, 173.65, 191.9, 200.0, 200.0]
ax4 = plt.subplot(3, 3, 4)
# set_xticks
ax4.set_xticks(x_ticks)
ax4.set_yticks(y_ticks)
ax4.set_xticklabels(x_labels, fontsize=f_size)
ax4.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 4)
ax4.set_title("ratio=1.5 \u03B8=0.2", fontsize=f_size)
# draw bars for each algorithm
rects1 = ax4.bar(x_ticks - width, y41, width, label='GDA-RH')
rects2 = ax4.bar(x_ticks, y42, width, label='GDA-RO')
rects3 = ax4.bar(x_ticks + width, y43, width, label='ACDA')
# draw a dummy point without color
plt.scatter(1.0, 1.0, marker='o', s=200, facecolors='none')
#######################################################################
# sub-figure (3, 3, 5)
#######################################################################
y51 = [74.3, 111.35, 143.0, 168.05, 187.45, 197.5, 199.85, 200.0, 200.0, 200.0]
y52 = [53.0, 98.95, 138.55, 169.95, 189.25, 197.8, 199.8, 200.0, 200.0, 200.0]
y53 = [68.25, 99.15, 123.15, 143.3, 163.4, 181.0, 192.05, 199.4, 200.0, 200.0]
ax5 = plt.subplot(3, 3, 5)
# set_xticks
ax5.set_xticks(x_ticks)
ax5.set_yticks(y_ticks)
ax5.set_xticklabels(x_labels, fontsize=f_size)
ax5.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 5)
ax5.set_title("ratio=1.5 \u03B8=0.5", fontsize=f_size)
# draw bars for each algorithm
rects1 = ax5.bar(x_ticks - width, y51, width, label='GDA-RH')
rects2 = ax5.bar(x_ticks, y52, width, label='GDA-RO')
rects3 = ax5.bar(x_ticks + width, y53, width, label='ACDA')
# draw a dummy point without color
plt.scatter(1.0, 1.0, marker='o', s=200, facecolors='none')
#######################################################################
# sub-figure (3, 3, 6)
#######################################################################
y61 = [163.5, 190.6, 198.8, 199.75, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0]
y62 = [161.25, 193.25, 198.9, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0]
y63 = [149.15, 175.2, 188.65, 195.15, 198.2, 199.45, 199.9, 200.0, 200.0, 200.0]
ax6 = plt.subplot(3, 3, 6)
# set_xticks
ax6.set_xticks(x_ticks)
ax6.set_yticks(y_ticks)
ax6.set_xticklabels(x_labels, fontsize=f_size)
ax6.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 6)
ax6.set_title("ratio=1.5 \u03B8=0.8", fontsize=f_size)
# draw bars for each algorithm
rects1 = ax6.bar(x_ticks - width, y61, width, label='GDA-RH')
rects2 = ax6.bar(x_ticks, y62, width, label='GDA-RO')
rects3 = ax6.bar(x_ticks + width, y63, width, label='ACDA')
# draw a dummy point without color
plt.scatter(1.0, 1.0, marker='o', s=200, facecolors='none')
#######################################################################
# sub-figure (3, 3, 7)
#######################################################################
y71 = [50.15, 88.05, 123.7, 160.45, 189.35, 199.45, 200.0, 200.0, 200.0, 200.0]
y72 = [45.3, 85.4, 124.25, 162.3, 188.85, 199.1, 199.95, 200.0, 200.0, 200.0]
y73 = [34.95, 58.95, 82.15, 105.5, 126.9, 151.1, 172.6, 191.6, 200.0, 200.0]
ax7 = plt.subplot(3, 3, 7)
# set_xticks
ax7.set_xticks(x_ticks)
ax7.set_yticks(y_ticks)
ax7.set_xticklabels(x_labels, fontsize=f_size)
ax7.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 7)
ax7.set_title("ratio=2.0 \u03B8=0.2", fontsize=f_size)
# draw bars for each algorithm
rects1 = ax7.bar(x_ticks - width, y71, width, label='GDA-RH')
rects2 = ax7.bar(x_ticks, y72, width, label='GDA-RO')
rects3 = ax7.bar(x_ticks + width, y73, width, label='ACDA')
# draw a dummy point without color
plt.scatter(1.0, 1.0, marker='o', s=200, facecolors='none')
#######################################################################
# sub-figure (3, 3, 8)
#######################################################################
y81 = [91.9, 138.9, 174.85, 192.3, 197.6, 199.5, 200.0, 200.0, 200.0, 200.0]
y82 = [83.4, 138.45, 175.45, 192.25, 197.5, 199.45, 200.0, 200.0, 200.0, 200.0]
y83 = [67.8, 96.85, 123.9, 146.65, 165.0, 181.6, 194.7, 199.95, 200.0, 200.0]
ax8 = plt.subplot(3, 3, 8)
# set_xticks
ax8.set_xticks(x_ticks)
ax8.set_yticks(y_ticks)
ax8.set_xticklabels(x_labels, fontsize=f_size)
ax8.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 8)
ax8.set_title("ratio=2.0 \u03B8=0.5", fontsize=f_size)
# draw bars for each algorithm
rects1 = ax8.bar(x_ticks - width, y81, width, label='GDA-RH')
rects2 = ax8.bar(x_ticks, y82, width, label='GDA-RO')
rects3 = ax8.bar(x_ticks + width, y83, width, label='ACDA')
# draw a dummy point without color
plt.scatter(1.0, 1.0, marker='o', s=200, facecolors='none')
#######################################################################
# sub-figure (3, 3, 9)
#######################################################################
y91 = [178.75, 197.1, 199.55, 199.9, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0]
y92 = [178.7, 196.65, 199.45, 199.9, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0]
y93 = [146.25, 174.35, 187.75, 194.35, 197.7, 199.45, 200.0, 200.0, 200.0, 200.0]
ax9 = plt.subplot(3, 3, 9)
# set_xticks
ax9.set_xticks(x_ticks)
ax9.set_yticks(y_ticks)
ax9.set_xticklabels(x_labels, fontsize=f_size)
ax9.set_yticklabels(y_labels, fontsize=f_size)
# set title of sub-figure (3, 3, 9)
ax9.set_title("ratio=2.0 \u03B8=0.8", fontsize=f_size)
# draw bars for each algorithm
rects1 = ax9.bar(x_ticks - width, y91, width, label='GDA-RH')
rects2 = ax9.bar(x_ticks, y92, width, label='GDA-RO')
rects3 = ax9.bar(x_ticks + width, y93, width, label='ACDA')
# draw a dummy point without color
plt.scatter(1.0, 1.0, marker='o', s=200, facecolors='none')
plt.show()
| 33.585761 | 118 | 0.550395 |
2e9684a5b30b8344fc51e58e4c374cdb168969bf | 5,016 | py | Python | nwb/nwb_file.py | bendichter/api-python | 52e97e7642021913ae6505ab63b7cc77d2622d76 | [
"BSD-3-Clause"
] | 32 | 2015-08-21T14:14:44.000Z | 2017-08-31T09:33:14.000Z | nwb/nwb_file.py | bendichter/api-python | 52e97e7642021913ae6505ab63b7cc77d2622d76 | [
"BSD-3-Clause"
] | 24 | 2015-11-18T11:17:04.000Z | 2019-12-31T19:44:18.000Z | nwb/nwb_file.py | bendichter/api-python | 52e97e7642021913ae6505ab63b7cc77d2622d76 | [
"BSD-3-Clause"
] | 18 | 2015-10-07T03:04:41.000Z | 2022-03-11T18:52:20.000Z | import sys
import time
import os.path
from . import h5gate as g
from . import nwb_init as ni
def open(file_name, start_time=None, mode="w-", identifier=None, description=None,
    core_spec="nwb_core.py", extensions=None, default_ns="core",
    keep_original=False, auto_compress=True, verbosity="all"):
    """
    Open NWB file.  Initialize identifier and description if "write" mode.
    Returns h5gate File object which is used by API to add content to the file.
    (Note: this function shadows the builtin ``open`` within this module.)
    Inputs are:
    **file_name** - Name of file to create or open.  Text.  Required.
    **start_time** - Starting time for the experiment.  Is used only if writing
    a file (mode="w").  If not specified, the current time is used.
    **mode** - Mode of file access.  One of:
        'r'  - Readonly, file must exist.  (currently only used for validation).
        'r+' - Read/write, file must exist.
        'w'  - Create file, replacing if exists.
        'w-' - Create file, fail if exists.  (Default)
        'a'  - Read/write if exists, create otherwise.
    **identifier** - Unique identifier for the file.  Required if "w" mode.  Not
    used otherwise.
    **description** - A one or two sentence description of the experiment and what
    the data inside represents.  Required if "w" mode.  Not used otherwise.
    **core_spec** - Name of file containing core specification of NWB format.
    If "-", load saved spec from NWB file (used when opening an existing file).
    **extensions** - List of files containing extensions to the format that may
    be used.  None (the default) is treated as an empty list, i.e. no extensions
    or extension specifications should be loaded from the NWB file.
    **default_ns** - Namespace of specification to use as default if no namespace
    specified when creating groups or datasets.  Normally, the default value ("core")
    should be used, since that is the namespace used in the default core_spec
    ("nwb_core.py")
    **keep_original** - If True and mode is "w" or "r+" or "a" (modes that can change
    and exiting file), a backup copy of any original file will be saved with the name
    "<filename>.prev".
    **auto_compress** - If true, data is compressed automatically through the API.
    Otherwise, the data is not automatically compressed.
    **verbosity** - Controls how much validation output is displayed.  Options are:
    'all' (default), 'summary', and 'none'.  'none' is mainly useful for unittests.
    On invalid arguments, prints the problems and exits the process (exit code 1).
    """
    # set unicode to str if using Python 3 (which does not have unicode class)
    try:
        unicode
    except NameError:
        unicode = str
    # Normalize the extensions default here rather than using a mutable
    # default argument ([]), which would be shared across calls.
    if extensions is None:
        extensions = []
    # check for required fields
    errors = []
    if not file_name or not isinstance(file_name, (str, unicode)):
        errors.append("file_name must be specified and be type string or unicode")
    if not core_spec or not isinstance(core_spec, str):
        errors.append("core_spec must be specified and be a string")
    valid_modes = ("r", "r+", "w", "w-", "a")
    if mode not in valid_modes:
        errors.append("Invalid mode. Must be one of: %s" % valid_modes)
    file_exists = os.path.isfile(file_name)
    if not file_exists and mode in ('r', 'r+'):
        errors.append("File not found. File must exist to use mode 'r' or 'r+'")
    else:
        # 'w' always creates; 'a' and 'w-' create only when the file is absent.
        creating_file = mode=="w" or (mode in ('a', 'w-') and not file_exists)
        if creating_file:
            # must be creating a new file. identifier and description required.
            if not identifier or not isinstance(identifier, str):
                errors.append("When creating a file, 'identifier' must be specified and be a string")
            if not description or not isinstance(description, str):
                errors.append("When creating a file, 'description' must be specified and be a string")
    if not isinstance(extensions, (list, tuple)) or (len(extensions) > 0 and
        not all(isinstance(item, str) for item in extensions)):
        errors.append("extensions must be a list or tuple, either empty or containing only strings")
    if errors:
        print("Error(s) found:")
        print("\n".join(errors))
        sys.exit(1)
    # setup options for h5gate
    options = {}
    options['mode'] = mode
    options['keep_original'] = keep_original
    options['auto_compress'] = auto_compress
    options['verbosity'] = verbosity
    # Nodes carrying attribute neurodata_type == "Custom" are treated as custom.
    options['custom_node_identifier'] = ["neurodata_type", "Custom"]
    # core_spec == '-' means "load specs from the NWB file itself"; in that
    # case no external spec files (not even extensions) are passed along.
    spec_files = (extensions + [core_spec]) if core_spec != '-' else []
    # open file
    f = g.File(file_name, spec_files, default_ns, options)
    # set initial metadata and call_back for updating modification_time
    ni.nwb_init(f, mode, start_time, identifier, description, creating_file)
    return f
| 45.6 | 104 | 0.660088 |
69dfa4c3e73cf837981d50c77e1126914be4cb65 | 110 | py | Python | fs_image/rpm/repo_db.py | philipjameson/buckit | 83b4ba7fc7a7a9d28b7a66117de6d6beccfdf7f8 | [
"BSD-3-Clause"
] | null | null | null | fs_image/rpm/repo_db.py | philipjameson/buckit | 83b4ba7fc7a7a9d28b7a66117de6d6beccfdf7f8 | [
"BSD-3-Clause"
] | null | null | null | fs_image/rpm/repo_db.py | philipjameson/buckit | 83b4ba7fc7a7a9d28b7a66117de6d6beccfdf7f8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import enum
# Functional Enum declaration -- equivalent to the class-based form.
SQLDialect = enum.Enum('SQLDialect', [('SQLITE3', 'sqlite3'), ('MYSQL', 'mysql')])
SQLDialect.__doc__ = "Supported SQL dialects."
| 13.75 | 28 | 0.663636 |
ebbf692b01e611ac6da64cbcd8d2ec4a3072fff7 | 911 | py | Python | csm_web/scheduler/management/commands/patchattendances.py | shadaj/csm_web | aab7920987bf3212b6da7fa26bac24cca77c6d03 | [
"MIT"
] | null | null | null | csm_web/scheduler/management/commands/patchattendances.py | shadaj/csm_web | aab7920987bf3212b6da7fa26bac24cca77c6d03 | [
"MIT"
] | null | null | null | csm_web/scheduler/management/commands/patchattendances.py | shadaj/csm_web | aab7920987bf3212b6da7fa26bac24cca77c6d03 | [
"MIT"
] | null | null | null | from django.utils import timezone
from django.core.management import BaseCommand
from scheduler.models import Student, Attendance, week_bounds
class Command(BaseCommand):
    """Management command that backfills one attendance row per student
    lacking any attendance in the current week."""

    help = "Creates a single attendance for this week for all students who have no attendances for this week."

    def handle(self, *args, **options):
        week_start, week_end = week_bounds(timezone.now().date())
        # Active students with no attendance recorded inside this week.
        missing = (
            Student.objects.filter(active=True)
            .select_related("section__course", "user")
            .exclude(attendance__date__range=(week_start, week_end))
        )
        # Note that bulk_create can only set the primary key in Postgres, so this won't work as expected in development if using Sqlite
        print(f"Updating attendance for week of {week_start}")
        print('\n'.join(map(str, missing)))
        Attendance.objects.bulk_create(
            Attendance(student=student, date=week_start) for student in missing
        )
| 53.588235 | 135 | 0.737651 |
a97de11a327948e60a5be51bfb7dd6d256a68015 | 294 | py | Python | src/interpreter/functions/join.py | BowlingPizzaBall/b-star | 3e614443281702e124309ea7496314519b707b5f | [
"MIT"
] | null | null | null | src/interpreter/functions/join.py | BowlingPizzaBall/b-star | 3e614443281702e124309ea7496314519b707b5f | [
"MIT"
] | null | null | null | src/interpreter/functions/join.py | BowlingPizzaBall/b-star | 3e614443281702e124309ea7496314519b707b5f | [
"MIT"
] | null | null | null | from typing import List
from src.interpreter.expression import Expression
def join(block: List, codebase):
    """Evaluate each argument expression and concatenate the pieces into one string.

    Args:
        block: Parsed form [op, arg1, arg2, ...]; block[0] is the operator
            token, so evaluation starts at block[1].
        codebase: Context object passed through to Expression evaluation.

    Returns:
        The concatenation of all evaluated pieces as a single string.
    """
    # The first Expression result acts as the accumulator; the redundant
    # `first`/`buffer` aliasing of the original is removed.
    pieces = Expression(block[1], codebase)
    for arg in block[2:]:
        pieces.extend(Expression(arg, codebase))
    return "".join(pieces)
| 22.615385 | 49 | 0.676871 |
dfd8f2c9cd377f177dd3a5d56d1843c2c157db91 | 139 | py | Python | PythonNetwork/venv/Lib/site-packages/pika/exchange_type.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 2,479 | 2015-01-01T20:06:23.000Z | 2022-03-31T13:29:19.000Z | PythonNetwork/venv/Lib/site-packages/pika/exchange_type.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 813 | 2015-01-07T07:13:49.000Z | 2022-03-28T05:05:06.000Z | PythonNetwork/venv/Lib/site-packages/pika/exchange_type.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 763 | 2015-01-10T04:38:33.000Z | 2022-03-31T07:24:57.000Z | from enum import Enum
class ExchangeType(Enum):
    """Enumeration of AMQP exchange types (value is the wire-level name)."""
    direct = 'direct'
    fanout = 'fanout'
    headers = 'headers'
    topic = 'topic'
| 15.444444 | 26 | 0.625899 |
a01a9b4a677c716941181dab82edea8742c71e82 | 15,419 | py | Python | data/data_utils.py | iostermann/deeplab2 | 68822b0c76c2c757a11402ac597711f926990f04 | [
"Apache-2.0"
] | null | null | null | data/data_utils.py | iostermann/deeplab2 | 68822b0c76c2c757a11402ac597711f926990f04 | [
"Apache-2.0"
] | null | null | null | data/data_utils.py | iostermann/deeplab2 | 68822b0c76c2c757a11402ac597711f926990f04 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common utility functions and classes for building dataset."""
import collections
import io
import PIL.Image
import numpy as np
from PIL import Image
from PIL import ImageOps
import tensorflow as tf
from deeplab2 import common
# Label-format tag for panoptic labels stored as raw int32 buffers.
# NOTE(review): not referenced in this chunk -- presumably used elsewhere
# in the module; confirm before removing.
_PANOPTIC_LABEL_FORMAT = 'raw'
def read_image(image_data):
  """Decodes image from in-memory data and resizes it to 256x256.

  Applies EXIF-based orientation correction when present, then resizes with
  LANCZOS resampling (smooth interpolation, suited to photographic content;
  see read_mask for the label-map variant).

  Args:
    image_data: Bytes data representing encoded image.

  Returns:
    Decoded PIL.Image object, resized to 256x256.
  """
  image = Image.open(io.BytesIO(image_data))
  try:
    image = ImageOps.exif_transpose(image)
  except TypeError:
    # capture and ignore this bug:
    # https://github.com/python-pillow/Pillow/issues/3973
    pass
  # NOTE(review): the fixed 256x256 resize is a behavior change relative to
  # the upstream docstring contract ("decodes image") -- callers relying on
  # native resolution should confirm this is intended.
  image = image.resize((256, 256), resample=PIL.Image.LANCZOS)
  return image
def read_mask(image_data):
  """Decodes a label/mask image from in-memory data and resizes it to 256x256.

  Same as read_image but resizes with NEAREST resampling so that discrete
  label values are preserved (no interpolation between class ids).

  Args:
    image_data: Bytes data representing encoded image.

  Returns:
    Decoded PIL.Image object, resized to 256x256.
  """
  image = Image.open(io.BytesIO(image_data))
  try:
    image = ImageOps.exif_transpose(image)
  except TypeError:
    # capture and ignore this bug:
    # https://github.com/python-pillow/Pillow/issues/3973
    pass
  # NEAREST keeps mask pixel values exact (no blended label ids).
  image = image.resize((256, 256), resample=PIL.Image.NEAREST)
  return image
def get_image_dims(image_data, check_is_rgb=False):
  """Decodes an image and returns its (height, width).

  Args:
    image_data: Bytes data representing encoded image.
    check_is_rgb: Whether to check encoded image is RGB.

  Returns:
    Decoded image size as a tuple of (height, width).

  Raises:
    ValueError: If check_is_rgb is set and input image has other format.
  """
  decoded = read_image(image_data)
  if check_is_rgb and decoded.mode != 'RGB':
    raise ValueError('Expects RGB image data, gets mode: %s' % decoded.mode)
  width, height = decoded.size
  return height, width
def _int64_list_feature(values):
  """Returns a TF-Feature of int64_list.

  Args:
    values: A scalar or an iterable of integer values.

  Returns:
    A TF-Feature.
  """
  # `collections.Iterable` was deprecated in Python 3.3 and removed in 3.10,
  # so the original isinstance check raises AttributeError on modern Python.
  # Duck-type the check instead: scalars are wrapped in a single-item list.
  try:
    iter(values)
  except TypeError:
    values = [values]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_list_feature(values):
  """Returns a TF-Feature of bytes.

  Args:
    values: A string.

  Returns:
    A TF-Feature.
  """
  # Text input is UTF-8 encoded; bytes input is passed through unchanged.
  payload = values.encode() if isinstance(values, str) else values
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[payload]))
def create_features(image_data,
                    image_format,
                    filename,
                    label_data=None,
                    label_format=None):
  """Creates image/segmentation features.

  Args:
    image_data: String or byte stream of encoded image data.
    image_format: String, image data format, should be either 'jpeg', 'jpg', or
      'png'.
    filename: String, image filename.
    label_data: String or byte stream of (potentially) encoded label data. If
      None, we skip to write it to tf.train.Example.
    label_format: String, label data format, should be either 'png' or 'raw'. If
      None, we skip to write it to tf.train.Example.

  Returns:
    A dictionary of feature name to tf.train.Feature mapping.

  Raises:
    ValueError: If the image/label format is unsupported, or the label size
      does not match the image size.
  """
  if image_format not in ('jpeg', 'png', 'jpg'):
    raise ValueError('Unsupported image format: %s' % image_format)
  # Check color mode, and convert grey image to rgb image.
  # NOTE: when the image is not RGB it is converted and re-encoded, so the
  # bytes stored in the feature dict may differ from the caller's input.
  image = read_image(image_data)
  if image.mode != 'RGB':
    image = image.convert('RGB')
    image_data = io.BytesIO()
    image.save(image_data, format=image_format)
    image_data = image_data.getvalue()
  height, width = get_image_dims(image_data, check_is_rgb=True)
  feature_dict = {
      common.KEY_ENCODED_IMAGE: _bytes_list_feature(image_data),
      common.KEY_IMAGE_FILENAME: _bytes_list_feature(filename),
      common.KEY_IMAGE_FORMAT: _bytes_list_feature(image_format),
      common.KEY_IMAGE_HEIGHT: _int64_list_feature(height),
      common.KEY_IMAGE_WIDTH: _int64_list_feature(width),
      common.KEY_IMAGE_CHANNELS: _int64_list_feature(3),
  }
  if label_data is None:
    return feature_dict
  if label_format == 'png':
    # PNG labels must decode to the same spatial size as the image.
    label_height, label_width = get_image_dims(label_data)
    if (label_height, label_width) != (height, width):
      raise ValueError('Image (%s) and label (%s) shape mismatch' %
                       ((height, width), (label_height, label_width)))
  elif label_format == 'raw':
    # Raw label encodes int32 array.
    expected_label_size = height * width * np.dtype(np.int32).itemsize
    if len(label_data) != expected_label_size:
      raise ValueError('Expects raw label data length %d, gets %d' %
                       (expected_label_size, len(label_data)))
  else:
    raise ValueError('Unsupported label format: %s' % label_format)
  feature_dict.update({
      common.KEY_ENCODED_LABEL: _bytes_list_feature(label_data),
      common.KEY_LABEL_FORMAT: _bytes_list_feature(label_format)
  })
  return feature_dict
def create_tfexample(image_data,
                     image_format,
                     filename,
                     label_data=None,
                     label_format=None):
  """Converts one image/segmentation pair to TF example.

  Args:
    image_data: String or byte stream of encoded image data.
    image_format: String, image data format, should be either 'jpeg' or 'png'.
    filename: String, image filename.
    label_data: String or byte stream of (potentially) encoded label data. If
      None, we skip to write it to tf.train.Example.
    label_format: String, label data format, should be either 'png' or 'raw'. If
      None, we skip to write it to tf.train.Example.

  Returns:
    TF example proto.
  """
  # Thin wrapper: delegate feature construction, then wrap in an Example.
  features = tf.train.Features(
      feature=create_features(image_data, image_format, filename, label_data,
                              label_format))
  return tf.train.Example(features=features)
def create_video_tfexample(image_data,
                           image_format,
                           filename,
                           sequence_id,
                           image_id,
                           label_data=None,
                           label_format=None,
                           prev_image_data=None,
                           prev_label_data=None):
  """Converts one video frame/panoptic segmentation pair to TF example.

  Args:
    image_data: String or byte stream of encoded image data.
    image_format: String, image data format, should be either 'jpeg' or 'png'.
    filename: String, image filename.
    sequence_id: ID of the video sequence as a string.
    image_id: ID of the image as a string.
    label_data: String or byte stream of (potentially) encoded label data. If
      None, we skip to write it to tf.train.Example.
    label_format: String, label data format, should be either 'png' or 'raw'. If
      None, we skip to write it to tf.train.Example.
    prev_image_data: An optional string or byte stream of encoded previous image
      data.
    prev_label_data: An optional string or byte stream of (potentially) encoded
      previous label data.

  Returns:
    TF example proto.
  """
  # Base image/label features, then the video-specific keys on top.
  features = create_features(image_data, image_format, filename, label_data,
                             label_format)
  features[common.KEY_SEQUENCE_ID] = _bytes_list_feature(sequence_id)
  features[common.KEY_FRAME_ID] = _bytes_list_feature(image_id)
  # Previous-frame data is optional and only stored when supplied.
  if prev_image_data is not None:
    features[common.KEY_ENCODED_PREV_IMAGE] = _bytes_list_feature(
        prev_image_data)
  if prev_label_data is not None:
    features[common.KEY_ENCODED_PREV_LABEL] = _bytes_list_feature(
        prev_label_data)
  return tf.train.Example(features=tf.train.Features(feature=features))
def create_video_and_depth_tfexample(image_data,
                                     image_format,
                                     filename,
                                     sequence_id,
                                     image_id,
                                     label_data=None,
                                     label_format=None,
                                     next_image_data=None,
                                     next_label_data=None,
                                     depth_data=None,
                                     depth_format=None):
  """Converts an image/segmentation pair and depth of first frame to TF example.

  The image pair contains the current frame and the next frame with the
  current frame including depth label.

  Args:
    image_data: String or byte stream of encoded image data.
    image_format: String, image data format, should be either 'jpeg' or 'png'.
    filename: String, image filename.
    sequence_id: ID of the video sequence as a string.
    image_id: ID of the image as a string.
    label_data: String or byte stream of (potentially) encoded label data. If
      None, we skip to write it to tf.train.Example.
    label_format: String, label data format, should be either 'png' or 'raw'. If
      None, we skip to write it to tf.train.Example.
    next_image_data: An optional string or byte stream of encoded next image
      data.
    next_label_data: An optional string or byte stream of (potentially) encoded
      next label data.
    depth_data: An optional string or byte sream of encoded depth data.
    depth_format: String, depth data format, should be either 'png' or 'raw'.

  Returns:
    TF example proto.
  """
  # Base image/label features, then the video- and depth-specific keys.
  features = create_features(image_data, image_format, filename, label_data,
                             label_format)
  features[common.KEY_SEQUENCE_ID] = _bytes_list_feature(sequence_id)
  features[common.KEY_FRAME_ID] = _bytes_list_feature(image_id)
  # Next-frame data is optional and only stored when supplied.
  if next_image_data is not None:
    features[common.KEY_ENCODED_NEXT_IMAGE] = _bytes_list_feature(
        next_image_data)
  if next_label_data is not None:
    features[common.KEY_ENCODED_NEXT_LABEL] = _bytes_list_feature(
        next_label_data)
  # Depth data and its format are stored together when depth is supplied.
  if depth_data is not None:
    features[common.KEY_ENCODED_DEPTH] = _bytes_list_feature(depth_data)
    features[common.KEY_DEPTH_FORMAT] = _bytes_list_feature(depth_format)
  return tf.train.Example(features=tf.train.Features(feature=features))
class SegmentationDecoder(object):
  """Basic parser to decode serialized tf.Example.

  The constructor builds the tf.io feature schema once from the dataset
  flags; __call__ then parses a serialized example against that schema and
  returns a dict of decoded tensors.  The optional keys added in __init__
  must stay in sync with the keys read in __call__.
  """

  def __init__(self,
               is_panoptic_dataset=True,
               is_video_dataset=False,
               is_depth_dataset=False,
               use_two_frames=False,
               use_next_frame=False,
               decode_groundtruth_label=True):
    # Flags controlling which optional features are parsed and decoded.
    self._is_panoptic_dataset = is_panoptic_dataset
    self._is_video_dataset = is_video_dataset
    self._is_depth_dataset = is_depth_dataset
    self._use_two_frames = use_two_frames
    self._use_next_frame = use_next_frame
    self._decode_groundtruth_label = decode_groundtruth_label
    string_feature = tf.io.FixedLenFeature((), tf.string)
    int_feature = tf.io.FixedLenFeature((), tf.int64)
    # Mandatory features present in every example.
    self._keys_to_features = {
        common.KEY_ENCODED_IMAGE: string_feature,
        common.KEY_IMAGE_FILENAME: string_feature,
        common.KEY_IMAGE_FORMAT: string_feature,
        common.KEY_IMAGE_HEIGHT: int_feature,
        common.KEY_IMAGE_WIDTH: int_feature,
        common.KEY_IMAGE_CHANNELS: int_feature,
    }
    if decode_groundtruth_label:
      self._keys_to_features[common.KEY_ENCODED_LABEL] = string_feature
    if self._is_video_dataset:
      self._keys_to_features[common.KEY_SEQUENCE_ID] = string_feature
      self._keys_to_features[common.KEY_FRAME_ID] = string_feature
    # Two-frame specific processing.
    if self._use_two_frames:
      self._keys_to_features[common.KEY_ENCODED_PREV_IMAGE] = string_feature
      if decode_groundtruth_label:
        self._keys_to_features[common.KEY_ENCODED_PREV_LABEL] = string_feature
    # Next-frame specific processing.
    if self._use_next_frame:
      self._keys_to_features[common.KEY_ENCODED_NEXT_IMAGE] = string_feature
      if decode_groundtruth_label:
        self._keys_to_features[common.KEY_ENCODED_NEXT_LABEL] = string_feature
    # Depth specific processing.
    if self._is_depth_dataset and decode_groundtruth_label:
      self._keys_to_features[common.KEY_ENCODED_DEPTH] = string_feature

  def _decode_image(self, parsed_tensors, key):
    """Decodes image under key from parsed tensors as uint8 [H, W, 3]."""
    image = tf.io.decode_image(
        parsed_tensors[key],
        channels=3,
        dtype=tf.dtypes.uint8,
        expand_animations=False)
    image.set_shape([None, None, 3])
    return image

  def _decode_label(self, parsed_tensors, label_key):
    """Decodes segmentation label under label_key from parsed tensors.

    Panoptic datasets store labels as raw int32 buffers which are reshaped
    to [height, width, 1] using the example's stored dimensions; otherwise
    the label is an encoded single-channel image.
    """
    if self._is_panoptic_dataset:
      flattened_label = tf.io.decode_raw(
          parsed_tensors[label_key], out_type=tf.int32)
      label_shape = tf.stack([
          parsed_tensors[common.KEY_IMAGE_HEIGHT],
          parsed_tensors[common.KEY_IMAGE_WIDTH], 1
      ])
      label = tf.reshape(flattened_label, label_shape)
      return label
    label = tf.io.decode_image(parsed_tensors[label_key], channels=1)
    label.set_shape([None, None, 1])
    return label

  def __call__(self, serialized_example):
    # Parse against the schema built in __init__, then decode each tensor.
    parsed_tensors = tf.io.parse_single_example(
        serialized_example, features=self._keys_to_features)
    return_dict = {
        'image':
            self._decode_image(parsed_tensors, common.KEY_ENCODED_IMAGE),
        'image_name':
            parsed_tensors[common.KEY_IMAGE_FILENAME],
        'height':
            tf.cast(parsed_tensors[common.KEY_IMAGE_HEIGHT], dtype=tf.int32),
        'width':
            tf.cast(parsed_tensors[common.KEY_IMAGE_WIDTH], dtype=tf.int32),
    }
    # 'label' is always present in the output; None when not decoded.
    return_dict['label'] = None
    if self._decode_groundtruth_label:
      return_dict['label'] = self._decode_label(parsed_tensors,
                                                common.KEY_ENCODED_LABEL)
    if self._is_video_dataset:
      return_dict['sequence'] = parsed_tensors[common.KEY_SEQUENCE_ID]
    if self._use_two_frames:
      return_dict['prev_image'] = self._decode_image(
          parsed_tensors, common.KEY_ENCODED_PREV_IMAGE)
      if self._decode_groundtruth_label:
        return_dict['prev_label'] = self._decode_label(
            parsed_tensors, common.KEY_ENCODED_PREV_LABEL)
    if self._use_next_frame:
      return_dict['next_image'] = self._decode_image(
          parsed_tensors, common.KEY_ENCODED_NEXT_IMAGE)
      if self._decode_groundtruth_label:
        return_dict['next_label'] = self._decode_label(
            parsed_tensors, common.KEY_ENCODED_NEXT_LABEL)
    if self._is_depth_dataset and self._decode_groundtruth_label:
      # Depth shares the label decode path (raw int32 for panoptic datasets).
      return_dict['depth'] = self._decode_label(
          parsed_tensors, common.KEY_ENCODED_DEPTH)
    return return_dict
| 36.365566 | 80 | 0.683896 |
2ec3d1256d6b8bec59c82a8435b2e4d0de301058 | 841 | py | Python | dynamic_schemas/urls.py | Threemusketeerz/DSystems | cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c | [
"BSD-2-Clause"
] | 1 | 2018-01-23T12:23:48.000Z | 2018-01-23T12:23:48.000Z | dynamic_schemas/urls.py | Threemusketeerz/DSystems | cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c | [
"BSD-2-Clause"
] | 1 | 2018-01-19T08:43:59.000Z | 2018-01-23T12:20:43.000Z | dynamic_schemas/urls.py | Threemusketeerz/DSystems | cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c | [
"BSD-2-Clause"
] | null | null | null | from django.conf.urls import url
from . import views
# URL namespace for reversing, e.g. reverse('dynamic_schemas:schema_list').
app_name = 'dynamic_schemas'

# Routes for browsing schemas and creating/updating/listing their responses.
urlpatterns = [
    url(r'^$',
        views.SchemaIndexView.as_view(), name='schema_list'),
    url(r'^(?P<pk>[0-9]+)/$',
        views.SchemaView.as_view(), name='schema_view'),
    url(r'^(?P<pk>[0-9]+)/create/$',
        views.form_view, name='create_form'),
    url(r'^(?P<pk>[0-9]+)/(?P<r_pk>[0-9]+)/update/$',
        views.form_update_view, name='update_response'),
    url(r'^(?P<pk>[0-9]+)/responses/$',
        views.ResponseList.as_view(), name='list_responses'),
    url(r'^(?P<pk>[0-9]+)/columns/$',
        views.ResponseColumns.as_view(), name='response_columns'),
]
| 38.227273 | 78 | 0.600476 |
58f085b8d05a894a8d0996797d52f791b52fe2d6 | 963 | py | Python | app/user/views.py | arifhusaini97/recipe-app-api | 3011624d87d56c285b0bad8c647e0a847d7e030d | [
"MIT"
] | null | null | null | app/user/views.py | arifhusaini97/recipe-app-api | 3011624d87d56c285b0bad8c647e0a847d7e030d | [
"MIT"
] | null | null | null | app/user/views.py | arifhusaini97/recipe-app-api | 3011624d87d56c285b0bad8c647e0a847d7e030d | [
"MIT"
] | null | null | null | # from django.shortcuts import render
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
    """Create a new user in the system (POST only)."""

    # Validation and object creation are delegated entirely to the serializer.
    serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
    """Create a new auth token for user"""

    serializer_class = AuthTokenSerializer
    # NOTE(review): renderer_classes is set explicitly here, presumably so
    # this endpoint renders like the rest of the API (browsable) — confirm.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Retrieve or update the profile of the authenticated user."""

    serializer_class = UserSerializer
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        """Return the user attached to the current request."""
        # No pk lookup: the endpoint always operates on the caller.
        current_user = self.request.user
        return current_user
| 34.392857 | 66 | 0.790239 |
3503bb574032e8da9c8fc5b5708f717a611803a3 | 20,227 | py | Python | plugins/modules/oci_data_safe_user_assessment_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_data_safe_user_assessment_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_data_safe_user_assessment_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata (schema version, maturity, support owner).
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_safe_user_assessment_facts
short_description: Fetches details about one or multiple UserAssessment resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple UserAssessment resources in Oracle Cloud Infrastructure
- Gets a list of user assessments.
- The ListUserAssessments operation returns only the assessments in the specified `compartmentId`.
The list does not include any subcompartments of the compartmentId passed.
- The parameter `accessLevel` specifies whether to return only those compartments for which the
requestor has INSPECT permissions on at least one resource directly
or indirectly (ACCESSIBLE) (the resource can be in a subcompartment) or to return Not Authorized if
Principal doesn't have access to even one of the child compartments. This is valid only when
`compartmentIdInSubtree` is set to `true`.
- The parameter `compartmentIdInSubtree` applies when you perform ListUserAssessments on the
`compartmentId` passed and when it is set to true, the entire hierarchy of compartments can be returned.
To get a full list of all compartments and subcompartments in the tenancy (root compartment),
set the parameter `compartmentIdInSubtree` to true and `accessLevel` to ACCESSIBLE.
- If I(user_assessment_id) is specified, the details of a single UserAssessment will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
user_assessment_id:
description:
- The OCID of the user assessment.
- Required to get a specific user_assessment.
type: str
aliases: ["id"]
compartment_id:
description:
- A filter to return only resources that match the specified compartment OCID.
- Required to list multiple user_assessments.
type: str
compartment_id_in_subtree:
description:
- Default is false.
When set to true, the hierarchy of compartments is traversed and all compartments and subcompartments in the tenancy are returned. Depends on the
'accessLevel' setting.
type: bool
access_level:
description:
- Valid values are RESTRICTED and ACCESSIBLE. Default is RESTRICTED.
Setting this to ACCESSIBLE returns only those compartments for which the
user has INSPECT permissions directly or indirectly (permissions can be on a
resource in a subcompartment). When set to RESTRICTED permissions are checked and no partial results are displayed.
type: str
choices:
- "RESTRICTED"
- "ACCESSIBLE"
display_name:
description:
- A filter to return only resources that match the specified display name.
type: str
aliases: ["name"]
schedule_user_assessment_id:
description:
- The OCID of the user assessment of type SAVE_SCHEDULE.
type: str
is_schedule_assessment:
description:
- A filter to return only user assessments of type SAVE_SCHEDULE.
type: bool
is_baseline:
description:
- A filter to return only user assessments that are set as baseline.
type: bool
target_id:
description:
- A filter to return only items that match the specified target.
type: str
type:
description:
- A filter to return only items that match the specified assessment type.
type: str
choices:
- "LATEST"
- "SAVED"
- "COMPARTMENT"
- "SAVE_SCHEDULE"
triggered_by:
description:
- A filter to return user assessments that were created by either the system or by a user only.
type: str
choices:
- "USER"
- "SYSTEM"
time_created_greater_than_or_equal_to:
description:
- A filter to return only user assessments that were created after the specified date and time, as defined by
L(RFC3339,https://tools.ietf.org/html/rfc3339).
Using timeCreatedGreaterThanOrEqualTo parameter retrieves all assessments created after that date.
- "**Example:** 2016-12-19T16:39:57.600Z"
type: str
time_created_less_than:
description:
- "Search for items that were created before a specific date.
Specifying this parameter corresponding `timeCreatedLessThan`
parameter will retrieve all items created before the
specified created date, in \\"YYYY-MM-ddThh:mmZ\\" format with a Z offset, as
defined by RFC 3339."
- "**Example:** 2016-12-19T16:39:57.600Z"
type: str
lifecycle_state:
description:
- The current state of the user assessment.
type: str
choices:
- "CREATING"
- "SUCCEEDED"
- "UPDATING"
- "DELETING"
- "FAILED"
sort_order:
description:
- The sort order to use, either ascending (ASC) or descending (DESC).
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The field to sort by. You can specify only one sort order (sortOrder). The default order for timeCreated is descending.
type: str
choices:
- "timeCreated"
- "displayName"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific user_assessment
oci_data_safe_user_assessment_facts:
# required
user_assessment_id: "ocid1.userassessment.oc1..xxxxxxEXAMPLExxxxxx"
- name: List user_assessments
oci_data_safe_user_assessment_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
compartment_id_in_subtree: true
access_level: RESTRICTED
display_name: display_name_example
schedule_user_assessment_id: "ocid1.scheduleuserassessment.oc1..xxxxxxEXAMPLExxxxxx"
is_schedule_assessment: true
is_baseline: true
target_id: "ocid1.target.oc1..xxxxxxEXAMPLExxxxxx"
type: LATEST
triggered_by: USER
time_created_greater_than_or_equal_to: 2013-10-20T19:20:30+01:00
time_created_less_than: 2013-10-20T19:20:30+01:00
lifecycle_state: CREATING
sort_order: ASC
sort_by: timeCreated
"""
RETURN = """
user_assessments:
description:
- List of UserAssessment resources
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment that contains the user assessment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
description:
description:
- The description of the user assessment.
returned: on success
type: str
sample: description_example
display_name:
description:
- The display name of the user assessment.
returned: on success
type: str
sample: display_name_example
id:
description:
- The OCID of the user assessment.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
ignored_targets:
description:
- "List containing maps as values.
Example: `{\\"Operations\\": [ {\\"CostCenter\\": \\"42\\"} ] }`"
returned: on success
type: list
sample: []
ignored_assessment_ids:
description:
- "List containing maps as values.
Example: `{\\"Operations\\": [ {\\"CostCenter\\": \\"42\\"} ] }`"
returned: on success
type: list
sample: []
is_baseline:
description:
- Indicates if the user assessment is set as a baseline. This is applicable only to saved user assessments.
returned: on success
type: bool
sample: true
is_deviated_from_baseline:
description:
- Indicates if the user assessment deviates from the baseline.
returned: on success
type: bool
sample: true
last_compared_baseline_id:
description:
- The OCID of the last user assessment baseline against which the latest assessment was compared.
returned: on success
type: str
sample: "ocid1.lastcomparedbaseline.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current state of the user assessment.
returned: on success
type: str
sample: CREATING
lifecycle_details:
description:
- Details about the current state of the user assessment.
returned: on success
type: str
sample: lifecycle_details_example
schedule_assessment_id:
description:
- The OCID of the user assessment that is responsible for creating this scheduled save assessment.
returned: on success
type: str
sample: "ocid1.scheduleassessment.oc1..xxxxxxEXAMPLExxxxxx"
schedule:
description:
- "Schedule of the assessment that runs periodically in this specified format: <version-string>;<version-specific-schedule>
Allowed version strings - v1
v1's version specific schedule -<ss> <mm> <hh> <day-of-week> <day-of-month>
Each of the above fields potentially introduce constraints. A workrequest is created only
when clock time satisfies all the constraints. Constraints introduced
1. seconds = <ss> (So, the allowed range for <ss> is [0, 59])
2. minutes = <mm> (So, the allowed range for <mm> is [0, 59])
3. hours = <hh> (So, the allowed range for <hh> is [0, 23])
<day-of-week> can be either '*' (without quotes or a number between 1(Monday) and 7(Sunday))
4. No constraint introduced when it is '*'. When not, day of week must equal the given value
<day-of-month> can be either '*' (without quotes or a number between 1 and 28)
5. No constraint introduced when it is '*'. When not, day of month must equal the given value"
returned: on success
type: str
sample: "1. '0 30 13 * *' - This indicates to run a user assessment at 13:30:00 every day"
statistics:
description:
- "Map that contains maps of values.
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {}
target_ids:
description:
- Array of database target OCIDs.
returned: on success
type: list
sample: []
time_created:
description:
- The date and time the user assessment was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The date and time the user assessment was last updated, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
triggered_by:
description:
- Indicates whether the user assessment was created by system or user.
returned: on success
type: str
sample: USER
type:
description:
- "Type of user assessment. Type can be:"
- "LATEST: The most up-to-date assessment that is running automatically for a target. It is system generated.
SAVED: A saved user assessment. LATEST assessments will always be saved to maintain the history of runs. A SAVED assessment is also generated
by a 'refresh' action (triggered by the user).
SAVE_SCHEDULE: A schedule to periodically save LATEST assessments.
COMPARTMENT: An automatic managed assessment type that stores all details of targets in one compartment. This will keep an up-to-date status
of all database risks in one compartment.
It is automatically updated once the latest assessment or refresh action is executed, as well as when a target is deleted or move to a
different compartment."
returned: on success
type: str
sample: LATEST
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see
L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm)
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm)
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "System tags for this resource. Each key is predefined and scoped to a namespace. For more information, see Resource Tags.
Example: `{\\"orcl-cloud\\": {\\"free-tier-retained\\": \\"true\\"}}`"
returned: on success
type: dict
sample: {}
sample: [{
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"description": "description_example",
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"ignored_targets": [],
"ignored_assessment_ids": [],
"is_baseline": true,
"is_deviated_from_baseline": true,
"last_compared_baseline_id": "ocid1.lastcomparedbaseline.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example",
"schedule_assessment_id": "ocid1.scheduleassessment.oc1..xxxxxxEXAMPLExxxxxx",
"schedule": "1. '0 30 13 * *' - This indicates to run a user assessment at 13:30:00 every day",
"statistics": {},
"target_ids": [],
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"triggered_by": "USER",
"type": "LATEST",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {}
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
# Import the OCI SDK under a guard so a missing dependency is reported as a
# clean module failure in main() instead of an import-time traceback.
try:
    from oci.data_safe import DataSafeClient

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class DataSafeUserAssessmentFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get, list"""

    def get_required_params_for_get(self):
        # A single assessment is addressed purely by its OCID.
        return ["user_assessment_id"]

    def get_required_params_for_list(self):
        # Listing is always scoped to one compartment.
        return ["compartment_id"]

    def get_resource(self):
        """Fetches one user assessment by OCID (with retry/backoff)."""
        return oci_common_utils.call_with_backoff(
            self.client.get_user_assessment,
            user_assessment_id=self.module.params.get("user_assessment_id"),
        )

    def list_resources(self):
        """Lists user assessments in the compartment, applying any filters
        the caller actually supplied."""
        optional_params = (
            "compartment_id_in_subtree",
            "access_level",
            "display_name",
            "schedule_user_assessment_id",
            "is_schedule_assessment",
            "is_baseline",
            "target_id",
            "type",
            "triggered_by",
            "time_created_greater_than_or_equal_to",
            "time_created_less_than",
            "lifecycle_state",
            "sort_order",
            "sort_by",
        )
        # Forward only the options that were explicitly set; unset options
        # must not be sent to the API at all.
        supplied_kwargs = {
            name: self.module.params[name]
            for name in optional_params
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.list_all_resources(
            self.client.list_user_assessments,
            compartment_id=self.module.params.get("compartment_id"),
            **supplied_kwargs
        )
# Resolve the optional hand-written customization class by name (see
# oci_resource_utils.get_custom_class for the lookup behavior).
DataSafeUserAssessmentFactsHelperCustom = get_custom_class(
    "DataSafeUserAssessmentFactsHelperCustom"
)
class ResourceFactsHelper(
    DataSafeUserAssessmentFactsHelperCustom, DataSafeUserAssessmentFactsHelperGen
):
    """Facts helper combining customizations (first in MRO) with the
    generated base implementation."""

    pass
def main():
    """Module entry point: build the argument spec, dispatch to get or list,
    and exit with the collected user assessment facts."""
    # Start from the common OCI args (auth, region, ...) and add the
    # module-specific parameters documented in DOCUMENTATION above.
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            user_assessment_id=dict(aliases=["id"], type="str"),
            compartment_id=dict(type="str"),
            compartment_id_in_subtree=dict(type="bool"),
            access_level=dict(type="str", choices=["RESTRICTED", "ACCESSIBLE"]),
            display_name=dict(aliases=["name"], type="str"),
            schedule_user_assessment_id=dict(type="str"),
            is_schedule_assessment=dict(type="bool"),
            is_baseline=dict(type="bool"),
            target_id=dict(type="str"),
            type=dict(
                type="str", choices=["LATEST", "SAVED", "COMPARTMENT", "SAVE_SCHEDULE"]
            ),
            triggered_by=dict(type="str", choices=["USER", "SYSTEM"]),
            time_created_greater_than_or_equal_to=dict(type="str"),
            time_created_less_than=dict(type="str"),
            lifecycle_state=dict(
                type="str",
                choices=["CREATING", "SUCCEEDED", "UPDATING", "DELETING", "FAILED"],
            ),
            sort_order=dict(type="str", choices=["ASC", "DESC"]),
            sort_by=dict(type="str", choices=["timeCreated", "displayName"]),
        )
    )

    module = AnsibleModule(argument_spec=module_args)

    # Fail cleanly if the guarded SDK import at the top of the file failed.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="user_assessment",
        service_client_class=DataSafeClient,
        namespace="data_safe",
    )

    result = []

    # The helper decides get vs. list from which required params were given.
    if resource_facts_helper.is_get():
        result = [resource_facts_helper.get()]
    elif resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()

    module.exit_json(user_assessments=result)
module.exit_json(user_assessments=result)
if __name__ == "__main__":
main()
| 40.616466 | 159 | 0.616602 |
84dbcc600890b03809da1472b5b26e4b3d777d11 | 11,930 | py | Python | src/rnn_model.py | hao-cheng/ee596_spr2019_lab4 | 487a91c3dcc4d1982e9aa8149630bc3549c883f0 | [
"MIT"
] | null | null | null | src/rnn_model.py | hao-cheng/ee596_spr2019_lab4 | 487a91c3dcc4d1982e9aa8149630bc3549c883f0 | [
"MIT"
] | null | null | null | src/rnn_model.py | hao-cheng/ee596_spr2019_lab4 | 487a91c3dcc4d1982e9aa8149630bc3549c883f0 | [
"MIT"
] | null | null | null | import os
import sys
import pickle
import numpy as np
from .rnn_unit import RnnUnit
from .softmax_unit import SoftmaxUnit
DTYPE = np.double  # float64 everywhere; keeps finite-difference checks stable
class RnnModel:
    """An RNN model with a softmax output layer.

    Parameters (column-major convention: activations are
    [feature_dim, batch_size] matrices):
      Whx: input-to-hidden weights   [hidden_size, input_size]
      Whh: hidden-to-hidden weights  [hidden_size, hidden_size]
      Woh: hidden-to-output weights  [output_size, hidden_size]
      bo:  output bias               [output_size, 1]
    The `last_*` copies support cache_model()/restore_model() rollback.
    The forward/backward TODO sections are intentionally left for the
    lab exercise (they raise NotImplementedError until filled in).
    """

    def __init__(self,
                 init_range=0.1,
                 learning_rate=0.1,
                 verbose=True,
                 batch_size=1,
                 bptt_unfold_level=1,
                 input_size=0,
                 hidden_size=0,
                 output_size=0):
        # optimization parameters
        self.init_range = init_range
        self.learning_rate = learning_rate
        self.verbose = verbose
        self.batch_size = batch_size
        # neuralnet structure params
        self.bptt_unfold_level = bptt_unfold_level
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Per-timestep activation holders, filled by allocate_model().
        self.rnn_units = []
        self.softmax_units = []

        # Parameters are allocated lazily in allocate_model().
        self.Whx = None
        self.Whh = None
        self.Woh = None
        self.bo = None
        self.last_Whx = None
        self.last_Whh = None
        self.last_Woh = None
        self.last_bo = None
        self.hprev = None

    def initialize_parameters(self):
        """Randomly initializes the connection weights (bias stays at 0)"""
        self.Whx += np.random.uniform(-self.init_range, self.init_range, self.Whx.shape)
        self.Whh += np.random.uniform(-self.init_range, self.init_range, self.Whh.shape)
        self.Woh += np.random.uniform(-self.init_range, self.init_range, self.Woh.shape)

    def reset_states(self, idxs=None):
        # Zeroes the carried-over hidden state; `idxs` selects which batch
        # columns to reset (all of them by default).
        if idxs is None:
            idxs = list(range(self.hprev.shape[1]))
        self.hprev[:, idxs] = 0

    def allocate_model(self):
        """Allocates model parameters and placeholders"""
        # Allocate model parameters
        self.Whx = np.zeros([self.hidden_size, self.input_size], dtype=DTYPE)
        self.Whh = np.zeros([self.hidden_size, self.hidden_size], dtype=DTYPE)
        self.Woh = np.zeros([self.output_size, self.hidden_size], dtype=DTYPE)
        self.bo = np.zeros([self.output_size, 1], dtype=DTYPE)
        self.last_Whx = np.zeros([self.hidden_size, self.input_size], dtype=DTYPE)
        self.last_Whh = np.zeros([self.hidden_size, self.hidden_size], dtype=DTYPE)
        self.last_Woh = np.zeros([self.output_size, self.hidden_size], dtype=DTYPE)
        self.last_bo = np.zeros([self.output_size, 1], dtype=DTYPE)
        # Allocate states
        self.hprev = np.zeros([self.hidden_size, self.batch_size], dtype=DTYPE)
        # Allocate activations and softmax
        # One unit per BPTT step; each caches its own activations for backprop.
        for _ in range(self.bptt_unfold_level):
            self.rnn_units.append(RnnUnit(self.hidden_size, self.batch_size, DTYPE))
            self.softmax_units.append(SoftmaxUnit(self.output_size, self.batch_size, DTYPE))

    def read_model(self, fname, eval=True):
        """Reads model from file.

        NOTE(review): the parameter name `eval` shadows the builtin; kept for
        caller compatibility. When eval is True the model is reconfigured for
        single-step, single-example inference regardless of how it was trained.
        """
        if not os.path.exists(fname):
            print(
                'Error: Model file {} does not exist!\n'.format(fname),
                file=sys.stderr
            )
            exit(1)

        with open(fname, 'rb') as fin:
            model = pickle.load(fin)
        print('=========Reading Model========')
        self.init_range = model['init_range']
        self.input_size = model['input_size']
        self.hidden_size = model['hidden_size']
        self.output_size = model['output_size']
        self.learning_rate = model['learning_rate']
        if eval:
            # Inference: unfold one step at a time over one sequence.
            self.bptt_unfold_level = 1
            self.batch_size = 1
        else:
            self.bptt_unfold_level = model['bptt_unfold_level']
        self.allocate_model()
        self.Whx = model['Whx']
        self.Whh = model['Whh']
        self.Woh = model['Woh']
        self.bo = model['bo']
        print('=========Reading Done========')

    def write_model(self, fname):
        """Writes model to file"""
        model = dict()
        model['init_range'] = self.init_range
        model['input_size'] = self.input_size
        model['hidden_size'] = self.hidden_size
        model['output_size'] = self.output_size
        model['learning_rate'] = self.learning_rate
        model['bptt_unfold_level'] = self.bptt_unfold_level
        model['Whx'] = self.Whx
        model['Whh'] = self.Whh
        model['Woh'] = self.Woh
        model['bo'] = self.bo
        with open(fname, 'wb') as fout:
            print('=========Writing Model========')
            pickle.dump(model, fout)
            print('=========Writing Done========')

    def forward_propagate(self, input_idxs, target_idxs):
        """Runs the unfolded forward pass.

        input_idxs: per-step lists of input word indices (batch_size each).
        target_idxs: per-step (target_index_list, target_multiplier_list) pairs.
        Returns (total_loss, per_step_probability_list). Updates self.hprev.
        Raises NotImplementedError until the lab TODO is completed.
        """
        loss = 0
        probs = []
        for i, (input_idx, target_idx) in enumerate(zip(input_idxs, target_idxs)):
            assert len(input_idx) == self.batch_size
            assert len(target_idx) == 2
            assert len(target_idx[0]) == self.batch_size
            assert len(target_idx[1]) == self.batch_size
            # One-hot encode the batch of input indices (one column per example).
            x = np.zeros([self.input_size, self.batch_size], dtype=DTYPE)
            x[input_idx, list(range(self.batch_size))] = 1.0
            # =========================
            # TODO: finish the codes here
            # h = ...
            raise NotImplementedError()
            # =========================
            p = self.softmax_units[i].forward_function(h, self.Woh, self.bo)
            probs += [p]
            loss += self.softmax_units[i].compute_loss(target_idx)
            # Carry the final hidden state across calls.
            self.hprev = h
        return loss, probs

    def backward_propagate(self, input_idxs, target_idxs):
        """Backpropagation through time over the unfolded steps.

        Returns accumulated gradients (dWhh, dWoh, dWhx, dbo).
        Raises NotImplementedError until the lab TODO is completed.
        """
        dWhh = np.zeros(self.Whh.shape)
        dWoh = np.zeros(self.Woh.shape)
        dWhx = np.zeros(self.Whx.shape)
        dbo = np.zeros(self.bo.shape)
        # Gradient of the loss w.r.t. the hidden state, flowing backwards.
        dEdh = np.zeros([self.hidden_size, self.batch_size])
        for i in range(self.bptt_unfold_level-1, -1, -1):
            target_idx = target_idxs[i]
            input_idx = input_idxs[i]
            # Retrieve activations
            h = self.rnn_units[i].h
            if i > 0:
                hprev = self.rnn_units[i-1].h
            else:
                hprev = np.zeros([self.hidden_size, self.batch_size])
            # Backprop the Softmax
            (
                dEdh_softmax, l_dWoh, l_dbo
            ) = self.softmax_units[i].backward_function(
                target_idx, h, self.Woh, self.bo)
            # Backprop the RNN
            # Re-create this step's one-hot input for the weight gradients.
            x = np.zeros([self.input_size, self.batch_size], dtype=DTYPE)
            x[input_idx, list(range(self.batch_size))] = 1.0
            # =========================
            # TODO: finish the codes here
            # dEdhprev, l_dWhx, l_dWhh = ...
            raise NotImplementedError()
            # =========================
            # Update the gradient accumulators
            dEdh = dEdhprev
            dWhh += l_dWhh
            dWoh += l_dWoh
            dWhx += l_dWhx
            dbo += l_dbo

        return dWhh, dWoh, dWhx, dbo

    def update_weight(self, dWhh, dWoh, dWhx, dbo):
        # Scales the gradients by the learning rate (in place) and applies
        # them additively; the sign convention matches backward_propagate.
        dWhh *= self.learning_rate
        dWoh *= self.learning_rate
        dWhx *= self.learning_rate
        dbo *= self.learning_rate

        self.Whh += dWhh
        self.Woh += dWoh
        self.Whx += dWhx
        self.bo += dbo

    def restore_model(self):
        # Rolls parameters back to the snapshot taken by cache_model().
        self.Whh[:] = self.last_Whh
        self.Woh[:] = self.last_Woh
        self.Whx[:] = self.last_Whx
        self.bo[:] = self.last_bo

    def cache_model(self):
        # Snapshots current parameters so a bad update can be undone.
        self.last_Whh[:] = self.Whh
        self.last_Woh[:] = self.Woh
        self.last_Whx[:] = self.Whx
        self.last_bo[:] = self.bo
def test_rnn_model():
    """Numerical gradient checks for the whole RNN.

    Builds a small randomly-initialized RnnModel, runs one forward/backward
    pass on a random fake batch, and verifies each analytic gradient
    (dWoh, dbo, dWhx, dWhh) against a finite-difference estimate.

    Fixes over the previous version:
      * the four near-identical check loops are factored into
        _numerical_gradient();
      * each perturbed weight is restored after probing (the old code left
        the last +epsilon perturbation in place between parameter checks);
      * the comparison uses max(|numeric - analytic|) instead of
        |sum(numeric - analytic)|, which could hide errors via cancellation.
    """
    rnn_model = RnnModel(
        batch_size=3,
        bptt_unfold_level=10,
        hidden_size=20,
        input_size=5,
        output_size=15,
        init_range=0.1,
        learning_rate=0.1
    )
    rnn_model.allocate_model()
    rnn_model.initialize_parameters()

    input_idxs, target_idxs = _make_fake_batch(rnn_model)

    # One clean forward/backward pass gives the base loss and all analytic
    # gradients; the finite-difference probes below reuse the base loss.
    rnn_model.reset_states()
    base_loss, _ = rnn_model.forward_propagate(input_idxs, target_idxs)
    dWhh, dWoh, dWhx, dbo = rnn_model.backward_propagate(input_idxs, target_idxs)

    checks = [
        ('dWoh', rnn_model.Woh, dWoh),
        ('dbo', rnn_model.bo, dbo),
        ('dWhx', rnn_model.Whx, dWhx),
        ('dWhh', rnn_model.Whh, dWhh),
    ]
    for name, param, analytic in checks:
        numeric = _numerical_gradient(
            rnn_model, param, base_loss, input_idxs, target_idxs)
        diff = np.max(np.abs(numeric - analytic))
        assert diff < 1e-4
        print('{} test passed! max|expected - actual| ='.format(name), diff)


def _make_fake_batch(rnn_model):
    """Builds random (input_idxs, target_idxs) shaped like a training batch:
    one entry per BPTT step, each batch_size wide; target multipliers are 1.
    """
    input_idxs = []
    target_idxs = []
    for _ in range(rnn_model.bptt_unfold_level):
        input_idx = [np.random.randint(0, rnn_model.input_size)
                     for _ in range(rnn_model.batch_size)]
        target_idx = [np.random.randint(0, rnn_model.output_size)
                      for _ in range(rnn_model.batch_size)]
        target_mult = [1.0] * rnn_model.batch_size
        input_idxs.append(input_idx)
        target_idxs.append((target_idx, target_mult))
    return input_idxs, target_idxs


def _numerical_gradient(rnn_model, param, base_loss, input_idxs, target_idxs,
                        epsilon=1e-7):
    """Forward-difference estimate of d(total loss)/d(param).

    Perturbs each entry of `param` in place by +epsilon, re-runs the forward
    pass, and restores the entry afterwards so subsequent checks see the
    unperturbed model.
    """
    numeric = np.zeros_like(param)
    for idx in np.ndindex(*param.shape):
        saved = param[idx]
        param[idx] = saved + epsilon
        rnn_model.reset_states()
        new_loss, _ = rnn_model.forward_propagate(input_idxs, target_idxs)
        numeric[idx] = (new_loss - base_loss) / epsilon
        param[idx] = saved
    return numeric
if __name__ == '__main__':
test_rnn_model()
| 34.883041 | 92 | 0.58793 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.