edited_code | original_code |
|---|---|
#!/usr/bin/env python
import argparse
import random
import subprocess
import sys
import time
from pathlib import Path
from typing import List
import numpy as np
def _get_args() -> dict:
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=["train", "evaluate"])
parser.add_argument('--directory', type=str, required=True)
parser.add_argument('--gpu-ids', type=int, nargs='+', required=True)
parser.add_argument('--runs-per-gpu', type=int, required=True)
args = vars(parser.parse_args())
args["directory"] = Path(args["directory"])
if not args["directory"].is_dir():
raise ValueError(f"No folder at {args['directory']}")
return args
def _main():
args = _get_args()
schedule_runs(**args)
def schedule_runs(mode: str, directory: Path, gpu_ids: List[int], runs_per_gpu: int):
"""Schedule multiple runs across one or multiple GPUs.
Parameters
----------
mode : {'train', 'evaluate'}
Use 'train' if you want to schedule training of multiple models and 'evaluate' if you want to schedule
evaluation of multiple trained models.
directory : Path
If mode is 'train', this path should point to a folder containing the config files (.yml) to use for model
training. For each config file, one run is started. If mode is 'evaluate', this path should point to the folder
containing the different model run directories.
gpu_ids : List[int]
List of GPU ids to use for training/evaluating.
runs_per_gpu : int
Number of runs to start on a single GPU.
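Examples
--------
Illustrative invocation from the command line (the script name is a placeholder)::
python schedule_runs.py train --directory /path/to/configs --gpu-ids 0 1 --runs-per-gpu 2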
"""
if mode == "train":
processes = list(directory.glob('**/*.yml'))
elif mode == "evaluate":
processes = list(directory.glob('*'))
else:
raise ValueError("'mode' must be either 'train' or 'evaluate'")
# if used as a command line tool, we need full paths to the files/directories
processes = [str(p.absolute()) for p in processes]
# for approximately equal memory usage during hyperparam tuning, randomly shuffle list of processes
random.shuffle(processes)
# array to keep track of how many runs are currently running per GPU
n_parallel_runs = len(gpu_ids) * runs_per_gpu
gpu_counter = np.zeros(len(gpu_ids), dtype=int)
# for the command line tool, we need the full path to the nh_run.py script
script_path = str(Path(__file__).absolute().parent / "nh_run.py")
running_processes = {}
counter = 0
while True:
# start new runs
for _ in range(n_parallel_runs - len(running_processes)):
if counter >= len(processes):
break
# determine which GPU to use
node_id = np.argmin(gpu_counter)
gpu_counter[node_id] += 1
gpu_id = gpu_ids[node_id]
process = processes[counter]
# start run via subprocess call
if mode == "train":
run_command = f"python {script_path} train --config-file {process} --gpu {gpu_id}"
else:
run_command = f"python {script_path} evaluate --run-dir {process} --gpu {gpu_id}"
print(f"Starting run {counter+1}/{len(processes)}: {run_command}")
running_processes[(run_command, node_id)] = subprocess.Popen(run_command,
stdout=subprocess.DEVNULL,
shell=True)
counter += 1
time.sleep(2)
# check for completed runs
for key, process in running_processes.items():
if process.poll() is not None:
print(f"Finished run {key[0]}")
gpu_counter[key[1]] -= 1
print("Cleaning up...\n\n")
try:
_ = process.communicate(timeout=5)
except subprocess.TimeoutExpired:
print('')
print("WARNING: PROCESS {} COULD NOT BE REAPED!".format(key))
print('')
running_processes[key] = None
# delete possibly finished runs
running_processes = {key: val for key, val in running_processes.items() if val is not None}
time.sleep(2)
if (len(running_processes) == 0) and (counter >= len(processes)):
break
print("Done")
sys.stdout.flush()
if __name__ == "__main__":
_main()
| #!/usr/bin/env python
import argparse
import random
import subprocess
import sys
import time
from pathlib import Path
from typing import List
import numpy as np
def _get_args() -> dict:
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=["train", "evaluate"])
parser.add_argument('--directory', type=str, required=True)
parser.add_argument('--gpu-ids', type=int, nargs='+', required=True)
parser.add_argument('--runs-per-gpu', type=int, required=True)
args = vars(parser.parse_args())
args["directory"] = Path(args["directory"])
if not args["directory"].is_dir():
raise ValueError(f"No folder at {args['directory']}")
return args
def _main():
args = _get_args()
schedule_runs(**args)
def schedule_runs(mode: str, directory: Path, gpu_ids: List[int], runs_per_gpu: int):
"""Schedule multiple runs across one or multiple GPUs.
Parameters
----------
mode : {'train', 'evaluate'}
Use 'train' if you want to schedule training of multiple models and 'evaluate' if you want to schedule
evaluation of multiple trained models.
directory : Path
If mode is 'train', this path should point to a folder containing the config files (.yml) to use for model
training. For each config file, one run is started. If mode is 'evaluate', this path should point to the folder
containing the different model run directories.
gpu_ids : List[int]
List of GPU ids to use for training/evaluating.
runs_per_gpu : int
Number of runs to start on a single GPU.
"""
if mode == "train":
processes = list(directory.glob('**/*.yml'))
elif mode == "evaluate":
processes = list(directory.glob('*'))
else:
raise ValueError("'mode' must be either 'train' or 'evaluate'")
# if used as a command line tool, we need full paths to the files/directories
processes = [str(p.absolute()) for p in processes]
# for approximately equal memory usage during hyperparam tuning, randomly shuffle list of processes
random.shuffle(processes)
# array to keep track of how many runs are currently running per GPU
n_parallel_runs = len(gpu_ids) * runs_per_gpu
gpu_counter = np.zeros((len(gpu_ids)), dtype=np.int)
# for the command line tool, we need the full path to the nh_run.py script
script_path = str(Path(__file__).absolute().parent / "nh_run.py")
running_processes = {}
counter = 0
while True:
# start new runs
for _ in range(n_parallel_runs - len(running_processes)):
if counter >= len(processes):
break
# determine which GPU to use
node_id = np.argmin(gpu_counter)
gpu_counter[node_id] += 1
gpu_id = gpu_ids[node_id]
process = processes[counter]
# start run via subprocess call
if mode == "train":
run_command = f"python {script_path} train --config-file {process} --gpu {gpu_id}"
else:
run_command = f"python {script_path} evaluate --run-dir {process} --gpu {gpu_id}"
print(f"Starting run {counter+1}/{len(processes)}: {run_command}")
running_processes[(run_command, node_id)] = subprocess.Popen(run_command,
stdout=subprocess.DEVNULL,
shell=True)
counter += 1
time.sleep(2)
# check for completed runs
for key, process in running_processes.items():
if process.poll() is not None:
print(f"Finished run {key[0]}")
gpu_counter[key[1]] -= 1
print("Cleaning up...\n\n")
try:
_ = process.communicate(timeout=5)
except TimeoutError:
print('')
print("WARNING: PROCESS {} COULD NOT BE REAPED!".format(key))
print('')
running_processes[key] = None
# delete possibly finished runs
running_processes = {key: val for key, val in running_processes.items() if val is not None}
time.sleep(2)
if (len(running_processes) == 0) and (counter >= len(processes)):
break
print("Done")
sys.stdout.flush()
if __name__ == "__main__":
_main()
|
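
A minimal sketch of the scheduling pattern used above, assuming a hypothetical `run_round_robin` helper: each command is started on the GPU with the fewest active runs, and finished subprocesses are reaped by polling. The `echo` commands and GPU ids in the usage line are illustrative placeholders, not part of the original script.

import subprocess
import time

def run_round_robin(commands, gpu_ids, runs_per_gpu=1):
    """Start each command on the least-loaded GPU (illustrative sketch)."""
    load = {gpu: 0 for gpu in gpu_ids}    # active runs per GPU
    running = []                          # (process, gpu_id) pairs
    queue = list(commands)
    while queue or running:
        # launch new runs while capacity is available
        while queue and len(running) < len(gpu_ids) * runs_per_gpu:
            gpu = min(load, key=load.get)  # least-loaded GPU, like np.argmin above
            cmd = queue.pop(0)
            proc = subprocess.Popen(f"{cmd} --gpu {gpu}", shell=True,
                                    stdout=subprocess.DEVNULL)
            load[gpu] += 1
            running.append((proc, gpu))
        # reap finished runs
        still_running = []
        for proc, gpu in running:
            if proc.poll() is None:
                still_running.append((proc, gpu))
            else:
                load[gpu] -= 1
        running = still_running
        time.sleep(2)

# e.g. run_round_robin([f"echo run{i}" for i in range(4)], gpu_ids=[0, 1], runs_per_gpu=2)
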
"""
`canarydecoder.Processor`
-------------------------
Processor object for data loading and feature extraction.
"""
import os
import joblib
import numpy as np
from .extract import extract_features, load_all_waves, load_wave
class Processor(object):
def __init__(self, sampling_rate: int, n_fft: int,
hop_length: int, padding: str, trim_below_db: int,
lifter: int = 0):
"""Processor object for data loading and feature extraction.
Arguments:
----------
sampling_rate {int} -- Sampling rate to apply.
n_fft {int} -- Frame size for FFT computation.
hop_length {int} -- Number of samples between each frame.
padding {str} -- Padding mode for derivatives.
trim_below_db {int} -- Log power threshold below which the signal is cut.
"""
self._sr = sampling_rate
self._n_fft = n_fft
self._hop_length = hop_length
self._padding = padding
self._top_db = trim_below_db
self._lifter = lifter
def __repr__(self):
return f"Processor({[str(k)+'='+str(v) for k, v in self.__dict__.items()]})"
def __call__(self, waves, mfcc=True, delta1=True, delta2=True,
workers=-1, backend="threading"):
# Load waves, and extract features from them.
if type(waves) is str:
if os.path.isdir(waves):
all_waves, all_files = load_all_waves(waves, sr=self._sr)
elif os.path.isfile(waves):
all_files = [waves]
all_waves = [load_wave(waves, sr=self._sr)]
else:
raise FileNotFoundError(f"File or directory {waves} not found.")
else:
if type(waves) is np.ndarray:
all_files = [0]
all_waves = [waves]
else:
all_files = [*range(len(waves))]
all_waves = waves
loop = joblib.Parallel(n_jobs=workers, backend=backend)
delayed_features = joblib.delayed(extract_features)
all_features = loop(delayed_features(w, sr=self._sr, hop_length=self._hop_length,
n_fft=self._n_fft, padding=self._padding,
trim_below_db=self._top_db, lifter=self._lifter,
mfcc=mfcc, delta1=delta1, delta2=delta2)
for w in all_waves)
return all_features, all_waves, all_files
| """
`canarydecoder.Processor`
-------------------------
Processor object for data loading and feature extraction.
"""
import os
import joblib
import numpy as np
from .extract import extract_features, load_all_waves, load_wave
class Processor(object):
def __init__(self, sampling_rate: int, n_fft: int,
hop_length: int, padding: str, trim_below_db: int,
lifter: int = 0):
"""Processor object for data loading and feature extraction.
Arguments:
----------
sampling_rate {int} -- Sampling rate to apply.
n_fft {int} -- Frame size for FFT computation.
hop_length {int} -- Number of samples between each frame.
padding {str} -- Padding mode for derivatives.
trim_below_db {int} -- Log power threshold below which the signal is cut.
"""
self._sr = sampling_rate
self._n_fft = n_fft
self._hop_length = hop_length
self._padding = padding
self._top_db = trim_below_db
self._lifter = lifter
def __repr__(self):
return f"Processor({[str(k)+'='+str(v) for k, v in self.__dict__.items()]})"
def __call__(self, waves, mfcc=True, delta1=True, delta2=True,
workers=-1, backend="threading"):
# Load waves, and extract features from them.
if type(waves) is str:
if os.path.isdir(waves):
all_waves, all_files = load_all_waves(waves, sr=self._sr)
elif os.path.isfile(waves):
all_files = [waves]
all_waves = [load_wave(waves, sr=self._sr)]
else:
raise FileNotFoundError(f"File or directory {waves} not found.")
else:
if type(waves) is np.ndarray:
all_files = [0]
all_waves = [waves]
else:
all_files = [*range(len(waves))]
all_waves = waves
loop = joblib.Parallel(n_jobs=workers, backend=backend)
delayed_features = joblib.delayed(extract_features)
all_features = loop(delayed_features(w, sr=self._sr, hop_length=self._hop_length,
n_fft=self._n_fft, padding=self._padding,
trim_below_db=self._top_db, lifter=self._lifter,
mfcc=mfcc, delta1=delta1, delta2=delta2)
for w in all_waves)
return all_features, all_waves, all_files
|
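
For context, a typical call to the `Processor` above might look like the following sketch. The import path, the parameter values, and the "recordings/" directory are assumptions made for illustration only; the accepted input types mirror the branching in `__call__`.

from canarydecoder import Processor  # assumed package layout

processor = Processor(sampling_rate=44100, n_fft=1024, hop_length=256,
                      padding="wrap", trim_below_db=60, lifter=0)

# `waves` may be a directory of audio files, a single file path,
# a numpy array, or a list of arrays (see `__call__` above).
features, waves, files = processor("recordings/", mfcc=True, delta1=True,
                                   delta2=True, workers=2, backend="threading")
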
from typing import Iterable
from bot.rules import burst
from tests.bot.rules import DisallowedCase, RuleTest
from tests.helpers import MockMessage
def make_msg(author: str) -> MockMessage:
"""
Init a MockMessage instance with author set to `author`.
This serves as a shorthand / alias to keep the test cases visually clean.
"""
return MockMessage(author=author)
class BurstRuleTests(RuleTest):
"""Tests the `burst` antispam rule."""
def setUp(self):
self.apply = burst.apply
self.config = {"max": 2, "interval": 10}
async def test_allows_messages_within_limit(self):
"""Cases which do not violate the rule."""
cases = (
[make_msg("bob"), make_msg("bob")],
[make_msg("bob"), make_msg("alice"), make_msg("bob")],
)
await self.run_allowed(cases)
async def test_disallows_messages_beyond_limit(self):
"""Cases where the amount of messages exceeds the limit, triggering the rule."""
cases = (
DisallowedCase(
[make_msg("bob"), make_msg("bob"), make_msg("bob")],
("bob",),
3,
),
DisallowedCase(
[make_msg("bob"), make_msg("bob"), make_msg("alice"), make_msg("bob")],
("bob",),
3,
),
)
await self.run_disallowed(cases)
def relevant_messages(self, case: DisallowedCase) -> Iterable[MockMessage]:
return tuple(msg for msg in case.recent_messages if msg.author in case.culprits)
def get_report(self, case: DisallowedCase) -> str:
return f"sent {case.n_violations} messages in {self.config['interval']}s"
| from typing import Iterable
from bot.rules import burst
from tests.bot.rules import DisallowedCase, RuleTest
from tests.helpers import MockMessage
def make_msg(author: str) -> MockMessage:
"""
Init a MockMessage instance with author set to `author`.
This serves as a shorthand / alias to keep the test cases visually clean.
"""
return MockMessage(author=author)
class BurstRuleTests(RuleTest):
"""Tests the `burst` antispam rule."""
def setUp(self):
self.apply = burst.apply
self.config = {"max": 2, "interval": 10}
async def test_allows_messages_within_limit(self):
"""Cases which do not violate the rule."""
cases = (
[make_msg("bob"), make_msg("bob")],
[make_msg("bob"), make_msg("alice"), make_msg("bob")],
)
await self.run_allowed(cases)
async def test_disallows_messages_beyond_limit(self):
"""Cases where the amount of messages exceeds the limit, triggering the rule."""
cases = (
DisallowedCase(
[make_msg("bob"), make_msg("bob"), make_msg("bob")],
("bob",),
3,
),
DisallowedCase(
[make_msg("bob"), make_msg("bob"), make_msg("alice"), make_msg("bob")],
("bob",),
3,
),
)
await self.run_disallowed(cases)
def relevant_messages(self, case: DisallowedCase) -> Iterable[MockMessage]:
return tuple(msg for msg in case.recent_messages if msg.author in case.culprits)
def get_report(self, case: DisallowedCase) -> str:
return f"sent {case.n_violations} messages in {self.config['interval']}s"
|
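
The tests above pin down the rule's observable behaviour: with max=2, a third message from the same author inside the interval must be reported as "sent 3 messages in 10s" with that author as the sole culprit. A hypothetical re-implementation consistent with those expectations is sketched below; the real rule lives in bot/rules/burst.py and its exact signature is not shown here.

from typing import Dict, Iterable, List, Optional, Tuple

async def apply(last_message, recent_messages: List,
                config: Dict[str, int]) -> Optional[Tuple[str, Iterable, Iterable]]:
    """Report the author of `last_message` if they exceed `config['max']` recent messages (sketch)."""
    relevant = [msg for msg in recent_messages if msg.author == last_message.author]
    if len(relevant) > config["max"]:
        return (
            f"sent {len(relevant)} messages in {config['interval']}s",
            (last_message.author,),
            relevant,
        )
    return None
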
import matplotlib.pyplot as plt
import os
import pandas as pd
import plotly.express as px
from src.datasets.get_dataset import get_dataset
from src.parser.visualize import parser
from src.visualize.visualize import viz_dataset
plt.switch_backend('agg')
def build_dataset_dist(dataset):
frames_by_class = zip(dataset._actions, dataset._num_frames_in_video)
frames_by_class_df = pd.DataFrame(frames_by_class, columns=['action', 'frames'])
frames_by_class_df.replace({"action": dataset._action_classes}, inplace=True)
fig = px.histogram(frames_by_class_df, x="frames", color="action", barmode='overlay', title='frames by action')
fig.write_html(os.path.join("datavisualize", 'HumanAct12_frames_by_action.html'))
if __name__ == '__main__':
# parse options
# parameters = optutils.visualize_dataset_parser()
parameters = parser(checkpoint=False)
parameters['num_frames'] = 120
parameters['fps'] = 10
parameters['dataset'] = 'datagen'
parameters['pose_rep'] = 'xyz'
parameters["num_actions_to_sample"] = 1
# parameters['pose_rep'] = 'rot6d'
# get device
device = parameters["device"]
# get data
DATA = get_dataset(name=parameters["dataset"])
dataset = DATA(split="train", **parameters)
# build_dataset_dist(dataset)
# add specific parameters from the dataset loading
dataset.update_parameters(parameters)
name = f"{parameters['dataset']}_{parameters['pose_rep']}"
folder = os.path.join("datavisualize", name)
viz_dataset(dataset, parameters, folder)
| import matplotlib.pyplot as plt
import os
import pandas as pd
import plotly.express as px
from src.datasets.get_dataset import get_dataset
from src.parser.visualize import parser
from src.visualize.visualize import viz_dataset
plt.switch_backend('agg')
def build_dataset_dist(dataset):
frames_by_class = zip(dataset._actions, dataset._num_frames_in_video)
frames_by_class_df = pd.DataFrame(frames_by_class, columns=['action', 'frames'])
frames_by_class_df.replace({"action": dataset._action_classes}, inplace=True)
fig = px.histogram(frames_by_class_df, x="frames", color="action", barmode='overlay', title='frames by action')
fig.write_html(os.path.join("datavisualize", 'HumanAct12_frames_by_action.html'))
if __name__ == '__main__':
# parse options
# parameters = optutils.visualize_dataset_parser()
parameters = parser(checkpoint=False)
parameters['num_frames'] = 120
parameters['fps'] = 10
parameters['dataset'] = 'datagen'
parameters['pose_rep'] = 'xyz'
parameters["num_actions_to_sample"] = 1
# parameters['pose_rep'] = 'rot6d'
# get device
device = parameters["device"]
# get data
DATA = get_dataset(name=parameters["dataset"])
dataset = DATA(split="train", **parameters)
# build_dataset_dist(dataset)
# add specific parameters from the dataset loading
dataset.update_parameters(parameters)
name = f"{parameters['dataset']}_{parameters['pose_rep']}"
folder = os.path.join("datavisualize", name)
viz_dataset(dataset, parameters, folder)
|
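
The build_dataset_dist helper above boils down to a single plotly.express.histogram call over a two-column DataFrame. A standalone version with made-up toy data (the action labels and frame counts below are not from HumanAct12) looks like this:

import pandas as pd
import plotly.express as px

# toy stand-ins for dataset._actions and dataset._num_frames_in_video
actions = [0, 0, 1, 1, 1]
num_frames = [45, 60, 120, 90, 75]
action_classes = {0: "walk", 1: "jump"}

df = pd.DataFrame(zip(actions, num_frames), columns=["action", "frames"])
df.replace({"action": action_classes}, inplace=True)
fig = px.histogram(df, x="frames", color="action", barmode="overlay",
                   title="frames by action")
fig.write_html("frames_by_action.html")
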
# Copyright 2021 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from typing import List, Dict, Set
import yaml
from yaml import Loader
def main():
import subprocess
import pathlib
from collections import Counter, defaultdict
# run gen-pipeline.sh to get full Buildkite pipeline
path = pathlib.Path(__file__).parent
script = path.joinpath('..', '.buildkite', 'gen-pipeline.sh').absolute()
env = dict(
PIPELINE_MODE='FULL',
BUILDKITE_PIPELINE_SLUG='horovod',
BUILDKITE_PIPELINE_DEFAULT_BRANCH='master',
BUILDKITE_BRANCH='master'
)
proc = subprocess.run([script], env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, encoding='utf-8')
if proc.returncode:
raise RuntimeError(f'Script exited with code {proc.returncode}: {script}')
# parse the pipeline
pipeline = yaml.load(proc.stdout, Loader=Loader)
steps = pipeline.get('steps', [])
images = [plugin['docker-compose#v3.5.0']['build']
for step in steps if isinstance(step, dict) and 'label' in step
and step['label'].startswith(':docker: Build ')
for plugin in step['plugins'] if 'docker-compose#v3.5.0' in plugin]
cpu_tests = [(re.sub(r' \(test-.*', '', re.sub(':[^:]*: ', '', step['label'])),
step['command'],
step['timeout_in_minutes'],
plugin['docker-compose#v3.5.0']['run'])
for step in steps if isinstance(step, dict) and 'label' in step and 'command' in step
and not step['label'].startswith(':docker: Build ') and '-cpu-' in step['label']
for plugin in step['plugins'] if 'docker-compose#v3.5.0' in plugin]
# we need to distinguish the two oneccl variants of some tests
cpu_tests = [(label + (' [ONECCL OFI]' if 'mpirun_command_ofi' in command else (' [ONECCL MPI]' if 'mpirun_command_mpi' in command else '')),
command,
timeout,
image)
for label, command, timeout, image in cpu_tests]
# check that labels are unique per image
cardinalities = Counter([(label, image) for label, command, timeout, image in cpu_tests])
conflicts = [(label, image, card) for (label, image), card in cardinalities.items() if card > 1]
if conflicts:
summary = '\n'.join([f'"{label}" for image "{image}"' for label, image, card in conflicts])
raise RuntimeError(f'There are {len(conflicts)} duplicate test labels for images:\n{summary}')
# commands for some labels may differ
# we make their labels unique here
label_commands = defaultdict(Counter)
for label, command, timeout, image in cpu_tests:
label_commands[label][command] += 1
labels_with_multiple_commands = {label: c for label, c in label_commands.items() if len(c) > 1}
new_labels_per_label_command = {(label, command): f'{label} {index+1}'
for label, commands in labels_with_multiple_commands.items()
for index, command in enumerate(commands)}
cpu_tests = [(new_labels_per_label_command[(label, command)] if (label, command) in new_labels_per_label_command else label,
command,
timeout,
image)
for label, command, timeout, image in cpu_tests]
# come up with test ids from test labels
test_labels = {label for label, command, timeout, image in cpu_tests}
test_id_per_label = [(label, re.sub('[^a-zA-Z0-9_]', '', re.sub('[ .]', '_', label)))
for label in test_labels]
if len({id for label, id in test_id_per_label}) != len(test_labels):
raise RuntimeError('Some test ids are not unique')
test_id_per_label = dict(test_id_per_label)
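# Illustrative example of the resulting mapping (the label below is made up, not an actual pipeline label):
#   'Gloo Cluster PyTests [ONECCL MPI]' -> 'Gloo_Cluster_PyTests_ONECCL_MPI'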
# collect tests per image
tests_per_image = {image: {test_id_per_label[label]
for label, command, timeout, test_image in cpu_tests
if test_image == image}
for image in sorted(images)}
# define no tests for any image (used for GPU builds below)
no_tests_per_image = defaultdict(lambda: set())
# index tests by id
tests = {test_id_per_label[label]: dict(label=label, command=command, timeout=timeout)
for label, command, timeout, image in cpu_tests}
def workflow_header() -> str:
return (f'# Do not edit this file! It has been generated by .github/gen-workflow-ci.py\n'
f'\n'
f'name: CI\n'
f'\n'
f'on:\n'
f' schedule:\n'
f' # run a build on master (this does not publish test results or cancel concurrent builds)\n'
f' - cron: \'0 10 * * *\' # everyday at 10am\n'
f' push:\n'
f' # only consider push to master, hotfix-branches, and tags\n'
f' # otherwise modify job.config.outputs.push\n'
f' branches: [ \'master\', \'hotfix-*\' ]\n'
f' tags: [ \'v*.*.*\' ]\n'
f' pull_request:\n'
f' # only consider pull requests into master\n'
f' branches: [ master ]\n'
f' workflow_dispatch:\n'
f'\n'
'permissions: {}\n'
f'\n'
f'concurrency:\n'
f' # This controls which concurrent builds to cancel:\n'
f' # - we do not want any concurrent builds on a branch (pull_request)\n'
f' # - we do not want concurrent builds on the same commit on master (push)\n'
f' # - we do not want concurrent builds on the same commit on a tag (push)\n'
f' # - we allow concurrent runs on the same commit on master and its tag (push)\n'
f' # - we allow concurrent runs on the same commit on master (push) and a scheduled build (schedule)\n'
f' #\n'
f' # A pull_request event only runs on branch commit, a push event only on master and tag commit.\n'
f' # A schedule event only runs on master HEAD commit.\n'
f' #\n'
f' # Expression github.ref means something like refs/heads/master or refs/tags/v0.22.1 or the branch.\n'
f' # This helps to not cancel concurrent runs on master or a tag that share the same commit.\n'
f' # Expression github.head_ref refers to the branch of the pull request.\n'
f' # On master, github.head_ref is empty, so we use the SHA of the commit, this means individual\n'
f' # commits to master will not be cancelled, while there can only be one concurrent build on a branch.\n'
f' #\n'
f' # We include the event name so we allow for concurrent scheduled and master builds.\n'
f' group: ci-${{{{ github.event_name }}}}-${{{{ github.ref }}}}-${{{{ github.head_ref || github.sha }}}}\n'
f' cancel-in-progress: true\n'
f'\n')
def jobs(*jobs: str) -> str:
return 'jobs:\n' \
' event_file:\n' \
' name: "Event File"\n' \
' runs-on: ubuntu-latest\n' \
' steps:\n' \
' - name: Upload\n' \
' uses: actions/upload-artifact@v2\n' \
' with:\n' \
' name: Event File\n' \
' path: ${{ github.event_path }}\n' \
'\n' + \
'\n'.join(jobs)
def init_workflow_job() -> str:
return (f' init-workflow:\n'
f' name: "Init Workflow"\n'
f' runs-on: ubuntu-latest\n'
f' outputs:\n'
f" run-at-all: ${{{{ github.event_name != 'schedule' || github.repository == 'horovod/horovod' }}}}\n"
f" # if we don't get a clear 'false', we fall back to building and testing\n"
f" run-builds-and-tests: ${{{{ steps.tests.outputs.needed != 'false' }}}}\n"
f' buildkite-branch-label: "${{{{ steps.config-buildkite.outputs.branch-label }}}}"\n'
f' buildkite-message: "${{{{ steps.config-buildkite.outputs.message }}}}"\n'
f'\n'
f' steps:\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v2\n'
f' - name: Setup Python\n'
f' uses: actions/setup-python@v2\n'
f' with:\n'
f' python-version: 3.8\n'
f' - name: Pip install dependencies\n'
f' run: pip install -r .github/requirements.txt\n'
f'\n'
f' - name: Check ci.yaml is up-to-date\n'
f' run: |\n'
f' python .github/gen-workflow-ci.py\n'
f' if [[ $(git diff .github/workflows/ci.yaml | wc -l) -gt 0 ]]\n'
f' then\n'
f' echo "::error::Workflow file .github/workflows/ci.yaml is out-dated, please run .github/gen-workflow-ci.py and commit changes"\n'
f' exit 1\n'
f' fi\n'
f' shell: bash\n'
f'\n'
f' - name: Check if tests are needed\n'
f' id: tests\n'
f' env:\n'
f' GITHUB_BASE_SHA: ${{{{ github.event.pull_request.base.sha }}}}\n'
f' GITHUB_HEAD_SHA: ${{{{ github.event.pull_request.head.sha }}}}\n'
f' run: |\n'
f' if [[ "${{{{ github.event_name }}}}" == "pull_request" ]]\n'
f' then\n'
f' changes="$(python .github/get-changed-code-files.py)"\n'
f' if [[ -z "$changes" ]]\n'
f' then\n'
f' echo "No code changes, no need to build and test"\n'
f' echo "::set-output name=needed::false"\n'
f' else\n'
f' echo "Code changes, we need to build and test:"\n'
f' echo "$changes"\n'
f' echo "::set-output name=needed::true"\n'
f' fi\n'
f' else\n'
f' echo "This is not part of a pull request, we need to build and test"\n'
f' echo "::set-output name=needed::true"\n'
f' fi\n'
f'\n'
f' - name: Configure Buildkite Build\n'
f' id: config-buildkite\n'
f' env:\n'
f' GITHUB_TOKEN: ${{{{ secrets.GITHUB_TOKEN }}}}\n'
f' run: |\n'
f' branch="${{{{ github.event.pull_request.head.ref || github.ref }}}}"\n'
f' branch="${{branch#"refs/heads/"}}"\n'
f' branch="${{branch#"refs/tags/"}}"\n'
f'\n'
f' branch_label="${{branch}}"\n'
f' if [[ "${{{{ github.event_name }}}}" == "schedule" ]]\n'
f' then\n'
f' # we add this label to the branch used by Buildkite to avoid it cancelling one of concurrent schedule and push builds on master\n'
f' branch_label="${{branch}} (schedule)"\n'
f' fi\n'
f' echo "::set-output name=branch-label::${{branch_label}}"\n'
f'\n'
f' if [[ "${{{{ github.event_name }}}}" == "pull_request" ]]\n'
f' then\n'
f' head_sha="${{{{ github.event.pull_request.head.sha }}}}"\n'
f' message="$(gh api https://api.github.com/repos/horovod/horovod/commits/${{head_sha}} -q .commit.message | head -n1)"\n'
f' echo "::set-output name=message::${{message}}"\n'
f' fi\n'
f'\n'
f' - name: Provide PR meta\n'
f" if: github.event_name == 'pull_request'\n"
f' run: |\n'
f' rm -f pr.json\n'
f' echo -n "{{" >> pr.json\n'
f' echo -n " \\\"merge_sha\\\": \\\"${{{{ github.sha }}}}\\\"," >> pr.json\n'
f' echo -n " \\\"base_sha\\\": \\\"${{{{ github.event.pull_request.base.sha }}}}\\\"," >> pr.json\n'
f' echo -n " \\\"head_sha\\\": \\\"${{{{ github.event.pull_request.head.sha }}}}\\\" " >> pr.json\n'
f' echo -n "}}" >> pr.json\n'
f' cat pr.json\n'
f'\n'
f' - name: Upload PR meta\n'
f' uses: actions/upload-artifact@v2\n'
f" if: github.event_name == 'pull_request'\n"
f' with:\n'
f' name: PR Meta\n'
f' path: pr.json\n'
f'\n')
def build_and_test_images(id: str,
name: str,
needs: List[str],
images: List[str],
tests_per_image: Dict[str, Set[str]],
tests: Dict[str, Dict],
parallel_images: int = None,
attempts: int = 3) -> str:
if 'init-workflow' not in needs:
needs.insert(0, 'init-workflow')
if parallel_images is None:
parallel_images = len(images)
failure = "'failure'"
return (f' {id}:\n'
f' name: "{name} (${{{{ matrix.image }}}})"\n'
f' needs: [{", ".join(needs)}]\n'
f' if: >\n'
f" needs.init-workflow.outputs.run-at-all == 'true' &&\n"
f" needs.init-workflow.outputs.run-builds-and-tests == 'true'\n"
f' runs-on: ubuntu-latest\n'
f'\n'
f' strategy:\n'
f' max-parallel: {parallel_images}\n'
f' fail-fast: false\n'
f' matrix:\n'
f' include:\n' +
'\n'.join([f' - image: {image}\n' +
f''.join([f' {test}: true\n'
for test in sorted(list(tests_per_image.get(image, [])))]) +
f' build_timeout: {30 if "-cpu-" in image else 40}\n'
for image in sorted(images)
# oneccl does not compile on GitHub Workflows:
# https://github.com/horovod/horovod/issues/2846
if '-oneccl-' not in image]) +
f'\n'
f' steps:\n'
f' - name: Clean up disk space\n'
f' # deleting these paths frees 38 GB disk space:\n'
f' # sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc\n'
f' # but this sometimes takes 3-4 minutes\n'
f' # so we delete only some sub-paths which are known to be quick (10s) and 20 GB\n'
f' run: |\n'
f' echo ::group::Disk space before clean up\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' for dir in /usr/share/dotnet/sdk/\*/nuGetPackagesArchive.lzma \\\n'
f' /usr/share/dotnet/shared \\\n'
f' /usr/local/lib/android/sdk/ndk \\\n'
f' /usr/local/lib/android/sdk/build-tools \\\n'
f' /opt/ghc\n'
f' do\n'
f' echo ::group::Deleting "$dir"\n'
f' sudo du -hsc $dir | tail -n1 || true\n'
f' sudo rm -rf $dir\n'
f' echo ::endgroup::\n'
f' done\n'
f'\n'
f' echo ::group::Disk space after clean up\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v2\n'
f' with:\n'
f' submodules: recursive\n'
f'\n'
f' - name: Setup Python\n'
f' uses: actions/setup-python@v2\n'
f' with:\n'
f' python-version: 3.8\n'
f'\n'
f' - name: Setup docker-compose\n'
f' run: pip install docker-compose\n'
f'\n'
f' - name: Configure AWS credentials\n'
f' id: aws\n'
f' uses: aws-actions/configure-aws-credentials@v1\n'
f' # AWS credentials are used to authenticate against AWS ECR to pull and push test images\n'
f' # We can only authenticate when running on Horovod repo (not a fork)\n'
f' if: >\n'
f' github.repository == \'horovod/horovod\' &&\n'
f' ( github.event_name != \'pull_request\' || github.event.pull_request.head.repo.full_name == github.repository )\n'
f' continue-on-error: true\n'
f' with:\n'
f' aws-access-key-id: ${{{{ secrets.AWS_ACCESS_KEY_ID }}}}\n'
f' aws-secret-access-key: ${{{{ secrets.AWS_SECRET_ACCESS_KEY }}}}\n'
f' aws-region: us-east-1\n'
f'\n'
f' - name: Login to Amazon ECR\n'
f' id: ecr\n'
f' if: steps.aws.outcome == \'success\'\n'
f' continue-on-error: true\n'
f' uses: aws-actions/amazon-ecr-login@v1\n'
f'\n'
f' - name: Add cache_from to docker-compose YAML\n'
f' if: steps.ecr.outcome == \'success\'\n'
f' run: |\n'
f' cat > docker-compose.test.override.yml <<EOF\n'
f' version: \'2.3\'\n'
f' services:\n'
f' ${{{{ matrix.image }}}}:\n'
f' build:\n'
f' cache_from:\n'
f' - ${{{{ steps.ecr.outputs.registry }}}}/buildkite:horovod-${{{{ matrix.image }}}}-latest\n'
f' EOF\n'
f' cat docker-compose.test.override.yml\n'
f' shell: bash\n'
f'\n'
f' - name: Pull latest test image\n'
f' if: steps.ecr.outcome == \'success\'\n'
f' continue-on-error: true\n'
f' run: |\n'
f' docker pull ${{{{ steps.ecr.outputs.registry }}}}/buildkite:horovod-${{{{ matrix.image }}}}-latest\n'
f' env:\n'
f' DOCKER_BUILDKIT: 1\n'
f'\n'
f' - name: Build\n'
f' id: build\n'
f' run: |\n'
f' override_yaml=""\n'
f' if [ -e docker-compose.test.override.yml ]; then override_yaml="-f docker-compose.test.override.yml"; fi\n'
f' .github/timeout-and-retry.sh ${{{{ matrix.build_timeout }}}}m 3 10 docker-compose -f docker-compose.test.yml $override_yaml build --pull ${{{{ matrix.image }}}}\n'
f' env:\n'
f' COMPOSE_DOCKER_CLI_BUILD: 1\n'
f' DOCKER_BUILDKIT: 1\n'
f'\n' +
'\n'.join([f' - name: "{test["label"]} [attempt {attempt} of {attempts}]"\n'
f' id: {test_id}_run_{attempt}\n'
f' continue-on-error: {"true" if attempt < attempts else "false"}\n'
f' if: always() && steps.build.outcome == \'success\' && matrix.{test_id} && {"true" if attempt == 1 else f"steps.{test_id}_run_{attempt-1}.outcome == {failure}"}\n'
f' run: |\n'
f' mkdir -p artifacts/${{{{ matrix.image }}}}/{test_id}_run_{attempt}\n'
f' docker-compose -f docker-compose.test.yml run -e GITHUB_ACTIONS --rm --volume "$(pwd)/artifacts/${{{{ matrix.image }}}}/{test_id}_run_{attempt}:/artifacts" ${{{{ matrix.image }}}} /usr/bin/timeout {test["timeout"]}m {test["command"]}\n'
f' shell: bash\n'
for test_id, test in sorted(tests.items(), key=lambda test: test[0])
for attempt in range(1, attempts+1)]) +
f'\n'
f' - name: Upload Test Results\n'
f' uses: actions/upload-artifact@v2\n'
f' if: always() && contains(matrix.image, \'-cpu-\')\n'
f' with:\n'
f' name: Unit Test Results - ${{{{ matrix.image }}}}\n'
f' path: artifacts/${{{{ matrix.image }}}}/**/*.xml\n'
f'\n'
f' - name: Push test image\n'
f' # We push test image to AWS ECR on push to Horovod master (not a fork)\n'
f' if: >\n'
f' github.event_name == \'push\' &&\n'
f' github.ref == \'refs/heads/master\' &&\n'
f' github.repository == \'horovod/horovod\' &&\n'
f' steps.ecr.outcome == \'success\'\n'
f' continue-on-error: true\n'
f' run: |\n'
f' docker image ls | head\n'
f' docker tag horovod_${{{{ matrix.image }}}} ${{{{ steps.ecr.outputs.registry }}}}/buildkite:horovod-${{{{ matrix.image }}}}-latest\n'
f' docker push ${{{{ steps.ecr.outputs.registry }}}}/buildkite:horovod-${{{{ matrix.image }}}}-latest\n'
f' docker image ls | head\n'
f' shell: bash\n')
def build_and_test_macos(id: str, name: str, needs: List[str], attempts: int = 3) -> str:
if 'init-workflow' not in needs:
needs.insert(0, 'init-workflow')
failure = "'failure'"
return (f' {id}:\n'
f' name: "{name} (${{{{ matrix.image }}}}-macos)"\n'
f' needs: [{", ".join(needs)}]\n'
f' if: >\n'
f" needs.init-workflow.outputs.run-at-all == 'true' &&\n"
f" needs.init-workflow.outputs.run-builds-and-tests == 'true'\n"
f' runs-on: macos-latest\n'
f'\n'
f' strategy:\n'
f' max-parallel: 3\n'
f' fail-fast: false\n'
f' matrix:\n'
f' include:\n'
f''
f' - image: test-cpu-openmpi-py3_7-tf1_15_5-keras2_2_4-torch1_6_0-mxnet1_5_1_p0\n'
f' HOROVOD_WITH_MPI: 1\n'
f' HOROVOD_WITHOUT_GLOO: 1\n'
f' TENSORFLOW: 1.15.0\n'
f' KERAS: 2.2.4\n'
f' PYTORCH: 1.6.0\n'
f' PYTORCH_LIGHTNING: 1.3.8\n'
f' TORCHVISION: 0.7.0\n'
f' MXNET: 1.5.1.post0\n'
f'\n'
f' - image: test-cpu-gloo-py3_8-tf2_6_2-keras2_6_0-torch1_9_1-mxnet1_6_0\n'
f' HOROVOD_WITHOUT_MPI: 1\n'
f' HOROVOD_WITH_GLOO: 1\n'
f' TENSORFLOW: 2.6.5\n'
f' KERAS: 2.6.0\n'
f' PYTORCH: 1.9.1\n'
f' PYTORCH_LIGHTNING: 1.3.8\n'
f' TORCHVISION: 0.10.1\n'
f' MXNET: 1.6.0\n'
f'\n'
f'' # mxnet 1.8.0.post0 does not compile for macos due to missing dnnl_config.h
f'' # mxnet 1.9.0 does not exist for macos
f' - image: test-openmpi-cpu-gloo-py3_8-tf2_7_0-keras2_7_0-torch1_10_1-mxnet1_7_0_p2\n'
f' HOROVOD_WITH_MPI: 1\n'
f' HOROVOD_WITH_GLOO: 1\n'
f' TENSORFLOW: 2.7.3\n'
f' KERAS: 2.7.0\n'
f' PYTORCH: 1.10.1\n'
f' PYTORCH_LIGHTNING: 1.3.8\n'
f' TORCHVISION: 0.11.2\n'
f' MXNET: 1.7.0.post2\n'
f'\n'
f' steps:\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v2\n'
f' with:\n'
f' submodules: recursive\n'
f'\n'
f' - name: Build\n'
f' id: build\n'
f' env:\n'
f' HOROVOD_WITH_MPI: ${{{{ matrix.HOROVOD_WITH_MPI }}}}\n'
f' HOROVOD_WITHOUT_MPI: ${{{{ matrix.HOROVOD_WITHOUT_MPI }}}}\n'
f' HOROVOD_WITH_GLOO: ${{{{ matrix.HOROVOD_WITH_GLOO }}}}\n'
f' HOROVOD_WITHOUT_GLOO: ${{{{ matrix.HOROVOD_WITHOUT_GLOO }}}}\n'
f' TENSORFLOW: ${{{{ matrix.TENSORFLOW }}}}\n'
f' KERAS: ${{{{ matrix.KERAS }}}}\n'
f' PYTORCH: ${{{{ matrix.PYTORCH }}}}\n'
f' PYTORCH_LIGHTNING: ${{{{ matrix.PYTORCH_LIGHTNING }}}}\n'
f' TORCHVISION: ${{{{ matrix.TORCHVISION }}}}\n'
f' MXNET: ${{{{ matrix.MXNET }}}}\n'
f'\n'
f' # The python patch in the pyenv install step is to work around an incompatibility introduced in new xcode version in macOS Big Sur. The patch is provided by python team.\n'
f' # The original discussion is here https://github.com/pyenv/pyenv/issues/1737\n'
f' run: |\n'
f' brew reinstall -f zlib bzip2\n'
f' brew install -f openmpi cmake libuv pyenv coreutils curl\n'
f' export PATH=$(pyenv root)/shims:$PATH\n'
f' pyenv uninstall -f 3.7.7\n'
f' CFLAGS="-I$(brew --prefix bzip2)/include -I$(brew --prefix zlib)/include" LDFLAGS="-L$(brew --prefix zlib)/lib -L$(brew --prefix bzip2)/lib" pyenv install --patch 3.7.7 < <(curl -sSL https://github.com/python/cpython/commit/8ea6353.patch)\n'
f' pyenv global 3.7.7\n'
f' python --version\n'
f'\n'
f' python -m pip install -U pip\n'
f' pip install tensorflow==${{TENSORFLOW}} keras==${{KERAS}}\n'
f' if [[ ${{TENSORFLOW}} == 1.* ]] || [[ ${{TENSORFLOW}} == 2.[012345].* ]]; then pip install "h5py<3" "protobuf~=3.20"; fi\n'
f' pip install torch==${{PYTORCH}} pytorch_lightning==${{PYTORCH_LIGHTNING}} torchvision==${{TORCHVISION}}\n'
f' pip install mxnet==${{MXNET}}\n'
f' HOROVOD_WITH_TENSORFLOW=1 HOROVOD_WITH_PYTORCH=1 HOROVOD_WITH_MXNET=1 pip install --no-cache-dir .[test]\n'
f' horovodrun --check-build\n'
f'\n' +
'\n'.join([f' - name: Test [attempt {attempt} of {attempts}]\n'
f' id: test-{attempt}\n'
f' continue-on-error: {"true" if attempt < attempts else "false"}\n'
f' if: always() && steps.build.outcome == \'success\' && {"true" if attempt == 1 else f"steps.test-{attempt-1}.outcome == {failure}"}\n'
f'\n'
f' run: |\n'
f' export PATH=$(pyenv root)/shims:$PATH\n'
f' pyenv global 3.7.7\n'
f' python --version\n'
f'\n'
f' artifacts_path="$(pwd)/artifacts/${{{{ matrix.image }}}}-macos-run-{attempt}"\n'
f' mkdir -p "$artifacts_path"\n'
f' echo "::set-output name=artifacts-path::$artifacts_path"\n'
f' echo pytest -v --capture=no --continue-on-collection-errors --junit-xml=$artifacts_path/junit.\$1.\${{HOROVOD_RANK:-\${{OMPI_COMM_WORLD_RANK:-\${{PMI_RANK}}}}}}.\$2.xml \${{@:2}} > pytest.sh\n'
f' chmod u+x pytest.sh\n'
f'\n'
f' cd test/parallel\n'
f' ls test_*.py | gtimeout 10m xargs -n 1 horovodrun -np 2 /bin/bash ../../pytest.sh macos\n'
for attempt in range(1, attempts+1)]) +
f'\n'
f' - name: Upload Test Results\n'
f' uses: actions/upload-artifact@v2\n'
f' if: always()\n'
f' with:\n'
f' name: Unit Test Results - ${{{{ matrix.image }}}}-macos\n'
f' path: |\n' +
'\n'.join([f' ${{{{ steps.test-{attempt}.outputs.artifacts-path }}}}'
for attempt in range(1, attempts+1)]))
def trigger_buildkite_job(id: str, name: str, needs: List[str], mode: str) -> str:
if 'init-workflow' not in needs:
needs.insert(0, 'init-workflow')
return (f' {id}-trigger:\n'
f' name: "{name} (trigger Builtkite)"\n'
f' needs: [{", ".join(needs)}]\n'
f' runs-on: ubuntu-latest\n'
f' if: >\n'
f' github.repository == \'horovod/horovod\' &&\n'
f" needs.init-workflow.outputs.run-at-all == 'true' &&\n"
f" needs.init-workflow.outputs.run-builds-and-tests == 'true' &&\n"
f' ( github.event_name != \'pull_request\' || github.event.pull_request.head.repo.full_name == github.repository )\n'
f' outputs:\n'
f' url: ${{{{ steps.build.outputs.url }}}}\n'
f'\n'
f' steps:\n'
f' - name: Trigger Buildkite Pipeline\n'
f' id: build\n'
f' uses: EnricoMi/trigger-pipeline-action@master\n'
f' env:\n'
f' PIPELINE: "horovod/horovod"\n'
f' # COMMIT is taken from GITHUB_SHA\n'
f' BRANCH: "${{{{ needs.init-workflow.outputs.buildkite-branch-label }}}} ({mode})"\n'
f' # empty MESSAGE will be filled by Buildkite from commit message\n'
f' MESSAGE: "${{{{ needs.init-workflow.outputs.buildkite-message }}}}"\n'
f' BUILDKITE_API_ACCESS_TOKEN: ${{{{ secrets.BUILDKITE_TOKEN }}}}\n'
f' BUILD_ENV_VARS: "{{\'PIPELINE_MODE\': \'{mode}\'}}"\n'
f'\n'
f' {id}:\n'
f' name: "{name} (download Builtkite)"\n'
f' needs: [{id}-trigger]\n'
f' runs-on: ubuntu-latest\n'
f'\n'
f' steps:\n'
f' - name: Download Buildkite Artifacts\n'
f' id: download\n'
f' uses: EnricoMi/download-buildkite-artifact-action@v1\n'
f' with:\n'
f' buildkite_token: ${{{{ secrets.BUILDKITE_TOKEN }}}}\n'
f' buildkite_build_url: ${{{{ needs.{id}-trigger.outputs.url }}}}\n'
f' ignore_build_states: blocked,canceled,skipped,not_run\n'
f' ignore_job_states: timed_out\n'
f' output_path: artifacts/Unit Test Results - {mode} on Builtkite\n'
f'\n'
f' - name: Upload Test Results\n'
f' uses: actions/upload-artifact@v2\n'
f' if: always()\n'
f' with:\n'
f' name: Unit Test Results - {mode} on Builtkite\n'
f' path: artifacts/Unit Test Results - {mode} on Builtkite/**/*.xml\n' +
f'\n'
f' - name: Check Buildkite job state\n'
f' if: >\n'
f' always() &&\n'
f' steps.download.conclusion == \'success\' &&\n'
f' steps.download.outputs.build-state != \'passed\'\n'
f' run: |\n'
f' echo "::warning::Buildkite pipeline did not pass: ${{{{ needs.{id}-trigger.outputs.url }}}}"\n'
f' exit 1\n')
def publish_docker_images(needs: List[str], images: List[str]) -> str:
if 'init-workflow' not in needs:
needs.insert(0, 'init-workflow')
if needs != ['init-workflow', 'build-and-test', 'buildkite']:
raise RuntimeError('This job has hard-coded needs, which you may want to adjust')
return (f' docker-config:\n'
f' name: Configure docker build\n'
f' needs: [{", ".join(needs)}]\n'
f" # build-and-test and buildkite might have been skipped (! needs.init-workflow.outputs.run-builds-and-tests)\n"
f' # buildkite might have been skipped (workflow runs for a fork PR),\n'
f' # we still want to build docker images (though we might not want to push them)\n'
f' if: >\n'
f' always() &&\n'
f" needs.init-workflow.outputs.run-at-all == 'true' &&\n"
f" needs.init-workflow.outputs.run-builds-and-tests == 'true' &&\n"
f" needs.build-and-test.result == 'success' &&\n"
f" ( needs.buildkite.result == 'success' || needs.buildkite.result == 'skipped' )\n"
f' runs-on: ubuntu-latest\n'
f' outputs:\n'
f' run: ${{{{ steps.config.outputs.run }}}}\n'
f' push: ${{{{ steps.config.outputs.push }}}}\n'
f'\n'
f' steps:\n'
f' - name: Config\n'
f' id: config\n'
f' env:\n'
f' # run workflow for all events on Horovod repo and non-schedule events on forks\n'
f' run: ${{{{ github.repository == \'horovod/horovod\' || github.event_name != \'schedule\' }}}}\n'
f' # push images only from Horovod repo and for schedule and push events\n'
f' push: ${{{{ github.repository == \'horovod/horovod\' && contains(\'schedule,push\', github.event_name) }}}}\n'
f' run: |\n'
f' echo Repository: ${{{{ github.repository }}}}\n'
f' echo Event: ${{{{ github.event_name }}}}\n'
f' echo Run: $run\n'
f' echo "::set-output name=run::$run"\n'
f' echo Push: $push\n'
f' echo "::set-output name=push::$push"\n'
f'\n'
f' docker-build:\n'
f' name: Build docker image ${{{{ matrix.docker-image }}}} (push=${{{{ needs.docker-config.outputs.push }}}})\n'
f' needs: docker-config\n'
f' if: always() && needs.docker-config.outputs.run == \'true\'\n'
f' runs-on: ubuntu-latest\n'
f'\n'
f' # we want an ongoing run of this workflow to be canceled by a later commit\n'
f' # so that there is only one concurrent run of this workflow for each branch\n'
f' concurrency:\n'
f' # github.ref means something like refs/heads/master or refs/tags/v0.22.1 or the branch.\n'
f' # This helps to not cancel concurrent runs on master and a tag that share the same commit\n'
f' # head_ref refers to the pull request branch so we run only one workflow for the given pull request.\n'
f' # On master, head_ref is empty, so we use the SHA of the commit, this means\n'
f' # commits to master will not be cancelled, which is important to ensure\n'
f' # that every commit to master is full tested and deployed.\n'
f' group: docker-${{{{ matrix.docker-image }}}}-${{{{ github.ref }}}}-${{{{ github.head_ref || github.sha }}}}\n'
f' cancel-in-progress: true\n'
f'\n'
f' strategy:\n'
f' fail-fast: false\n'
f' matrix:\n'
f' docker-image:\n' +
''.join([f' - {image}\n'
for image in images]) +
f'\n'
f' steps:\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v2\n'
f' with:\n'
f' submodules: \'recursive\'\n'
f'\n'
f' - name: Docker meta\n'
f' id: meta\n'
f' uses: crazy-max/ghaction-docker-meta@v2\n'
f' with:\n'
f' # list of Docker images to use as base name for tags\n'
f' images: |\n'
f' horovod/${{{{ matrix.docker-image }}}}\n'
f' # generate Docker tags based on the following events/attributes\n'
f' tags: |\n'
f' type=schedule\n'
f' type=ref,event=branch\n'
f' type=ref,event=pr\n'
f' type=semver,pattern={{{{version}}}}\n'
f' type=semver,pattern={{{{major}}}}.{{{{minor}}}}\n'
f' type=semver,pattern={{{{major}}}}\n'
f' type=sha\n'
f'\n'
f' - name: Set up QEMU\n'
f' uses: docker/setup-qemu-action@v1\n'
f' - name: Set up Docker Buildx\n'
f' uses: docker/setup-buildx-action@v1\n'
f'\n'
f' - name: Login to DockerHub\n'
f' if: needs.docker-config.outputs.push == \'true\'\n'
f' uses: docker/login-action@v1\n'
f' with:\n'
f' username: ${{{{ secrets.DOCKERHUB_USERNAME }}}}\n'
f' password: ${{{{ secrets.DOCKERHUB_TOKEN }}}}\n'
f'\n'
f' - name: Clean up disk space\n'
f' # deleting these paths frees 38 GB disk space:\n'
f' # sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc\n'
f' # but this sometimes takes 3-4 minutes\n'
f' # so we delete only some sub-paths which are known to be quick (10s) and 20 GB\n'
f' run: |\n'
f' echo ::group::Disk space before clean up\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' for dir in /usr/share/dotnet/sdk/\*/nuGetPackagesArchive.lzma \\\n'
f' /usr/share/dotnet/shared \\\n'
f' /usr/local/lib/android/sdk/ndk \\\n'
f' /usr/local/lib/android/sdk/build-tools \\\n'
f' /opt/ghc\n'
f' do\n'
f' echo ::group::Deleting "$dir"\n'
f' sudo du -hsc $dir | tail -n1 || true\n'
f' sudo rm -rf $dir\n'
f' echo ::endgroup::\n'
f' done\n'
f'\n'
f' echo ::group::Disk space after clean up\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' - name: Build and push\n'
f' uses: docker/build-push-action@v2\n'
f' timeout-minutes: 60\n'
f' with:\n'
f' context: .\n'
f' file: ./docker/${{{{ matrix.docker-image }}}}/Dockerfile\n'
f' push: ${{{{ needs.docker-config.outputs.push }}}}\n'
f' tags: ${{{{ steps.meta.outputs.tags }}}}\n'
f' labels: ${{{{ steps.meta.outputs.labels }}}}\n'
f'\n'
f' - name: Show free space\n'
f' if: always()\n'
f' run: |\n'
f' echo ::group::Disk Space\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' echo ::group::Docker Space\n'
f' docker system df\n'
f' echo ::endgroup::\n'
f'\n'
f' echo ::group::Docker Images\n'
f' docker images -a\n'
f' echo ::endgroup::\n'
f'\n'
f' echo ::group::Docker Container\n'
f' docker container list -a\n'
f' echo ::endgroup::\n')
def sync_files(needs: List[str]) -> str:
return (f' sync-files:\n'
f' name: "Sync Files (${{{{ matrix.name }}}})"\n'
f' needs: [{", ".join(needs)}]\n'
f' runs-on: ubuntu-latest\n'
f'\n'
f' strategy:\n'
f' fail-fast: false\n'
f' matrix:\n'
f' include:\n'
f' - name: Docs Summary\n'
f' left_file: README.rst\n'
f' right_file: docs/summary.rst\n'
f' init: sed -i -e s/docs\///g README.rst\n'
f'\n'
f' - name: Examples Keras Spark3\n'
f' left_file: examples/spark/keras/keras_spark_rossmann_run.py\n'
f' right_file: examples/spark/keras/keras_spark3_rossmann.py\n'
f'\n'
f' steps:\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v1\n'
f'\n'
f' - name: Diffing ${{{{ matrix.left_file }}}} with ${{{{ matrix.right_file }}}}\n'
f' env:\n'
f' LEFT: ${{{{ matrix.left_file }}}}\n'
f' RIGHT: ${{{{ matrix.right_file }}}}\n'
f' INIT: ${{{{ matrix.init }}}}\n'
f' run: |\n'
f' $INIT\n'
f'\n'
f' patch --quiet -p0 $LEFT ${{RIGHT}}.patch -o ${{LEFT}}.expected\n'
f' if ! diff -q ${{LEFT}}.expected --label $LEFT $RIGHT\n'
f' then\n'
f' echo\n'
f' echo "::error::Files are out-of-sync: $LEFT vs. $RIGHT"\n'
f' echo "Unexpected differences are:"\n'
f' diff ${{LEFT}}.expected --label $LEFT $RIGHT || true\n'
f'\n'
f' echo\n'
f' echo "Use the following as ${{RIGHT}}.patch to accept those changes:"\n'
f' diff $LEFT $RIGHT || true\n'
f'\n'
f' false\n'
f' fi\n')
with open(path.joinpath('workflows', 'ci.yaml').absolute(), 'wt') as w:
mins = ['tfmin', 'torchmin', 'mxnetmin']
heads = ['tfhead', 'torchhead', 'mxnethead']
allmin_images = [image for image in images if all(min in image for min in mins)]
allhead_images = [image for image in images if all(head in image for head in heads)]
release_images = [image for image in images if image not in allhead_images + allmin_images]
cpu_release_images = [image for image in release_images if '-cpu-' in image]
gpu_release_images = [image for image in release_images if '-gpu-' in image or '-mixed-' in image]
workflow = workflow_header() + jobs(
init_workflow_job(),
# changing these names require changes in the workflow-conclusion step in ci-results.yaml
build_and_test_images(id='build-and-test', name='Build and Test', needs=['init-workflow'], images=release_images, parallel_images=len(cpu_release_images), tests_per_image=tests_per_image, tests=tests),
build_and_test_images(id='build-and-test-heads', name='Build and Test heads', needs=['build-and-test'], images=allhead_images, tests_per_image=tests_per_image, tests=tests),
build_and_test_images(id='build-mins', name='Build mins', needs=['build-and-test'], images=allmin_images, tests_per_image=tests_per_image, tests={}),
build_and_test_macos(id='build-and-test-macos', name='Build and Test macOS', needs=['build-and-test']),
trigger_buildkite_job(id='buildkite', name='Build and Test GPU', needs=['build-and-test'], mode='GPU NON HEADS'),
trigger_buildkite_job(id='buildkite-heads', name='Build and Test GPU heads', needs=['build-and-test'], mode='GPU HEADS'),
publish_docker_images(needs=['build-and-test', 'buildkite'], images=['horovod', 'horovod-cpu', 'horovod-ray']),
sync_files(needs=['init-workflow'])
)
print(workflow, file=w, end='')
if __name__ == "__main__":
main()
| # Copyright 2021 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from typing import List, Dict, Set
import yaml
from yaml import Loader
def main():
import subprocess
import pathlib
from collections import Counter, defaultdict
# run gen-pipeline.sh to get full Buildkite pipeline
path = pathlib.Path(__file__).parent
script = path.joinpath('..', '.buildkite', 'gen-pipeline.sh').absolute()
env = dict(
PIPELINE_MODE='FULL',
BUILDKITE_PIPELINE_SLUG='horovod',
BUILDKITE_PIPELINE_DEFAULT_BRANCH='master',
BUILDKITE_BRANCH='master'
)
proc = subprocess.run([script], env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, encoding='utf-8')
if proc.returncode:
raise RuntimeError(f'Script exited with code {proc.returncode}: {script}')
# parse the pipeline
pipeline = yaml.load(proc.stdout, Loader=Loader)
steps = pipeline.get('steps', [])
images = [plugin['docker-compose#v3.5.0']['build']
for step in steps if isinstance(step, dict) and 'label' in step
and step['label'].startswith(':docker: Build ')
for plugin in step['plugins'] if 'docker-compose#v3.5.0' in plugin]
cpu_tests = [(re.sub(r' \(test-.*', '', re.sub(':[^:]*: ', '', step['label'])),
step['command'],
step['timeout_in_minutes'],
plugin['docker-compose#v3.5.0']['run'])
for step in steps if isinstance(step, dict) and 'label' in step and 'command' in step
and not step['label'].startswith(':docker: Build ') and '-cpu-' in step['label']
for plugin in step['plugins'] if 'docker-compose#v3.5.0' in plugin]
# we need to distinguish the two oneccl variants of some tests
cpu_tests = [(label + (' [ONECCL OFI]' if 'mpirun_command_ofi' in command else (' [ONECCL MPI]' if 'mpirun_command_mpi' in command else '')),
command,
timeout,
image)
for label, command, timeout, image in cpu_tests]
# check that labels are unique per image
cardinalities = Counter([(label, image) for label, command, timeout, image in cpu_tests])
conflicts = [(label, image, card) for (label, image), card in cardinalities.items() if card > 1]
if conflicts:
summary = '\n'.join([f'"{label}" for image "{image}"' for label, image, card in conflicts])
raise RuntimeError(f'There are {len(conflicts)} duplicate test labels for images:\n{summary}')
# commands for some labels may differ
# we make their labels unique here
label_commands = defaultdict(Counter)
for label, command, timeout, image in cpu_tests:
label_commands[label][command] += 1
labels_with_multiple_commands = {label: c for label, c in label_commands.items() if len(c) > 1}
new_labels_per_label_command = {(label, command): f'{label} {index+1}'
for label, commands in labels_with_multiple_commands.items()
for index, command in enumerate(commands)}
cpu_tests = [(new_labels_per_label_command[(label, command)] if (label, command) in new_labels_per_label_command else label,
command,
timeout,
image)
for label, command, timeout, image in cpu_tests]
# come up with test ids from test labels
test_labels = {label for label, command, timeout, image in cpu_tests}
test_id_per_label = [(label, re.sub('[^a-zA-Z0-9_]', '', re.sub('[ .]', '_', label)))
for label in test_labels]
if len({id for label, id in test_id_per_label}) != len(test_labels):
raise RuntimeError('Some test ids are not unique')
test_id_per_label = dict(test_id_per_label)
# collect tests per image
tests_per_image = {image: {test_id_per_label[label]
for label, command, timeout, test_image in cpu_tests
if test_image == image}
for image in sorted(images)}
# define no tests for any image (used for GPU builds below)
no_tests_per_image = defaultdict(lambda: set())
# index tests by id
tests = {test_id_per_label[label]: dict(label=label, command=command, timeout=timeout)
for label, command, timeout, image in cpu_tests}
def workflow_header() -> str:
return (f'# Do not edit this file! It has been generated by .github/gen-workflow-ci.py\n'
f'\n'
f'name: CI\n'
f'\n'
f'on:\n'
f' schedule:\n'
f' # run a build on master (this does not publish test results or cancel concurrent builds)\n'
f' - cron: \'0 10 * * *\' # everyday at 10am\n'
f' push:\n'
f' # only consider push to master, hotfix-branches, and tags\n'
f' # otherwise modify job.config.outputs.push\n'
f' branches: [ \'master\', \'hotfix-*\' ]\n'
f' tags: [ \'v*.*.*\' ]\n'
f' pull_request:\n'
f' # only consider pull requests into master\n'
f' branches: [ master ]\n'
f' workflow_dispatch:\n'
f'\n'
'permissions: {}\n'
f'\n'
f'concurrency:\n'
f' # This controls which concurrent builds to cancel:\n'
f' # - we do not want any concurrent builds on a branch (pull_request)\n'
f' # - we do not want concurrent builds on the same commit on master (push)\n'
f' # - we do not want concurrent builds on the same commit on a tag (push)\n'
f' # - we allow concurrent runs on the same commit on master and its tag (push)\n'
f' # - we allow concurrent runs on the same commit on master (push) and a scheduled build (schedule)\n'
f' #\n'
f' # A pull_request event only runs on branch commit, a push event only on master and tag commit.\n'
f' # A schedule event only runs on master HEAD commit.\n'
f' #\n'
f' # Expression github.ref means something like refs/heads/master or refs/tags/v0.22.1 or the branch.\n'
f' # This helps to not cancel concurrent runs on master or a tag that share the same commit.\n'
f' # Expression github.head_ref refers to the branch of the pull request.\n'
f' # On master, github.head_ref is empty, so we use the SHA of the commit, this means individual\n'
f' # commits to master will not be cancelled, while there can only be one concurrent build on a branch.\n'
f' #\n'
f' # We include the event name so we allow for concurrent scheduled and master builds.\n'
f' group: ci-${{{{ github.event_name }}}}-${{{{ github.ref }}}}-${{{{ github.head_ref || github.sha }}}}\n'
f' cancel-in-progress: true\n'
f'\n')
def jobs(*jobs: str) -> str:
return 'jobs:\n' \
' event_file:\n' \
' name: "Event File"\n' \
' runs-on: ubuntu-latest\n' \
' steps:\n' \
' - name: Upload\n' \
' uses: actions/upload-artifact@v2\n' \
' with:\n' \
' name: Event File\n' \
' path: ${{ github.event_path }}\n' \
'\n' + \
'\n'.join(jobs)
def init_workflow_job() -> str:
return (f' init-workflow:\n'
f' name: "Init Workflow"\n'
f' runs-on: ubuntu-latest\n'
f' outputs:\n'
f" run-at-all: ${{{{ github.event_name != 'schedule' || github.repository == 'horovod/horovod' }}}}\n"
f" # if we don't get a clear 'false', we fall back to building and testing\n"
f" run-builds-and-tests: ${{{{ steps.tests.outputs.needed != 'false' }}}}\n"
f' buildkite-branch-label: "${{{{ steps.config-buildkite.outputs.branch-label }}}}"\n'
f' buildkite-message: "${{{{ steps.config-buildkite.outputs.message }}}}"\n'
f'\n'
f' steps:\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v2\n'
f' - name: Setup Python\n'
f' uses: actions/setup-python@v2\n'
f' with:\n'
f' python-version: 3.8\n'
f' - name: Pip install dependencies\n'
f' run: pip install -r .github/requirements.txt\n'
f'\n'
f' - name: Check ci.yaml is up-to-date\n'
f' run: |\n'
f' python .github/gen-workflow-ci.py\n'
f' if [[ $(git diff .github/workflows/ci.yaml | wc -l) -gt 0 ]]\n'
f' then\n'
f' echo "::error::Workflow file .github/workflows/ci.yaml is out-dated, please run .github/gen-workflow-ci.py and commit changes"\n'
f' exit 1\n'
f' fi\n'
f' shell: bash\n'
f'\n'
f' - name: Check if tests are needed\n'
f' id: tests\n'
f' env:\n'
f' GITHUB_BASE_SHA: ${{{{ github.event.pull_request.base.sha }}}}\n'
f' GITHUB_HEAD_SHA: ${{{{ github.event.pull_request.head.sha }}}}\n'
f' run: |\n'
f' if [[ "${{{{ github.event_name }}}}" == "pull_request" ]]\n'
f' then\n'
f' changes="$(python .github/get-changed-code-files.py)"\n'
f' if [[ -z "$changes" ]]\n'
f' then\n'
f' echo "No code changes, no need to build and test"\n'
f' echo "::set-output name=needed::false"\n'
f' else\n'
f' echo "Code changes, we need to build and test:"\n'
f' echo "$changes"\n'
f' echo "::set-output name=needed::true"\n'
f' fi\n'
f' else\n'
f' echo "This is not part of a pull request, we need to build and test"\n'
f' echo "::set-output name=needed::true"\n'
f' fi\n'
f'\n'
f' - name: Configure Buildkite Build\n'
f' id: config-buildkite\n'
f' env:\n'
f' GITHUB_TOKEN: ${{{{ secrets.GITHUB_TOKEN }}}}\n'
f' run: |\n'
f' branch="${{{{ github.event.pull_request.head.ref || github.ref }}}}"\n'
f' branch="${{branch#"refs/heads/"}}"\n'
f' branch="${{branch#"refs/tags/"}}"\n'
f'\n'
f' branch_label="${{branch}}"\n'
f' if [[ "${{{{ github.event_name }}}}" == "schedule" ]]\n'
f' then\n'
f' # we add this label to the branch used by Buildkite to avoid it cancelling one of concurrent schedule and push builds on master\n'
f' branch_label="${{branch}} (schedule)"\n'
f' fi\n'
f' echo "::set-output name=branch-label::${{branch_label}}"\n'
f'\n'
f' if [[ "${{{{ github.event_name }}}}" == "pull_request" ]]\n'
f' then\n'
f' head_sha="${{{{ github.event.pull_request.head.sha }}}}"\n'
f' message="$(gh api https://api.github.com/repos/horovod/horovod/commits/${{head_sha}} -q .commit.message | head -n1)"\n'
f' echo "::set-output name=message::${{message}}"\n'
f' fi\n'
f'\n'
f' - name: Provide PR meta\n'
f" if: github.event_name == 'pull_request'\n"
f' run: |\n'
f' rm -f pr.json\n'
f' echo -n "{{" >> pr.json\n'
f' echo -n " \\\"merge_sha\\\": \\\"${{{{ github.sha }}}}\\\"," >> pr.json\n'
f' echo -n " \\\"base_sha\\\": \\\"${{{{ github.event.pull_request.base.sha }}}}\\\"," >> pr.json\n'
f' echo -n " \\\"head_sha\\\": \\\"${{{{ github.event.pull_request.head.sha }}}}\\\" " >> pr.json\n'
f' echo -n "}}" >> pr.json\n'
f' cat pr.json\n'
f'\n'
f' - name: Upload PR meta\n'
f' uses: actions/upload-artifact@v2\n'
f" if: github.event_name == 'pull_request'\n"
f' with:\n'
f' name: PR Meta\n'
f' path: pr.json\n'
f'\n')
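# build_and_test_images generates one matrix entry per Docker image (oneccl images
# are skipped) and, for every test, `attempts` sequential steps: attempt k only runs
# when attempt k-1 ended in failure, and only the final attempt has
# continue-on-error: false, so a test fails the job only once all attempts are used up.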
def build_and_test_images(id: str,
name: str,
needs: List[str],
images: List[str],
tests_per_image: Dict[str, Set[str]],
tests: Dict[str, Dict],
parallel_images: int = None,
attempts: int = 3) -> str:
if 'init-workflow' not in needs:
needs.insert(0, 'init-workflow')
if parallel_images is None:
parallel_images = len(images)
failure = "'failure'"
return (f' {id}:\n'
f' name: "{name} (${{{{ matrix.image }}}})"\n'
f' needs: [{", ".join(needs)}]\n'
f' if: >\n'
f" needs.init-workflow.outputs.run-at-all == 'true' &&\n"
f" needs.init-workflow.outputs.run-builds-and-tests == 'true'\n"
f' runs-on: ubuntu-latest\n'
f'\n'
f' strategy:\n'
f' max-parallel: {parallel_images}\n'
f' fail-fast: false\n'
f' matrix:\n'
f' include:\n' +
'\n'.join([f' - image: {image}\n' +
f''.join([f' {test}: true\n'
for test in sorted(list(tests_per_image.get(image, [])))]) +
f' build_timeout: {30 if "-cpu-" in image else 40}\n'
for image in sorted(images)
# oneccl does not compile on GitHub Workflows:
# https://github.com/horovod/horovod/issues/2846
if '-oneccl-' not in image]) +
f'\n'
f' steps:\n'
f' - name: Clean up disk space\n'
f' # deleting these paths frees 38 GB disk space:\n'
f' # sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc\n'
f' # but this sometimes takes 3-4 minutes\n'
f' # so we delete only some sub-paths which are known to be quick (10s) and 20 GB\n'
f' run: |\n'
f' echo ::group::Disk space before clean up\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' for dir in /usr/share/dotnet/sdk/\*/nuGetPackagesArchive.lzma \\\n'
f' /usr/share/dotnet/shared \\\n'
f' /usr/local/lib/android/sdk/ndk \\\n'
f' /usr/local/lib/android/sdk/build-tools \\\n'
f' /opt/ghc\n'
f' do\n'
f' echo ::group::Deleting "$dir"\n'
f' sudo du -hsc $dir | tail -n1 || true\n'
f' sudo rm -rf $dir\n'
f' echo ::endgroup::\n'
f' done\n'
f'\n'
f' echo ::group::Disk space after clean up\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v2\n'
f' with:\n'
f' submodules: recursive\n'
f'\n'
f' - name: Setup Python\n'
f' uses: actions/setup-python@v2\n'
f' with:\n'
f' python-version: 3.8\n'
f'\n'
f' - name: Setup docker-compose\n'
f' run: pip install docker-compose\n'
f'\n'
f' - name: Configure AWS credentials\n'
f' id: aws\n'
f' uses: aws-actions/configure-aws-credentials@v1\n'
f' # AWS credentials are used to authenticate against AWS ECR to pull and push test images\n'
f' # We can only authenticate when running on Horovod repo (not a fork)\n'
f' if: >\n'
f' github.repository == \'horovod/horovod\' &&\n'
f' ( github.event_name != \'pull_request\' || github.event.pull_request.head.repo.full_name == github.repository )\n'
f' continue-on-error: true\n'
f' with:\n'
f' aws-access-key-id: ${{{{ secrets.AWS_ACCESS_KEY_ID }}}}\n'
f' aws-secret-access-key: ${{{{ secrets.AWS_SECRET_ACCESS_KEY }}}}\n'
f' aws-region: us-east-1\n'
f'\n'
f' - name: Login to Amazon ECR\n'
f' id: ecr\n'
f' if: steps.aws.outcome == \'success\'\n'
f' continue-on-error: true\n'
f' uses: aws-actions/amazon-ecr-login@v1\n'
f'\n'
f' - name: Add cache_from to docker-compose YAML\n'
f' if: steps.ecr.outcome == \'success\'\n'
f' run: |\n'
f' cat > docker-compose.test.override.yml <<EOF\n'
f' version: \'2.3\'\n'
f' services:\n'
f' ${{{{ matrix.image }}}}:\n'
f' build:\n'
f' cache_from:\n'
f' - ${{{{ steps.ecr.outputs.registry }}}}/buildkite:horovod-${{{{ matrix.image }}}}-latest\n'
f' EOF\n'
f' cat docker-compose.test.override.yml\n'
f' shell: bash\n'
f'\n'
f' - name: Pull latest test image\n'
f' if: steps.ecr.outcome == \'success\'\n'
f' continue-on-error: true\n'
f' run: |\n'
f' docker pull ${{{{ steps.ecr.outputs.registry }}}}/buildkite:horovod-${{{{ matrix.image }}}}-latest\n'
f' env:\n'
f' DOCKER_BUILDKIT: 1\n'
f'\n'
f' - name: Build\n'
f' id: build\n'
f' run: |\n'
f' override_yaml=""\n'
f' if [ -e docker-compose.test.override.yml ]; then override_yaml="-f docker-compose.test.override.yml"; fi\n'
f' .github/timeout-and-retry.sh ${{{{ matrix.build_timeout }}}}m 3 10 docker-compose -f docker-compose.test.yml $override_yaml build --pull ${{{{ matrix.image }}}}\n'
f' env:\n'
f' COMPOSE_DOCKER_CLI_BUILD: 1\n'
f' DOCKER_BUILDKIT: 1\n'
f'\n' +
'\n'.join([f' - name: "{test["label"]} [attempt {attempt} of {attempts}]"\n'
f' id: {test_id}_run_{attempt}\n'
f' continue-on-error: {"true" if attempt < attempts else "false"}\n'
f' if: always() && steps.build.outcome == \'success\' && matrix.{test_id} && {"true" if attempt == 1 else f"steps.{test_id}_run_{attempt-1}.outcome == {failure}"}\n'
f' run: |\n'
f' mkdir -p artifacts/${{{{ matrix.image }}}}/{test_id}_run_{attempt}\n'
f' docker-compose -f docker-compose.test.yml run -e GITHUB_ACTIONS --rm --volume "$(pwd)/artifacts/${{{{ matrix.image }}}}/{test_id}_run_{attempt}:/artifacts" ${{{{ matrix.image }}}} /usr/bin/timeout {test["timeout"]}m {test["command"]}\n'
f' shell: bash\n'
for test_id, test in sorted(tests.items(), key=lambda test: test[0])
for attempt in range(1, attempts+1)]) +
f'\n'
f' - name: Upload Test Results\n'
f' uses: actions/upload-artifact@v2\n'
f' if: always() && contains(matrix.image, \'-cpu-\')\n'
f' with:\n'
f' name: Unit Test Results - ${{{{ matrix.image }}}}\n'
f' path: artifacts/${{{{ matrix.image }}}}/**/*.xml\n'
f'\n'
f' - name: Push test image\n'
f' # We push test image to AWS ECR on push to Horovod master (not a fork)\n'
f' if: >\n'
f' github.event_name == \'push\' &&\n'
f' github.ref == \'refs/heads/master\' &&\n'
f' github.repository == \'horovod/horovod\' &&\n'
f' steps.ecr.outcome == \'success\'\n'
f' continue-on-error: true\n'
f' run: |\n'
f' docker image ls | head\n'
f' docker tag horovod_${{{{ matrix.image }}}} ${{{{ steps.ecr.outputs.registry }}}}/buildkite:horovod-${{{{ matrix.image }}}}-latest\n'
f' docker push ${{{{ steps.ecr.outputs.registry }}}}/buildkite:horovod-${{{{ matrix.image }}}}-latest\n'
f' docker image ls | head\n'
f' shell: bash\n')
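# build_and_test_macos mirrors the Linux job without Docker: a fixed include list of
# three framework combinations, a source build against pyenv Python 3.7.7, and the
# same attempt-based retry scheme for the parallel test suite.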
def build_and_test_macos(id: str, name: str, needs: List[str], attempts: int = 3) -> str:
if 'init-workflow' not in needs:
needs.insert(0, 'init-workflow')
failure = "'failure'"
return (f' {id}:\n'
f' name: "{name} (${{{{ matrix.image }}}}-macos)"\n'
f' needs: [{", ".join(needs)}]\n'
f' if: >\n'
f" needs.init-workflow.outputs.run-at-all == 'true' &&\n"
f" needs.init-workflow.outputs.run-builds-and-tests == 'true'\n"
f' runs-on: macos-latest\n'
f'\n'
f' strategy:\n'
f' max-parallel: 3\n'
f' fail-fast: false\n'
f' matrix:\n'
f' include:\n'
f''
f' - image: test-cpu-openmpi-py3_7-tf1_15_5-keras2_2_4-torch1_6_0-mxnet1_5_1_p0\n'
f' HOROVOD_WITH_MPI: 1\n'
f' HOROVOD_WITHOUT_GLOO: 1\n'
f' TENSORFLOW: 1.15.0\n'
f' KERAS: 2.2.4\n'
f' PYTORCH: 1.6.0\n'
f' PYTORCH_LIGHTNING: 1.3.8\n'
f' TORCHVISION: 0.7.0\n'
f' MXNET: 1.5.1.post0\n'
f'\n'
f' - image: test-cpu-gloo-py3_8-tf2_6_2-keras2_6_0-torch1_9_1-mxnet1_6_0\n'
f' HOROVOD_WITHOUT_MPI: 1\n'
f' HOROVOD_WITH_GLOO: 1\n'
f' TENSORFLOW: 2.6.5\n'
f' KERAS: 2.6.0\n'
f' PYTORCH: 1.9.1\n'
f' PYTORCH_LIGHTNING: 1.3.8\n'
f' TORCHVISION: 0.10.1\n'
f' MXNET: 1.6.0\n'
f'\n'
f'' # mxnet 1.8.0.post0 does not compile for macos due to missing dnnl_config.h
f'' # mxnet 1.9.0 does not exist for macos
f' - image: test-openmpi-cpu-gloo-py3_8-tf2_7_0-keras2_7_0-torch1_10_1-mxnet1_7_0_p2\n'
f' HOROVOD_WITH_MPI: 1\n'
f' HOROVOD_WITH_GLOO: 1\n'
f' TENSORFLOW: 2.7.3\n'
f' KERAS: 2.7.0\n'
f' PYTORCH: 1.10.1\n'
f' PYTORCH_LIGHTNING: 1.3.8\n'
f' TORCHVISION: 0.11.2\n'
f' MXNET: 1.7.0.post2\n'
f'\n'
f' steps:\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v2\n'
f' with:\n'
f' submodules: recursive\n'
f'\n'
f' - name: Build\n'
f' id: build\n'
f' env:\n'
f' HOROVOD_WITH_MPI: ${{{{ matrix.HOROVOD_WITH_MPI }}}}\n'
f' HOROVOD_WITHOUT_MPI: ${{{{ matrix.HOROVOD_WITHOUT_MPI }}}}\n'
f' HOROVOD_WITH_GLOO: ${{{{ matrix.HOROVOD_WITH_GLOO }}}}\n'
f' HOROVOD_WITHOUT_GLOO: ${{{{ matrix.HOROVOD_WITHOUT_GLOO }}}}\n'
f' TENSORFLOW: ${{{{ matrix.TENSORFLOW }}}}\n'
f' KERAS: ${{{{ matrix.KERAS }}}}\n'
f' PYTORCH: ${{{{ matrix.PYTORCH }}}}\n'
f' PYTORCH_LIGHTNING: ${{{{ matrix.PYTORCH_LIGHTNING }}}}\n'
f' TORCHVISION: ${{{{ matrix.TORCHVISION }}}}\n'
f' MXNET: ${{{{ matrix.MXNET }}}}\n'
f'\n'
f' # The python patch in the pyenv install step is to work around an incompatibility introduced in new xcode version in macOS Big Sur. The patch is provided by python team.\n'
f' # The original discussion is here https://github.com/pyenv/pyenv/issues/1737\n'
f' run: |\n'
f' brew reinstall -f zlib bzip2\n'
f' brew install -f openmpi cmake libuv pyenv coreutils curl\n'
f' export PATH=$(pyenv root)/shims:$PATH\n'
f' pyenv uninstall -f 3.7.7\n'
f' CFLAGS="-I$(brew --prefix bzip2)/include -I$(brew --prefix zlib)/include" LDFLAGS="-L$(brew --prefix zlib)/lib -L$(brew --prefix bzip2)/lib" pyenv install --patch 3.7.7 < <(curl -sSL https://github.com/python/cpython/commit/8ea6353.patch)\n'
f' pyenv global 3.7.7\n'
f' python --version\n'
f'\n'
f' python -m pip install -U pip\n'
f' pip install tensorflow==${{TENSORFLOW}} keras==${{KERAS}}\n'
f' if [[ ${{TENSORFLOW}} == 1.* ]] || [[ ${{TENSORFLOW}} == 2.[012345].* ]]; then pip install "h5py<3" "protobuf~=3.20"; fi\n'
f' pip install torch==${{PYTORCH}} pytorch_lightning==${{PYTORCH_LIGHTNING}} torchvision==${{TORCHVISION}}\n'
f' pip install mxnet==${{MXNET}}\n'
f' HOROVOD_WITH_TENSORFLOW=1 HOROVOD_WITH_PYTORCH=1 HOROVOD_WITH_MXNET=1 pip install --no-cache-dir .[test]\n'
f' horovodrun --check-build\n'
f'\n' +
'\n'.join([f' - name: Test [attempt {attempt} of {attempts}]\n'
f' id: test-{attempt}\n'
f' continue-on-error: {"true" if attempt < attempts else "false"}\n'
f' if: always() && steps.build.outcome == \'success\' && {"true" if attempt == 1 else f"steps.test-{attempt-1}.outcome == {failure}"}\n'
f'\n'
f' run: |\n'
f' export PATH=$(pyenv root)/shims:$PATH\n'
f' pyenv global 3.7.7\n'
f' python --version\n'
f'\n'
f' artifacts_path="$(pwd)/artifacts/${{{{ matrix.image }}}}-macos-run-{attempt}"\n'
f' mkdir -p "$artifacts_path"\n'
f' echo "::set-output name=artifacts-path::$artifacts_path"\n'
f' echo pytest -v --capture=no --continue-on-collection-errors --junit-xml=$artifacts_path/junit.\$1.\${{HOROVOD_RANK:-\${{OMPI_COMM_WORLD_RANK:-\${{PMI_RANK}}}}}}.\$2.xml \${{@:2}} > pytest.sh\n'
f' chmod u+x pytest.sh\n'
f'\n'
f' cd test/parallel\n'
f' ls test_*.py | gtimeout 10m xargs -n 1 horovodrun -np 2 /bin/bash ../../pytest.sh macos\n'
for attempt in range(1, attempts+1)]) +
f'\n'
f' - name: Upload Test Results\n'
f' uses: actions/upload-artifact@v2\n'
f' if: always()\n'
f' with:\n'
f' name: Unit Test Results - ${{{{ matrix.image }}}}-macos\n'
f' path: |\n' +
'\n'.join([f' ${{{{ steps.test-{attempt}.outputs.artifacts-path }}}}'
for attempt in range(1, attempts+1)]))
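# trigger_buildkite_job emits a pair of jobs: "<id>-trigger" starts a Buildkite build
# of the horovod/horovod pipeline, and "<id>" downloads its artifacts and fails when
# the Buildkite build did not pass.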
def trigger_buildkite_job(id: str, name: str, needs: List[str], mode: str) -> str:
if 'init-workflow' not in needs:
needs.insert(0, 'init-workflow')
return (f' {id}-trigger:\n'
f'    name: "{name} (trigger Buildkite)"\n'
f' needs: [{", ".join(needs)}]\n'
f' runs-on: ubuntu-latest\n'
f' if: >\n'
f' github.repository == \'horovod/horovod\' &&\n'
f" needs.init-workflow.outputs.run-at-all == 'true' &&\n"
f" needs.init-workflow.outputs.run-builds-and-tests == 'true' &&\n"
f' ( github.event_name != \'pull_request\' || github.event.pull_request.head.repo.full_name == github.repository )\n'
f' outputs:\n'
f' url: ${{{{ steps.build.outputs.url }}}}\n'
f'\n'
f' steps:\n'
f' - name: Trigger Buildkite Pipeline\n'
f' id: build\n'
f' uses: EnricoMi/trigger-pipeline-action@master\n'
f' env:\n'
f' PIPELINE: "horovod/horovod"\n'
f' # COMMIT is taken from GITHUB_SHA\n'
f' BRANCH: "${{{{ needs.init-workflow.outputs.buildkite-branch-label }}}} ({mode})"\n'
f' # empty MESSAGE will be filled by Buildkite from commit message\n'
f' MESSAGE: "${{{{ needs.init-workflow.outputs.buildkite-message }}}}"\n'
f' BUILDKITE_API_ACCESS_TOKEN: ${{{{ secrets.BUILDKITE_TOKEN }}}}\n'
f' BUILD_ENV_VARS: "{{\\"PIPELINE_MODE\\": \\"{mode}\\"}}"\n'
f'\n'
f' {id}:\n'
f'    name: "{name} (download Buildkite)"\n'
f' needs: [{id}-trigger]\n'
f' runs-on: ubuntu-latest\n'
f'\n'
f' steps:\n'
f' - name: Download Buildkite Artifacts\n'
f' id: download\n'
f' uses: EnricoMi/download-buildkite-artifact-action@v1\n'
f' with:\n'
f' buildkite_token: ${{{{ secrets.BUILDKITE_TOKEN }}}}\n'
f' buildkite_build_url: ${{{{ needs.{id}-trigger.outputs.url }}}}\n'
f' ignore_build_states: blocked,canceled,skipped,not_run\n'
f' ignore_job_states: timed_out\n'
f'          output_path: artifacts/Unit Test Results - {mode} on Buildkite\n'
f'\n'
f'      - name: Upload Test Results\n'
f'        uses: actions/upload-artifact@v2\n'
f'        if: always()\n'
f'        with:\n'
f'          name: Unit Test Results - {mode} on Buildkite\n'
f'          path: artifacts/Unit Test Results - {mode} on Buildkite/**/*.xml\n' +
f'\n'
f' - name: Check Buildkite job state\n'
f' if: >\n'
f' always() &&\n'
f' steps.download.conclusion == \'success\' &&\n'
f' steps.download.outputs.build-state != \'passed\'\n'
f' run: |\n'
f' echo "::warning::Buildkite pipeline did not pass: ${{{{ needs.{id}-trigger.outputs.url }}}}"\n'
f' exit 1\n')
def publish_docker_images(needs: List[str], images: List[str]) -> str:
if 'init-workflow' not in needs:
needs.insert(0, 'init-workflow')
if needs != ['init-workflow', 'build-and-test', 'buildkite']:
raise RuntimeError('This job has hard-coded needs, which you may want to adjust')
return (f' docker-config:\n'
f' name: Configure docker build\n'
f' needs: [{", ".join(needs)}]\n'
f" # build-and-test and buildkite might have been skipped (! needs.init-workflow.outputs.run-builds-and-tests)\n"
f' # buildkite might have been skipped (workflow runs for a fork PR),\n'
f' # we still want to build docker images (though we might not want to push them)\n'
f' if: >\n'
f' always() &&\n'
f" needs.init-workflow.outputs.run-at-all == 'true' &&\n"
f" needs.init-workflow.outputs.run-builds-and-tests == 'true' &&\n"
f" needs.build-and-test.result == 'success' &&\n"
f" ( needs.buildkite.result == 'success' || needs.buildkite.result == 'skipped' )\n"
f' runs-on: ubuntu-latest\n'
f' outputs:\n'
f' run: ${{{{ steps.config.outputs.run }}}}\n'
f' push: ${{{{ steps.config.outputs.push }}}}\n'
f'\n'
f' steps:\n'
f' - name: Config\n'
f' id: config\n'
f' env:\n'
f' # run workflow for all events on Horovod repo and non-schedule events on forks\n'
f' run: ${{{{ github.repository == \'horovod/horovod\' || github.event_name != \'schedule\' }}}}\n'
f' # push images only from Horovod repo and for schedule and push events\n'
f' push: ${{{{ github.repository == \'horovod/horovod\' && contains(\'schedule,push\', github.event_name) }}}}\n'
f' run: |\n'
f' echo Repository: ${{{{ github.repository }}}}\n'
f' echo Event: ${{{{ github.event_name }}}}\n'
f' echo Run: $run\n'
f' echo "::set-output name=run::$run"\n'
f' echo Push: $push\n'
f' echo "::set-output name=push::$push"\n'
f'\n'
f' docker-build:\n'
f' name: Build docker image ${{{{ matrix.docker-image }}}} (push=${{{{ needs.docker-config.outputs.push }}}})\n'
f' needs: docker-config\n'
f' if: always() && needs.docker-config.outputs.run == \'true\'\n'
f' runs-on: ubuntu-latest\n'
f'\n'
f' # we want an ongoing run of this workflow to be canceled by a later commit\n'
f' # so that there is only one concurrent run of this workflow for each branch\n'
f' concurrency:\n'
f' # github.ref means something like refs/heads/master or refs/tags/v0.22.1 or the branch.\n'
f' # This helps to not cancel concurrent runs on master and a tag that share the same commit\n'
f' # head_ref refers to the pull request branch so we run only one workflow for the given pull request.\n'
f'      # On master, head_ref is empty, so we use the SHA of the commit. This means\n'
f'      # commits to master will not be cancelled, which is important to ensure\n'
f'      # that every commit to master is fully tested and deployed.\n'
f' group: docker-${{{{ matrix.docker-image }}}}-${{{{ github.ref }}}}-${{{{ github.head_ref || github.sha }}}}\n'
f' cancel-in-progress: true\n'
f'\n'
f' strategy:\n'
f' fail-fast: false\n'
f' matrix:\n'
f' docker-image:\n' +
''.join([f' - {image}\n'
for image in images]) +
f'\n'
f' steps:\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v2\n'
f' with:\n'
f' submodules: \'recursive\'\n'
f'\n'
f' - name: Docker meta\n'
f' id: meta\n'
f' uses: crazy-max/ghaction-docker-meta@v2\n'
f' with:\n'
f' # list of Docker images to use as base name for tags\n'
f' images: |\n'
f' horovod/${{{{ matrix.docker-image }}}}\n'
f' # generate Docker tags based on the following events/attributes\n'
f' tags: |\n'
f' type=schedule\n'
f' type=ref,event=branch\n'
f' type=ref,event=pr\n'
f' type=semver,pattern={{{{version}}}}\n'
f' type=semver,pattern={{{{major}}}}.{{{{minor}}}}\n'
f' type=semver,pattern={{{{major}}}}\n'
f' type=sha\n'
f'\n'
f' - name: Set up QEMU\n'
f' uses: docker/setup-qemu-action@v1\n'
f' - name: Set up Docker Buildx\n'
f' uses: docker/setup-buildx-action@v1\n'
f'\n'
f' - name: Login to DockerHub\n'
f' if: needs.docker-config.outputs.push == \'true\'\n'
f' uses: docker/login-action@v1\n'
f' with:\n'
f' username: ${{{{ secrets.DOCKERHUB_USERNAME }}}}\n'
f' password: ${{{{ secrets.DOCKERHUB_TOKEN }}}}\n'
f'\n'
f' - name: Clean up disk space\n'
f' # deleting these paths frees 38 GB disk space:\n'
f' # sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc\n'
f' # but this sometimes takes 3-4 minutes\n'
f' # so we delete only some sub-paths which are known to be quick (10s) and 20 GB\n'
f' run: |\n'
f' echo ::group::Disk space before clean up\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' for dir in /usr/share/dotnet/sdk/\*/nuGetPackagesArchive.lzma \\\n'
f' /usr/share/dotnet/shared \\\n'
f' /usr/local/lib/android/sdk/ndk \\\n'
f' /usr/local/lib/android/sdk/build-tools \\\n'
f' /opt/ghc\n'
f' do\n'
f' echo ::group::Deleting "$dir"\n'
f' sudo du -hsc $dir | tail -n1 || true\n'
f' sudo rm -rf $dir\n'
f' echo ::endgroup::\n'
f' done\n'
f'\n'
f' echo ::group::Disk space after clean up\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' - name: Build and push\n'
f' uses: docker/build-push-action@v2\n'
f' timeout-minutes: 60\n'
f' with:\n'
f' context: .\n'
f' file: ./docker/${{{{ matrix.docker-image }}}}/Dockerfile\n'
f' push: ${{{{ needs.docker-config.outputs.push }}}}\n'
f' tags: ${{{{ steps.meta.outputs.tags }}}}\n'
f' labels: ${{{{ steps.meta.outputs.labels }}}}\n'
f'\n'
f' - name: Show free space\n'
f' if: always()\n'
f' run: |\n'
f' echo ::group::Disk Space\n'
f' df -h\n'
f' echo ::endgroup::\n'
f'\n'
f' echo ::group::Docker Space\n'
f' docker system df\n'
f' echo ::endgroup::\n'
f'\n'
f' echo ::group::Docker Images\n'
f' docker images -a\n'
f' echo ::endgroup::\n'
f'\n'
f' echo ::group::Docker Container\n'
f' docker container list -a\n'
f' echo ::endgroup::\n')
def sync_files(needs: List[str]) -> str:
return (f' sync-files:\n'
f' name: "Sync Files (${{{{ matrix.name }}}})"\n'
f' needs: [{", ".join(needs)}]\n'
f' runs-on: ubuntu-latest\n'
f'\n'
f' strategy:\n'
f' fail-fast: false\n'
f' matrix:\n'
f' include:\n'
f' - name: Docs Summary\n'
f' left_file: README.rst\n'
f' right_file: docs/summary.rst\n'
f' init: sed -i -e s/docs\///g README.rst\n'
f'\n'
f' - name: Examples Keras Spark3\n'
f' left_file: examples/spark/keras/keras_spark_rossmann_run.py\n'
f' right_file: examples/spark/keras/keras_spark3_rossmann.py\n'
f'\n'
f' steps:\n'
f' - name: Checkout\n'
f' uses: actions/checkout@v1\n'
f'\n'
f' - name: Diffing ${{{{ matrix.left_file }}}} with ${{{{ matrix.right_file }}}}\n'
f' env:\n'
f' LEFT: ${{{{ matrix.left_file }}}}\n'
f' RIGHT: ${{{{ matrix.right_file }}}}\n'
f' INIT: ${{{{ matrix.init }}}}\n'
f' run: |\n'
f' $INIT\n'
f'\n'
f' patch --quiet -p0 $LEFT ${{RIGHT}}.patch -o ${{LEFT}}.expected\n'
f' if ! diff -q ${{LEFT}}.expected --label $LEFT $RIGHT\n'
f' then\n'
f' echo\n'
f' echo "::error::Files are out-of-sync: $LEFT vs. $RIGHT"\n'
f' echo "Unexpected differences are:"\n'
f' diff ${{LEFT}}.expected --label $LEFT $RIGHT || true\n'
f'\n'
f' echo\n'
f' echo "Use the following as ${{RIGHT}}.patch to accept those changes:"\n'
f' diff $LEFT $RIGHT || true\n'
f'\n'
f' false\n'
f' fi\n')
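# Assemble ci.yaml: images whose names contain all of tfmin/torchmin/mxnetmin form the
# "min" set, those containing all of tfhead/torchhead/mxnethead form the "head" set, and
# the remaining release images are split into CPU and GPU/mixed subsets; the header plus
# all generated jobs are then written to .github/workflows/ci.yaml.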
with open(path.joinpath('workflows', 'ci.yaml').absolute(), 'wt') as w:
mins = ['tfmin', 'torchmin', 'mxnetmin']
heads = ['tfhead', 'torchhead', 'mxnethead']
allmin_images = [image for image in images if all(min in image for min in mins)]
allhead_images = [image for image in images if all(head in image for head in heads)]
release_images = [image for image in images if image not in allhead_images + allmin_images]
cpu_release_images = [image for image in release_images if '-cpu-' in image]
gpu_release_images = [image for image in release_images if '-gpu-' in image or '-mixed-' in image]
workflow = workflow_header() + jobs(
init_workflow_job(),
# changing these names requires changes in the workflow-conclusion step in ci-results.yaml
build_and_test_images(id='build-and-test', name='Build and Test', needs=['init-workflow'], images=release_images, parallel_images=len(cpu_release_images), tests_per_image=tests_per_image, tests=tests),
build_and_test_images(id='build-and-test-heads', name='Build and Test heads', needs=['build-and-test'], images=allhead_images, tests_per_image=tests_per_image, tests=tests),
build_and_test_images(id='build-mins', name='Build mins', needs=['build-and-test'], images=allmin_images, tests_per_image=tests_per_image, tests={}),
build_and_test_macos(id='build-and-test-macos', name='Build and Test macOS', needs=['build-and-test']),
trigger_buildkite_job(id='buildkite', name='Build and Test GPU', needs=['build-and-test'], mode='GPU NON HEADS'),
trigger_buildkite_job(id='buildkite-heads', name='Build and Test GPU heads', needs=['build-and-test'], mode='GPU HEADS'),
publish_docker_images(needs=['build-and-test', 'buildkite'], images=['horovod', 'horovod-cpu', 'horovod-ray']),
sync_files(needs=['init-workflow'])
)
print(workflow, file=w, end='')
if __name__ == "__main__":
main()
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from myconfigparser import UnitParser, load_unit_data, Decoration
file_path = '../../development/table/unit.ini'
def do(file_path=file_path):
with open(file_path) as f:
data = load_unit_data(f, parser=UnitParser)
for unit in data.sections():
classifications = unit.list('type')
if 'townhall' in classifications:
classifications.remove('townhall')
unit['type'] = f'"{','.join(classifications)}"'
print(unit['Name'])
fil = lambda u: 'townhall' in u.list('type')
for unit in filter(fil, data.sections()):
print(unit['Name'])
break
else:
print("Success! Empty.")
with open(file_path, 'w') as f:
data.write(f)
| import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from myconfigparser import UnitParser, load_unit_data, Decoration
file_path = '../../development/table/unit.ini'
def do(file_path=file_path):
with open(file_path) as f:
data = load_unit_data(f, parser=UnitParser)
for unit in data.sections():
classifications = unit.list('type')
if 'townhall' in classifications:
classifications.remove('townhall')
unit['type'] = f'"{",".join(classifications)}"'
print(unit['Name'])
fil = lambda u: 'townhall' in u.list('type')
for unit in filter(fil, data.sections()):
print(unit['Name'])
break
else:
print("Success! Empty.")
with open(file_path, 'w') as f:
data.write(f)
|
from displayarray import read_updates
with read_updates(0) as a, read_updates(0) as b:
for i in range(1000):
a.update()
b.update()
try:
print(a.frames == b.frames)
except ValueError:
print(f"frame comparison: {(a.frames["0"][0] == b.frames["0"][0]).all()}")
| from displayarray import read_updates
with read_updates(0) as a, read_updates(0) as b:
for i in range(1000):
a.update()
b.update()
try:
print(a.frames == b.frames)
except ValueError:
print(f"frame comparison: {(a.frames['0'][0] == b.frames['0'][0]).all()}")
|
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors: Dun Liang <randonlang@gmail.com>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import re
import os
from jittor_utils import LOG, run_cmd, simple_timer
import json
from collections import OrderedDict
def parse_attrs(s):
'''parse @attrs(..., x=y) syntax'''
attrs = {}
if s is None: return attrs
for a in s.split(','):
a = a.strip()
if len(a)==0: continue
if '=' in a:
k, v = a.split('=')
attrs[k] = v
else:
attrs[a] = 1
return attrs
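# Illustrative example: parse_attrs("core_name=jittor_core, submodule")
# returns {'core_name': 'jittor_core', 'submodule': 1}.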
pytype_map = {
"const char*": ["PyUnicode_AsUTF8", "PyUnicode_FromString", "PyUnicode_CheckExact"],
"int": ["PyLong_AsLong", "PyLong_FromLong", "PyLong_CheckExact"],
"int64": ["PyLong_AsLongLong", "PyLong_FromLongLong", "PyLong_CheckExact"],
"uint": ["PyLong_AsUnsignedLong", "PyLong_FromUnsignedLong", "PyLong_CheckExact"],
"uint64": ["PyLong_AsUnsignedLongLong", "PyLong_FromUnsignedLongLong", "PyLong_CheckExact"],
"void": ["...", "GET_PY_NONE", "..."],
}
def get_pytype_map(T, i):
if T in pytype_map:
return pytype_map[T][i]
return ["from_py_object", "to_py_object", "is_type"][i]+"<"+T+">"
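# For example, get_pytype_map("int", 1) -> "PyLong_FromLong"; unknown types fall back
# to the templated helpers, e.g. get_pytype_map("NanoVector", 2) -> "is_type<NanoVector>".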
binary_number_slots = {
"__add__": "nb_add",
"__sub__": "nb_subtract",
"__mul__": "nb_multiply",
"__mod__": "nb_remainder",
"__divmod__": "nb_divmod",
"__pow__": "nb_power",
"__lshift__": "nb_lshift",
"__rshift__": "nb_rshift",
"__and__": "nb_and",
"__xor__": "nb_xor",
"__or__": "nb_or",
"__floordiv__": "nb_floor_divide",
"__truediv__": "nb_true_divide",
"__matmul__": "nb_matrix_multiply",
}
for k,v in list(binary_number_slots.items()):
# __add__: nb_add ----> __iadd__: nb_inplace_add
binary_number_slots["__i"+k[2:]] = "nb_inplace"+v[2:]
unary_number_slots = {
"__neg__": "nb_negative",
"__abs__": "nb_absolute",
}
def split_args(s):
# split args xxx,xxx, xx<xx,xx>, xx
s = s.strip()
if s=="": return []
prev = -1
presum = 0
args = []
for i in range(len(s)):
if s[i]=='<':
presum += 1
elif s[i]=='>':
presum -= 1
if presum==0 and s[i]==',':
args.append(s[prev+1:i])
prev = i
args.append(s[prev+1:])
return args
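# Illustrative example: split_args("int a, map<int,int> b") returns
# ["int a", " map<int,int> b"] -- commas nested inside <...> are not split points.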
def get_def_code(df, scope_name, pyname, self_as_arg0=False):
is_fast_call = not pyname.startswith("__")
no_need_convert = pyname == "__getitem__"
args = df["args"]
# n==1 && PyXXX__CheckExact(args[0]) && ...
max_args = len(args)
min_args = max_args
for tid, a in enumerate(args):
if a[2] != "":
min_args = tid
break
arg_names = [ f"args[{i}]" for i in range(len(args))]
if self_as_arg0:
max_args -= 1
min_args -= 1
arg_names = ["self"] + arg_names[:-1]
kw_args_id = []
for aid, arg in enumerate(args):
if "VarHolder*" != arg[0] and is_fast_call:
kw_args_id.append(aid)
func_quick_check_runable = ""
func_quick_check_size = f"n<={max_args} && n>={min_args}"
if len(kw_args_id):
func_quick_check_size = f"n+(kw?Py_SIZE(kw):0)<={max_args} && n+(kw?Py_SIZE(kw):0)>={min_args}"
fill_with_default = ""
func_args_convert = ""
func_call = df["func_name"]+"("
pytypes = [ get_pytype_map(a[0],0) for a in args ]
for tid, tpc in enumerate(pytypes):
check = get_pytype_map(args[tid][0],2)
default_arg = args[tid][2]
jtp = args[tid][0]
holder_dec = ""
holder_set = ""
if jtp == "VarHolder*":
holder_dec = f"unique_ptr<VarHolder> arg{tid}_holder"
holder_set = f", arg{tid}_holder"
if len(default_arg):
func_args_convert += f"""
{holder_dec};
{jtp} arg{tid};
if (n>{tid-self_as_arg0}) {{
CHECK(({check}({arg_names[tid]})));
arg{tid} = {tpc}({arg_names[tid]}{holder_set});
arg_filled |= 1ull << {tid};
}}
"""
fill_with_default += f"""
if (!(arg_filled & (1ull<<{tid}))) {{
arg{tid} = {default_arg};
}}
"""
else:
func_quick_check_runable += f" && {check}({arg_names[tid]})"
func_args_convert += f"""
{holder_dec};
{jtp} arg{tid} = {tpc}({arg_names[tid]}{holder_set});
"""
if tid: func_call += ","
if args[tid][3].endswith("&&"):
func_call += f"move(arg{tid})"
else:
func_call += f"arg{tid}"
if pyname == "__richcmp__":
for rname in [ "__lt__", "__le__", "__gt__",
"__ge__", "__eq__", "__ne__"]:
if rname in df["attrs"]:
func_quick_check_runable += " && op==Py_"+rname[2:-2].upper()
# fill args with keyword arguments
fill_with_kw = ""
if is_fast_call and len(kw_args_id):
fill_with_kw = f"""
if (kw) {{
auto kw_n = Py_SIZE(kw);
for (int i=0; i<kw_n; i++) {{
auto ko = PyTuple_GET_ITEM(kw, i);
auto vo = args[i+n];
auto ks = PyUnicode_AsUTF8(ko);
uint khash = hash(ks);
{"".join([
f'''
if (khash == {get_hash(args[aid][1])}u) {{
// hash match {args[aid][1]}
CHECK(({get_pytype_map(args[aid][0],2)}(vo)));
arg{aid} = {pytypes[aid]}(vo);
arg_filled |= 1ull << {aid};
continue;
}}
'''
for aid in kw_args_id
])}
LOGf << "Not a valid keyword:" << ks;
}}
}}
"""
if len(args):
func_args_convert += """
CHECK(!PyErr_Occurred());
"""
func_call += ")"
if df["is_property"]:
if pyname.startswith("__get__"):
func_call = df["func_name"]
else:
assert pyname.startswith("__set__"), pyname
func_call = df["func_name"] + "= arg0"
has_return = df["return_t"]!="void" and df["return_t"]!=""
# add XXX::xxx or XXX->xxx if is class def
if df["is_scope_def"]:
if df["is_static"]:
func_call = f"{scope_name}::" + func_call
else:
func_call = f"(GET_RAW_PTR({scope_name},self))->" + func_call
if pyname == "__init__":
# XXX->xxx(...) ---> new XXX xxx(...)
assert "->" in func_call
func_call = "new " + func_call.replace("->", " ")
if no_need_convert:
func_quick_check_runable = ""
func_args_convert = ""
fill_with_kw = fill_with_default = ""
return (
func_quick_check_size + func_quick_check_runable,
func_args_convert,
fill_with_kw+fill_with_default,
func_call,
has_return
)
hash_to_key_map = {}
def get_hash(s):
mask = (1<<32)-1
v=0
mul = 1
for c in s:
v += mul * ord(c)
mul *= 55
v &= mask
mul &= mask
if v in hash_to_key_map:
assert hash_to_key_map[v] == s, \
f"hash conflict {hash_to_key_map[v]} {s} {hash_to_key_map}"
hash_to_key_map[v] = s
return v
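# get_def_code embeds get_hash(name) of each keyword-argument name as a constant in the
# generated C++ and compares it against hash(ks) of the incoming keyword at runtime
# (assuming the C++ hash() implements the same rolling hash); hash_to_key_map asserts
# that no two names used during generation collide.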
reg = re.compile(
'(/\\*(.*?)\\*/\\s*)?(//\\s*@pyjt\\(([^\\n]*)\\)\\s*)'
# ^^^^^^^^^^^^^^^^^ ^^^^ ^^^^
# doc string $1 pyjt args $3
+
'(//\\s*@attrs\\(([^\\n]*)\\)\\s*)?'
# ^^^^^ ^^^^^^^
# attrs args $5
, re.DOTALL)
def compile_src(src, h, basename):
res = list(reg.finditer(src, re.S))
if len(res)==0: return
class_ranges = None
class_name = None
class_info = None
submodule_name = None
submodule_ranges = None
submodule_info = None
defs = []
LOG.vv(("find in", h))
for x in res:
LOG.vvv((x, x.groups()))
g = x.groups()
doc = g[1]
pyjt = g[3]
attrs = g[5]
esplit = lambda x: [] if x==None else \
[ a.strip() for a in x.split(",") if len(a.strip()) ]
attrs = parse_attrs(attrs)
pynames = esplit(pyjt)
end = x.end()
def find_bc(i):
while src[i] not in "({;":
i += 1
j = i+1
if src[i]==';':
return i, j
presum = 1
while True:
if src[j] in "({[":
presum += 1
elif src[j] in ")}]":
presum -= 1
if presum==0:
s = src[i]+src[j]
assert s in ("()","{}","()"), "braces not match "+s
return i, j
j += 1
# // @pyjt(DType)
# struct DType {
# ^ --> a
# .....
# } <--- b
# or
# // @pyjt(hash)
# inline uint hash(const char* input)
# ^ --> a ^ --> b
a, b = find_bc(end)
is_property = 0
if src[a] == ';':
# This case
# class XXX {
# // @pyjt(property)
# T property;
# }
is_property = 1
if src[a] == '{':
assert len(pynames)==1
if "submodule" in attrs:
assert submodule_ranges==None
submodule_ranges = (a, b)
submodule_name = src[end:a-1].strip().split()[-1]
submodule_info = {
"pynames": pynames,
"attrs": attrs
}
continue
assert class_ranges==None
class_ranges = (a, b)
class_name = src[end:a-1].strip().split()[-1]
class_info = {
"pynames": pynames,
"attrs": attrs
}
continue
is_scope_def = False
is_static = False
scope_name = ""
if class_ranges != None:
if class_ranges[0] < a and a < class_ranges[1]:
is_scope_def = True
scope_name = class_name
if submodule_ranges != None:
if submodule_ranges[0] < a and a < submodule_ranges[1]:
is_scope_def = True
scope_name = submodule_name
is_static = True
dec = src[end:b+1].strip()
arr = src[end:a].strip().split()
func_name = arr[-1]
is_constructor = False
if is_scope_def and func_name==class_name:
is_constructor = True
args = []
for arg in split_args(src[a+1:b]):
if arg=="": continue
default = ""
if "=" in arg:
arg, default = arg.split('=')
default = default
arg = arg.strip()
name = arg.split(' ')[-1]
tp = arg[:-len(name)]
tp = tp.strip()
prev_tp = tp
# const string& ----> string
if tp.startswith("const") and tp.endswith("&"):
tp = tp[5:-1].strip()
# T&& -> T
if tp.endswith("&&"):
tp = tp[:-2].strip()
# ArrayArgs& -> ArrayArgs
if tp.endswith("&"):
tp = tp[:-1].strip()
args.append((tp, name.strip(), default.strip(), prev_tp))
return_t = ""
for a in arr[:-1]:
if a in ["", "inline", "constexpr"]: continue
if a == "static":
is_static = True
continue
if return_t != "": return_t += " "
return_t += a
if is_scope_def and class_info and "submodule" in class_info["attrs"]:
is_static = True
for pid, pyname in enumerate(pynames):
for rname in [ "__lt__", "__le__", "__gt__",
"__ge__", "__eq__", "__ne__"]:
if pyname.endswith(rname):
attrs[rname] = 1
pynames[pid] = pyname.replace(rname, "__richcmp__")
def_info = {
"is_scope_def": is_scope_def,
"is_constructor": is_constructor,
"is_static": is_static,
"is_property": is_property,
"func_name": func_name,
"args": args, # [(type,name,defaut), ...]
"return_t": return_t, # return type
"dec": dec, # full string of xxx(A a, B b)
"pynames": pynames, # names in @pyjt(...)
"attrs": attrs, # attrs in @attrs(...)
"doc": doc,
"scope_name": scope_name,
}
if is_property:
# This case
# class XXX {
# // @pyjt(property)
# T property;
# }
assert is_scope_def and not is_static
def_info["is_property"] = 1
def_info["pynames"] = ["__get__"+n for n in pynames]
assert return_t != "void"
defs.append(dict(def_info))
def_info["pynames"] = ["__set__"+n for n in pynames]
assert len(args) == 0
def_info["args"] = [(def_info["return_t"], func_name, "", "")]
def_info["return_t"] = "void"
defs.append(dict(def_info))
continue
else:
defs.append(def_info)
LOG.vvv(json.dumps(def_info, indent=4))
# deal with defs
if len(defs) == 0: return
# include_name = h[4:] # remove "src/" prefix
include_name = h
code = []
class_defs_code = []
class_getsets_code = []
class_gets = OrderedDict()
class_sets = OrderedDict()
class_slots_code = []
submodule_defs_code = []
def_targets = OrderedDict()
for df in defs:
for name in df["pynames"]:
if df["is_scope_def"] and '.' not in name:
if df["scope_name"] == class_name:
name = class_info["pynames"][0] + '.' + name
else:
name = submodule_info["pynames"][0] + '.' + name
if name not in def_targets:
def_targets[name] = []
def_targets[name].append(df)
for name in def_targets:
dfs = def_targets[name]
target_scope_name = None
LOG.vv(name)
if "." in name:
target_scope_name, name = name.split(".")
# array for each df:
arr_func_quick_check_runable = []
arr_func_args_convert = []
arr_fill_with_default = []
arr_func_call = []
arr_has_return = []
self_as_arg0 = False
for df in dfs:
self_as_arg0 = class_info and \
target_scope_name == class_info["pynames"][0] and \
df["scope_name"] == submodule_name \
and not name.startswith("__")
res = get_def_code(df, df["scope_name"], name, bool(self_as_arg0))
arr_func_quick_check_runable.append(res[0])
arr_func_args_convert.append(res[1])
arr_fill_with_default.append(res[2])
arr_func_call.append(res[3])
arr_has_return.append(res[4])
slot_name = None
func_cast = ""
func_fill = ""
if name == "__init__":
slot_name = "tp_init"
func_head = "(PyObject* self, PyObject* _args, PyObject* kw) -> int"
func_fill = """
int64 n = Py_SIZE(_args);
auto args = (PyObject**)&PyTuple_GET_ITEM(_args, 0);
(void)n, (void)args;
// TODO: support kw
CHECK(kw==0);
"""
elif name == "__repr__":
slot_name = "tp_repr"
func_head = "(PyObject* self) -> PyObject*"
func_fill = "int64 n = 0; (void)n;"
elif name.startswith("__get__"):
slot_name = "tp_gets"
name = name[len("__get__"):]
func_head = "(PyObject* self, void*) -> PyObject*"
func_fill = "int64 n = 0; (void)n;"
elif name.startswith("__set__"):
slot_name = "tp_sets"
name = name[len("__set__"):]
func_head = "(PyObject* self, PyObject* arg, void*) -> int"
func_fill = """
int64 n=1;
PyObject** args = &arg;
(void)n, (void)args;
"""
elif name == "__call__":
slot_name = "tp_call"
func_head = "(PyObject* self, PyObject* _args, PyObject* kw) -> PyObject*"
func_fill = """
int64 n = Py_SIZE(_args);
auto args = (PyObject**)&PyTuple_GET_ITEM(_args, 0);
(void)n, (void)args;
// TODO: support kw
CHECK(kw==0);
"""
elif name == "__dealloc__":
slot_name = "tp_dealloc"
func_head = "(PyObject* self) -> void"
func_fill = "int64 n = 0"
elif name in binary_number_slots:
slot_name = "tp_as_number->"+binary_number_slots[name]
func_head = "(PyObject* self, PyObject* b) -> PyObject*"
if name.endswith("pow__"):
func_head = "(PyObject* self, PyObject* b, PyObject*) -> PyObject*"
func_fill = """
int64 n = 2;
PyObject* args[] = {self, b};
(void)n, (void)args;
"""
elif name in unary_number_slots:
slot_name = "tp_as_number->"+unary_number_slots[name]
func_head = "(PyObject* self) -> PyObject*"
func_fill = """
int64 n = 1;
PyObject* args[] = {self};
(void)n, (void)args;
"""
elif name == "__richcmp__":
slot_name = "tp_richcompare"
func_head = "(PyObject* self, PyObject* b, int op) -> PyObject*"
func_fill = """
int64 n = 2;
PyObject* args[] = {self, b};
(void)n, (void)args;
"""
elif name == "__len__":
slot_name = "tp_as_sequence->sq_length"
func_head = "(PyObject* self) -> Py_ssize_t"
func_fill = """
int64 n = 0;
(void)n;
"""
elif name == "__map_len__":
slot_name = "tp_as_mapping->mp_length"
func_head = "(PyObject* self) -> Py_ssize_t"
func_fill = """
int64 n = 0;
(void)n;
"""
elif name == "__getitem__":
slot_name = "tp_as_sequence->sq_item"
func_head = "(PyObject* self, Py_ssize_t arg0) -> PyObject*"
func_fill = f"""
int64 n = 1;
(void)n;
if (arg0 >= GET_RAW_PTR({dfs[0]["scope_name"]},self)->size()) {{
PyErr_SetString(PyExc_IndexError, "");
return 0;
}}
"""
elif name == "__map_getitem__":
slot_name = "tp_as_mapping->mp_subscript"
func_head = "(PyObject* self, PyObject* arg0) -> PyObject*"
func_fill = f"""
int64 n = 1;
PyObject* args[] = {{arg0}};
(void)n;
"""
elif name.startswith("__"):
LOG.f(f"Not support slot {name}")
continue
else:
func_head = "(PyObject* self, PyObject** args, int64 n, PyObject* kw) -> PyObject*"
func_cast = f"(PyCFunction)(PyObject* (*)(PyObject*,PyObject**,int64,PyObject*))"
# if not return, return py_none
arr_has_return = [ True for _ in arr_has_return ]
arr_func_return = []
doc_all = ""
decs = "Declarations:\n"
for did, has_return in enumerate(arr_has_return):
df = dfs[did]
func_call = arr_func_call[did]
if df["doc"]:
doc_all += "Document:\n"
doc_all += df["doc"]
doc_all += "\nDeclaration:\n"
doc_all += df["dec"]
decs += df["dec"]+'\n'
if has_return:
assert "-> int" not in func_head
if "-> PyObject*" in func_head:
if "return_self" in df["attrs"]:
arr_func_return.append(
f"return (({func_call}), Py_INCREF(self), self)")
else:
arr_func_return.append(
f"return {get_pytype_map(df["return_t"],1)}(({func_call}))")
func_return_failed = "return nullptr"
else:
arr_func_return.append(
f"return ({func_call});")
func_return_failed = "return -1"
else:
if "-> int" in func_head:
arr_func_return.append(f"return ({func_call},0)")
func_return_failed = "return -1"
else:
assert "-> void" in func_head
arr_func_return.append(f"{func_call};return")
func_return_failed = "return"
func = f"""
{func_cast}[]{func_head} {{
try {{
{func_fill};
uint64 arg_filled=0;
(void)arg_filled;
{"".join([f'''
if ({arr_func_quick_check_runable[did]}) {{
{arr_func_args_convert[did]};
{arr_fill_with_default[did]};
{arr_func_return[did]};
}}
'''
for did in range(len(arr_func_return))
])}
LOGf << "Not a valid call";
}} catch (const std::exception& e) {{
PyErr_Format(PyExc_RuntimeError, "%s\\n%s",
e.what(),
R""({decs})""
);
}}
{func_return_failed};
}}
"""
if slot_name:
if slot_name=="tp_gets":
class_gets[name] = {
"func": func,
"doc": doc_all
}
continue
if slot_name=="tp_sets":
class_sets[name] = {
"func": func,
"doc": ""
}
continue
class_slots_code.append(f"""
tp.{slot_name} = {func};
""")
continue
need_static = ""
if df["is_scope_def"] and df["is_static"] and \
df["scope_name"] == class_name and \
"submodule" not in class_info["attrs"]:
need_static = " | METH_STATIC"
func = (f"""
{{ R""({name})"",
{func},
METH_FASTCALL | METH_KEYWORDS{need_static},
R""({doc_all})""
}}""")
if df["is_scope_def"]:
if df["scope_name"] == class_name or \
(class_info and \
target_scope_name == class_info["pynames"][0]):
class_defs_code.append(func)
else:
submodule_defs_code.append(func)
else:
code.append(func)
prop_names = list(set(class_gets.keys()).union(class_sets.keys()))
prop_names = sorted(prop_names)
for prop_name in prop_names:
get_func = "NULL"
set_func = "NULL"
doc = ""
if prop_name in class_gets:
get_func = class_gets[prop_name]["func"]
if class_gets[prop_name]["doc"]:
doc += class_gets[prop_name]["doc"]
if prop_name in class_sets:
set_func = class_sets[prop_name]["func"]
if class_sets[prop_name]["doc"]:
doc += class_sets[prop_name]["doc"]
class_getsets_code.append(f"""
{{"{prop_name}", {get_func}, {set_func}, R""({doc})""}}
""")
code.append("{0,0,0,0}")
class_defs_code.append("{0,0,0,0}")
class_getsets_code.append("{0,0,0,0}")
submodule_defs_code.append("{0,0,0,0}")
core_name = "jittor_core"
if class_info and "attrs" in class_info and "core_name" in class_info["attrs"]:
core_name = class_info["attrs"]["core_name"]
if submodule_info and "attrs" in submodule_info and "core_name" in submodule_info["attrs"]:
core_name = submodule_info["attrs"]["core_name"]
has_map = class_name in ["VarHolder", "NanoVector"]
has_seq = class_name == "NanoVector"
code = f"""
#include "pyjt/py_converter.h"
#include "common.h"
#include "{include_name}"
namespace jittor {{
{
"" if class_name is None else
f"PyHeapTypeObject Pyjt{class_name};" if "heaptype" in class_info["attrs"] else
f"PyTypeObject Pyjt{class_name};"
}
void pyjt_def_{basename}(PyObject* m) {{
static PyMethodDef defs[] = {{
{",".join(code)}
}};
ASSERT(PyModule_AddFunctions(m, defs)==0);
{
f'''
static PyMethodDef class_defs[] = {{
{",".join(class_defs_code)}
}};
static PyGetSetDef class_getsets[] = {{
{",".join(class_getsets_code)}
}};
static PyNumberMethods number_methods = {{0}};
{f"auto& htp =Pyjt{class_name}; auto& tp = htp.ht_type;"
if "heaptype" in class_info["attrs"] else
f"auto& tp = Pyjt{class_name};"}
tp.tp_as_number = &number_methods;
{f"static PyMappingMethods class_map_defs = {{0}};" if has_map else ""}
{f"tp.tp_as_mapping = &class_map_defs;" if has_map else ""}
{f"static PySequenceMethods class_seq_defs = {{0}};" if has_seq else ""}
{f"tp.tp_as_sequence = &class_seq_defs;" if has_seq else ""}
tp.tp_name = "{core_name}.{class_info["pynames"][0]}";
tp.tp_basicsize = GET_OBJ_SIZE({class_name});
tp.tp_new = PyType_GenericNew;
tp.tp_flags = Py_TPFLAGS_DEFAULT;
{"tp.tp_flags |= Py_TPFLAGS_HEAPTYPE; htp.ht_name = htp.ht_qualname = to_py_object<string>(tp.tp_name);"
if "heaptype" in class_info["attrs"] else ""}
tp.tp_methods = &class_defs[0];
tp.tp_getset = &class_getsets[0];
{"".join(class_slots_code)};
ASSERT(0==PyType_Ready(&tp)) << (PyErr_Print(), 0);
Py_INCREF(&tp);
ASSERT(0==PyModule_AddObject(m, "{class_info["pynames"][0]}", (PyObject*)&tp));
''' if class_name is not None else ""
}
{f'''
// sub module def
static PyMethodDef submodule_defs[] = {{
{",".join(submodule_defs_code)}
}};
auto sub = PyImport_AddModule("{core_name}.{submodule_info["pynames"][0]}");
ASSERT(PyModule_AddFunctions(sub, submodule_defs)==0);
ASSERT(sub);
ASSERT(0==PyModule_AddObject(m, "{submodule_info["pynames"][0]}", sub));
''' if submodule_name is not None else ""
}
}}
}}
"""
return code
def compile_single(head_file_name, src_file_name, src=None):
basename = head_file_name.split("/")[-1].split(".")[0]
if src==None:
with open(head_file_name, 'r') as f:
src = f.read()
code = compile_src(src, head_file_name, basename)
if not code: return False
LOG.vvv("write to", src_file_name)
LOG.vvvv(code)
with open(src_file_name, 'w') as f:
f.write(code)
return True
def compile(cache_path, jittor_path):
headers1 = run_cmd('find -L src/ | grep ".h$"', jittor_path).splitlines()
headers2 = run_cmd('find gen/ | grep ".h$"', cache_path).splitlines()
headers = [ os.path.join(jittor_path, h) for h in headers1 ] + \
[ os.path.join(cache_path, h) for h in headers2 ]
basenames = []
for h in headers:
with open(h, 'r') as f:
src = f.read()
# jit_op_maker.h merge compile with var_holder.h
if h.endswith("src/var_holder.h"): continue
if h.endswith("jit_op_maker.h"):
with open(os.path.join(jittor_path, "src", "var_holder.h"), "r") as f:
src = f.read() + src
basename = h.split("/")[-1].split(".")[0]
fname = "pyjt_"+basename+".cc"
fname = os.path.join(cache_path, "gen", fname)
check = compile_single(h, fname, src)
if not check: continue
basenames.append(basename)
code = f"""
#include "pyjt/numpy.h"
#include "pyjt/py_converter.h"
#include "common.h"
namespace jittor {{
{ " ".join([f"extern void pyjt_def_{n}(PyObject* m);" for n in basenames])}
void pyjt_def_all(PyObject* m) {{
numpy_init();
{ " ".join([f"pyjt_def_{n}(m);" for n in basenames])}
}}
}}
"""
fname = os.path.join(cache_path, "gen", "pyjt_all.cc")
LOG.vvv(("write to", fname))
LOG.vvvv(code)
with open(fname, "w") as f:
f.write(code)
| # ***************************************************************
# Copyright (c) 2020 Jittor. Authors: Dun Liang <randonlang@gmail.com>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import re
import os
from jittor_utils import LOG, run_cmd, simple_timer
import json
from collections import OrderedDict
def parse_attrs(s):
'''parse @attrs(..., x=y) syntax'''
attrs = {}
if s is None: return attrs
for a in s.split(','):
a = a.strip()
if len(a)==0: continue
if '=' in a:
k, v = a.split('=')
attrs[k] = v
else:
attrs[a] = 1
return attrs
pytype_map = {
"const char*": ["PyUnicode_AsUTF8", "PyUnicode_FromString", "PyUnicode_CheckExact"],
"int": ["PyLong_AsLong", "PyLong_FromLong", "PyLong_CheckExact"],
"int64": ["PyLong_AsLongLong", "PyLong_FromLongLong", "PyLong_CheckExact"],
"uint": ["PyLong_AsUnsignedLong", "PyLong_FromUnsignedLong", "PyLong_CheckExact"],
"uint64": ["PyLong_AsUnsignedLongLong", "PyLong_FromUnsignedLongLong", "PyLong_CheckExact"],
"void": ["...", "GET_PY_NONE", "..."],
}
def get_pytype_map(T, i):
if T in pytype_map:
return pytype_map[T][i]
return ["from_py_object", "to_py_object", "is_type"][i]+"<"+T+">"
binary_number_slots = {
"__add__": "nb_add",
"__sub__": "nb_subtract",
"__mul__": "nb_multiply",
"__mod__": "nb_remainder",
"__divmod__": "nb_divmod",
"__pow__": "nb_power",
"__lshift__": "nb_lshift",
"__rshift__": "nb_rshift",
"__and__": "nb_and",
"__xor__": "nb_xor",
"__or__": "nb_or",
"__floordiv__": "nb_floor_divide",
"__truediv__": "nb_true_divide",
"__matmul__": "nb_matrix_multiply",
}
for k,v in list(binary_number_slots.items()):
# __add__: nb_add ----> __iadd: nb_inplace_add
binary_number_slots["__i"+k[2:]] = "nb_inplace"+v[2:]
unary_number_slots = {
"__neg__": "nb_negative",
"__abs__": "nb_absolute",
}
def split_args(s):
# split args xxx,xxx, xx<xx,xx>, xx
s = s.strip()
if s=="": return []
prev = -1
presum = 0
args = []
for i in range(len(s)):
if s[i]=='<':
presum += 1
elif s[i]=='>':
presum -= 1
if presum==0 and s[i]==',':
args.append(s[prev+1:i])
prev = i
args.append(s[prev+1:])
return args
def get_def_code(df, scope_name, pyname, self_as_arg0=False):
is_fast_call = not pyname.startswith("__")
no_need_convert = pyname == "__getitem__"
args = df["args"]
# n==1 && PyXXX__CheckExact(args[0]) && ...
max_args = len(args)
min_args = max_args
for tid, a in enumerate(args):
if a[2] != "":
min_args = tid
break
arg_names = [ f"args[{i}]" for i in range(len(args))]
if self_as_arg0:
max_args -= 1
min_args -= 1
arg_names = ["self"] + arg_names[:-1]
kw_args_id = []
for aid, arg in enumerate(args):
if "VarHolder*" != arg[0] and is_fast_call:
kw_args_id.append(aid)
func_quick_check_runable = ""
func_quick_check_size = f"n<={max_args} && n>={min_args}"
if len(kw_args_id):
func_quick_check_size = f"n+(kw?Py_SIZE(kw):0)<={max_args} && n+(kw?Py_SIZE(kw):0)>={min_args}"
fill_with_default = ""
func_args_convert = ""
func_call = df["func_name"]+"("
pytypes = [ get_pytype_map(a[0],0) for a in args ]
for tid, tpc in enumerate(pytypes):
check = get_pytype_map(args[tid][0],2)
default_arg = args[tid][2]
jtp = args[tid][0]
holder_dec = ""
holder_set = ""
if jtp == "VarHolder*":
holder_dec = f"unique_ptr<VarHolder> arg{tid}_holder"
holder_set = f", arg{tid}_holder"
if len(default_arg):
func_args_convert += f"""
{holder_dec};
{jtp} arg{tid};
if (n>{tid-self_as_arg0}) {{
CHECK(({check}({arg_names[tid]})));
arg{tid} = {tpc}({arg_names[tid]}{holder_set});
arg_filled |= 1ull << {tid};
}}
"""
fill_with_default += f"""
if (!(arg_filled & (1ull<<{tid}))) {{
arg{tid} = {default_arg};
}}
"""
else:
func_quick_check_runable += f" && {check}({arg_names[tid]})"
func_args_convert += f"""
{holder_dec};
{jtp} arg{tid} = {tpc}({arg_names[tid]}{holder_set});
"""
if tid: func_call += ","
if args[tid][3].endswith("&&"):
func_call += f"move(arg{tid})"
else:
func_call += f"arg{tid}"
if pyname == "__richcmp__":
for rname in [ "__lt__", "__le__", "__gt__",
"__ge__", "__eq__", "__ne__"]:
if rname in df["attrs"]:
func_quick_check_runable += " && op==Py_"+rname[2:-2].upper()
# fill args with keyword arguments
fill_with_kw = ""
if is_fast_call and len(kw_args_id):
fill_with_kw = f"""
if (kw) {{
auto kw_n = Py_SIZE(kw);
for (int i=0; i<kw_n; i++) {{
auto ko = PyTuple_GET_ITEM(kw, i);
auto vo = args[i+n];
auto ks = PyUnicode_AsUTF8(ko);
uint khash = hash(ks);
{"".join([
f'''
if (khash == {get_hash(args[aid][1])}u) {{
// hash match {args[aid][1]}
CHECK(({get_pytype_map(args[aid][0],2)}(vo)));
arg{aid} = {pytypes[aid]}(vo);
arg_filled |= 1ull << {aid};
continue;
}}
'''
for aid in kw_args_id
])}
LOGf << "Not a valid keyword:" << ks;
}}
}}
"""
if len(args):
func_args_convert += """
CHECK(!PyErr_Occurred());
"""
func_call += ")"
if df["is_property"]:
if pyname.startswith("__get__"):
func_call = df["func_name"]
else:
assert pyname.startswith("__set__"), pyname
func_call = df["func_name"] + "= arg0"
has_return = df["return_t"]!="void" and df["return_t"]!=""
# add XXX::xxx or XXX->xxx if is class def
if df["is_scope_def"]:
if df["is_static"]:
func_call = f"{scope_name}::" + func_call
else:
func_call = f"(GET_RAW_PTR({scope_name},self))->" + func_call
if pyname == "__init__":
# XXX->xxx(...) ---> new XXX xxx(...)
assert "->" in func_call
func_call = "new " + func_call.replace("->", " ")
if no_need_convert:
func_quick_check_runable = ""
func_args_convert = ""
fill_with_kw = fill_with_default = ""
return (
func_quick_check_size + func_quick_check_runable,
func_args_convert,
fill_with_kw+fill_with_default,
func_call,
has_return
)
hash_to_key_map = {}
def get_hash(s):
mask = (1<<32)-1
v=0
mul = 1
for c in s:
v += mul * ord(c)
mul *= 55
v &= mask
mul &= mask
if v in hash_to_key_map:
assert hash_to_key_map[v] == s, \
f"hash conflict {hash_to_key_map[v]} {s} {hash_to_key_map}"
hash_to_key_map[v] = s
return v
reg = re.compile(
'(/\\*(.*?)\\*/\\s*)?(//\\s*@pyjt\\(([^\\n]*)\\)\\s*)'
# ^^^^^^^^^^^^^^^^^ ^^^^ ^^^^
# doc string $1 pyjt args $3
+
'(//\\s*@attrs\\(([^\\n]*)\\)\\s*)?'
# ^^^^^ ^^^^^^^
# attrs args $5
, re.DOTALL)
def compile_src(src, h, basename):
res = list(reg.finditer(src, re.S))
if len(res)==0: return
class_ranges = None
class_name = None
class_info = None
submodule_name = None
submodule_ranges = None
submodule_info = None
defs = []
LOG.vv(("find in", h))
for x in res:
LOG.vvv((x, x.groups()))
g = x.groups()
doc = g[1]
pyjt = g[3]
attrs = g[5]
esplit = lambda x: [] if x==None else \
[ a.strip() for a in x.split(",") if len(a.strip()) ]
attrs = parse_attrs(attrs)
pynames = esplit(pyjt)
end = x.end()
def find_bc(i):
while src[i] not in "({;":
i += 1
j = i+1
if src[i]==';':
return i, j
presum = 1
while True:
if src[j] in "({[":
presum += 1
elif src[j] in ")}]":
presum -= 1
if presum==0:
s = src[i]+src[j]
assert s in ("()","{}","()"), "braces not match "+s
return i, j
j += 1
# // @pyjt(DType)
# struct DType {
# ^ --> a
# .....
# } <--- b
# or
# // @pyjt(hash)
# inline uint hash(const char* input)
# ^ --> a ^ --> b
a, b = find_bc(end)
is_property = 0
if src[a] == ';':
# This case
# class XXX {
# // @pyjt(property)
# T property;
# }
is_property = 1
if src[a] == '{':
assert len(pynames)==1
if "submodule" in attrs:
assert submodule_ranges==None
submodule_ranges = (a, b)
submodule_name = src[end:a-1].strip().split()[-1]
submodule_info = {
"pynames": pynames,
"attrs": attrs
}
continue
assert class_ranges==None
class_ranges = (a, b)
class_name = src[end:a-1].strip().split()[-1]
class_info = {
"pynames": pynames,
"attrs": attrs
}
continue
is_scope_def = False
is_static = False
scope_name = ""
if class_ranges != None:
if class_ranges[0] < a and a < class_ranges[1]:
is_scope_def = True
scope_name = class_name
if submodule_ranges != None:
if submodule_ranges[0] < a and a < submodule_ranges[1]:
is_scope_def = True
scope_name = submodule_name
is_static = True
dec = src[end:b+1].strip()
arr = src[end:a].strip().split()
func_name = arr[-1]
is_constructor = False
if is_scope_def and func_name==class_name:
is_constructor = True
args = []
for arg in split_args(src[a+1:b]):
if arg=="": continue
default = ""
if "=" in arg:
arg, default = arg.split('=')
default = default
arg = arg.strip()
name = arg.split(' ')[-1]
tp = arg[:-len(name)]
tp = tp.strip()
prev_tp = tp
# const string& ----> string
if tp.startswith("const") and tp.endswith("&"):
tp = tp[5:-1].strip()
# T&& -> T
if tp.endswith("&&"):
tp = tp[:-2].strip()
# ArrayArgs& -> ArrayArgs
if tp.endswith("&"):
tp = tp[:-1].strip()
args.append((tp, name.strip(), default.strip(), prev_tp))
return_t = ""
for a in arr[:-1]:
if a in ["", "inline", "constexpr"]: continue
if a == "static":
is_static = True
continue
if return_t != "": return_t += " "
return_t += a
if is_scope_def and class_info and "submodule" in class_info["attrs"]:
is_static = True
for pid, pyname in enumerate(pynames):
for rname in [ "__lt__", "__le__", "__gt__",
"__ge__", "__eq__", "__ne__"]:
if pyname.endswith(rname):
attrs[rname] = 1
pynames[pid] = pyname.replace(rname, "__richcmp__")
def_info = {
"is_scope_def": is_scope_def,
"is_constructor": is_constructor,
"is_static": is_static,
"is_property": is_property,
"func_name": func_name,
"args": args, # [(type,name,defaut), ...]
"return_t": return_t, # return type
"dec": dec, # full string of xxx(A a, B b)
"pynames": pynames, # names in @pyjt(...)
"attrs": attrs, # attrs in @attrs(...)
"doc": doc,
"scope_name": scope_name,
}
if is_property:
# This case
# class XXX {
# // @pyjt(property)
# T property;
# }
assert is_scope_def and not is_static
def_info["is_property"] = 1
def_info["pynames"] = ["__get__"+n for n in pynames]
assert return_t != "void"
defs.append(dict(def_info))
def_info["pynames"] = ["__set__"+n for n in pynames]
assert len(args) == 0
def_info["args"] = [(def_info["return_t"], func_name, "", "")]
def_info["return_t"] = "void"
defs.append(dict(def_info))
continue
else:
defs.append(def_info)
LOG.vvv(json.dumps(def_info, indent=4))
# deal with defs
if len(defs) == 0: return
# include_name = h[4:] # remove "src/" prefix
include_name = h
code = []
class_defs_code = []
class_getsets_code = []
class_gets = OrderedDict()
class_sets = OrderedDict()
class_slots_code = []
submodule_defs_code = []
def_targets = OrderedDict()
for df in defs:
for name in df["pynames"]:
if df["is_scope_def"] and '.' not in name:
if df["scope_name"] == class_name:
name = class_info["pynames"][0] + '.' + name
else:
name = submodule_info["pynames"][0] + '.' + name
if name not in def_targets:
def_targets[name] = []
def_targets[name].append(df)
for name in def_targets:
dfs = def_targets[name]
target_scope_name = None
LOG.vv(name)
if "." in name:
target_scope_name, name = name.split(".")
# array for each df:
arr_func_quick_check_runable = []
arr_func_args_convert = []
arr_fill_with_default = []
arr_func_call = []
arr_has_return = []
self_as_arg0 = False
for df in dfs:
self_as_arg0 = class_info and \
target_scope_name == class_info["pynames"][0] and \
df["scope_name"] == submodule_name \
and not name.startswith("__")
res = get_def_code(df, df["scope_name"], name, bool(self_as_arg0))
arr_func_quick_check_runable.append(res[0])
arr_func_args_convert.append(res[1])
arr_fill_with_default.append(res[2])
arr_func_call.append(res[3])
arr_has_return.append(res[4])
slot_name = None
func_cast = ""
func_fill = ""
if name == "__init__":
slot_name = "tp_init"
func_head = "(PyObject* self, PyObject* _args, PyObject* kw) -> int"
func_fill = """
int64 n = Py_SIZE(_args);
auto args = (PyObject**)&PyTuple_GET_ITEM(_args, 0);
(void)n, (void)args;
// TODO: support kw
CHECK(kw==0);
"""
elif name == "__repr__":
slot_name = "tp_repr"
func_head = "(PyObject* self) -> PyObject*"
func_fill = "int64 n = 0; (void)n;"
elif name.startswith("__get__"):
slot_name = "tp_gets"
name = name[len("__get__"):]
func_head = "(PyObject* self, void*) -> PyObject*"
func_fill = "int64 n = 0; (void)n;"
elif name.startswith("__set__"):
slot_name = "tp_sets"
name = name[len("__set__"):]
func_head = "(PyObject* self, PyObject* arg, void*) -> int"
func_fill = """
int64 n=1;
PyObject** args = &arg;
(void)n, (void)args;
"""
elif name == "__call__":
slot_name = "tp_call"
func_head = "(PyObject* self, PyObject* _args, PyObject* kw) -> PyObject*"
func_fill = """
int64 n = Py_SIZE(_args);
auto args = (PyObject**)&PyTuple_GET_ITEM(_args, 0);
(void)n, (void)args;
// TODO: support kw
CHECK(kw==0);
"""
elif name == "__dealloc__":
slot_name = "tp_dealloc"
func_head = "(PyObject* self) -> void"
func_fill = "int64 n = 0"
elif name in binary_number_slots:
slot_name = "tp_as_number->"+binary_number_slots[name]
func_head = "(PyObject* self, PyObject* b) -> PyObject*"
if name.endswith("pow__"):
func_head = "(PyObject* self, PyObject* b, PyObject*) -> PyObject*"
func_fill = """
int64 n = 2;
PyObject* args[] = {self, b};
(void)n, (void)args;
"""
elif name in unary_number_slots:
slot_name = "tp_as_number->"+unary_number_slots[name]
func_head = "(PyObject* self) -> PyObject*"
func_fill = """
int64 n = 1;
PyObject* args[] = {self};
(void)n, (void)args;
"""
elif name == "__richcmp__":
slot_name = "tp_richcompare"
func_head = "(PyObject* self, PyObject* b, int op) -> PyObject*"
func_fill = """
int64 n = 2;
PyObject* args[] = {self, b};
(void)n, (void)args;
"""
elif name == "__len__":
slot_name = "tp_as_sequence->sq_length"
func_head = "(PyObject* self) -> Py_ssize_t"
func_fill = """
int64 n = 0;
(void)n;
"""
elif name == "__map_len__":
slot_name = "tp_as_mapping->mp_length"
func_head = "(PyObject* self) -> Py_ssize_t"
func_fill = """
int64 n = 0;
(void)n;
"""
elif name == "__getitem__":
slot_name = "tp_as_sequence->sq_item"
func_head = "(PyObject* self, Py_ssize_t arg0) -> PyObject*"
func_fill = f"""
int64 n = 1;
(void)n;
if (arg0 >= GET_RAW_PTR({dfs[0]["scope_name"]},self)->size()) {{
PyErr_SetString(PyExc_IndexError, "");
return 0;
}}
"""
elif name == "__map_getitem__":
slot_name = "tp_as_mapping->mp_subscript"
func_head = "(PyObject* self, PyObject* arg0) -> PyObject*"
func_fill = f"""
int64 n = 1;
PyObject* args[] = {{arg0}};
(void)n;
"""
elif name.startswith("__"):
LOG.f(f"Not support slot {name}")
continue
else:
func_head = "(PyObject* self, PyObject** args, int64 n, PyObject* kw) -> PyObject*"
func_cast = f"(PyCFunction)(PyObject* (*)(PyObject*,PyObject**,int64,PyObject*))"
# if not return, return py_none
arr_has_return = [ True for _ in arr_has_return ]
arr_func_return = []
doc_all = ""
decs = "Declarations:\n"
for did, has_return in enumerate(arr_has_return):
df = dfs[did]
func_call = arr_func_call[did]
if df["doc"]:
doc_all += "Document:\n"
doc_all += df["doc"]
doc_all += "\nDeclaration:\n"
doc_all += df["dec"]
decs += df["dec"]+'\n'
if has_return:
assert "-> int" not in func_head
if "-> PyObject*" in func_head:
if "return_self" in df["attrs"]:
arr_func_return.append(
f"return (({func_call}), Py_INCREF(self), self)")
else:
arr_func_return.append(
f"return {get_pytype_map(df['return_t'],1)}(({func_call}))")
func_return_failed = "return nullptr"
else:
arr_func_return.append(
f"return ({func_call});")
func_return_failed = "return -1"
else:
if "-> int" in func_head:
arr_func_return.append(f"return ({func_call},0)")
func_return_failed = "return -1"
else:
assert "-> void" in func_head
arr_func_return.append(f"{func_call};return")
func_return_failed = "return"
func = f"""
{func_cast}[]{func_head} {{
try {{
{func_fill};
uint64 arg_filled=0;
(void)arg_filled;
{"".join([f'''
if ({arr_func_quick_check_runable[did]}) {{
{arr_func_args_convert[did]};
{arr_fill_with_default[did]};
{arr_func_return[did]};
}}
'''
for did in range(len(arr_func_return))
])}
LOGf << "Not a valid call";
}} catch (const std::exception& e) {{
PyErr_Format(PyExc_RuntimeError, "%s\\n%s",
e.what(),
R""({decs})""
);
}}
{func_return_failed};
}}
"""
if slot_name:
if slot_name=="tp_gets":
class_gets[name] = {
"func": func,
"doc": doc_all
}
continue
if slot_name=="tp_sets":
class_sets[name] = {
"func": func,
"doc": ""
}
continue
class_slots_code.append(f"""
tp.{slot_name} = {func};
""")
continue
need_static = ""
if df["is_scope_def"] and df["is_static"] and \
df["scope_name"] == class_name and \
"submodule" not in class_info["attrs"]:
need_static = " | METH_STATIC"
func = (f"""
{{ R""({name})"",
{func},
METH_FASTCALL | METH_KEYWORDS{need_static},
R""({doc_all})""
}}""")
if df["is_scope_def"]:
if df["scope_name"] == class_name or \
(class_info and \
target_scope_name == class_info["pynames"][0]):
class_defs_code.append(func)
else:
submodule_defs_code.append(func)
else:
code.append(func)
prop_names = list(set(class_gets.keys()).union(class_sets.keys()))
prop_names = sorted(prop_names)
for prop_name in prop_names:
get_func = "NULL"
set_func = "NULL"
doc = ""
if prop_name in class_gets:
get_func = class_gets[prop_name]["func"]
if class_gets[prop_name]["doc"]:
doc += class_gets[prop_name]["doc"]
if prop_name in class_sets:
set_func = class_sets[prop_name]["func"]
if class_sets[prop_name]["doc"]:
doc += class_sets[prop_name]["doc"]
class_getsets_code.append(f"""
{{"{prop_name}", {get_func}, {set_func}, R""({doc})""}}
""")
code.append("{0,0,0,0}")
class_defs_code.append("{0,0,0,0}")
class_getsets_code.append("{0,0,0,0}")
submodule_defs_code.append("{0,0,0,0}")
core_name = "jittor_core"
if class_info and "attrs" in class_info and "core_name" in class_info["attrs"]:
core_name = class_info["attrs"]["core_name"]
if submodule_info and "attrs" in submodule_info and "core_name" in submodule_info["attrs"]:
core_name = submodule_info["attrs"]["core_name"]
has_map = class_name in ["VarHolder", "NanoVector"]
has_seq = class_name == "NanoVector"
code = f"""
#include "pyjt/py_converter.h"
#include "common.h"
#include "{include_name}"
namespace jittor {{
{
"" if class_name is None else
f"PyHeapTypeObject Pyjt{class_name};" if "heaptype" in class_info["attrs"] else
f"PyTypeObject Pyjt{class_name};"
}
void pyjt_def_{basename}(PyObject* m) {{
static PyMethodDef defs[] = {{
{",".join(code)}
}};
ASSERT(PyModule_AddFunctions(m, defs)==0);
{
f'''
static PyMethodDef class_defs[] = {{
{",".join(class_defs_code)}
}};
static PyGetSetDef class_getsets[] = {{
{",".join(class_getsets_code)}
}};
static PyNumberMethods number_methods = {{0}};
{f"auto& htp =Pyjt{class_name}; auto& tp = htp.ht_type;"
if "heaptype" in class_info["attrs"] else
f"auto& tp = Pyjt{class_name};"}
tp.tp_as_number = &number_methods;
{f"static PyMappingMethods class_map_defs = {{0}};" if has_map else ""}
{f"tp.tp_as_mapping = &class_map_defs;" if has_map else ""}
{f"static PySequenceMethods class_seq_defs = {{0}};" if has_seq else ""}
{f"tp.tp_as_sequence = &class_seq_defs;" if has_seq else ""}
tp.tp_name = "{core_name}.{class_info["pynames"][0]}";
tp.tp_basicsize = GET_OBJ_SIZE({class_name});
tp.tp_new = PyType_GenericNew;
tp.tp_flags = Py_TPFLAGS_DEFAULT;
{"tp.tp_flags |= Py_TPFLAGS_HEAPTYPE; htp.ht_name = htp.ht_qualname = to_py_object<string>(tp.tp_name);"
if "heaptype" in class_info["attrs"] else ""}
tp.tp_methods = &class_defs[0];
tp.tp_getset = &class_getsets[0];
{"".join(class_slots_code)};
ASSERT(0==PyType_Ready(&tp)) << (PyErr_Print(), 0);
Py_INCREF(&tp);
ASSERT(0==PyModule_AddObject(m, "{class_info["pynames"][0]}", (PyObject*)&tp));
''' if class_name is not None else ""
}
{f'''
// sub module def
static PyMethodDef submodule_defs[] = {{
{",".join(submodule_defs_code)}
}};
auto sub = PyImport_AddModule("{core_name}.{submodule_info["pynames"][0]}");
ASSERT(PyModule_AddFunctions(sub, submodule_defs)==0);
ASSERT(sub);
ASSERT(0==PyModule_AddObject(m, "{submodule_info["pynames"][0]}", sub));
''' if submodule_name is not None else ""
}
}}
}}
"""
return code
def compile_single(head_file_name, src_file_name, src=None):
basename = head_file_name.split("/")[-1].split(".")[0]
if src==None:
with open(head_file_name, 'r') as f:
src = f.read()
code = compile_src(src, head_file_name, basename)
if not code: return False
LOG.vvv("write to", src_file_name)
LOG.vvvv(code)
with open(src_file_name, 'w') as f:
f.write(code)
return True
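# Illustrative sketch (not part of the original module): compile_single reads one
# header, scans it for @pyjt annotations via compile_src, and writes the generated
# binding source to the given path. The paths below are hypothetical placeholders.
def _example_compile_single():
    return compile_single("src/var_holder.h", "gen/pyjt_var_holder.cc")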
def compile(cache_path, jittor_path):
headers1 = run_cmd('find -L src/ | grep ".h$"', jittor_path).splitlines()
headers2 = run_cmd('find gen/ | grep ".h$"', cache_path).splitlines()
headers = [ os.path.join(jittor_path, h) for h in headers1 ] + \
[ os.path.join(cache_path, h) for h in headers2 ]
basenames = []
for h in headers:
with open(h, 'r') as f:
src = f.read()
# jit_op_maker.h merge compile with var_holder.h
if h.endswith("src/var_holder.h"): continue
if h.endswith("jit_op_maker.h"):
with open(os.path.join(jittor_path, "src", "var_holder.h"), "r") as f:
src = f.read() + src
basename = h.split("/")[-1].split(".")[0]
fname = "pyjt_"+basename+".cc"
fname = os.path.join(cache_path, "gen", fname)
check = compile_single(h, fname, src)
if not check: continue
basenames.append(basename)
code = f"""
#include "pyjt/numpy.h"
#include "pyjt/py_converter.h"
#include "common.h"
namespace jittor {{
{ " ".join([f"extern void pyjt_def_{n}(PyObject* m);" for n in basenames])}
void pyjt_def_all(PyObject* m) {{
numpy_init();
{ " ".join([f"pyjt_def_{n}(m);" for n in basenames])}
}}
}}
"""
fname = os.path.join(cache_path, "gen", "pyjt_all.cc")
LOG.vvv(("write to", fname))
LOG.vvvv(code)
with open(fname, "w") as f:
f.write(code)
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_showdoc.ipynb (unless otherwise specified).
__all__ = ['is_enum', 'is_lib_module', 're_digits_first', 'try_external_doc_link', 'is_doc_name', 'doc_link',
'add_doc_links', 'get_source_link', 'colab_link', 'get_nb_source_link', 'nb_source_link', 'type_repr',
'format_param', 'show_doc', 'md2html', 'get_doc_link', 'doc']
# Cell
from .imports import *
from .export import *
from .sync import *
from nbconvert import HTMLExporter
from fastcore.utils import IN_NOTEBOOK
if IN_NOTEBOOK:
from IPython.display import Markdown,display
from IPython.core import page
# Cell
def is_enum(cls):
"Check if `cls` is an enum or another type of class"
return type(cls) in (enum.Enum, enum.EnumMeta)
# Cell
def is_lib_module(name):
"Test if `name` is a library module."
if name.startswith('_'): return False
try:
_ = importlib.import_module(f'{Config().lib_name}.{name}')
return True
except: return False
# Cell
re_digits_first = re.compile('^[0-9]+[a-z]*_')
# Cell
def try_external_doc_link(name, packages):
"Try to find a doc link for `name` in `packages`"
for p in packages:
try:
mod = importlib.import_module(f"{p}._nbdev")
try_pack = source_nb(name, is_name=True, mod=mod)
if try_pack:
page = re_digits_first.sub('', try_pack).replace('.ipynb', '')
return f'{mod.doc_url}{page}#{name}'
except ModuleNotFoundError: return None
# Cell
def is_doc_name(name):
"Test if `name` corresponds to a notebook that could be converted to a doc page"
for f in Config().path("nbs_path").glob(f'*{name}.ipynb'):
if re_digits_first.sub('', f.name) == f'{name}.ipynb': return True
return False
# Cell
def doc_link(name, include_bt=True):
"Create link to documentation for `name`."
cname = f'`{name}`' if include_bt else name
try:
#Link to modules
if is_lib_module(name) and is_doc_name(name): return f"[{cname}]({Config().doc_baseurl}{name}.html)"
#Link to local functions
try_local = source_nb(name, is_name=True)
if try_local:
page = re_digits_first.sub('', try_local).replace('.ipynb', '')
return f'[{cname}]({Config().doc_baseurl}{page}.html#{name})'
##Custom links
mod = get_nbdev_module()
link = mod.custom_doc_links(name)
return f'[{cname}]({link})' if link is not None else cname
except: return cname
# Cell
_re_backticks = re.compile(r"""
# Catches any link of the form \[`obj`\](old_link) or just `obj`,
# to either update old links or add the link to the docs of obj
\[` # Opening [ and `
([^`]*) # Catching group with anything but a `
`\] # ` then closing ]
(?: # Beginning of non-catching group
\( # Opening (
[^)]* # Anything but a closing )
\) # Closing )
) # End of non-catching group
| # OR
` # Opening `
([^`]*) # Anything but a `
` # Closing `
""", re.VERBOSE)
# Cell
def add_doc_links(text, elt=None):
"Search for doc links for any item between backticks in `text` and insert them"
def _replace_link(m):
try:
if m.group(2) in inspect.signature(elt).parameters: return f'`{m.group(2)}`'
except: pass
return doc_link(m.group(1) or m.group(2))
return _re_backticks.sub(_replace_link, text)
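# Illustrative sketch (not part of the exported module): add_doc_links rewrites
# backticked names in `text` into markdown doc links via doc_link, skipping names
# that are parameters of `elt`. Hypothetical usage:
def _example_add_doc_links():
    return add_doc_links("See `show_doc` and `doc_link` for details.")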
# Cell
def _is_type_dispatch(x): return type(x).__name__ == "TypeDispatch"
def _unwrapped_type_dispatch_func(x): return x.first() if _is_type_dispatch(x) else x
def _is_property(x): return type(x)==property
def _has_property_getter(x): return _is_property(x) and hasattr(x, 'fget') and hasattr(x.fget, 'func')
def _property_getter(x): return x.fget.func if _has_property_getter(x) else x
def _unwrapped_func(x):
x = _unwrapped_type_dispatch_func(x)
x = _property_getter(x)
return x
# Cell
def get_source_link(func):
"Return link to `func` in source code"
func = _unwrapped_func(func)
try: line = inspect.getsourcelines(func)[1]
except Exception: return ''
mod = inspect.getmodule(func)
module = mod.__name__.replace('.', '/') + '.py'
try:
nbdev_mod = importlib.import_module(mod.__package__.split('.')[0] + '._nbdev')
return f"{nbdev_mod.git_url}{module}#L{line}"
except: return f"{module}#L{line}"
# Cell
_re_header = re.compile(r"""
# Catches any header in markdown with the title in group 1
^\s* # Beginning of text followed by any number of whitespace
\#+ # One # or more
\s* # Any number of whitespace
(.*) # Catching group with anything
$ # End of text
""", re.VERBOSE)
# Cell
def colab_link(path):
"Get a link to the notebook at `path` on Colab"
cfg = Config()
res = f'https://colab.research.google.com/github/{cfg.user}/{cfg.lib_name}/blob/{cfg.branch}/{cfg.path('nbs_path').name}/{path}.ipynb'
display(Markdown(f'[Open `{path}` in Colab]({res})'))
# Cell
def get_nb_source_link(func, local=False, is_name=None):
"Return a link to the notebook where `func` is defined."
func = _unwrapped_type_dispatch_func(func)
pref = '' if local else Config().git_url.replace('github.com', 'nbviewer.jupyter.org/github')+ Config().path("nbs_path").name+'/'
is_name = is_name or isinstance(func, str)
src = source_nb(func, is_name=is_name, return_all=True)
if src is None: return '' if is_name else get_source_link(func)
find_name,nb_name = src
nb = read_nb(nb_name)
pat = re.compile(f'^{find_name}\s+=|^(def|class)\s+{find_name}\s*\(', re.MULTILINE)
if len(find_name.split('.')) == 2:
clas,func = find_name.split('.')
pat2 = re.compile(f'@patch\s*\ndef\s+{func}\s*\([^:]*:\s*{clas}\s*(?:,|\))')
else: pat2 = None
for i,cell in enumerate(nb['cells']):
if cell['cell_type'] == 'code':
if re.search(pat, cell['source']): break
if pat2 is not None and re.search(pat2, cell['source']): break
if re.search(pat, cell['source']) is None and (pat2 is not None and re.search(pat2, cell['source']) is None):
return '' if is_name else get_function_source(func)
header_pat = re.compile(r'^\s*#+\s*(.*)$')
while i >= 0:
cell = nb['cells'][i]
if cell['cell_type'] == 'markdown' and _re_header.search(cell['source']):
title = _re_header.search(cell['source']).groups()[0]
anchor = '-'.join([s for s in title.split(' ') if len(s) > 0])
return f'{pref}{nb_name}#{anchor}'
i-=1
return f'{pref}{nb_name}'
# Cell
def nb_source_link(func, is_name=None, disp=True, local=True):
"Show a relative link to the notebook where `func` is defined"
is_name = is_name or isinstance(func, str)
func_name = func if is_name else qual_name(func)
link = get_nb_source_link(func, local=local, is_name=is_name)
text = func_name if local else f'{func_name} (GitHub)'
if disp: display(Markdown(f'[{text}]({link})'))
else: return link
# Cell
from fastcore.script import Param
# Cell
def type_repr(t):
"Representation of type `t` (in a type annotation)"
if (isinstance(t, Param)): return f'"{t.help}"'
if getattr(t, '__args__', None):
args = t.__args__
if len(args)==2 and args[1] == type(None):
return f'`Optional`\[{type_repr(args[0])}\]'
reprs = ', '.join([type_repr(o) for o in args])
return f'{doc_link(get_name(t))}\[{reprs}\]'
else: return doc_link(get_name(t))
# Cell
_arg_prefixes = {inspect._VAR_POSITIONAL: '\*', inspect._VAR_KEYWORD:'\*\*'}
def format_param(p):
"Formats function param to `param:Type=val` with font weights: param=bold, val=italic"
arg_prefix = _arg_prefixes.get(p.kind, '') # asterisk prefix for *args and **kwargs
res = f"**{arg_prefix}`{p.name}`**"
if hasattr(p, 'annotation') and p.annotation != p.empty: res += f':{type_repr(p.annotation)}'
if p.default != p.empty:
default = getattr(p.default, 'func', p.default) #For partials
if hasattr(default,'__name__'): default = getattr(default, '__name__')
else: default = repr(default)
if is_enum(default.__class__): #Enum have a crappy repr
res += f'=*`{default.__class__.__name__}.{default.name}`*'
else: res += f'=*`{default}`*'
return res
# Cell
def _format_enum_doc(enum, full_name):
"Formatted `enum` definition to show in documentation"
vals = ', '.join(enum.__members__.keys())
return f'<code>{full_name}</code>',f'<code>Enum</code> = [{vals}]'
# Cell
def _escape_chars(s):
return s.replace('_', '\_')
def _format_func_doc(func, full_name=None):
"Formatted `func` definition to show in documentation"
try:
sig = inspect.signature(func)
fmt_params = [format_param(param) for name,param
in sig.parameters.items() if name not in ('self','cls')]
except: fmt_params = []
name = f'<code>{full_name or func.__name__}</code>'
arg_str = f"({", ".join(fmt_params)})"
f_name = f"<code>class</code> {name}" if inspect.isclass(func) else name
return f'{f_name}',f'{name}{arg_str}'
# Cell
def _format_cls_doc(cls, full_name):
"Formatted `cls` definition to show in documentation"
parent_class = inspect.getclasstree([cls])[-1][0][1][0]
name,args = _format_func_doc(cls, full_name)
if parent_class != object: args += f' :: {doc_link(get_name(parent_class))}'
return name,args
# Cell
def show_doc(elt, doc_string=True, name=None, title_level=None, disp=True, default_cls_level=2):
"Show documentation for element `elt`. Supported types: class, function, and enum."
elt = getattr(elt, '__func__', elt)
qname = name or qual_name(elt)
if inspect.isclass(elt):
if is_enum(elt): name,args = _format_enum_doc(elt, qname)
else: name,args = _format_cls_doc (elt, qname)
elif callable(elt): name,args = _format_func_doc(elt, qname)
else: name,args = f"<code>{qname}</code>", ''
link = get_source_link(elt)
source_link = f'<a href="{link}" class="source_link" style="float:right">[source]</a>'
title_level = title_level or (default_cls_level if inspect.isclass(elt) else 4)
doc = f'<h{title_level} id="{qname}" class="doc_header">{name}{source_link}</h{title_level}>'
doc += f'\n\n> {args}\n\n' if len(args) > 0 else '\n\n'
if doc_string and inspect.getdoc(elt):
s = inspect.getdoc(elt)
# show_doc is used by doc so should not rely on Config
try: monospace = (Config().get('monospace_docstrings') == 'True')
except: monospace = False
# doc links don't work inside markdown pre/code blocks
s = f'```\n{s}\n```' if monospace else add_doc_links(s, elt)
doc += s
if disp: display(Markdown(doc))
else: return doc
# Cell
def md2html(md):
"Convert markdown `md` to HTML code"
import nbconvert
if nbconvert.__version__ < '5.5.0': return HTMLExporter().markdown2html(md)
else: return HTMLExporter().markdown2html(collections.defaultdict(lambda: collections.defaultdict(dict)), md)
# Cell
def get_doc_link(func):
mod = inspect.getmodule(func)
module = mod.__name__.replace('.', '/') + '.py'
try:
nbdev_mod = importlib.import_module(mod.__package__.split('.')[0] + '._nbdev')
try_pack = source_nb(func, mod=nbdev_mod)
if try_pack:
page = '.'.join(try_pack.partition('_')[-1:]).replace('.ipynb', '')
return f'{nbdev_mod.doc_url}{page}#{qual_name(func)}'
except: return None
# Cell
def doc(elt):
"Show `show_doc` info in preview window when used in a notebook"
md = show_doc(elt, disp=False)
doc_link = get_doc_link(elt)
if doc_link is not None:
md += f'\n\n<a href="{doc_link}" target="_blank" rel="noreferrer noopener">Show in docs</a>'
output = md2html(md)
if IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output)
else:
try: page.page({'text/html': output})
except: display(Markdown(md)) | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_showdoc.ipynb (unless otherwise specified).
__all__ = ['is_enum', 'is_lib_module', 're_digits_first', 'try_external_doc_link', 'is_doc_name', 'doc_link',
'add_doc_links', 'get_source_link', 'colab_link', 'get_nb_source_link', 'nb_source_link', 'type_repr',
'format_param', 'show_doc', 'md2html', 'get_doc_link', 'doc']
# Cell
from .imports import *
from .export import *
from .sync import *
from nbconvert import HTMLExporter
from fastcore.utils import IN_NOTEBOOK
if IN_NOTEBOOK:
from IPython.display import Markdown,display
from IPython.core import page
# Cell
def is_enum(cls):
"Check if `cls` is an enum or another type of class"
return type(cls) in (enum.Enum, enum.EnumMeta)
# Cell
def is_lib_module(name):
"Test if `name` is a library module."
if name.startswith('_'): return False
try:
_ = importlib.import_module(f'{Config().lib_name}.{name}')
return True
except: return False
# Cell
re_digits_first = re.compile('^[0-9]+[a-z]*_')
# Cell
def try_external_doc_link(name, packages):
"Try to find a doc link for `name` in `packages`"
for p in packages:
try:
mod = importlib.import_module(f"{p}._nbdev")
try_pack = source_nb(name, is_name=True, mod=mod)
if try_pack:
page = re_digits_first.sub('', try_pack).replace('.ipynb', '')
return f'{mod.doc_url}{page}#{name}'
except ModuleNotFoundError: return None
# Cell
def is_doc_name(name):
"Test if `name` corresponds to a notebook that could be converted to a doc page"
for f in Config().path("nbs_path").glob(f'*{name}.ipynb'):
if re_digits_first.sub('', f.name) == f'{name}.ipynb': return True
return False
# Cell
def doc_link(name, include_bt=True):
"Create link to documentation for `name`."
cname = f'`{name}`' if include_bt else name
try:
#Link to modules
if is_lib_module(name) and is_doc_name(name): return f"[{cname}]({Config().doc_baseurl}{name}.html)"
#Link to local functions
try_local = source_nb(name, is_name=True)
if try_local:
page = re_digits_first.sub('', try_local).replace('.ipynb', '')
return f'[{cname}]({Config().doc_baseurl}{page}.html#{name})'
##Custom links
mod = get_nbdev_module()
link = mod.custom_doc_links(name)
return f'[{cname}]({link})' if link is not None else cname
except: return cname
# Cell
_re_backticks = re.compile(r"""
# Catches any link of the form \[`obj`\](old_link) or just `obj`,
# to either update old links or add the link to the docs of obj
\[` # Opening [ and `
([^`]*) # Catching group with anything but a `
`\] # ` then closing ]
(?: # Beginning of non-catching group
\( # Opening (
[^)]* # Anything but a closing )
\) # Closing )
) # End of non-catching group
| # OR
` # Opening `
([^`]*) # Anything but a `
` # Closing `
""", re.VERBOSE)
# Cell
def add_doc_links(text, elt=None):
"Search for doc links for any item between backticks in `text` and insert them"
def _replace_link(m):
try:
if m.group(2) in inspect.signature(elt).parameters: return f'`{m.group(2)}`'
except: pass
return doc_link(m.group(1) or m.group(2))
return _re_backticks.sub(_replace_link, text)
# Cell
def _is_type_dispatch(x): return type(x).__name__ == "TypeDispatch"
def _unwrapped_type_dispatch_func(x): return x.first() if _is_type_dispatch(x) else x
def _is_property(x): return type(x)==property
def _has_property_getter(x): return _is_property(x) and hasattr(x, 'fget') and hasattr(x.fget, 'func')
def _property_getter(x): return x.fget.func if _has_property_getter(x) else x
def _unwrapped_func(x):
x = _unwrapped_type_dispatch_func(x)
x = _property_getter(x)
return x
# Cell
def get_source_link(func):
"Return link to `func` in source code"
func = _unwrapped_func(func)
try: line = inspect.getsourcelines(func)[1]
except Exception: return ''
mod = inspect.getmodule(func)
module = mod.__name__.replace('.', '/') + '.py'
try:
nbdev_mod = importlib.import_module(mod.__package__.split('.')[0] + '._nbdev')
return f"{nbdev_mod.git_url}{module}#L{line}"
except: return f"{module}#L{line}"
# Cell
_re_header = re.compile(r"""
# Catches any header in markdown with the title in group 1
^\s* # Beginning of text followed by any number of whitespace
\#+ # One # or more
\s* # Any number of whitespace
(.*) # Catching group with anything
$ # End of text
""", re.VERBOSE)
# Cell
def colab_link(path):
"Get a link to the notebook at `path` on Colab"
cfg = Config()
res = f'https://colab.research.google.com/github/{cfg.user}/{cfg.lib_name}/blob/{cfg.branch}/{cfg.path("nbs_path").name}/{path}.ipynb'
display(Markdown(f'[Open `{path}` in Colab]({res})'))
# Cell
def get_nb_source_link(func, local=False, is_name=None):
"Return a link to the notebook where `func` is defined."
func = _unwrapped_type_dispatch_func(func)
pref = '' if local else Config().git_url.replace('github.com', 'nbviewer.jupyter.org/github')+ Config().path("nbs_path").name+'/'
is_name = is_name or isinstance(func, str)
src = source_nb(func, is_name=is_name, return_all=True)
if src is None: return '' if is_name else get_source_link(func)
find_name,nb_name = src
nb = read_nb(nb_name)
pat = re.compile(f'^{find_name}\s+=|^(def|class)\s+{find_name}\s*\(', re.MULTILINE)
if len(find_name.split('.')) == 2:
clas,func = find_name.split('.')
pat2 = re.compile(f'@patch\s*\ndef\s+{func}\s*\([^:]*:\s*{clas}\s*(?:,|\))')
else: pat2 = None
for i,cell in enumerate(nb['cells']):
if cell['cell_type'] == 'code':
if re.search(pat, cell['source']): break
if pat2 is not None and re.search(pat2, cell['source']): break
if re.search(pat, cell['source']) is None and (pat2 is not None and re.search(pat2, cell['source']) is None):
return '' if is_name else get_function_source(func)
header_pat = re.compile(r'^\s*#+\s*(.*)$')
while i >= 0:
cell = nb['cells'][i]
if cell['cell_type'] == 'markdown' and _re_header.search(cell['source']):
title = _re_header.search(cell['source']).groups()[0]
anchor = '-'.join([s for s in title.split(' ') if len(s) > 0])
return f'{pref}{nb_name}#{anchor}'
i-=1
return f'{pref}{nb_name}'
# Cell
def nb_source_link(func, is_name=None, disp=True, local=True):
"Show a relative link to the notebook where `func` is defined"
is_name = is_name or isinstance(func, str)
func_name = func if is_name else qual_name(func)
link = get_nb_source_link(func, local=local, is_name=is_name)
text = func_name if local else f'{func_name} (GitHub)'
if disp: display(Markdown(f'[{text}]({link})'))
else: return link
# Cell
from fastcore.script import Param
# Cell
def type_repr(t):
"Representation of type `t` (in a type annotation)"
if (isinstance(t, Param)): return f'"{t.help}"'
if getattr(t, '__args__', None):
args = t.__args__
if len(args)==2 and args[1] == type(None):
return f'`Optional`\[{type_repr(args[0])}\]'
reprs = ', '.join([type_repr(o) for o in args])
return f'{doc_link(get_name(t))}\[{reprs}\]'
else: return doc_link(get_name(t))
# Cell
_arg_prefixes = {inspect._VAR_POSITIONAL: '\*', inspect._VAR_KEYWORD:'\*\*'}
def format_param(p):
"Formats function param to `param:Type=val` with font weights: param=bold, val=italic"
arg_prefix = _arg_prefixes.get(p.kind, '') # asterisk prefix for *args and **kwargs
res = f"**{arg_prefix}`{p.name}`**"
if hasattr(p, 'annotation') and p.annotation != p.empty: res += f':{type_repr(p.annotation)}'
if p.default != p.empty:
default = getattr(p.default, 'func', p.default) #For partials
if hasattr(default,'__name__'): default = getattr(default, '__name__')
else: default = repr(default)
if is_enum(default.__class__): #Enum have a crappy repr
res += f'=*`{default.__class__.__name__}.{default.name}`*'
else: res += f'=*`{default}`*'
return res
# Cell
def _format_enum_doc(enum, full_name):
"Formatted `enum` definition to show in documentation"
vals = ', '.join(enum.__members__.keys())
return f'<code>{full_name}</code>',f'<code>Enum</code> = [{vals}]'
# Cell
def _escape_chars(s):
return s.replace('_', '\_')
def _format_func_doc(func, full_name=None):
"Formatted `func` definition to show in documentation"
try:
sig = inspect.signature(func)
fmt_params = [format_param(param) for name,param
in sig.parameters.items() if name not in ('self','cls')]
except: fmt_params = []
name = f'<code>{full_name or func.__name__}</code>'
arg_str = f"({', '.join(fmt_params)})"
f_name = f"<code>class</code> {name}" if inspect.isclass(func) else name
return f'{f_name}',f'{name}{arg_str}'
# Cell
def _format_cls_doc(cls, full_name):
"Formatted `cls` definition to show in documentation"
parent_class = inspect.getclasstree([cls])[-1][0][1][0]
name,args = _format_func_doc(cls, full_name)
if parent_class != object: args += f' :: {doc_link(get_name(parent_class))}'
return name,args
# Cell
def show_doc(elt, doc_string=True, name=None, title_level=None, disp=True, default_cls_level=2):
"Show documentation for element `elt`. Supported types: class, function, and enum."
elt = getattr(elt, '__func__', elt)
qname = name or qual_name(elt)
if inspect.isclass(elt):
if is_enum(elt): name,args = _format_enum_doc(elt, qname)
else: name,args = _format_cls_doc (elt, qname)
elif callable(elt): name,args = _format_func_doc(elt, qname)
else: name,args = f"<code>{qname}</code>", ''
link = get_source_link(elt)
source_link = f'<a href="{link}" class="source_link" style="float:right">[source]</a>'
title_level = title_level or (default_cls_level if inspect.isclass(elt) else 4)
doc = f'<h{title_level} id="{qname}" class="doc_header">{name}{source_link}</h{title_level}>'
doc += f'\n\n> {args}\n\n' if len(args) > 0 else '\n\n'
if doc_string and inspect.getdoc(elt):
s = inspect.getdoc(elt)
# show_doc is used by doc so should not rely on Config
try: monospace = (Config().get('monospace_docstrings') == 'True')
except: monospace = False
# doc links don't work inside markdown pre/code blocks
s = f'```\n{s}\n```' if monospace else add_doc_links(s, elt)
doc += s
if disp: display(Markdown(doc))
else: return doc
# Cell
def md2html(md):
"Convert markdown `md` to HTML code"
import nbconvert
if nbconvert.__version__ < '5.5.0': return HTMLExporter().markdown2html(md)
else: return HTMLExporter().markdown2html(collections.defaultdict(lambda: collections.defaultdict(dict)), md)
# Cell
def get_doc_link(func):
mod = inspect.getmodule(func)
module = mod.__name__.replace('.', '/') + '.py'
try:
nbdev_mod = importlib.import_module(mod.__package__.split('.')[0] + '._nbdev')
try_pack = source_nb(func, mod=nbdev_mod)
if try_pack:
page = '.'.join(try_pack.partition('_')[-1:]).replace('.ipynb', '')
return f'{nbdev_mod.doc_url}{page}#{qual_name(func)}'
except: return None
# Cell
def doc(elt):
"Show `show_doc` info in preview window when used in a notebook"
md = show_doc(elt, disp=False)
doc_link = get_doc_link(elt)
if doc_link is not None:
md += f'\n\n<a href="{doc_link}" target="_blank" rel="noreferrer noopener">Show in docs</a>'
output = md2html(md)
if IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output)
else:
try: page.page({'text/html': output})
except: display(Markdown(md)) |
import numpy as np
import pandas as pd
import hail as hl
from hail.linalg import BlockMatrix
from hail.linalg.utils import _check_dims
from hail.table import Table
from hail.typecheck import typecheck_method, nullable, tupleof, oneof, numeric
from hail.utils.java import Env, info
from hail.utils.misc import plural
class LinearMixedModel(object):
r"""Class representing a linear mixed model.
.. include:: ../_templates/experimental.rst
:class:`LinearMixedModel` represents a linear model of the form
.. math::
y \sim \mathrm{N}(X \beta, \, \sigma^2 K + \tau^2 I)
where
- :math:`\mathrm{N}` is an :math:`n`-dimensional normal distribution.
- :math:`y` is a known vector of :math:`n` observations.
- :math:`X` is a known :math:`n \times p` design matrix for :math:`p` fixed effects.
- :math:`K` is a known :math:`n \times n` positive semi-definite kernel.
- :math:`I` is the :math:`n \times n` identity matrix.
- :math:`\beta` is a :math:`p`-parameter vector of fixed effects.
- :math:`\sigma^2` is the variance parameter on :math:`K`.
- :math:`\tau^2` is the variance parameter on :math:`I`.
In particular, the residuals for the :math:`i^\mathit{th}` and :math:`j^\mathit{th}`
observations have covariance :math:`\sigma^2 K_{ij}` for :math:`i \neq j`.
This model is equivalent to a
`mixed model <https://en.wikipedia.org/wiki/Mixed_model>`__
of the form
.. math::
y = X \beta + Z u + \epsilon
by setting :math:`K = ZZ^T` where
- :math:`Z` is a known :math:`n \times r` design matrix for :math:`r` random effects.
- :math:`u` is an :math:`r`-vector of random effects drawn from :math:`\mathrm{N}(0, \sigma^2 I)`.
- :math:`\epsilon` is an :math:`n`-vector of random errors drawn from :math:`\mathrm{N}(0, \tau^2 I)`.
However, :class:`LinearMixedModel` does not itself realize :math:`K` as a linear kernel
with respect to random effects, nor does it take :math:`K` explicitly as input. Rather,
via the eigendecomposition :math:`K = U S U^T`, the class leverages a third, decorrelated
form of the model
.. math::
Py \sim \mathrm{N}(PX \beta, \, \sigma^2 (\gamma S + I))
where
- :math:`P = U^T: \mathbb{R}^n \rightarrow \mathbb{R}^n` is an orthonormal transformation
that decorrelates the observations. The rows of :math:`P` are an eigenbasis for :math:`K`.
- :math:`S` is the :math:`n \times n` diagonal matrix of corresponding eigenvalues.
- :math:`\gamma = \frac{\sigma^2}{\tau^2}` is the ratio of variance parameters.
Hence, the triple :math:`(Py, PX, S)` determines the probability
of the observations for any choice of model parameters, and is
therefore sufficient for inference.
This triple, with :math:`S` encoded as a vector, is the default
("full-rank") initialization of the class.
:class:`LinearMixedModel` also provides an efficient strategy to fit the
model above with :math:`K` replaced by its rank-:math:`r` approximation
:math:`K_r = P_r^T S_r P_r` where
- :math:`P_r: \mathbb{R}^n \rightarrow \mathbb{R}^r` has orthonormal rows
consisting of the top :math:`r` eigenvectors of :math:`K`.
- :math:`S_r` is the :math:`r \times r` diagonal matrix of corresponding
non-zero eigenvalues.
For this low-rank model, the quintuple :math:`(P_r y, P_r X, S_r, y, X)`
is similarly sufficient for inference and corresponds to the "low-rank"
initialization of the class. Morally, :math:`y` and :math:`X` are
required for low-rank inference because the diagonal :math:`\gamma S + I`
is always full-rank.
If :math:`K` actually has rank :math:`r`, then :math:`K = K_r`
and the low-rank and full-rank models are equivalent.
Hence low-rank inference provides a more efficient, equally-exact
algorithm for fitting the full-rank model.
This situation arises, for example, when :math:`K` is the linear kernel
of a mixed model with fewer random effects than observations.
Even when :math:`K` has full rank, using a lower-rank approximation may
be an effective form of regularization, in addition to boosting
computational efficiency.
**Initialization**
The class may be initialized directly or with one of two methods:
- :meth:`from_kinship` takes :math:`y`, :math:`X`, and :math:`K` as ndarrays.
The model is always full-rank.
- :meth:`from_random_effects` takes :math:`y` and :math:`X` as ndarrays and
:math:`Z` as an ndarray or block matrix. The model is full-rank if and
only if :math:`n \leq m`, where :math:`m` is the number of random effects (the number of columns of :math:`Z`).
Direct full-rank initialization takes :math:`Py`, :math:`PX`, and :math:`S`
as ndarrays. The following class attributes are set:
.. list-table::
:header-rows: 1
* - Attribute
- Type
- Value
* - `low_rank`
- bool
- ``False``
* - `n`
- int
- Number of observations :math:`n`
* - `f`
- int
- Number of fixed effects :math:`p`
* - `r`
- int
- Effective number of random effects, must equal :math:`n`
* - `py`
- ndarray
- Rotated response vector :math:`P y` with shape :math:`(n)`
* - `px`
- ndarray
- Rotated design matrix :math:`P X` with shape :math:`(n, p)`
* - `s`
- ndarray
- Eigenvalues vector :math:`S` of :math:`K` with shape :math:`(n)`
* - `p_path`
- str
- Path at which :math:`P` is stored as a block matrix
Direct low-rank initialization takes :math:`P_r y`, :math:`P_r X`, :math:`S_r`,
:math:`y`, and :math:`X` as ndarrays. The following class attributes are set:
.. list-table::
:header-rows: 1
* - Attribute
- Type
- Value
* - `low_rank`
- bool
- ``True``
* - `n`
- int
- Number of observations :math:`n`
* - `f`
- int
- Number of fixed effects :math:`p`
* - `r`
- int
- Effective number of random effects, must be less than :math:`n`
* - `py`
- ndarray
- Projected response vector :math:`P_r y` with shape :math:`(r)`
* - `px`
- ndarray
- Projected design matrix :math:`P_r X` with shape :math:`(r, p)`
* - `s`
- ndarray
- Eigenvalues vector :math:`S_r` of :math:`K_r` with shape :math:`(r)`
* - `y`
- ndarray
- Response vector with shape :math:`(n)`
* - `x`
- ndarray
- Design matrix with shape :math:`(n, p)`
* - `p_path`
- str
- Path at which :math:`P` is stored as a block matrix
**Fitting the model**
:meth:`fit` uses `restricted maximum likelihood
<https://en.wikipedia.org/wiki/Restricted_maximum_likelihood>`__ (REML)
to estimate :math:`(\beta, \sigma^2, \tau^2)`.
This is done by numerical optimization of the univariate function
:meth:`compute_neg_log_reml`, which itself optimizes REML constrained to a
fixed ratio of variance parameters. Each evaluation of
:meth:`compute_neg_log_reml` has computational complexity
.. math::
\mathit{O}(rp^2 + p^3).
:meth:`fit` adds the following attributes at this estimate.
.. list-table::
:header-rows: 1
* - Attribute
- Type
- Value
* - `beta`
- ndarray
- :math:`\beta`
* - `sigma_sq`
- float
- :math:`\sigma^2`
* - `tau_sq`
- float
- :math:`\tau^2`
* - `gamma`
- float
- :math:`\gamma = \frac{\sigma^2}{\tau^2}`
* - `log_gamma`
- float
- :math:`\log{\gamma}`
* - `h_sq`
- float
- :math:`\mathit{h}^2 = \frac{\sigma^2}{\sigma^2 + \tau^2}`
* - `h_sq_standard_error`
- float
- asymptotic estimate of :math:`\mathit{h}^2` standard error
**Testing alternative models**
The model is also equivalent to its augmentation
.. math::
y \sim \mathrm{N}\left(x_\star\beta_\star + X \beta, \, \sigma^2 K + \tau^2 I\right)
by an additional covariate of interest :math:`x_\star` under the
null hypothesis that the corresponding fixed effect parameter
:math:`\beta_\star` is zero. Similarly to initialization, full-rank testing
of the alternative hypothesis :math:`\beta_\star \neq 0` requires
:math:`P x_\star`, whereas the low-rank testing requires :math:`P_r x_\star`
and :math:`x_\star`.
After running :meth:`fit` to fit the null model, one can test each of a
collection of alternatives using either of two implementations of the
likelihood ratio test:
- :meth:`fit_alternatives_numpy` takes one or two ndarrays. It is a pure Python
method that evaluates alternatives serially on leader (master).
- :meth:`fit_alternatives` takes one or two paths to block matrices. It
evaluates alternatives in parallel on the workers.
Per alternative, both have computational complexity
.. math::
\mathit{O}(rp + p^3).
Parameters
----------
py: :class:`numpy.ndarray`
Projected response vector :math:`P_r y` with shape :math:`(r)`.
px: :class:`numpy.ndarray`
Projected design matrix :math:`P_r X` with shape :math:`(r, p)`.
s: :class:`numpy.ndarray`
Eigenvalues vector :math:`S` with shape :math:`(r)`.
y: :class:`numpy.ndarray`, optional
Response vector with shape :math:`(n)`.
Include for low-rank inference.
x: :class:`numpy.ndarray`, optional
Design matrix with shape :math:`(n, p)`.
Include for low-rank inference.
p_path: :class:`str`, optional
Path at which :math:`P` has been stored as a block matrix.
"""
@typecheck_method(py=np.ndarray,
px=np.ndarray,
s=np.ndarray,
y=nullable(np.ndarray),
x=nullable(np.ndarray),
p_path=nullable(str))
def __init__(self, py, px, s, y=None, x=None, p_path=None):
if y is None and x is None:
low_rank = False
elif y is not None and x is not None:
low_rank = True
else:
raise ValueError('for low-rank, set both y and x; for full-rank, do not set y or x.')
_check_dims(py, 'py', 1)
_check_dims(px, 'px', 2)
_check_dims(s, 's', 1)
r = s.size
f = px.shape[1]
if py.size != r:
raise ValueError("py and s must have the same size")
if px.shape[0] != r:
raise ValueError("px must have the same number of rows as the size of s")
if low_rank:
_check_dims(y, 'y', 1)
_check_dims(x, 'x', 2)
n = y.size
if n <= r:
raise ValueError("size of y must be larger than the size of s")
if x.shape[0] != n:
raise ValueError("x must have the same number of rows as the size of y")
if x.shape[1] != f:
raise ValueError("px and x must have the same number columns")
else:
n = r
if p_path is not None:
n_rows, n_cols = BlockMatrix.read(p_path).shape
if n_cols != n:
raise ValueError("LinearMixedModel: Number of columns in the block "
f"matrix at 'p_path' ({n_cols}) must equal "
f"the size of 'y' ({n})")
if n_rows != r:
raise ValueError("LinearMixedModel: Number of rows in the block "
f"matrix at 'p_path' ({n_rows}) must equal "
f"the size of 'py' ({r})")
self.low_rank = low_rank
self.n = n
self.f = f
self.r = r
self.py = py
self.px = px
self.s = s
self.y = y
self.x = x
self.p_path = p_path
self._check_dof()
self.beta = None
self.sigma_sq = None
self.tau_sq = None
self.gamma = None
self.log_gamma = None
self.h_sq = None
self.h_sq_standard_error = None
self.optimize_result = None
self._fitted = False
if low_rank:
self._yty = y @ y
self._xty = x.T @ y
self._xtx = x.T @ x
self._dof = n - f
self._d = None
self._ydy = None
self._xdy = None
self._xdx = None
self._dof_alt = n - (f + 1)
self._d_alt = None
self._ydy_alt = None
self._xdy_alt = np.zeros(f + 1)
self._xdx_alt = np.zeros((f + 1, f + 1))
self._residual_sq = None
self._scala_model = None
def _reset(self):
self._fitted = False
self.beta = None
self.sigma_sq = None
self.tau_sq = None
self.gamma = None
self.log_gamma = None
self.h_sq = None
self.h_sq_standard_error = None
self.optimize_result = None
def compute_neg_log_reml(self, log_gamma, return_parameters=False):
r"""Compute negative log REML constrained to a fixed value
of :math:`\log{\gamma}`.
This function computes the triple :math:`(\beta, \sigma^2, \tau^2)` with
:math:`\gamma = \frac{\sigma^2}{\tau^2}` at which the restricted
likelihood is maximized and returns the negative of the restricted log
likelihood at these parameters (shifted by the constant defined below).
The implementation has complexity :math:`\mathit{O}(rp^2 + p^3)` and is
inspired by `FaST linear mixed models for genome-wide association studies (2011)
<https://www.nature.com/articles/nmeth.1681>`__.
The formulae follow from `Bayesian Inference for Variance Components Using Only Error Contrasts (1974)
<http://faculty.dbmi.pitt.edu/day/Bioinf2132-advanced-Bayes-and-R/previousDocuments/Bioinf2132-documents-2016/2016-11-22/Harville-1974.pdf>`__.
Harville derives that for fixed covariance :math:`V`, the restricted
likelihood of the variance parameter :math:`V` in the model
.. math::
y \sim \mathrm{N}(X \beta, \, V)
is given by
.. math::
(2\pi)^{-\frac{1}{2}(n - p)}
\det(X^T X)^\frac{1}{2}
\det(V)^{-\frac{1}{2}}
\det(X^T V^{-1} X)^{-\frac{1}{2}}
e^{-\frac{1}{2}(y - X\hat\beta)^T V^{-1}(y - X\hat\beta)}.
with
.. math::
\hat\beta = (X^T V^{-1} X)^{-1} X^T V^{-1} y.
In our case, the variance is
.. math::
V = \sigma^2 K + \tau^2 I = \sigma^2 (K + \gamma^{-1} I)
which is determined up to scale by any fixed value of the ratio
:math:`\gamma`. So for input :math:`\log \gamma`, the
negative restricted log likelihood is minimized at
:math:`(\hat\beta, \hat\sigma^2)` with :math:`\hat\beta` as above and
.. math::
\hat\sigma^2 = \frac{1}{n - p}(y - X\hat\beta)^T (K + \gamma^{-1} I)^{-1}(y - X\hat\beta).
For :math:`\hat V` at this :math:`(\hat\beta, \hat\sigma^2, \gamma)`,
the exponent in the likelihood reduces to :math:`-\frac{1}{2}(n-p)`, so
the negative restricted log likelihood may be expressed as
.. math::
\frac{1}{2}\left(\log \det(\hat V) + \log\det(X^T \hat V^{-1} X)\right) + C
where
.. math::
C = \frac{1}{2}\left(n - p + (n - p)\log(2\pi) - \log\det(X^T X)\right)
only depends on :math:`X`. :meth:`compute_neg_log_reml` returns the value of
the first term, omitting the constant term.
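For example, one might profile this objective over a grid of
:math:`\log{\gamma}` values before fitting (a sketch; assumes a constructed ``model``):
>>> grid = np.linspace(-8.0, 8.0, 9)  # doctest: +SKIP
>>> nll = [model.compute_neg_log_reml(lg) for lg in grid]  # doctest: +SKIP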
Parameters
----------
log_gamma: :obj:`float`
Value of :math:`\log{\gamma}`.
return_parameters:
If ``True``, also return :math:`\beta`, :math:`\sigma^2`,
and :math:`\tau^2`.
Returns
-------
:obj:`float` or (:obj:`float`, :class:`numpy.ndarray`, :obj:`float`, :obj:`float`)
If `return_parameters` is ``False``, returns (shifted) negative log REML.
Otherwise, returns (shifted) negative log REML, :math:`\beta`, :math:`\sigma^2`,
and :math:`\tau^2`.
"""
from scipy.linalg import solve, LinAlgError
gamma = np.exp(log_gamma)
d = 1 / (self.s + 1 / gamma)
logdet_d = np.sum(np.log(d)) + (self.n - self.r) * log_gamma
if self.low_rank:
d -= gamma
dpy = d * self.py
ydy = self.py @ dpy + gamma * self._yty
xdy = self.px.T @ dpy + gamma * self._xty
xdx = (self.px.T * d) @ self.px + gamma * self._xtx
else:
dpy = d * self.py
ydy = self.py @ dpy
xdy = self.px.T @ dpy
xdx = (self.px.T * d) @ self.px
try:
beta = solve(xdx, xdy, assume_a='pos')
residual_sq = ydy - xdy.T @ beta
sigma_sq = residual_sq / self._dof
tau_sq = sigma_sq / gamma
neg_log_reml = (np.linalg.slogdet(xdx)[1] - logdet_d + self._dof * np.log(sigma_sq)) / 2
self._d, self._ydy, self._xdy, self._xdx = d, ydy, xdy, xdx # used in fit
if return_parameters:
return neg_log_reml, beta, sigma_sq, tau_sq
else:
return neg_log_reml
except LinAlgError as e:
raise Exception('linear algebra error while solving for REML estimate') from e
@typecheck_method(log_gamma=nullable(numeric), bounds=tupleof(numeric), tol=float, maxiter=int)
def fit(self, log_gamma=None, bounds=(-8.0, 8.0), tol=1e-8, maxiter=500):
r"""Find the triple :math:`(\beta, \sigma^2, \tau^2)` maximizing REML.
This method sets the attributes `beta`, `sigma_sq`, `tau_sq`, `gamma`,
`log_gamma`, `h_sq`, and `h_sq_standard_error` as described in the
top-level class documentation.
If `log_gamma` is provided, :meth:`fit` finds the REML solution
with :math:`\log{\gamma}` constrained to this value. In this case,
`h_sq_standard_error` is ``None`` since `h_sq` is not estimated.
Otherwise, :meth:`fit` searches for the value of :math:`\log{\gamma}`
that minimizes :meth:`compute_neg_log_reml`, and also sets the attribute
`optimize_result` of type `scipy.optimize.OptimizeResult
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html>`__.
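For example (a sketch; assumes a constructed ``model``):
>>> model.fit()  # unconstrained REML fit  # doctest: +SKIP
>>> model.h_sq, model.h_sq_standard_error  # doctest: +SKIP
>>> model.fit(log_gamma=0.0)  # constrained fit; h_sq_standard_error is None  # doctest: +SKIP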
Parameters
----------
log_gamma: :obj:`float`, optional
If provided, the solution is constrained to have this value of
:math:`\log{\gamma}`.
bounds: :obj:`float`, :obj:`float`
Lower and upper bounds for :math:`\log{\gamma}`.
tol: :obj:`float`
Absolute tolerance for optimizing :math:`\log{\gamma}`.
maxiter: :obj:`int`
Maximum number of iterations for optimizing :math:`\log{\gamma}`.
"""
if self._fitted:
self._reset()
fit_log_gamma = True if log_gamma is None else False
if fit_log_gamma:
from scipy.optimize import minimize_scalar
self.optimize_result = minimize_scalar(
self.compute_neg_log_reml,
method='bounded',
bounds=bounds,
options={'xatol': tol, 'maxiter': maxiter})
if self.optimize_result.success:
if self.optimize_result.x - bounds[0] < 0.001:
raise Exception("failed to fit log_gamma: optimum within 0.001 of lower bound.")
elif bounds[1] - self.optimize_result.x < 0.001:
raise Exception("failed to fit log_gamma: optimum within 0.001 of upper bound.")
else:
self.log_gamma = self.optimize_result.x
else:
raise Exception(f'failed to fit log_gamma:\n {self.optimize_result}')
else:
self.log_gamma = log_gamma
_, self.beta, self.sigma_sq, self.tau_sq = self.compute_neg_log_reml(self.log_gamma, return_parameters=True)
self.gamma = np.exp(self.log_gamma)
self.h_sq = self.sigma_sq / (self.sigma_sq + self.tau_sq)
self._residual_sq = self.sigma_sq * self._dof
self._d_alt = self._d
self._ydy_alt = self._ydy
self._xdy_alt[1:] = self._xdy
self._xdx_alt[1:, 1:] = self._xdx
if fit_log_gamma:
self.h_sq_standard_error = self._estimate_h_sq_standard_error()
self._fitted = True
def _estimate_h_sq_standard_error(self):
epsilon = 1e-4 # parabolic interpolation radius in log_gamma space
lg = self.log_gamma + np.array([-epsilon, 0.0, epsilon])
h2 = 1 / (1 + np.exp(-lg))
nll = [self.compute_neg_log_reml(lgi) for lgi in lg]
if nll[1] > nll[0] or nll[1] > nll[2]:
i = 0 if nll[1] > nll[0] else 2
raise Exception(f'Minimum of negative log likelihood fit as {nll[1]} at log_gamma={lg[1]},'
f'\n but found smaller value of {nll[i]} at log_gamma={lg[i]}.'
f'\n Investigate by plotting the negative log likelihood function.')
# Asymptotically near MLE, nLL = a * h2^2 + b * h2 + c with a = 1 / (2 * se^2)
# By Lagrange interpolation:
a = ((h2[2] * (nll[1] - nll[0]) + h2[1] * (nll[0] - nll[2]) + h2[0] * (nll[2] - nll[1]))
/ ((h2[1] - h2[0]) * (h2[0] - h2[2]) * (h2[2] - h2[1])))
return 1 / np.sqrt(2 * a)
def h_sq_normalized_lkhd(self):
r"""Estimate the normalized likelihood of :math:`\mathit{h}^2` over the
discrete grid of percentiles.
Examples
--------
Plot the estimated normalized likelihood function:
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> plt.plot(range(101), model.h_sq_normalized_lkhd()) # doctest: +SKIP
Notes
-----
This method may be used to visualize the approximate posterior on
:math:`\mathit{h}^2` under a flat prior.
The resulting ndarray ``a`` has length 101 with ``a[i]`` equal to the
maximum likelihood over all :math:`\beta` and :math:`\sigma^2` with
:math:`\mathit{h}^2` constrained to ``i / 100``. The values for
``1 <= i <= 99`` are normalized to sum to 1, and ``a[0]`` and ``a[100]``
are set to ``nan``.
Returns
-------
:class:`numpy.ndarray` of :obj:`float`
Normalized likelihood values for :math:`\mathit{h}^2`.
"""
log_lkhd = np.zeros(101, dtype=np.float64)
log_lkhd[0], log_lkhd[100] = np.nan, np.nan
for h2 in range(1, 100):
gamma = h2 / (100.0 - h2)
log_lkhd[h2] = -self.compute_neg_log_reml(np.log(gamma))
log_lkhd -= np.max(log_lkhd[1:-1])
lkhd = np.exp(log_lkhd)
lkhd /= np.sum(lkhd[1:-1])
return lkhd
@typecheck_method(pa_t_path=str,
a_t_path=nullable(str),
partition_size=nullable(int))
def fit_alternatives(self, pa_t_path, a_t_path=None, partition_size=None):
r"""Fit and test alternative model for each augmented design matrix in parallel.
Notes
-----
The alternative model is fit using REML constrained to the value of
:math:`\gamma` set by :meth:`fit`.
The likelihood ratio test of fixed effect parameter :math:`\beta_\star`
uses (non-restricted) maximum likelihood:
.. math::
\chi^2 = 2 \log\left(\frac{
\max_{\beta_\star, \beta, \sigma^2}\mathrm{N}
(y \, | \, x_\star \beta_\star + X \beta; \sigma^2(K + \gamma^{-1}I))}
{\max_{\beta, \sigma^2} \mathrm{N}
(y \, | \, x_\star \cdot 0 + X \beta; \sigma^2(K + \gamma^{-1}I))}
\right)
The p-value is given by the tail probability under a chi-squared
distribution with one degree of freedom.
The resulting table has the following fields:
.. list-table::
:header-rows: 1
* - Field
- Type
- Value
* - `idx`
- int64
- Index of augmented design matrix.
* - `beta`
- float64
- :math:`\beta_\star`
* - `sigma_sq`
- float64
- :math:`\sigma^2`
* - `chi_sq`
- float64
- :math:`\chi^2`
* - `p_value`
- float64
- p-value
:math:`(P_r A)^T` and :math:`A^T` (if given) must have the same number
of rows (augmentations). These rows are grouped into partitions for
parallel processing. The number of partitions equals the ceiling of
``n_rows / partition_size``, and should be at least the number of cores
to make use of all cores. By default, there is one partition per row of
blocks in :math:`(P_r A)^T`. Setting the partition size to an exact
(rather than approximate) divisor or multiple of the block size reduces
superfluous shuffling of data.
The number of columns in each block matrix must be less than :math:`2^{31}`.
Warning
-------
The block matrices must be stored in row-major format, as results
from :meth:`.BlockMatrix.write` with ``force_row_major=True`` and from
:meth:`.BlockMatrix.write_from_entry_expr`. Otherwise, this method
will produce an error message.
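A minimal sketch (hypothetical paths; assumes block matrices ``pa_t`` and ``a_t``
of the shapes described below and a fitted low-rank ``model``):
>>> pa_t.write('pa_t.bm', force_row_major=True)  # doctest: +SKIP
>>> a_t.write('a_t.bm', force_row_major=True)  # doctest: +SKIP
>>> results = model.fit_alternatives('pa_t.bm', 'a_t.bm')  # doctest: +SKIP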
Parameters
----------
pa_t_path: :class:`str`
Path to block matrix :math:`(P_r A)^T` with shape :math:`(m, r)`.
Each row is a projected augmentation :math:`P_r x_\star` of :math:`P_r X`.
a_t_path: :class:`str`, optional
Path to block matrix :math:`A^T` with shape :math:`(m, n)`.
Each row is an augmentation :math:`x_\star` of :math:`X`.
Include for low-rank inference.
partition_size: :obj:`int`, optional
Number of rows to process per partition.
Default given by block size of :math:`(P_r A)^T`.
Returns
-------
:class:`.Table`
Table of results for each augmented design matrix.
"""
from hail.table import Table
self._check_dof(self.f + 1)
if self.low_rank and a_t_path is None:
raise ValueError('model is low-rank so a_t is required.')
elif not (self.low_rank or a_t_path is None):
raise ValueError('model is full-rank so a_t must not be set.')
if self._scala_model is None:
self._set_scala_model()
backend = Env.spark_backend('LinearMixedModel.fit_alternatives')
jfs = backend.fs._jfs
if partition_size is None:
block_size = Env.hail().linalg.BlockMatrix.readMetadata(jfs, pa_t_path).blockSize()
partition_size = block_size
elif partition_size <= 0:
raise ValueError(f'partition_size must be positive, found {partition_size}')
jpa_t = Env.hail().linalg.RowMatrix.readBlockMatrix(jfs, pa_t_path, partition_size)
if a_t_path is None:
maybe_ja_t = None
else:
maybe_ja_t = Env.hail().linalg.RowMatrix.readBlockMatrix(jfs, a_t_path, partition_size)
return Table._from_java(backend._jbackend.pyFitLinearMixedModel(
self._scala_model, jpa_t, maybe_ja_t))
@typecheck_method(pa=np.ndarray, a=nullable(np.ndarray), return_pandas=bool)
def fit_alternatives_numpy(self, pa, a=None, return_pandas=False):
r"""Fit and test alternative model for each augmented design matrix.
Notes
-----
This Python-only implementation runs serially on leader (master). See
the scalable implementation :meth:`fit_alternatives` for documentation
of the returned table.
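A minimal sketch (assumes a fitted low-rank ``model``, an ndarray ``a`` of
augmentations with shape :math:`(n, m)`, and the projection ``p_r`` used to
build the model):
>>> pa = p_r @ a  # doctest: +SKIP
>>> results = model.fit_alternatives_numpy(pa, a)  # doctest: +SKIP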
Parameters
----------
pa: :class:`numpy.ndarray`
Projected matrix :math:`P_r A` of alternatives with shape :math:`(r, m)`.
Each column is a projected augmentation :math:`P_r x_\star` of :math:`P_r X`.
a: :class:`numpy.ndarray`, optional
Matrix :math:`A` of alternatives with shape :math:`(n, m)`.
Each column is an augmentation :math:`x_\star` of :math:`X`.
Required for low-rank inference.
return_pandas: :obj:`bool`
If true, return pandas dataframe. If false, return Hail table.
Returns
-------
:class:`.Table` or :class:`.pandas.DataFrame`
Table of results for each augmented design matrix.
"""
self._check_dof(self.f + 1)
if not self._fitted:
raise Exception("null model is not fit. Run 'fit' first.")
n_cols = pa.shape[1]
assert pa.shape[0] == self.r
if self.low_rank:
assert a.shape[0] == self.n and a.shape[1] == n_cols
data = [(i,) + self._fit_alternative_numpy(pa[:, i], a[:, i]) for i in range(n_cols)]
else:
data = [(i,) + self._fit_alternative_numpy(pa[:, i], None) for i in range(n_cols)]
df = pd.DataFrame.from_records(data, columns=['idx', 'beta', 'sigma_sq', 'chi_sq', 'p_value'])
if return_pandas:
return df
else:
return Table.from_pandas(df, key='idx')
def _fit_alternative_numpy(self, pa, a):
from scipy.linalg import solve, LinAlgError
from scipy.stats.distributions import chi2
gamma = self.gamma
dpa = self._d_alt * pa
# single thread => no need to copy
ydy = self._ydy_alt
xdy = self._xdy_alt
xdx = self._xdx_alt
if self.low_rank:
xdy[0] = self.py @ dpa + gamma * (self.y @ a)
xdx[0, 0] = pa @ dpa + gamma * (a @ a)
xdx[0, 1:] = self.px.T @ dpa + gamma * (self.x.T @ a)
else:
xdy[0] = self.py @ dpa
xdx[0, 0] = pa @ dpa
xdx[0, 1:] = self.px.T @ dpa
try:
beta = solve(xdx, xdy, assume_a='pos') # only uses upper triangle
residual_sq = ydy - xdy.T @ beta
sigma_sq = residual_sq / self._dof_alt
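# Likelihood-ratio statistic with gamma fixed at the null fit: under ML the
# maximized log likelihood depends on the data only through the residual sum
# of squares, so 2*log(L_alt / L_null) reduces to n * log(rss_null / rss_alt).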
chi_sq = self.n * np.log(self._residual_sq / residual_sq) # division => precision
p_value = chi2.sf(chi_sq, 1)
return beta[0], sigma_sq, chi_sq, p_value
except LinAlgError:
return tuple(4 * [float('nan')])
def _set_scala_model(self):
from hail.utils.java import Env
from hail.linalg import _jarray_from_ndarray, _breeze_from_ndarray
if not self._fitted:
raise Exception("null model is not fit. Run 'fit' first.")
self._scala_model = Env.hail().stats.LinearMixedModel.pyApply(
self.gamma,
self._residual_sq,
_jarray_from_ndarray(self.py),
_breeze_from_ndarray(self.px),
_jarray_from_ndarray(self._d_alt),
self._ydy_alt,
_jarray_from_ndarray(self._xdy_alt),
_breeze_from_ndarray(self._xdx_alt),
_jarray_from_ndarray(self.y) if self.low_rank else None,
_breeze_from_ndarray(self.x) if self.low_rank else None
)
def _check_dof(self, f=None):
if f is None:
f = self.f
dof = self.n - f
if dof <= 0:
raise ValueError(f"{self.n} {plural("observation", self.n)} with {f} fixed {plural("effect", f)} "
f"implies {dof} {plural("degree", dof)} of freedom. Must be positive.")
@classmethod
@typecheck_method(y=np.ndarray,
x=np.ndarray,
k=np.ndarray,
p_path=nullable(str),
overwrite=bool)
def from_kinship(cls, y, x, k, p_path=None, overwrite=False):
r"""Initializes a model from :math:`y`, :math:`X`, and :math:`K`.
Examples
--------
>>> from hail.stats import LinearMixedModel
>>> y = np.array([0.0, 1.0, 8.0, 9.0])
>>> x = np.array([[1.0, 0.0],
... [1.0, 2.0],
... [1.0, 1.0],
... [1.0, 4.0]])
>>> k = np.array([[ 1. , -0.8727875 , 0.96397335, 0.94512946],
... [-0.8727875 , 1. , -0.93036112, -0.97320323],
... [ 0.96397335, -0.93036112, 1. , 0.98294169],
... [ 0.94512946, -0.97320323, 0.98294169, 1. ]])
>>> model, p = LinearMixedModel.from_kinship(y, x, k)
>>> model.fit()
>>> model.h_sq # doctest: +SKIP_OUTPUT_CHECK
0.2525148830695317
>>> model.s # doctest: +SKIP_OUTPUT_CHECK
array([3.83501295, 0.13540343, 0.02454114, 0.00504248])
Truncate to a rank :math:`r=2` model:
>>> r = 2
>>> s_r = model.s[:r]
>>> p_r = p[:r, :]
>>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x)
>>> model_r.fit()
>>> model_r.h_sq  # doctest: +SKIP_OUTPUT_CHECK
0.25193197591429695
Notes
-----
This method eigendecomposes :math:`K = P^T S P` on the leader (master)
and returns ``LinearMixedModel(p @ y, p @ x, s)`` and ``p``.
The performance of eigendecomposition depends critically on the number
of leader (master) cores and the NumPy / SciPy configuration, viewable
with ``np.show_config()``. For Intel machines, we recommend installing
the `MKL <https://anaconda.org/anaconda/mkl>`__ package for Anaconda.
`k` must be positive semi-definite; symmetry is not checked as only the
lower triangle is used.
Parameters
----------
y: :class:`numpy.ndarray`
:math:`n` vector of observations.
x: :class:`numpy.ndarray`
:math:`n \times p` matrix of fixed effects.
k: :class:`numpy.ndarray`
:math:`n \times n` positive semi-definite kernel :math:`K`.
p_path: :class:`str`, optional
Path at which to write :math:`P` as a block matrix.
overwrite: :obj:`bool`
If ``True``, overwrite an existing file at `p_path`.
Returns
-------
model: :class:`LinearMixedModel`
Model constructed from :math:`y`, :math:`X`, and :math:`K`.
p: :class:`numpy.ndarray`
Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
"""
_check_dims(y, "y", 1)
_check_dims(x, "x", 2)
_check_dims(k, "k", 2)
n = k.shape[0]
if k.shape[1] != n:
raise ValueError("from_kinship: 'k' must be a square matrix")
if y.shape[0] != n:
raise ValueError("from_kinship: 'y' and 'k' must have the same "
"number of rows")
if x.shape[0] != n:
raise ValueError("from_kinship: 'x' and 'k' must have the same "
"number of rows")
s, u = hl.linalg._eigh(k)
if s[0] < -1e12 * s[-1]:
raise Exception("from_kinship: smallest eigenvalue of 'k' is"
f"negative: {s[0]}")
# flip eigenvalues to descending order
s = np.flip(s, axis=0)
u = np.fliplr(u)
p = u.T
if p_path:
BlockMatrix.from_numpy(p).write(p_path, overwrite=overwrite)
model = LinearMixedModel(p @ y, p @ x, s, p_path=p_path)
return model, p
@classmethod
@typecheck_method(y=np.ndarray,
x=np.ndarray,
z=oneof(np.ndarray, hl.linalg.BlockMatrix),
p_path=nullable(str),
overwrite=bool,
max_condition_number=float,
complexity_bound=int)
def from_random_effects(cls, y, x, z,
p_path=None,
overwrite=False,
max_condition_number=1e-10,
complexity_bound=8192):
r"""Initializes a model from :math:`y`, :math:`X`, and :math:`Z`.
Examples
--------
>>> from hail.stats import LinearMixedModel
>>> y = np.array([0.0, 1.0, 8.0, 9.0])
>>> x = np.array([[1.0, 0.0],
... [1.0, 2.0],
... [1.0, 1.0],
... [1.0, 4.0]])
>>> z = np.array([[0.0, 0.0, 1.0],
... [0.0, 1.0, 2.0],
... [1.0, 2.0, 4.0],
... [2.0, 4.0, 8.0]])
>>> model, p = LinearMixedModel.from_random_effects(y, x, z)
>>> model.fit()
>>> model.h_sq # doctest: +SKIP_OUTPUT_CHECK
0.38205307244271675
Notes
-----
If :math:`n \leq m`, the returned model is full rank.
If :math:`n > m`, the returned model is low rank. In this case only,
eigenvalues less than or equal to `max_condition_number` times the top
eigenvalue are dropped from :math:`S`, with the corresponding
eigenvectors dropped from :math:`P`. This guards against precision
loss on left eigenvectors computed via the right gramian :math:`Z^T Z`
in :meth:`.BlockMatrix.svd`.
In either case, one can truncate to a rank :math:`r` model as follows.
If `p` is an ndarray:
>>> p_r = p[:r, :] # doctest: +SKIP
>>> s_r = model.s[:r] # doctest: +SKIP
>>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x) # doctest: +SKIP
If `p` is a block matrix:
>>> p[:r, :].write(p_r_path) # doctest: +SKIP
>>> p_r = BlockMatrix.read(p_r_path) # doctest: +SKIP
>>> s_r = model.s[:r] # doctest: +SKIP
>>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x, p_r_path) # doctest: +SKIP
This method applies no standardization to `z`.
Warning
-------
If `z` is a block matrix, then ideally `z` should be the result of
directly reading from disk (and possibly a transpose). This is most
critical if :math:`n > m`, because in this case multiplication by `z`
will result in all preceding transformations being repeated
``n / block_size`` times, as explained in :class:`.BlockMatrix`.
At least one dimension must be less than or equal to 46300.
See the warning in :meth:`.BlockMatrix.svd` for performance
considerations.
Parameters
----------
y: :class:`numpy.ndarray`
:math:`n` vector of observations :math:`y`.
x: :class:`numpy.ndarray`
:math:`n \times p` matrix of fixed effects :math:`X`.
z: :class:`numpy.ndarray` or :class:`.BlockMatrix`
:math:`n \times m` matrix of random effects :math:`Z`.
p_path: :class:`str`, optional
Path at which to write :math:`P` as a block matrix.
Required if `z` is a block matrix.
overwrite: :obj:`bool`
If ``True``, overwrite an existing file at `p_path`.
max_condition_number: :obj:`float`
Maximum condition number. Must be greater than 1e-16.
complexity_bound: :obj:`int`
Complexity bound for :meth:`.BlockMatrix.svd` when `z` is a block
matrix.
Returns
-------
model: :class:`LinearMixedModel`
Model constructed from :math:`y`, :math:`X`, and :math:`Z`.
p: :class:`numpy.ndarray` or :class:`.BlockMatrix`
Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
The type is block matrix if `z` is a block matrix and
:meth:`.BlockMatrix.svd` of `z` returns :math:`U` as a block matrix.
"""
z_is_bm = isinstance(z, BlockMatrix)
if z_is_bm and p_path is None:
raise ValueError("from_random_effects: 'p_path' required when 'z'"
"is a block matrix.")
if max_condition_number < 1e-16:
raise ValueError("from_random_effects: 'max_condition_number' must "
f"be at least 1e-16, found {max_condition_number}")
_check_dims(y, "y", 1)
_check_dims(x, "x", 2)
_check_dims(z, "z", 2)
n, m = z.shape
if y.shape[0] != n:
raise ValueError("from_random_effects: 'y' and 'z' must have the "
"same number of rows")
if x.shape[0] != n:
raise ValueError("from_random_effects: 'x' and 'z' must have the "
"same number of rows")
if z_is_bm:
u, s0, _ = z.svd(complexity_bound=complexity_bound)
p = u.T
p_is_bm = isinstance(p, BlockMatrix)
else:
u, s0, _ = hl.linalg._svd(z, full_matrices=False)
p = u.T
p_is_bm = False
s = s0 ** 2
low_rank = n > m
if low_rank:
assert np.all(np.isfinite(s))
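# s is in descending order, so searching the negated array finds the first
# index at which s[i] <= max_condition_number * s[0]; the tail from that
# index onward is dropped below.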
r = int(np.searchsorted(-s, -max_condition_number * s[0]))
if r < m:
info(f'from_random_effects: model rank reduced from {m} to {r} '
f'due to ill-conditioning.'
f'\n Largest dropped eigenvalue was {s[r]}.')
s = s[:r]
p = p[:r, :]
if p_path is not None:
if p_is_bm:
p.write(p_path, overwrite=overwrite)
p = BlockMatrix.read(p_path)
else:
BlockMatrix.from_numpy(p).write(p_path, overwrite=overwrite)
if p_is_bm:
py, px = (p @ y.reshape(n, 1)).to_numpy().flatten(), (p @ x).to_numpy()
else:
py, px = p @ y, p @ x
if low_rank:
model = LinearMixedModel(py, px, s, y, x, p_path)
else:
model = LinearMixedModel(py, px, s, p_path=p_path)
return model, p
# checks agreement of model initialization
def _same(self, other, tol=1e-6, up_to_sign=True):
def same_rows_up_to_sign(a, b, atol):
assert a.shape[0] == b.shape[0]
return all(np.allclose(a[i], b[i], atol=atol)
or np.allclose(-a[i], b[i], atol=atol)
for i in range(a.shape[0]))
close = same_rows_up_to_sign if up_to_sign else np.allclose
if self.low_rank != other.low_rank:
print(f'different low_rank: {self.low_rank}, {other.low_rank}')
return False
same = True
if not close(self.py, other.py, atol=tol):
print(f'different py:\n{self.py}\n{other.py}')
same = False
if not close(self.px, other.px, atol=tol):
print(f'different px:\n{self.px}\n{other.px}')
same = False
if not np.allclose(self.s, other.s, atol=tol):
print(f'different s:\n{self.s}\n{other.s}')
same = False
if self.low_rank and not close(self.y, other.y, atol=tol):
print(f'different y:\n{self.y}\n{other.y}')
same = False
if self.low_rank and not close(self.x, other.x, atol=tol):
print(f'different x\n{self.x}\n{other.x}')
same = False
if self.p_path != other.p_path:
print(f'different p_path:\n{self.p_path}\n{other.p_path}')
same = False
return same
| import numpy as np
import pandas as pd
import hail as hl
from hail.linalg import BlockMatrix
from hail.linalg.utils import _check_dims
from hail.table import Table
from hail.typecheck import typecheck_method, nullable, tupleof, oneof, numeric
from hail.utils.java import Env, info
from hail.utils.misc import plural
class LinearMixedModel(object):
r"""Class representing a linear mixed model.
.. include:: ../_templates/experimental.rst
:class:`LinearMixedModel` represents a linear model of the form
.. math::
y \sim \mathrm{N}(X \beta, \, \sigma^2 K + \tau^2 I)
where
- :math:`\mathrm{N}` is a :math:`n`-dimensional normal distribution.
- :math:`y` is a known vector of :math:`n` observations.
- :math:`X` is a known :math:`n \times p` design matrix for :math:`p` fixed effects.
- :math:`K` is a known :math:`n \times n` positive semi-definite kernel.
- :math:`I` is the :math:`n \times n` identity matrix.
- :math:`\beta` is a :math:`p`-parameter vector of fixed effects.
- :math:`\sigma^2` is the variance parameter on :math:`K`.
- :math:`\tau^2` is the variance parameter on :math:`I`.
In particular, the residuals for the :math:`i^\mathit{th}` and :math:`j^\mathit{th}`
observations have covariance :math:`\sigma^2 K_{ij}` for :math:`i \neq j`.
This model is equivalent to a
`mixed model <https://en.wikipedia.org/wiki/Mixed_model>`__
of the form
.. math::
y = X \beta + Z u + \epsilon
by setting :math:`K = ZZ^T` where
- :math:`Z` is a known :math:`n \times r` design matrix for :math:`r` random effects.
- :math:`u` is an :math:`r`-vector of random effects drawn from :math:`\mathrm{N}(0, \sigma^2 I)`.
- :math:`\epsilon` is an :math:`n`-vector of random errors drawn from :math:`\mathrm{N}(0, \tau^2 I)`.
However, :class:`LinearMixedModel` does not itself realize :math:`K` as a linear kernel
with respect to random effects, nor does it take :math:`K` explicitly as input. Rather,
via the eigendecomposition :math:`K = U S U^T`, the class leverages a third, decorrelated
form of the model
.. math::
Py \sim \mathrm{N}(PX \beta, \, \sigma^2 (\gamma S + I))
where
- :math:`P = U^T: \mathbb{R}^n \rightarrow \mathbb{R}^n` is an orthonormal transformation
that decorrelates the observations. The rows of :math:`P` are an eigenbasis for :math:`K`.
- :math:`S` is the :math:`n \times n` diagonal matrix of corresponding eigenvalues.
- :math:`\gamma = \frac{\sigma^2}{\tau^2}` is the ratio of variance parameters.
Hence, the triple :math:`(Py, PX, S)` determines the probability
of the observations for any choice of model parameters, and is
therefore sufficient for inference.
This triple, with S encoded as a vector, is the default
("full-rank") initialization of the class.
:class:`LinearMixedModel` also provides an efficient strategy to fit the
model above with :math:`K` replaced by its rank-:math:`r` approximation
:math:`K_r = P_r^T S_r P_r` where
- :math:`P_r: \mathbb{R}^n \rightarrow \mathbb{R}^r` has orthonormal rows
consisting of the top :math:`r` eigenvectors of :math:`K`.
- :math:`S_r` is the :math:`r \times r` diagonal matrix of corresponding
non-zero eigenvalues.
For this low-rank model, the quintuple :math:`(P_r y, P_r X, S_r, y, X)`
is similarly sufficient for inference and corresponds to the "low-rank"
initialization of the class. Morally, :math:`y` and :math:`X` are
required for low-rank inference because the diagonal :math:`\gamma S + I`
is always full-rank.
If :math:`K` actually has rank :math:`r`, then :math:`K = K_r`
and the low-rank and full-rank models are equivalent.
Hence low-rank inference provides a more efficient, equally-exact
algorithm for fitting the full-rank model.
This situation arises, for example, when :math:`K` is the linear kernel
of a mixed model with fewer random effects than observations.
Even when :math:`K` has full rank, using a lower-rank approximation may
be an effective form of regularization, in addition to boosting
computational efficiency.
**Initialization**
The class may be initialized directly or with one of two methods:
- :meth:`from_kinship` takes :math:`y`, :math:`X`, and :math:`K` as ndarrays.
The model is always full-rank.
- :meth:`from_random_effects` takes :math:`y` and :math:`X` as ndarrays and
:math:`Z` as an ndarray or block matrix. The model is full-rank if and
only if :math:`n \leq m`.
Direct full-rank initialization takes :math:`Py`, :math:`PX`, and :math:`S`
as ndarrays. The following class attributes are set:
.. list-table::
:header-rows: 1
* - Attribute
- Type
- Value
* - `low_rank`
- bool
- ``False``
* - `n`
- int
- Number of observations :math:`n`
* - `f`
- int
- Number of fixed effects :math:`p`
* - `r`
- int
- Effective number of random effects, must equal :math:`n`
* - `py`
- ndarray
- Rotated response vector :math:`P y` with shape :math:`(n)`
* - `px`
- ndarray
- Rotated design matrix :math:`P X` with shape :math:`(n, p)`
* - `s`
- ndarray
- Eigenvalues vector :math:`S` of :math:`K` with shape :math:`(n)`
* - `p_path`
- str
- Path at which :math:`P` is stored as a block matrix
Direct low-rank initialization takes :math:`P_r y`, :math:`P_r X`, :math:`S_r`,
:math:`y`, and :math:`X` as ndarrays. The following class attributes are set:
.. list-table::
:header-rows: 1
* - Attribute
- Type
- Value
* - `low_rank`
- bool
- ``True``
* - `n`
- int
- Number of observations :math:`n`
* - `f`
- int
- Number of fixed effects :math:`p`
* - `r`
- int
- Effective number of random effects, must be less than :math:`n`
* - `py`
- ndarray
- Projected response vector :math:`P_r y` with shape :math:`(r)`
* - `px`
- ndarray
- Projected design matrix :math:`P_r X` with shape :math:`(r, p)`
* - `s`
- ndarray
- Eigenvalues vector :math:`S_r` of :math:`K_r` with shape :math:`(r)`
* - `y`
- ndarray
- Response vector with shape :math:`(n)`
* - `x`
- ndarray
- Design matrix with shape :math:`(n, p)`
* - `p_path`
- str
- Path at which :math:`P` is stored as a block matrix
**Fitting the model**
:meth:`fit` uses `restricted maximum likelihood
<https://en.wikipedia.org/wiki/Restricted_maximum_likelihood>`__ (REML)
to estimate :math:`(\beta, \sigma^2, \tau^2)`.
This is done by numerical optimization of the univariate function
:meth:`compute_neg_log_reml`, which itself optimizes REML constrained to a
fixed ratio of variance parameters. Each evaluation of
:meth:`compute_neg_log_reml` has computational complexity
.. math::
\mathit{O}(rp^2 + p^3).
:meth:`fit` adds the following attributes at this estimate.
.. list-table::
:header-rows: 1
* - Attribute
- Type
- Value
* - `beta`
- ndarray
- :math:`\beta`
* - `sigma_sq`
- float
- :math:`\sigma^2`
* - `tau_sq`
- float
- :math:`\tau^2`
* - `gamma`
- float
- :math:`\gamma = \frac{\sigma^2}{\tau^2}`
* - `log_gamma`
- float
- :math:`\log{\gamma}`
* - `h_sq`
- float
- :math:`\mathit{h}^2 = \frac{\sigma^2}{\sigma^2 + \tau^2}`
* - `h_sq_standard_error`
- float
- asymptotic estimate of :math:`\mathit{h}^2` standard error
**Testing alternative models**
The model is also equivalent to its augmentation
.. math::
y \sim \mathrm{N}\left(x_\star\beta_\star + X \beta, \, \sigma^2 K + \tau^2 I\right)
by an additional covariate of interest :math:`x_\star` under the
null hypothesis that the corresponding fixed effect parameter
:math:`\beta_\star` is zero. Similarly to initialization, full-rank testing
of the alternative hypothesis :math:`\beta_\star \neq 0` requires
:math:`P x_\star`, whereas the low-rank testing requires :math:`P_r x_\star`
and :math:`x_\star`.
After running :meth:`fit` to fit the null model, one can test each of a
collection of alternatives using either of two implementations of the
likelihood ratio test:
- :meth:`fit_alternatives_numpy` takes one or two ndarrays. It is a pure Python
method that evaluates alternatives serially on leader (master).
- :meth:`fit_alternatives` takes one or two paths to block matrices. It
evaluates alternatives in parallel on the workers.
Per alternative, both have computational complexity
.. math::
\mathit{O}(rp + p^3).
Parameters
----------
py: :class:`numpy.ndarray`
Projected response vector :math:`P_r y` with shape :math:`(r)`.
px: :class:`numpy.ndarray`
Projected design matrix :math:`P_r X` with shape :math:`(r, p)`.
s: :class:`numpy.ndarray`
Eigenvalues vector :math:`S` with shape :math:`(r)`.
y: :class:`numpy.ndarray`, optional
Response vector with shape :math:`(n)`.
Include for low-rank inference.
x: :class:`numpy.ndarray`, optional
Design matrix with shape :math:`(n, p)`.
Include for low-rank inference.
p_path: :class:`str`, optional
Path at which :math:`P` has been stored as a block matrix.
"""
@typecheck_method(py=np.ndarray,
px=np.ndarray,
s=np.ndarray,
y=nullable(np.ndarray),
x=nullable(np.ndarray),
p_path=nullable(str))
def __init__(self, py, px, s, y=None, x=None, p_path=None):
if y is None and x is None:
low_rank = False
elif y is not None and x is not None:
low_rank = True
else:
raise ValueError('for low-rank, set both y and x; for full-rank, do not set y or x.')
_check_dims(py, 'py', 1)
_check_dims(px, 'px', 2)
_check_dims(s, 's', 1)
r = s.size
f = px.shape[1]
if py.size != r:
raise ValueError("py and s must have the same size")
if px.shape[0] != r:
raise ValueError("px must have the same number of rows as the size of s")
if low_rank:
_check_dims(y, 'y', 1)
_check_dims(x, 'x', 2)
n = y.size
if n <= r:
raise ValueError("size of y must be larger than the size of s")
if x.shape[0] != n:
raise ValueError("x must have the same number of rows as the size of y")
if x.shape[1] != f:
raise ValueError("px and x must have the same number columns")
else:
n = r
if p_path is not None:
n_rows, n_cols = BlockMatrix.read(p_path).shape
if n_cols != n:
raise ValueError("LinearMixedModel: Number of columns in the block "
f"matrix at 'p_path' ({n_cols}) must equal "
f"the size of 'y' ({n})")
if n_rows != r:
raise ValueError("LinearMixedModel: Number of rows in the block "
f"matrix at 'p_path' ({n_rows}) must equal "
f"the size of 'py' ({r})")
self.low_rank = low_rank
self.n = n
self.f = f
self.r = r
self.py = py
self.px = px
self.s = s
self.y = y
self.x = x
self.p_path = p_path
self._check_dof()
self.beta = None
self.sigma_sq = None
self.tau_sq = None
self.gamma = None
self.log_gamma = None
self.h_sq = None
self.h_sq_standard_error = None
self.optimize_result = None
self._fitted = False
if low_rank:
self._yty = y @ y
self._xty = x.T @ y
self._xtx = x.T @ x
self._dof = n - f
self._d = None
self._ydy = None
self._xdy = None
self._xdx = None
self._dof_alt = n - (f + 1)
self._d_alt = None
self._ydy_alt = None
self._xdy_alt = np.zeros(f + 1)
self._xdx_alt = np.zeros((f + 1, f + 1))
self._residual_sq = None
self._scala_model = None
def _reset(self):
self._fitted = False
self.beta = None
self.sigma_sq = None
self.tau_sq = None
self.gamma = None
self.log_gamma = None
self.h_sq = None
self.h_sq_standard_error = None
self.optimize_result = None
def compute_neg_log_reml(self, log_gamma, return_parameters=False):
r"""Compute negative log REML constrained to a fixed value
of :math:`\log{\gamma}`.
This function computes the triple :math:`(\beta, \sigma^2, \tau^2)` with
:math:`\gamma = \frac{\sigma^2}{\tau^2}` at which the restricted
likelihood is maximized and returns the negative of the restricted log
likelihood at these parameters (shifted by the constant defined below).
The implementation has complexity :math:`\mathit{O}(rp^2 + p^3)` and is
inspired by `FaST linear mixed models for genome-wide association studies (2011)
<https://www.nature.com/articles/nmeth.1681>`__.
The formulae follow from `Bayesian Inference for Variance Components Using Only Error Contrasts (1974)
<http://faculty.dbmi.pitt.edu/day/Bioinf2132-advanced-Bayes-and-R/previousDocuments/Bioinf2132-documents-2016/2016-11-22/Harville-1974.pdf>`__.
Harville derives that for fixed covariance :math:`V`, the restricted
likelihood of the variance parameter :math:`V` in the model
.. math::
y \sim \mathrm{N}(X \beta, \, V)
is given by
.. math::
(2\pi)^{-\frac{1}{2}(n - p)}
\det(X^T X)^\frac{1}{2}
\det(V)^{-\frac{1}{2}}
\det(X^T V^{-1} X)^{-\frac{1}{2}}
e^{-\frac{1}{2}(y - X\hat\beta)^T V^{-1}(y - X\hat\beta)}.
with
.. math::
\hat\beta = (X^T V^{-1} X)^{-1} X^T V^{-1} y.
In our case, the variance is
.. math::
V = \sigma^2 K + \tau^2 I = \sigma^2 (K + \gamma^{-1} I)
which is determined up to scale by any fixed value of the ratio
:math:`\gamma`. So for input :math:`\log \gamma`, the
negative restricted log likelihood is minimized at
:math:`(\hat\beta, \hat\sigma^2)` with :math:`\hat\beta` as above and
.. math::
\hat\sigma^2 = \frac{1}{n - p}(y - X\hat\beta)^T (K + \gamma^{-1} I)^{-1}(y - X\hat\beta).
For :math:`\hat V` at this :math:`(\hat\beta, \hat\sigma^2, \gamma)`,
the exponent in the likelihood reduces to :math:`-\frac{1}{2}(n-p)`, so
the negative restricted log likelihood may be expressed as
.. math::
\frac{1}{2}\left(\log \det(\hat V) + \log\det(X^T \hat V^{-1} X)\right) + C
where
.. math::
C = \frac{1}{2}\left(n - p + (n - p)\log(2\pi) - \log\det(X^T X)\right)
only depends on :math:`X`. :meth:`compute_neg_log_reml` returns the value of
the first term, omitting the constant term.
Parameters
----------
log_gamma: :obj:`float`
Value of :math:`\log{\gamma}`.
return_parameters:
If ``True``, also return :math:`\beta`, :math:`\sigma^2`,
and :math:`\tau^2`.
Returns
-------
:obj:`float` or (:obj:`float`, :class:`numpy.ndarray`, :obj:`float`, :obj:`float`)
If `return_parameters` is ``False``, returns (shifted) negative log REML.
Otherwise, returns (shifted) negative log REML, :math:`\beta`, :math:`\sigma^2`,
and :math:`\tau^2`.
"""
from scipy.linalg import solve, LinAlgError
gamma = np.exp(log_gamma)
d = 1 / (self.s + 1 / gamma)
logdet_d = np.sum(np.log(d)) + (self.n - self.r) * log_gamma
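# d is the diagonal of (S + gamma^{-1} I)^{-1} restricted to the r retained
# eigenvalues; the (n - r) * log_gamma term accounts for the trailing zero
# eigenvalues in the low-rank case, whose diagonal entries equal gamma.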
if self.low_rank:
d -= gamma
dpy = d * self.py
ydy = self.py @ dpy + gamma * self._yty
xdy = self.px.T @ dpy + gamma * self._xty
xdx = (self.px.T * d) @ self.px + gamma * self._xtx
else:
dpy = d * self.py
ydy = self.py @ dpy
xdy = self.px.T @ dpy
xdx = (self.px.T * d) @ self.px
try:
beta = solve(xdx, xdy, assume_a='pos')
residual_sq = ydy - xdy.T @ beta
sigma_sq = residual_sq / self._dof
tau_sq = sigma_sq / gamma
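# Shifted negative restricted log likelihood from the docstring:
# (log det(X^T D X) - log det D + dof * log sigma_sq) / 2, omitting the constant C.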
neg_log_reml = (np.linalg.slogdet(xdx)[1] - logdet_d + self._dof * np.log(sigma_sq)) / 2
self._d, self._ydy, self._xdy, self._xdx = d, ydy, xdy, xdx # used in fit
if return_parameters:
return neg_log_reml, beta, sigma_sq, tau_sq
else:
return neg_log_reml
except LinAlgError as e:
raise Exception('linear algebra error while solving for REML estimate') from e
@typecheck_method(log_gamma=nullable(numeric), bounds=tupleof(numeric), tol=float, maxiter=int)
def fit(self, log_gamma=None, bounds=(-8.0, 8.0), tol=1e-8, maxiter=500):
r"""Find the triple :math:`(\beta, \sigma^2, \tau^2)` maximizing REML.
This method sets the attributes `beta`, `sigma_sq`, `tau_sq`, `gamma`,
`log_gamma`, `h_sq`, and `h_sq_standard_error` as described in the
top-level class documentation.
If `log_gamma` is provided, :meth:`fit` finds the REML solution
with :math:`\log{\gamma}` constrained to this value. In this case,
`h_sq_standard_error` is ``None`` since `h_sq` is not estimated.
Otherwise, :meth:`fit` searches for the value of :math:`\log{\gamma}`
that minimizes :meth:`compute_neg_log_reml`, and also sets the attribute
`optimize_result` of type `scipy.optimize.OptimizeResult
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html>`__.
Parameters
----------
log_gamma: :obj:`float`, optional
If provided, the solution is constrained to have this value of
:math:`\log{\gamma}`.
bounds: :obj:`float`, :obj:`float`
Lower and upper bounds for :math:`\log{\gamma}`.
tol: :obj:`float`
Absolute tolerance for optimizing :math:`\log{\gamma}`.
maxiter: :obj:`float`
Maximum number of iterations for optimizing :math:`\log{\gamma}`.
"""
if self._fitted:
self._reset()
fit_log_gamma = log_gamma is None
if fit_log_gamma:
from scipy.optimize import minimize_scalar
self.optimize_result = minimize_scalar(
self.compute_neg_log_reml,
method='bounded',
bounds=bounds,
options={'xatol': tol, 'maxiter': maxiter})
if self.optimize_result.success:
if self.optimize_result.x - bounds[0] < 0.001:
raise Exception("failed to fit log_gamma: optimum within 0.001 of lower bound.")
elif bounds[1] - self.optimize_result.x < 0.001:
raise Exception("failed to fit log_gamma: optimum within 0.001 of upper bound.")
else:
self.log_gamma = self.optimize_result.x
else:
raise Exception(f'failed to fit log_gamma:\n {self.optimize_result}')
else:
self.log_gamma = log_gamma
_, self.beta, self.sigma_sq, self.tau_sq = self.compute_neg_log_reml(self.log_gamma, return_parameters=True)
self.gamma = np.exp(self.log_gamma)
self.h_sq = self.sigma_sq / (self.sigma_sq + self.tau_sq)
self._residual_sq = self.sigma_sq * self._dof
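# Buffers for testing alternatives: row and column 0 are reserved for the
# covariate of interest x_star and are filled per test in
# _fit_alternative_numpy; the remaining entries reuse the null fit above.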
self._d_alt = self._d
self._ydy_alt = self._ydy
self._xdy_alt[1:] = self._xdy
self._xdx_alt[1:, 1:] = self._xdx
if fit_log_gamma:
self.h_sq_standard_error = self._estimate_h_sq_standard_error()
self._fitted = True
def _estimate_h_sq_standard_error(self):
epsilon = 1e-4 # parabolic interpolation radius in log_gamma space
lg = self.log_gamma + np.array([-epsilon, 0.0, epsilon])
h2 = 1 / (1 + np.exp(-lg))
nll = [self.compute_neg_log_reml(lgi) for lgi in lg]
if nll[1] > nll[0] or nll[1] > nll[2]:
i = 0 if nll[1] > nll[0] else 2
raise Exception(f'Minimum of negative log likelihood fit as {nll[1]} at log_gamma={lg[1]},'
f'\n but found smaller value of {nll[i]} at log_gamma={lg[i]}.'
f'\n Investigate by plotting the negative log likelihood function.')
# Asymptotically near MLE, nLL = a * h2^2 + b * h2 + c with a = 1 / (2 * se^2)
# By Lagrange interpolation:
a = ((h2[2] * (nll[1] - nll[0]) + h2[1] * (nll[0] - nll[2]) + h2[0] * (nll[2] - nll[1]))
/ ((h2[1] - h2[0]) * (h2[0] - h2[2]) * (h2[2] - h2[1])))
return 1 / np.sqrt(2 * a)
def h_sq_normalized_lkhd(self):
r"""Estimate the normalized likelihood of :math:`\mathit{h}^2` over the
discrete grid of percentiles.
Examples
--------
Plot the estimated normalized likelihood function:
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> plt.plot(range(101), model.h_sq_normalized_lkhd()) # doctest: +SKIP
Notes
-----
This method may be used to visualize the approximate posterior on
:math:`\mathit{h}^2` under a flat prior.
The resulting ndarray ``a`` has length 101 with ``a[i]`` equal to the
maximum likelihood over all :math:`\beta` and :math:`\sigma^2` with
:math:`\mathit{h}^2` constrained to ``i / 100``. The values for
``1 <= i <= 99`` are normalized to sum to 1, and ``a[0]`` and ``a[100]``
are set to ``nan``.
Returns
-------
:class:`numpy.ndarray` of :obj:`float`
Normalized likelihood values for :math:`\mathit{h}^2`.
"""
log_lkhd = np.zeros(101, dtype=np.float64)
log_lkhd[0], log_lkhd[100] = np.nan, np.nan
for h2 in range(1, 100):
gamma = h2 / (100.0 - h2)
log_lkhd[h2] = -self.compute_neg_log_reml(np.log(gamma))
log_lkhd -= np.max(log_lkhd[1:-1])
lkhd = np.exp(log_lkhd)
lkhd /= np.sum(lkhd[1:-1])
return lkhd
@typecheck_method(pa_t_path=str,
a_t_path=nullable(str),
partition_size=nullable(int))
def fit_alternatives(self, pa_t_path, a_t_path=None, partition_size=None):
r"""Fit and test alternative model for each augmented design matrix in parallel.
Notes
-----
The alternative model is fit using REML constrained to the value of
:math:`\gamma` set by :meth:`fit`.
The likelihood ratio test of fixed effect parameter :math:`\beta_\star`
uses (non-restricted) maximum likelihood:
.. math::
\chi^2 = 2 \log\left(\frac{
\max_{\beta_\star, \beta, \sigma^2}\mathrm{N}
\left(y \, | \, x_\star \beta_\star + X \beta; \, \sigma^2(K + \gamma^{-1}I)\right)}
{\max_{\beta, \sigma^2} \mathrm{N}
\left(y \, | \, x_\star \cdot 0 + X \beta; \, \sigma^2(K + \gamma^{-1}I)\right)}
\right)
The p-value is given by the tail probability under a chi-squared
distribution with one degree of freedom.
The resulting table has the following fields:
.. list-table::
:header-rows: 1
* - Field
- Type
- Value
* - `idx`
- int64
- Index of augmented design matrix.
* - `beta`
- float64
- :math:`\beta_\star`
* - `sigma_sq`
- float64
- :math:`\sigma^2`
* - `chi_sq`
- float64
- :math:`\chi^2`
* - `p_value`
- float64
- p-value
:math:`(P_r A)^T` and :math:`A^T` (if given) must have the same number
of rows (augmentations). These rows are grouped into partitions for
parallel processing. The number of partitions equals the ceiling of
``n_rows / partition_size``, and should be at least the number of cores
to make use of all cores. By default, there is one partition per row of
blocks in :math:`(P_r A)^T`. Setting the partition size to an exact
(rather than approximate) divisor or multiple of the block size reduces
superfluous shuffling of data.
The number of columns in each block matrix must be less than :math:`2^{31}`.
Warning
-------
The block matrices must be stored in row-major format, as results
from :meth:`.BlockMatrix.write` with ``force_row_major=True`` and from
:meth:`.BlockMatrix.write_from_entry_expr`. Otherwise, this method
will produce an error message.
Parameters
----------
pa_t_path: :class:`str`
Path to block matrix :math:`(P_r A)^T` with shape :math:`(m, r)`.
Each row is a projected augmentation :math:`P_r x_\star` of :math:`P_r X`.
a_t_path: :class:`str`, optional
Path to block matrix :math:`A^T` with shape :math:`(m, n)`.
Each row is an augmentation :math:`x_\star` of :math:`X`.
Include for low-rank inference.
partition_size: :obj:`int`, optional
Number of rows to process per partition.
Default given by block size of :math:`(P_r A)^T`.
Returns
-------
:class:`.Table`
Table of results for each augmented design matrix.
"""
from hail.table import Table
self._check_dof(self.f + 1)
if self.low_rank and a_t_path is None:
raise ValueError('model is low-rank so a_t is required.')
elif not (self.low_rank or a_t_path is None):
raise ValueError('model is full-rank so a_t must not be set.')
if self._scala_model is None:
self._set_scala_model()
backend = Env.spark_backend('LinearMixedModel.fit_alternatives')
jfs = backend.fs._jfs
if partition_size is None:
block_size = Env.hail().linalg.BlockMatrix.readMetadata(jfs, pa_t_path).blockSize()
partition_size = block_size
elif partition_size <= 0:
raise ValueError(f'partition_size must be positive, found {partition_size}')
jpa_t = Env.hail().linalg.RowMatrix.readBlockMatrix(jfs, pa_t_path, partition_size)
if a_t_path is None:
maybe_ja_t = None
else:
maybe_ja_t = Env.hail().linalg.RowMatrix.readBlockMatrix(jfs, a_t_path, partition_size)
return Table._from_java(backend._jbackend.pyFitLinearMixedModel(
self._scala_model, jpa_t, maybe_ja_t))
@typecheck_method(pa=np.ndarray, a=nullable(np.ndarray), return_pandas=bool)
def fit_alternatives_numpy(self, pa, a=None, return_pandas=False):
r"""Fit and test alternative model for each augmented design matrix.
Notes
-----
This Python-only implementation runs serially on the leader (master). See
the scalable implementation :meth:`fit_alternatives` for documentation
of the returned table.
Parameters
----------
pa: :class:`numpy.ndarray`
Projected matrix :math:`P_r A` of alternatives with shape :math:`(r, m)`.
Each column is a projected augmentation :math:`P_r x_\star` of :math:`P_r X`.
a: :class:`numpy.ndarray`, optional
Matrix :math:`A` of alternatives with shape :math:`(n, m)`.
Each column is an augmentation :math:`x_\star` of :math:`X`.
Required for low-rank inference.
return_pandas: :obj:`bool`
If true, return pandas dataframe. If false, return Hail table.
Returns
-------
:class:`.Table` or :class:`.pandas.DataFrame`
Table of results for each augmented design matrix.
"""
self._check_dof(self.f + 1)
if not self._fitted:
raise Exception("null model is not fit. Run 'fit' first.")
n_cols = pa.shape[1]
assert pa.shape[0] == self.r
if self.low_rank:
assert a.shape[0] == self.n and a.shape[1] == n_cols
data = [(i,) + self._fit_alternative_numpy(pa[:, i], a[:, i]) for i in range(n_cols)]
else:
data = [(i,) + self._fit_alternative_numpy(pa[:, i], None) for i in range(n_cols)]
df = pd.DataFrame.from_records(data, columns=['idx', 'beta', 'sigma_sq', 'chi_sq', 'p_value'])
if return_pandas:
return df
else:
return Table.from_pandas(df, key='idx')
def _fit_alternative_numpy(self, pa, a):
from scipy.linalg import solve, LinAlgError
from scipy.stats.distributions import chi2
gamma = self.gamma
dpa = self._d_alt * pa
# single thread => no need to copy
ydy = self._ydy_alt
xdy = self._xdy_alt
xdx = self._xdx_alt
if self.low_rank:
xdy[0] = self.py @ dpa + gamma * (self.y @ a)
xdx[0, 0] = pa @ dpa + gamma * (a @ a)
xdx[0, 1:] = self.px.T @ dpa + gamma * (self.x.T @ a)
else:
xdy[0] = self.py @ dpa
xdx[0, 0] = pa @ dpa
xdx[0, 1:] = self.px.T @ dpa
try:
beta = solve(xdx, xdy, assume_a='pos') # only uses upper triangle
residual_sq = ydy - xdy.T @ beta
sigma_sq = residual_sq / self._dof_alt
chi_sq = self.n * np.log(self._residual_sq / residual_sq) # division => precision
p_value = chi2.sf(chi_sq, 1)
return beta[0], sigma_sq, chi_sq, p_value
except LinAlgError:
return tuple(4 * [float('nan')])
def _set_scala_model(self):
from hail.utils.java import Env
from hail.linalg import _jarray_from_ndarray, _breeze_from_ndarray
if not self._fitted:
raise Exception("null model is not fit. Run 'fit' first.")
self._scala_model = Env.hail().stats.LinearMixedModel.pyApply(
self.gamma,
self._residual_sq,
_jarray_from_ndarray(self.py),
_breeze_from_ndarray(self.px),
_jarray_from_ndarray(self._d_alt),
self._ydy_alt,
_jarray_from_ndarray(self._xdy_alt),
_breeze_from_ndarray(self._xdx_alt),
_jarray_from_ndarray(self.y) if self.low_rank else None,
_breeze_from_ndarray(self.x) if self.low_rank else None
)
def _check_dof(self, f=None):
if f is None:
f = self.f
dof = self.n - f
if dof <= 0:
raise ValueError(f"{self.n} {plural('observation', self.n)} with {f} fixed {plural('effect', f)} "
f"implies {dof} {plural('degree', dof)} of freedom. Must be positive.")
@classmethod
@typecheck_method(y=np.ndarray,
x=np.ndarray,
k=np.ndarray,
p_path=nullable(str),
overwrite=bool)
def from_kinship(cls, y, x, k, p_path=None, overwrite=False):
r"""Initializes a model from :math:`y`, :math:`X`, and :math:`K`.
Examples
--------
>>> from hail.stats import LinearMixedModel
>>> y = np.array([0.0, 1.0, 8.0, 9.0])
>>> x = np.array([[1.0, 0.0],
... [1.0, 2.0],
... [1.0, 1.0],
... [1.0, 4.0]])
>>> k = np.array([[ 1. , -0.8727875 , 0.96397335, 0.94512946],
... [-0.8727875 , 1. , -0.93036112, -0.97320323],
... [ 0.96397335, -0.93036112, 1. , 0.98294169],
... [ 0.94512946, -0.97320323, 0.98294169, 1. ]])
>>> model, p = LinearMixedModel.from_kinship(y, x, k)
>>> model.fit()
>>> model.h_sq # doctest: +SKIP_OUTPUT_CHECK
0.2525148830695317
>>> model.s # doctest: +SKIP_OUTPUT_CHECK
array([3.83501295, 0.13540343, 0.02454114, 0.00504248])
Truncate to a rank :math:`r=2` model:
>>> r = 2
>>> s_r = model.s[:r]
>>> p_r = p[:r, :]
>>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x)
>>> model_r.fit()
>>> model_r.h_sq  # doctest: +SKIP_OUTPUT_CHECK
0.25193197591429695
Notes
-----
This method eigendecomposes :math:`K = P^T S P` on the leader (master)
and returns ``LinearMixedModel(p @ y, p @ x, s)`` and ``p``.
The performance of eigendecomposition depends critically on the number
of leader (master) cores and the NumPy / SciPy configuration, viewable
with ``np.show_config()``. For Intel machines, we recommend installing
the `MKL <https://anaconda.org/anaconda/mkl>`__ package for Anaconda.
`k` must be positive semi-definite; symmetry is not checked as only the
lower triangle is used.
Parameters
----------
y: :class:`numpy.ndarray`
:math:`n` vector of observations.
x: :class:`numpy.ndarray`
:math:`n \times p` matrix of fixed effects.
k: :class:`numpy.ndarray`
:math:`n \times n` positive semi-definite kernel :math:`K`.
p_path: :class:`str`, optional
Path at which to write :math:`P` as a block matrix.
overwrite: :obj:`bool`
If ``True``, overwrite an existing file at `p_path`.
Returns
-------
model: :class:`LinearMixedModel`
Model constructed from :math:`y`, :math:`X`, and :math:`K`.
p: :class:`numpy.ndarray`
Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
"""
_check_dims(y, "y", 1)
_check_dims(x, "x", 2)
_check_dims(k, "k", 2)
n = k.shape[0]
if k.shape[1] != n:
raise ValueError("from_kinship: 'k' must be a square matrix")
if y.shape[0] != n:
raise ValueError("from_kinship: 'y' and 'k' must have the same "
"number of rows")
if x.shape[0] != n:
raise ValueError("from_kinship: 'x' and 'k' must have the same "
"number of rows")
s, u = hl.linalg._eigh(k)
if s[0] < -1e12 * s[-1]:
raise Exception("from_kinship: smallest eigenvalue of 'k' is"
f"negative: {s[0]}")
# flip eigenvalues to descending order
s = np.flip(s, axis=0)
u = np.fliplr(u)
p = u.T
if p_path:
BlockMatrix.from_numpy(p).write(p_path, overwrite=overwrite)
model = LinearMixedModel(p @ y, p @ x, s, p_path=p_path)
return model, p
@classmethod
@typecheck_method(y=np.ndarray,
x=np.ndarray,
z=oneof(np.ndarray, hl.linalg.BlockMatrix),
p_path=nullable(str),
overwrite=bool,
max_condition_number=float,
complexity_bound=int)
def from_random_effects(cls, y, x, z,
p_path=None,
overwrite=False,
max_condition_number=1e-10,
complexity_bound=8192):
r"""Initializes a model from :math:`y`, :math:`X`, and :math:`Z`.
Examples
--------
>>> from hail.stats import LinearMixedModel
>>> y = np.array([0.0, 1.0, 8.0, 9.0])
>>> x = np.array([[1.0, 0.0],
... [1.0, 2.0],
... [1.0, 1.0],
... [1.0, 4.0]])
>>> z = np.array([[0.0, 0.0, 1.0],
... [0.0, 1.0, 2.0],
... [1.0, 2.0, 4.0],
... [2.0, 4.0, 8.0]])
>>> model, p = LinearMixedModel.from_random_effects(y, x, z)
>>> model.fit()
>>> model.h_sq # doctest: +SKIP_OUTPUT_CHECK
0.38205307244271675
Notes
-----
If :math:`n \leq m`, the returned model is full rank.
If :math:`n > m`, the returned model is low rank. In this case only,
eigenvalues less than or equal to `max_condition_number` times the top
eigenvalue are dropped from :math:`S`, with the corresponding
eigenvectors dropped from :math:`P`. This guards against precision
loss on left eigenvectors computed via the right gramian :math:`Z^T Z`
in :meth:`.BlockMatrix.svd`.
In either case, one can truncate to a rank :math:`r` model as follows.
If `p` is an ndarray:
>>> p_r = p[:r, :] # doctest: +SKIP
>>> s_r = model.s[:r] # doctest: +SKIP
>>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x) # doctest: +SKIP
If `p` is a block matrix:
>>> p[:r, :].write(p_r_path) # doctest: +SKIP
>>> p_r = BlockMatrix.read(p_r_path) # doctest: +SKIP
>>> s_r = model.s[:r] # doctest: +SKIP
>>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x, p_r_path) # doctest: +SKIP
This method applies no standardization to `z`.
Warning
-------
If `z` is a block matrix, then ideally `z` should be the result of
directly reading from disk (and possibly a transpose). This is most
critical if :math:`n > m`, because in this case multiplication by `z`
will result in all preceding transformations being repeated
``n / block_size`` times, as explained in :class:`.BlockMatrix`.
At least one dimension must be less than or equal to 46300.
See the warning in :meth:`.BlockMatrix.svd` for performance
considerations.
Parameters
----------
y: :class:`numpy.ndarray`
:math:`n` vector of observations :math:`y`.
x: :class:`numpy.ndarray`
:math:`n \times p` matrix of fixed effects :math:`X`.
z: :class:`numpy.ndarray` or :class:`.BlockMatrix`
:math:`n \times m` matrix of random effects :math:`Z`.
p_path: :class:`str`, optional
Path at which to write :math:`P` as a block matrix.
Required if `z` is a block matrix.
overwrite: :obj:`bool`
If ``True``, overwrite an existing file at `p_path`.
max_condition_number: :obj:`float`
Maximum condition number. Must be greater than 1e-16.
complexity_bound: :obj:`int`
Complexity bound for :meth:`.BlockMatrix.svd` when `z` is a block
matrix.
Returns
-------
model: :class:`LinearMixedModel`
Model constructed from :math:`y`, :math:`X`, and :math:`Z`.
p: :class:`numpy.ndarray` or :class:`.BlockMatrix`
Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
The type is block matrix if `z` is a block matrix and
:meth:`.BlockMatrix.svd` of `z` returns :math:`U` as a block matrix.
"""
z_is_bm = isinstance(z, BlockMatrix)
if z_is_bm and p_path is None:
raise ValueError("from_random_effects: 'p_path' required when 'z'"
"is a block matrix.")
if max_condition_number < 1e-16:
raise ValueError("from_random_effects: 'max_condition_number' must "
f"be at least 1e-16, found {max_condition_number}")
_check_dims(y, "y", 1)
_check_dims(x, "x", 2)
_check_dims(z, "z", 2)
n, m = z.shape
if y.shape[0] != n:
raise ValueError("from_random_effects: 'y' and 'z' must have the "
"same number of rows")
if x.shape[0] != n:
raise ValueError("from_random_effects: 'x' and 'z' must have the "
"same number of rows")
if z_is_bm:
u, s0, _ = z.svd(complexity_bound=complexity_bound)
p = u.T
p_is_bm = isinstance(p, BlockMatrix)
else:
u, s0, _ = hl.linalg._svd(z, full_matrices=False)
p = u.T
p_is_bm = False
s = s0 ** 2
low_rank = n > m
if low_rank:
assert np.all(np.isfinite(s))
r = int(np.searchsorted(-s, -max_condition_number * s[0]))
if r < m:
info(f'from_random_effects: model rank reduced from {m} to {r} '
f'due to ill-conditioning.'
f'\n Largest dropped eigenvalue was {s[r]}.')
s = s[:r]
p = p[:r, :]
if p_path is not None:
if p_is_bm:
p.write(p_path, overwrite=overwrite)
p = BlockMatrix.read(p_path)
else:
BlockMatrix.from_numpy(p).write(p_path, overwrite=overwrite)
if p_is_bm:
py, px = (p @ y.reshape(n, 1)).to_numpy().flatten(), (p @ x).to_numpy()
else:
py, px = p @ y, p @ x
if low_rank:
model = LinearMixedModel(py, px, s, y, x, p_path)
else:
model = LinearMixedModel(py, px, s, p_path=p_path)
return model, p
# checks agreement of model initialization
def _same(self, other, tol=1e-6, up_to_sign=True):
def same_rows_up_to_sign(a, b, atol):
assert a.shape[0] == b.shape[0]
return all(np.allclose(a[i], b[i], atol=atol)
or np.allclose(-a[i], b[i], atol=atol)
for i in range(a.shape[0]))
close = same_rows_up_to_sign if up_to_sign else np.allclose
if self.low_rank != other.low_rank:
print(f'different low_rank: {self.low_rank}, {other.low_rank}')
return False
same = True
if not close(self.py, other.py, atol=tol):
print(f'different py:\n{self.py}\n{other.py}')
same = False
if not close(self.px, other.px, atol=tol):
print(f'different px:\n{self.px}\n{other.px}')
same = False
if not np.allclose(self.s, other.s, atol=tol):
print(f'different s:\n{self.s}\n{other.s}')
same = False
if self.low_rank and not close(self.y, other.y, atol=tol):
print(f'different y:\n{self.y}\n{other.y}')
same = False
if self.low_rank and not close(self.x, other.x, atol=tol):
print(f'different x\n{self.x}\n{other.x}')
same = False
if self.p_path != other.p_path:
print(f'different p_path:\n{self.p_path}\n{other.p_path}')
same = False
return same
|
from typing import Optional, Callable, Type, Union, List, Any, Iterable
from types import TracebackType
from io import BytesIO
import asyncio
import concurrent
import dill
import functools
import sys
from hailtop.utils import secret_alnum_string, partition
import hailtop.batch_client.aioclient as low_level_batch_client
from hailtop.batch_client.parse import parse_cpu_in_mcpu
import hailtop.aiogoogle as aiogoogle
from .batch import Batch
from .backend import ServiceBackend
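# asyncio.create_task was added in Python 3.7; on 3.6 we fall back to
# asyncio.ensure_future, which schedules the coroutine in the same way here.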
if sys.version_info < (3, 7):
def create_task(coro, *, name=None): # pylint: disable=unused-argument
return asyncio.ensure_future(coro)
else:
def create_task(*args, **kwargs):
return asyncio.create_task(*args, **kwargs) # pylint: disable=no-member
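# Illustrative only: parse_cpu_in_mcpu understands Kubernetes-style millicpu
# strings, so cpu_spec_to_float('500m') is expected to give 0.5, while
# cpu_spec_to_float(2) gives 2.0.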
def cpu_spec_to_float(spec: Union[int, str]) -> float:
if isinstance(spec, str):
mcpu = parse_cpu_in_mcpu(spec)
assert mcpu is not None
return mcpu / 1000
return float(spec)
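# chunk wraps fn so that it maps over per-chunk argument lists, e.g.
# chunk(lambda x, y: x + y)([1, 2], [10, 20]) == [11, 22]; async_map uses this
# to run several calls inside a single container when chunksize > 1.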
def chunk(fn):
def chunkedfn(*args):
return [fn(*arglist) for arglist in zip(*args)]
return chunkedfn
def async_to_blocking(coro):
return asyncio.get_event_loop().run_until_complete(coro)
class BatchPoolExecutor:
"""An executor which executes Python functions in the cloud.
:class:`.concurrent.futures.ProcessPoolExecutor` and
:class:`.concurrent.futures.ThreadPoolExecutor` enable the use of all the
computer cores available on a single computer. :class:`.BatchPoolExecutor`
enables the use of an effectively arbitrary number of cloud computer cores.
Functions provided to :meth:`.submit` are serialized using `dill
<https://dill.readthedocs.io/en/latest/dill.html>`__, sent to a Python
docker container in the cloud, deserialized, and executed. The results are
serialized and returned to the machine from which :meth:`.submit` was
called. The Python version in the docker container will share a major and
minor version with the local process. The `image` parameter overrides this
behavior.
When used as a context manager (the ``with`` syntax), the executor will wait
for all jobs to finish before finishing the ``with`` statement. This
behavior can be controlled by the `wait_on_exit` parameter.
This class creates a folder ``batch-pool-executor`` at the root of the
bucket specified by the `backend`. This folder can be safely deleted after
all jobs have completed.
Examples
--------
Add ``3`` to ``6`` on a machine in the cloud and send the result back to
this machine:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... future_nine = bpe.submit(lambda: 3 + 6)
>>> future_nine.result() # doctest: +SKIP
9
:meth:`.map` facilitates the common case of executing a function on many
values in parallel:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... list(bpe.map(lambda x: x * 3, range(4)))
[0, 3, 6, 9]
Parameters
----------
name:
A name for the executor. Executors produce many batches and each batch
will include this name as a prefix.
backend:
Backend used to execute the jobs. Must be a :class:`.ServiceBackend`.
image:
The name of a Docker image used for each submitted job. The image must
include Python 3.6 or later and must have the ``dill`` Python package
installed. If you intend to use ``numpy``, ensure that OpenBLAS is also
installed. If unspecified, an image with a matching Python version and
``numpy``, ``scipy``, and ``sklearn`` installed is used.
cpus_per_job:
The number of CPU cores to allocate to each job. The default value is
``1``. The parameter is passed unaltered to :meth:`.Job.cpu`. This
parameter's value is used to set several environment variables
instructing BLAS and LAPACK to limit core use.
wait_on_exit:
If ``True`` or unspecified, wait for all jobs to complete when exiting a
context. If ``False``, do not wait. This option has no effect if this
executor is not used with the ``with`` syntax.
cleanup_bucket:
If ``True`` or unspecified, delete all temporary files in the cloud
storage bucket when this executor fully shuts down. If Python crashes
before the executor is shutdown, the files will not be deleted.
project:
If specified, the project to use when authenticating with Google
Storage. Google Storage is used to transfer serialized values between
this computer and the cloud machines that execute jobs.
"""
def __init__(self, *,
name: Optional[str] = None,
backend: Optional[ServiceBackend] = None,
image: Optional[str] = None,
cpus_per_job: Optional[Union[int, str]] = None,
wait_on_exit: bool = True,
cleanup_bucket: bool = True,
project: Optional[str] = None):
self.name = name or "BatchPoolExecutor-" + secret_alnum_string(4)
self.backend = backend or ServiceBackend()
if not isinstance(self.backend, ServiceBackend):
raise ValueError(f'BatchPoolExecutor is not compatible with {type(backend)}')
self.batches: List[Batch] = []
self.directory = self.backend.remote_tmpdir + f'batch-pool-executor/{self.name}/'
self.inputs = self.directory + 'inputs/'
self.outputs = self.directory + 'outputs/'
self.fs = aiogoogle.GoogleStorageAsyncFS(project=project)
self.futures: List[BatchPoolFuture] = []
self.finished_future_count = 0
self._shutdown = False
version = sys.version_info
if image is None:
if version.major != 3 or version.minor not in (6, 7, 8):
raise ValueError(
f'You must specify an image if you are using a Python version other than 3.6, 3.7, or 3.8 (you are using {version})')
self.image = f'hailgenetics/python-dill:{version.major}.{version.minor}-slim'
else:
self.image = image
self.cpus_per_job = cpus_per_job
self.cleanup_bucket = cleanup_bucket
self.wait_on_exit = wait_on_exit
def __enter__(self):
return self
def map(self,
fn: Callable,
*iterables: Iterable[Any],
timeout: Optional[Union[int, float]] = None,
chunksize: int = 1):
"""Call `fn` on cloud machines with arguments from `iterables`.
This function returns a generator which will produce each result in the
same order as the `iterables`, only blocking if the result is not yet
ready. You can convert the generator to a list with :class:`.list`.
Examples
--------
Do nothing, but on the cloud:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... list(bpe.map(lambda x: x, range(4)))
[0, 1, 2, 3]
Call a function with two parameters, on the cloud:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... list(bpe.map(lambda x, y: x + y,
... ["white", "cat", "best"],
... ["house", "dog", "friend"]))
["whitehouse", "catdog", "bestfriend"]
Generate products of random matrices, on the cloud:
>>> def random_product(seed):
... np.random.seed(seed)
... w = np.random.rand(1, 100)
... u = np.random.rand(100, 1)
... return float(w @ u)
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... list(bpe.map(random_product, range(4)))
[24.440006386777277, 23.325755364428026, 23.920184804993806, 25.47912882125101]
Parameters
----------
fn:
The function to execute.
iterables:
The `iterables` are zipped together and each tuple is used as
arguments to `fn`. See the second example for more detail. It is not
possible to pass keyword arguments. Each element of `iterables` must
have the same length.
timeout:
This is roughly a timeout on how long we wait on each function
call. Specifically, each call to the returned generator's
:meth:`__next__` invokes :meth:`.BatchPoolFuture.result` with this
`timeout`.
chunksize:
The number of tasks to schedule in the same docker container. Docker
containers take about 5 seconds to start. Ideally, each task should
take an order of magnitude more time than start-up time. You can
make the chunksize larger to reduce parallelism but increase the
amount of meaningful work done per-container.
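For example, ``bpe.map(fn, range(100), chunksize=10)`` starts ten
containers, each evaluating ``fn`` on ten consecutive values.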
"""
agen = async_to_blocking(
self.async_map(fn, iterables, timeout=timeout, chunksize=chunksize))
def generator_from_async_generator(aiter):
try:
while True:
yield async_to_blocking(aiter.__anext__())
except StopAsyncIteration:
return
return generator_from_async_generator(agen.__aiter__())
async def async_map(self,
fn: Callable,
iterables: Iterable[Iterable[Any]],
timeout: Optional[Union[int, float]] = None,
chunksize: int = 1):
"""Aysncio compatible version of :meth:`.map`."""
if not iterables:
return iter([])
if chunksize > 1:
list_per_argument = [list(x) for x in iterables]
n = len(list_per_argument[0])
assert all(n == len(x) for x in list_per_argument)
n_chunks = (n + chunksize - 1) // chunksize
iterables_chunks = [list(partition(n_chunks, x)) for x in list_per_argument]
iterables_chunks = [
chunk for chunk in iterables_chunks if len(chunk) > 0]
fn = chunk(fn)
iterables = iterables_chunks
submit_tasks = [asyncio.ensure_future(self.async_submit(fn, *arguments))
for arguments in zip(*iterables)]
try:
bp_futures = [await t for t in submit_tasks]
except:
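# If any submission fails, cancel batches that were already created and
# cancel still-pending submission tasks before propagating the error.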
for t in submit_tasks:
if t.done() and not t.exception():
await t.result().async_cancel()
elif not t.done():
t.cancel()
raise
async def async_result_or_cancel_all(future):
try:
return await future.async_result(timeout=timeout)
except:
await asyncio.gather(*[bp_fut.async_cancel() for bp_fut in bp_futures], return_exceptions=True)
raise
if chunksize > 1:
return (val
for future in bp_futures
for val in await async_result_or_cancel_all(future))
return (await async_result_or_cancel_all(future)
for future in bp_futures)
def submit(self,
fn: Callable,
*args: Any,
**kwargs: Any
) -> 'BatchPoolFuture':
"""Call `fn` on a cloud machine with all remaining arguments and keyword arguments.
The function, any objects it references, the arguments, and the keyword
arguments will be serialized to the cloud machine. Python modules are
not serialized, so you must ensure any needed Python modules and
packages are already present in the underlying Docker image. For more
details see the `image` parameter of :class:`.BatchPoolExecutor`.
This function does not return the function's output; it returns a
:class:`.BatchPoolFuture` whose :meth:`.BatchPoolFuture.result` method
can be used to access the value.
Examples
--------
Do nothing, but on the cloud:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... future = bpe.submit(lambda x: x, 4)
... future.result()
4
Call a function with two arguments and one keyword argument, on the
cloud:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... future = bpe.submit(lambda x, y, z: x + y + z,
... "poly", "ethyl", z="ene")
... future.result()
"polyethylene"
Generate a product of two random matrices, on the cloud:
>>> def random_product(seed):
... np.random.seed(seed)
... w = np.random.rand(1, 100)
... u = np.random.rand(100, 1)
... return float(w @ u)
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... future = bpe.submit(random_product, 1)
... future.result()
23.325755364428026
Parameters
----------
fn:
The function to execute.
args:
Arguments for the function.
kwargs:
Keyword arguments for the function.
"""
return async_to_blocking(
self.async_submit(fn, *args, **kwargs))
async def async_submit(self,
unapplied: Callable,
*args: Any,
**kwargs: Any
) -> 'BatchPoolFuture':
"""Aysncio compatible version of :meth:`BatchPoolExecutor.submit`."""
if self._shutdown:
raise RuntimeError('BatchPoolExecutor has already been shutdown.')
try:
name = unapplied.__name__
except AttributeError:
name = '<anonymous>'
name = f'{name}-{secret_alnum_string(4)}'
batch = Batch(name=self.name + '-' + name,
backend=self.backend,
default_image=self.image)
self.batches.append(batch)
j = batch.new_job(name)
pipe = BytesIO()
dill.dump(functools.partial(unapplied, *args, **kwargs), pipe, recurse=True)
pipe.seek(0)
pickledfun_remote = self.inputs + f'{name}/pickledfun'
await self.fs.write(pickledfun_remote, pipe.getvalue())
pickledfun_local = batch.read_input(pickledfun_remote)
thread_limit = "1"
if self.cpus_per_job:
j.cpu(self.cpus_per_job)
thread_limit = str(int(max(1.0, cpu_spec_to_float(self.cpus_per_job))))
j.env("OMP_NUM_THREADS", thread_limit)
j.env("OPENBLAS_NUM_THREADS", thread_limit)
j.env("MKL_NUM_THREADS", thread_limit)
j.env("VECLIB_MAXIMUM_THREADS", thread_limit)
j.env("NUMEXPR_NUM_THREADS", thread_limit)
j.command('set -ex')
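# The job runs a small inline Python program: it unpickles the staged function
# with dill, calls it, and writes a dill-pickled (result, None) pair on success
# or (exception, formatted traceback) on failure to the job's output file.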
j.command(f'''python3 -c "
import base64
import dill
import traceback
with open(\\"{j.ofile}\\", \\"wb\\") as out:
try:
with open(\\"{pickledfun_local}\\", \\"rb\\") as f:
dill.dump((dill.load(f)(), None), out, recurse=True)
except Exception as e:
print(\\"BatchPoolExecutor encountered an exception:\\")
traceback.print_exc()
dill.dump((e, traceback.format_exception(type(e), e, e.__traceback__)), out, recurse=True)
"''')
output_gcs = self.outputs + f'{name}/output'
batch.write_output(j.ofile, output_gcs)
backend_batch = batch.run(wait=False,
disable_progress_bar=True)._async_batch
try:
return BatchPoolFuture(self,
backend_batch,
low_level_batch_client.Job.submitted_job(
backend_batch, 1),
output_gcs)
except:
await backend_batch.cancel()
raise
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType]):
self.shutdown(wait=self.wait_on_exit)
def _add_future(self, f):
self.futures.append(f)
def _finish_future(self):
self.finished_future_count += 1
if self._shutdown and self.finished_future_count == len(self.futures):
self._cleanup()
def shutdown(self, wait: bool = True):
"""Allow temporary resources to be cleaned up.
Until shutdown is called, some temporary cloud storage files will
persist. After shutdown has been called *and* all outstanding jobs have
completed, these files will be deleted.
Parameters
----------
wait:
If true, wait for all jobs to complete before returning from this
method.
"""
if wait:
async def ignore_exceptions(f):
try:
await f.async_result()
except Exception:
pass
async_to_blocking(
asyncio.gather(*[ignore_exceptions(f) for f in self.futures]))
if self.finished_future_count == len(self.futures):
self._cleanup()
self._shutdown = True
def _cleanup(self):
if self.cleanup_bucket:
async_to_blocking(self.fs.rmtree(None, self.directory))
async_to_blocking(self.fs.close())
self.backend.close()
class BatchPoolFuture:
def __init__(self,
executor: BatchPoolExecutor,
batch: low_level_batch_client.Batch,
job: low_level_batch_client.Job,
output_file: str):
self.executor = executor
self.batch = batch
self.job = job
self.output_file = output_file
self.fetch_coro = asyncio.ensure_future(self._async_fetch_result())
executor._add_future(self)
def cancel(self):
"""Cancel this job if it has not yet been cancelled.
``True`` is returned if the job is cancelled. ``False`` is returned if
the job has already completed.
"""
return async_to_blocking(self.async_cancel())
async def async_cancel(self):
"""Asynchronously cancel this job.
``True`` is returned if the job is cancelled. ``False`` is returned if
the job has already completed.
"""
if self.fetch_coro.cancelled():
return False
if self.fetch_coro.done():
# retrieve any exceptions raised
self.fetch_coro.result()
return False
self.fetch_coro.cancel()
await asyncio.wait([self.fetch_coro])
return True
def cancelled(self):
"""Returns ``True`` if :meth:`.cancel` was called before a value was produced.
"""
return self.fetch_coro.cancelled()
def running(self): # pylint: disable=no-self-use
"""Always returns False.
This future can always be cancelled, so this function always returns False.
"""
return False
def done(self):
"""Returns `True` if the function is complete and not cancelled.
"""
return self.fetch_coro.done()
def result(self, timeout: Optional[Union[float, int]] = None):
"""Blocks until the job is complete.
If the job has been cancelled, this method raises a
:class:`.concurrent.futures.CancelledError`.
If the job has timed out, this method raises an
:class:`.concurrent.futures.TimeoutError`.
Parameters
----------
timeout:
Wait this long before raising a timeout error.
"""
try:
return async_to_blocking(self.async_result(timeout))
except asyncio.TimeoutError as e:
raise concurrent.futures.TimeoutError() from e
async def async_result(self, timeout: Optional[Union[float, int]] = None):
"""Asynchronously wait until the job is complete.
If the job has been cancelled, this method raises a
:class:`.concurrent.futures.CancelledError`.
If the job has timed out, this method raises an
:class"`.concurrent.futures.TimeoutError`.
Parameters
----------
timeout:
Wait this long before raising a timeout error.
"""
if self.cancelled():
raise concurrent.futures.CancelledError()
try:
return await asyncio.wait_for(asyncio.shield(self.fetch_coro), timeout=timeout)
except asyncio.TimeoutError as e:
raise concurrent.futures.TimeoutError() from e
async def _async_fetch_result(self):
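# The job writes a dill-pickled (value, traceback) pair: traceback is None on
# success; otherwise value is the exception raised remotely.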
try:
await self.job.wait()
main_container_status = self.job._status['status']['container_statuses']['main']
if main_container_status['state'] == 'error':
raise ValueError(
f"submitted job failed:\n{main_container_status["error"]}")
value, traceback = dill.loads(
await self.executor.fs.read(self.output_file))
if traceback is None:
return value
assert isinstance(value, BaseException)
self.value = None
traceback = ''.join(traceback)
raise ValueError(f'submitted job failed:\n{traceback}')
finally:
await self.batch.cancel()
self.executor._finish_future()
def exception(self, timeout: Optional[Union[float, int]] = None):
"""Block until the job is complete and raise any exceptions.
"""
if self.cancelled():
raise concurrent.futures.CancelledError()
self.result(timeout)
def add_done_callback(self, fn):
"""NOT IMPLEMENTED
"""
raise NotImplementedError()
| from typing import Optional, Callable, Type, Union, List, Any, Iterable
from types import TracebackType
from io import BytesIO
import asyncio
import concurrent
import dill
import functools
import sys
from hailtop.utils import secret_alnum_string, partition
import hailtop.batch_client.aioclient as low_level_batch_client
from hailtop.batch_client.parse import parse_cpu_in_mcpu
import hailtop.aiogoogle as aiogoogle
from .batch import Batch
from .backend import ServiceBackend
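# asyncio.create_task was added in Python 3.7; on 3.6 this shim falls back to
# asyncio.ensure_future, which behaves equivalently for these purposes.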
if sys.version_info < (3, 7):
def create_task(coro, *, name=None): # pylint: disable=unused-argument
return asyncio.ensure_future(coro)
else:
def create_task(*args, **kwargs):
return asyncio.create_task(*args, **kwargs) # pylint: disable=no-member
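# Normalise a CPU request to a float number of cores: ints/floats pass through,
# while string specs (e.g. Kubernetes-style "500m") are converted via
# parse_cpu_in_mcpu from millicores to cores.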
def cpu_spec_to_float(spec: Union[int, str]) -> float:
if isinstance(spec, str):
mcpu = parse_cpu_in_mcpu(spec)
assert mcpu is not None
return mcpu / 1000
return float(spec)
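# chunk(fn) adapts fn so one job can evaluate a whole chunk of calls: chunkedfn
# receives one list per positional argument and returns the list of results,
# e.g. chunk(operator.add)([1, 2], [10, 20]) == [11, 22].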
def chunk(fn):
def chunkedfn(*args):
return [fn(*arglist) for arglist in zip(*args)]
return chunkedfn
def async_to_blocking(coro):
return asyncio.get_event_loop().run_until_complete(coro)
class BatchPoolExecutor:
"""An executor which executes Python functions in the cloud.
:class:`.concurrent.futures.ProcessPoolExecutor` and
:class:`.concurrent.futures.ThreadPoolExecutor` enable the use of all the
computer cores available on a single computer. :class:`.BatchPoolExecutor`
enables the use of an effectively arbitrary number of cloud computer cores.
Functions provided to :meth:`.submit` are serialized using `dill
<https://dill.readthedocs.io/en/latest/dill.html>`__, sent to a Python
docker container in the cloud, deserialized, and executed. The results are
serialized and returned to the machine from which :meth:`.submit` was
called. The Python version in the docker container will share a major and
minor version with the local process. The `image` parameter overrides this
behavior.
When used as a context manager (the ``with`` syntax), the executor will wait
for all jobs to finish before finishing the ``with`` statement. This
behavior can be controlled by the `wait_on_exit` parameter.
This class creates a folder ``batch-pool-executor`` at the root of the
bucket specified by the `backend`. This folder can be safely deleted after
all jobs have completed.
Examples
--------
Add ``3`` to ``6`` on a machine in the cloud and send the result back to
this machine:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... future_nine = bpe.submit(lambda: 3 + 6)
>>> future_nine.result() # doctest: +SKIP
9
:meth:`.map` facilitates the common case of executing a function on many
values in parallel:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... list(bpe.map(lambda x: x * 3, range(4)))
[0, 3, 6, 9]
Parameters
----------
name:
A name for the executor. Executors produce many batches and each batch
will include this name as a prefix.
backend:
Backend used to execute the jobs. Must be a :class:`.ServiceBackend`.
image:
The name of a Docker image used for each submitted job. The image must
include Python 3.6 or later and must have the ``dill`` Python package
installed. If you intend to use ``numpy``, ensure that OpenBLAS is also
installed. If unspecified, an image with a matching Python version and
``numpy``, ``scipy``, and ``sklearn`` installed is used.
cpus_per_job:
The number of CPU cores to allocate to each job. The default value is
``1``. The parameter is passed unaltered to :meth:`.Job.cpu`. This
parameter's value is used to set several environment variables
instructing BLAS and LAPACK to limit core use.
wait_on_exit:
If ``True`` or unspecified, wait for all jobs to complete when exiting a
context. If ``False``, do not wait. This option has no effect if this
executor is not used with the ``with`` syntax.
cleanup_bucket:
If ``True`` or unspecified, delete all temporary files in the cloud
storage bucket when this executor fully shuts down. If Python crashes
before the executor is shut down, the files will not be deleted.
project:
If specified, the project to use when authenticating with Google
Storage. Google Storage is used to transfer serialized values between
this computer and the cloud machines that execute jobs.
"""
def __init__(self, *,
name: Optional[str] = None,
backend: Optional[ServiceBackend] = None,
image: Optional[str] = None,
cpus_per_job: Optional[Union[int, str]] = None,
wait_on_exit: bool = True,
cleanup_bucket: bool = True,
project: Optional[str] = None):
self.name = name or "BatchPoolExecutor-" + secret_alnum_string(4)
self.backend = backend or ServiceBackend()
if not isinstance(self.backend, ServiceBackend):
raise ValueError(f'BatchPoolExecutor is not compatible with {type(backend)}')
self.batches: List[Batch] = []
self.directory = self.backend.remote_tmpdir + f'batch-pool-executor/{self.name}/'
self.inputs = self.directory + 'inputs/'
self.outputs = self.directory + 'outputs/'
self.fs = aiogoogle.GoogleStorageAsyncFS(project=project)
self.futures: List[BatchPoolFuture] = []
self.finished_future_count = 0
self._shutdown = False
version = sys.version_info
if image is None:
if version.major != 3 or version.minor not in (6, 7, 8):
raise ValueError(
f'You must specify an image if you are using a Python version other than 3.6, 3.7, or 3.8 (you are using {version})')
self.image = f'hailgenetics/python-dill:{version.major}.{version.minor}-slim'
else:
self.image = image
self.cpus_per_job = cpus_per_job
self.cleanup_bucket = cleanup_bucket
self.wait_on_exit = wait_on_exit
def __enter__(self):
return self
def map(self,
fn: Callable,
*iterables: Iterable[Any],
timeout: Optional[Union[int, float]] = None,
chunksize: int = 1):
"""Call `fn` on cloud machines with arguments from `iterables`.
This function returns a generator which will produce each result in the
same order as the `iterables`, only blocking if the result is not yet
ready. You can convert the generator to a list with :class:`.list`.
Examples
--------
Do nothing, but on the cloud:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... list(bpe.map(lambda x: x, range(4)))
[0, 1, 2, 3]
Call a function with two parameters, on the cloud:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... list(bpe.map(lambda x, y: x + y,
... ["white", "cat", "best"],
... ["house", "dog", "friend"]))
["whitehouse", "catdog", "bestfriend"]
Generate products of random matrices, on the cloud:
>>> def random_product(seed):
... np.random.seed(seed)
... w = np.random.rand(1, 100)
... u = np.random.rand(100, 1)
... return float(w @ u)
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... list(bpe.map(random_product, range(4)))
[24.440006386777277, 23.325755364428026, 23.920184804993806, 25.47912882125101]
Parameters
----------
fn:
The function to execute.
iterables:
The `iterables` are zipped together and each tuple is used as
arguments to `fn`. See the second example for more detail. It is not
possible to pass keyword arguments. Each element of `iterables` must
have the same length.
timeout:
This is roughly a timeout on how long we wait on each function
call. Specifically, each call to the returned generator's
:meth:`.iterator.__next__` invokes :meth:`.BatchPoolFuture.result` with this
`timeout`.
chunksize:
The number of tasks to schedule in the same docker container. Docker
containers take about 5 seconds to start. Ideally, each task should
take an order of magnitude more time than start-up time. You can
make the chunksize larger to reduce parallelism but increase the
amount of meaningful work done per-container.
"""
agen = async_to_blocking(
self.async_map(fn, iterables, timeout=timeout, chunksize=chunksize))
def generator_from_async_generator(aiter):
try:
while True:
yield async_to_blocking(aiter.__anext__())
except StopAsyncIteration:
return
return generator_from_async_generator(agen.__aiter__())
async def async_map(self,
fn: Callable,
iterables: Iterable[Iterable[Any]],
timeout: Optional[Union[int, float]] = None,
chunksize: int = 1):
"""Aysncio compatible version of :meth:`.map`."""
if not iterables:
return iter([])
if chunksize > 1:
list_per_argument = [list(x) for x in iterables]
n = len(list_per_argument[0])
assert all(n == len(x) for x in list_per_argument)
n_chunks = (n + chunksize - 1) // chunksize
iterables_chunks = [list(partition(n_chunks, x)) for x in list_per_argument]
iterables_chunks = [
chunk for chunk in iterables_chunks if len(chunk) > 0]
fn = chunk(fn)
iterables = iterables_chunks
submit_tasks = [asyncio.ensure_future(self.async_submit(fn, *arguments))
for arguments in zip(*iterables)]
try:
bp_futures = [await t for t in submit_tasks]
except:
for t in submit_tasks:
if t.done() and not t.exception():
await t.result().async_cancel()
elif not t.done():
t.cancel()
raise
async def async_result_or_cancel_all(future):
try:
return await future.async_result(timeout=timeout)
except:
await asyncio.gather(*[bp_fut.async_cancel() for bp_fut in bp_futures], return_exceptions=True)
raise
if chunksize > 1:
return (val
for future in bp_futures
for val in await async_result_or_cancel_all(future))
return (await async_result_or_cancel_all(future)
for future in bp_futures)
def submit(self,
fn: Callable,
*args: Any,
**kwargs: Any
) -> 'BatchPoolFuture':
"""Call `fn` on a cloud machine with all remaining arguments and keyword arguments.
The function, any objects it references, the arguments, and the keyword
arguments will be serialized to the cloud machine. Python modules are
not serialized, so you must ensure any needed Python modules and
packages are already present in the underlying Docker image. For more
details see the `image` argument to :class:`.BatchPoolExecutor`.
This function does not return the function's output; it returns a
:class:`.BatchPoolFuture` whose :meth:`.BatchPoolFuture.result` method
can be used to access the value.
Examples
--------
Do nothing, but on the cloud:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... future = bpe.submit(lambda x: x, 4)
... future.result()
4
Call a function with two arguments and one keyword argument, on the
cloud:
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... future = bpe.submit(lambda x, y, z: x + y + z,
... "poly", "ethyl", z="ene")
... future.result()
"polyethylene"
Generate a product of two random matrices, on the cloud:
>>> def random_product(seed):
... np.random.seed(seed)
... w = np.random.rand(1, 100)
... u = np.random.rand(100, 1)
... return float(w @ u)
>>> with BatchPoolExecutor() as bpe: # doctest: +SKIP
... future = bpe.submit(random_product, 1)
... future.result()
23.325755364428026
Parameters
----------
fn:
The function to execute.
args:
Arguments for the function.
kwargs:
Keyword arguments for the function.
"""
return async_to_blocking(
self.async_submit(fn, *args, **kwargs))
async def async_submit(self,
unapplied: Callable,
*args: Any,
**kwargs: Any
) -> 'BatchPoolFuture':
"""Aysncio compatible version of :meth:`BatchPoolExecutor.submit`."""
if self._shutdown:
raise RuntimeError('BatchPoolExecutor has already been shutdown.')
try:
name = unapplied.__name__
except AttributeError:
name = '<anonymous>'
name = f'{name}-{secret_alnum_string(4)}'
batch = Batch(name=self.name + '-' + name,
backend=self.backend,
default_image=self.image)
self.batches.append(batch)
j = batch.new_job(name)
pipe = BytesIO()
dill.dump(functools.partial(unapplied, *args, **kwargs), pipe, recurse=True)
pipe.seek(0)
pickledfun_remote = self.inputs + f'{name}/pickledfun'
await self.fs.write(pickledfun_remote, pipe.getvalue())
pickledfun_local = batch.read_input(pickledfun_remote)
thread_limit = "1"
if self.cpus_per_job:
j.cpu(self.cpus_per_job)
thread_limit = str(int(max(1.0, cpu_spec_to_float(self.cpus_per_job))))
j.env("OMP_NUM_THREADS", thread_limit)
j.env("OPENBLAS_NUM_THREADS", thread_limit)
j.env("MKL_NUM_THREADS", thread_limit)
j.env("VECLIB_MAXIMUM_THREADS", thread_limit)
j.env("NUMEXPR_NUM_THREADS", thread_limit)
j.command('set -ex')
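# The submitted job executes an inline Python snippet that loads the pickled
# function with dill, invokes it, and dumps either (result, None) or
# (exception, traceback strings) to the output file read back by the future.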
j.command(f'''python3 -c "
import base64
import dill
import traceback
with open(\\"{j.ofile}\\", \\"wb\\") as out:
try:
with open(\\"{pickledfun_local}\\", \\"rb\\") as f:
dill.dump((dill.load(f)(), None), out, recurse=True)
except Exception as e:
print(\\"BatchPoolExecutor encountered an exception:\\")
traceback.print_exc()
dill.dump((e, traceback.format_exception(type(e), e, e.__traceback__)), out, recurse=True)
"''')
output_gcs = self.outputs + f'{name}/output'
batch.write_output(j.ofile, output_gcs)
backend_batch = batch.run(wait=False,
disable_progress_bar=True)._async_batch
try:
return BatchPoolFuture(self,
backend_batch,
low_level_batch_client.Job.submitted_job(
backend_batch, 1),
output_gcs)
except:
await backend_batch.cancel()
raise
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType]):
self.shutdown(wait=self.wait_on_exit)
def _add_future(self, f):
self.futures.append(f)
def _finish_future(self):
self.finished_future_count += 1
if self._shutdown and self.finished_future_count == len(self.futures):
self._cleanup()
def shutdown(self, wait: bool = True):
"""Allow temporary resources to be cleaned up.
Until shutdown is called, some temporary cloud storage files will
persist. After shutdown has been called *and* all outstanding jobs have
completed, these files will be deleted.
Parameters
----------
wait:
If true, wait for all jobs to complete before returning from this
method.
"""
if wait:
async def ignore_exceptions(f):
try:
await f.async_result()
except Exception:
pass
async_to_blocking(
asyncio.gather(*[ignore_exceptions(f) for f in self.futures]))
if self.finished_future_count == len(self.futures):
self._cleanup()
self._shutdown = True
def _cleanup(self):
if self.cleanup_bucket:
async_to_blocking(self.fs.rmtree(None, self.directory))
async_to_blocking(self.fs.close())
self.backend.close()
class BatchPoolFuture:
def __init__(self,
executor: BatchPoolExecutor,
batch: low_level_batch_client.Batch,
job: low_level_batch_client.Job,
output_file: str):
self.executor = executor
self.batch = batch
self.job = job
self.output_file = output_file
self.fetch_coro = asyncio.ensure_future(self._async_fetch_result())
executor._add_future(self)
def cancel(self):
"""Cancel this job if it has not yet been cancelled.
``True`` is returned if the job is cancelled. ``False`` is returned if
the job has already completed.
"""
return async_to_blocking(self.async_cancel())
async def async_cancel(self):
"""Asynchronously cancel this job.
``True`` is returned if the job is cancelled. ``False`` is returned if
the job has already completed.
"""
if self.fetch_coro.cancelled():
return False
if self.fetch_coro.done():
# retrieve any exceptions raised
self.fetch_coro.result()
return False
self.fetch_coro.cancel()
await asyncio.wait([self.fetch_coro])
return True
def cancelled(self):
"""Returns ``True`` if :meth:`.cancel` was called before a value was produced.
"""
return self.fetch_coro.cancelled()
def running(self): # pylint: disable=no-self-use
"""Always returns False.
This future can always be cancelled, so this function always returns False.
"""
return False
def done(self):
"""Returns `True` if the function is complete and not cancelled.
"""
return self.fetch_coro.done()
def result(self, timeout: Optional[Union[float, int]] = None):
"""Blocks until the job is complete.
If the job has been cancelled, this method raises a
:class:`.concurrent.futures.CancelledError`.
If the job has timed out, this method raises an
:class:`.concurrent.futures.TimeoutError`.
Parameters
----------
timeout:
Wait this long before raising a timeout error.
"""
try:
return async_to_blocking(self.async_result(timeout))
except asyncio.TimeoutError as e:
raise concurrent.futures.TimeoutError() from e
async def async_result(self, timeout: Optional[Union[float, int]] = None):
"""Asynchronously wait until the job is complete.
If the job has been cancelled, this method raises a
:class:`.concurrent.futures.CancelledError`.
If the job has timed out, this method raises an
:class"`.concurrent.futures.TimeoutError`.
Parameters
----------
timeout:
Wait this long before raising a timeout error.
"""
if self.cancelled():
raise concurrent.futures.CancelledError()
try:
return await asyncio.wait_for(asyncio.shield(self.fetch_coro), timeout=timeout)
except asyncio.TimeoutError as e:
raise concurrent.futures.TimeoutError() from e
async def _async_fetch_result(self):
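# Output protocol: a dill-pickled (value, traceback) pair, where a None
# traceback means success and a non-None traceback accompanies the exception
# raised on the remote worker.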
try:
await self.job.wait()
main_container_status = self.job._status['status']['container_statuses']['main']
if main_container_status['state'] == 'error':
raise ValueError(
f"submitted job failed:\n{main_container_status['error']}")
value, traceback = dill.loads(
await self.executor.fs.read(self.output_file))
if traceback is None:
return value
assert isinstance(value, BaseException)
self.value = None
traceback = ''.join(traceback)
raise ValueError(f'submitted job failed:\n{traceback}')
finally:
await self.batch.cancel()
self.executor._finish_future()
def exception(self, timeout: Optional[Union[float, int]] = None):
"""Block until the job is complete and raise any exceptions.
"""
if self.cancelled():
raise concurrent.futures.CancelledError()
self.result(timeout)
def add_done_callback(self, fn):
"""NOT IMPLEMENTED
"""
raise NotImplementedError()
|
import json
import uuid
import pytest
from flask import url_for
from notifications_python_client.errors import HTTPError
from notifications_utils.url_safe_token import generate_token
from app.models.webauthn_credential import (
WebAuthnCredential,
WebAuthnCredentials,
)
from tests.conftest import (
create_api_user_active,
normalize_spaces,
url_for_endpoint_with_token,
)
def test_should_show_overview_page(
client_request,
):
page = client_request.get('main.user_profile')
assert page.select_one('h1').text.strip() == 'Your profile'
assert 'Use platform admin view' not in page
assert 'Security keys' not in page
def test_overview_page_shows_disable_for_platform_admin(
client_request,
platform_admin_user,
mocker
):
mocker.patch('app.models.webauthn_credential.WebAuthnCredentials.client_method')
client_request.login(platform_admin_user)
page = client_request.get('main.user_profile')
assert page.select_one('h1').text.strip() == 'Your profile'
disable_platform_admin_row = page.select_one('#disable-platform-admin')
assert ' '.join(disable_platform_admin_row.text.split()) == \
'Use platform admin view Yes Change whether to use platform admin view'
@pytest.mark.parametrize('key_count, expected_row_text', [
(0, 'Security keys None registered Change security keys'),
(1, 'Security keys 1 registered Change security keys'),
(2, 'Security keys 2 registered Change security keys'),
])
def test_overview_page_shows_security_keys_if_user_they_can_use_webauthn(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
key_count,
expected_row_text,
):
client_request.login(platform_admin_user)
credentials = [webauthn_credential for _ in range(key_count)]
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=credentials,
)
page = client_request.get('main.user_profile')
security_keys_row = page.select_one('#security-keys')
assert ' '.join(security_keys_row.text.split()) == expected_row_text
def test_should_show_name_page(
client_request
):
page = client_request.get(('main.user_profile_name'))
assert page.select_one('h1').text.strip() == 'Change your name'
def test_should_redirect_after_name_change(
client_request,
mock_update_user_attribute,
):
client_request.post(
'main.user_profile_name',
_data={'new_name': 'New Name'},
_expected_status=302,
_expected_redirect=url_for('main.user_profile', _external=True),
)
assert mock_update_user_attribute.called is True
def test_should_show_email_page(
client_request,
):
page = client_request.get(
'main.user_profile_email'
)
assert page.select_one('h1').text.strip() == 'Change your email address'
def test_should_redirect_after_email_change(
client_request,
mock_login,
mock_email_is_not_already_in_use,
):
client_request.post(
'main.user_profile_email',
_data={'email_address': 'new_notify@notify.gov.uk'},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_email_authenticate',
_external=True,
)
)
assert mock_email_is_not_already_in_use.called
@pytest.mark.parametrize('email_address,error_message', [
('me@example.com', 'Enter a public sector email address or find out who can use Notify'),
('not_valid', 'Enter a valid email address') # 2 errors with email address, only first error shown
])
def test_should_show_errors_if_new_email_address_does_not_validate(
client_request,
mock_email_is_not_already_in_use,
mock_get_organisations,
email_address,
error_message,
):
page = client_request.post(
'main.user_profile_email',
_data={'email_address': email_address},
_expected_status=200,
)
assert normalize_spaces(page.find('span', class_='govuk-error-message').text) == f'Error: {error_message}'
# We only call API to check if the email address is already in use if there are no other errors
assert not mock_email_is_not_already_in_use.called
def test_should_show_authenticate_after_email_change(
client_request,
):
with client_request.session_transaction() as session:
session['new-email'] = 'new_notify@notify.gov.uk'
page = client_request.get('main.user_profile_email_authenticate')
assert 'Change your email address' in page.text
assert 'Confirm' in page.text
def test_should_render_change_email_continue_after_authenticate_email(
client_request,
mock_verify_password,
mock_send_change_email_verification,
):
with client_request.session_transaction() as session:
session['new-email'] = 'new_notify@notify.gov.uk'
page = client_request.post(
'main.user_profile_email_authenticate',
_data={'password': '12345'},
_expected_status=200,
)
assert 'Click the link in the email to confirm the change to your email address.' in page.text
def test_should_redirect_to_user_profile_when_user_confirms_email_link(
notify_admin,
logged_in_client,
api_user_active,
mock_update_user_attribute,
):
token = generate_token(payload=json.dumps({'user_id': api_user_active['id'], 'email': 'new_email@gov.uk'}),
secret=notify_admin.config['SECRET_KEY'], salt=notify_admin.config['DANGEROUS_SALT'])
response = logged_in_client.get(url_for_endpoint_with_token('main.user_profile_email_confirm',
token=token))
assert response.status_code == 302
assert response.location == url_for('main.user_profile', _external=True)
def test_should_show_mobile_number_page(
client_request,
):
page = client_request.get(('main.user_profile_mobile_number'))
assert 'Change your mobile number' in page.text
@pytest.mark.parametrize('phone_number_to_register_with', [
'+4407700900460',
'+1800-555-555',
])
def test_should_redirect_after_mobile_number_change(
client_request,
phone_number_to_register_with,
):
client_request.post(
'main.user_profile_mobile_number',
_data={'mobile_number': phone_number_to_register_with},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_mobile_number_authenticate',
_external=True,
)
)
with client_request.session_transaction() as session:
assert session['new-mob'] == phone_number_to_register_with
def test_should_show_authenticate_after_mobile_number_change(
client_request,
):
with client_request.session_transaction() as session:
session['new-mob'] = '+441234123123'
page = client_request.get(
'main.user_profile_mobile_number_authenticate',
)
assert 'Change your mobile number' in page.text
assert 'Confirm' in page.text
def test_should_redirect_after_mobile_number_authenticate(
client_request,
mock_verify_password,
mock_send_verify_code,
):
with client_request.session_transaction() as session:
session['new-mob'] = '+441234123123'
client_request.post(
'main.user_profile_mobile_number_authenticate',
_data={'password': '12345667'},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_mobile_number_confirm',
_external=True,
)
)
def test_should_show_confirm_after_mobile_number_change(
client_request,
):
with client_request.session_transaction() as session:
session['new-mob-password-confirmed'] = True
page = client_request.get(
'main.user_profile_mobile_number_confirm'
)
assert 'Change your mobile number' in page.text
assert 'Confirm' in page.text
@pytest.mark.parametrize('phone_number_to_register_with', [
'+4407700900460',
'+1800-555-555',
])
def test_should_redirect_after_mobile_number_confirm(
client_request,
mocker,
mock_update_user_attribute,
mock_check_verify_code,
phone_number_to_register_with,
):
user_before = create_api_user_active(with_unique_id=True)
user_after = create_api_user_active(with_unique_id=True)
user_before['current_session_id'] = str(uuid.UUID(int=1))
user_after['current_session_id'] = str(uuid.UUID(int=2))
# first time (login decorator) returns normally, second time (after 2FA) returns with the new session id
mocker.patch('app.user_api_client.get_user', side_effect=[user_before, user_after])
with client_request.session_transaction() as session:
session['new-mob-password-confirmed'] = True
session['new-mob'] = phone_number_to_register_with
session['current_session_id'] = user_before['current_session_id']
client_request.post(
'main.user_profile_mobile_number_confirm',
_data={'sms_code': '12345'},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile',
_external=True,
)
)
# make sure the current_session_id has changed to what the API returned
with client_request.session_transaction() as session:
assert session['current_session_id'] == user_after['current_session_id']
def test_should_show_password_page(
client_request,
):
page = client_request.get(('main.user_profile_password'))
assert page.select_one('h1').text.strip() == 'Change your password'
def test_should_redirect_after_password_change(
client_request,
mock_update_user_password,
mock_verify_password,
):
client_request.post(
'main.user_profile_password',
_data={
'new_password': 'the new password',
'old_password': 'the old password',
},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile',
_external=True,
),
)
def test_non_gov_user_cannot_see_change_email_link(
client_request,
api_nongov_user_active,
mock_get_organisations,
):
client_request.login(api_nongov_user_active)
page = client_request.get('main.user_profile')
assert not page.find('a', {'href': url_for('main.user_profile_email')})
assert page.select_one('h1').text.strip() == 'Your profile'
def test_non_gov_user_cannot_access_change_email_page(
client_request,
api_nongov_user_active,
mock_get_organisations,
):
client_request.login(api_nongov_user_active)
client_request.get('main.user_profile_email', _expected_status=403)
def test_normal_user_doesnt_see_disable_platform_admin(client_request):
client_request.get('main.user_profile_disable_platform_admin_view', _expected_status=403)
def test_platform_admin_can_see_disable_platform_admin_page(client_request, platform_admin_user):
client_request.login(platform_admin_user)
page = client_request.get('main.user_profile_disable_platform_admin_view')
assert page.select_one('h1').text.strip() == 'Use platform admin view'
assert page.select_one('input[checked]')['value'] == 'True'
def test_can_disable_platform_admin(client_request, platform_admin_user):
client_request.login(platform_admin_user)
with client_request.session_transaction() as session:
assert 'disable_platform_admin_view' not in session
client_request.post(
'main.user_profile_disable_platform_admin_view',
_data={'enabled': False},
_expected_status=302,
_expected_redirect=url_for('main.user_profile', _external=True),
)
with client_request.session_transaction() as session:
assert session['disable_platform_admin_view'] is True
def test_can_reenable_platform_admin(client_request, platform_admin_user):
client_request.login(platform_admin_user)
with client_request.session_transaction() as session:
session['disable_platform_admin_view'] = True
client_request.post(
'main.user_profile_disable_platform_admin_view',
_data={'enabled': True},
_expected_status=302,
_expected_redirect=url_for('main.user_profile', _external=True),
)
with client_request.session_transaction() as session:
assert session['disable_platform_admin_view'] is False
def test_user_doesnt_see_security_keys_unless_they_can_use_webauthn(
client_request,
platform_admin_user
):
platform_admin_user['can_use_webauthn'] = False
client_request.login(platform_admin_user)
client_request.get(
'.user_profile_security_keys',
_expected_status=403,
)
def test_should_show_security_keys_page(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
page = client_request.get('.user_profile_security_keys')
assert page.select_one('h1').text.strip() == 'Security keys'
credential_row = page.select('tr')[-1]
assert 'Test credential' in credential_row.text
assert "Manage" in credential_row.find('a').text
assert credential_row.find('a')["href"] == url_for(
'.user_profile_manage_security_key',
key_id=webauthn_credential['id']
)
register_button = page.select_one("[data-module='register-security-key']")
assert register_button.text.strip() == 'Register a key'
def test_get_key_from_list_of_keys(
mocker,
webauthn_credential,
webauthn_credential_2,
fake_uuid,
):
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential, webauthn_credential_2],
)
assert WebAuthnCredentials(fake_uuid).by_id(webauthn_credential["id"]) == WebAuthnCredential(webauthn_credential)
def test_should_show_manage_security_key_page(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
page = client_request.get('.user_profile_manage_security_key', key_id=webauthn_credential['id'])
assert page.select_one('h1').text.strip() == f'Manage ‘{webauthn_credential["name"]}’'
assert page.select_one('.govuk-back-link').text.strip() == 'Back'
assert page.select_one('.govuk-back-link')['href'] == url_for('.user_profile_security_keys')
assert page.select_one('#security_key_name')["value"] == webauthn_credential["name"]
def test_manage_security_key_page_404s_when_key_not_found(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
webauthn_credential_2
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential_2],
)
client_request.get(
'.user_profile_manage_security_key',
key_id=webauthn_credential['id'],
_expected_status=404,
)
@pytest.mark.parametrize('endpoint,method', [
(".user_profile_manage_security_key", "get"),
(".user_profile_manage_security_key", "post"),
(".user_profile_confirm_delete_security_key", "get"),
(".user_profile_confirm_delete_security_key", "post"),
(".user_profile_delete_security_key", "post"),
])
def test_cant_manage_security_keys_unless_can_use_webauthn(
client_request,
platform_admin_user,
webauthn_credential,
endpoint,
method
):
platform_admin_user['can_use_webauthn'] = False
client_request.login(platform_admin_user)
if method == "get":
client_request.get(
endpoint,
key_id=webauthn_credential['id'],
_expected_status=403,
)
else:
client_request.post(
endpoint,
key_id=webauthn_credential['id'],
_expected_status=403,
)
def test_should_redirect_after_change_of_security_key_name(
client_request,
platform_admin_user,
webauthn_credential,
mocker
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
mock_update = mocker.patch('app.user_api_client.update_webauthn_credential_name_for_user')
client_request.post(
'main.user_profile_manage_security_key',
key_id=webauthn_credential['id'],
_data={'security_key_name': "new name"},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_security_keys',
_external=True,
)
)
mock_update.assert_called_once_with(
credential_id=webauthn_credential['id'],
new_name_for_credential="new name",
user_id=platform_admin_user["id"]
)
def test_user_profile_manage_security_key_should_not_call_api_if_key_name_stays_the_same(
client_request,
platform_admin_user,
webauthn_credential,
mocker
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
mock_update = mocker.patch('app.user_api_client.update_webauthn_credential_name_for_user')
client_request.post(
'main.user_profile_manage_security_key',
key_id=webauthn_credential['id'],
_data={'security_key_name': webauthn_credential['name']},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_security_keys',
_external=True,
)
)
assert not mock_update.called
def test_shows_delete_link_for_security_key(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
page = client_request.get('.user_profile_manage_security_key', key_id=webauthn_credential['id'])
assert page.select_one('h1').text.strip() == f'Manage ‘{webauthn_credential["name"]}’'
link = page.select_one('.page-footer a')
assert normalize_spaces(link.text) == 'Delete'
assert link['href'] == url_for('.user_profile_confirm_delete_security_key', key_id=webauthn_credential['id'])
def test_confirm_delete_security_key(
client_request,
platform_admin_user,
webauthn_credential,
mocker
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
page = client_request.get(
'.user_profile_confirm_delete_security_key',
key_id=webauthn_credential['id'],
_test_page_title=False,
)
assert normalize_spaces(page.select_one('.banner-dangerous').text) == (
'Are you sure you want to delete this security key? '
'Yes, delete'
)
assert 'action' not in page.select_one('.banner-dangerous form')
assert page.select_one('.banner-dangerous form')['method'] == 'post'
def test_delete_security_key(
client_request,
platform_admin_user,
webauthn_credential,
mocker
):
client_request.login(platform_admin_user)
mock_delete = mocker.patch('app.user_api_client.delete_webauthn_credential_for_user')
client_request.post(
'.user_profile_delete_security_key',
key_id=webauthn_credential['id'],
_expected_redirect=url_for(
'.user_profile_security_keys',
_external=True,
)
)
mock_delete.assert_called_once_with(
credential_id=webauthn_credential['id'],
user_id=platform_admin_user["id"]
)
def test_delete_security_key_handles_last_credential_error(
client_request,
platform_admin_user,
webauthn_credential,
mocker,
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
mocker.patch(
'app.user_api_client.delete_webauthn_credential_for_user',
side_effect=HTTPError(
response={},
message='Cannot delete last remaining webauthn credential for user'
)
)
page = client_request.post(
'.user_profile_delete_security_key',
key_id=webauthn_credential['id'],
_follow_redirects=True
)
assert 'Manage ‘Test credential’' in page.find('h1').text
expected_message = "You cannot delete your last security key."
assert expected_message in page.find('div', class_="banner-dangerous").text
| import json
import uuid
import pytest
from flask import url_for
from notifications_python_client.errors import HTTPError
from notifications_utils.url_safe_token import generate_token
from app.models.webauthn_credential import (
WebAuthnCredential,
WebAuthnCredentials,
)
from tests.conftest import (
create_api_user_active,
normalize_spaces,
url_for_endpoint_with_token,
)
def test_should_show_overview_page(
client_request,
):
page = client_request.get('main.user_profile')
assert page.select_one('h1').text.strip() == 'Your profile'
assert 'Use platform admin view' not in page
assert 'Security keys' not in page
def test_overview_page_shows_disable_for_platform_admin(
client_request,
platform_admin_user,
mocker
):
mocker.patch('app.models.webauthn_credential.WebAuthnCredentials.client_method')
client_request.login(platform_admin_user)
page = client_request.get('main.user_profile')
assert page.select_one('h1').text.strip() == 'Your profile'
disable_platform_admin_row = page.select_one('#disable-platform-admin')
assert ' '.join(disable_platform_admin_row.text.split()) == \
'Use platform admin view Yes Change whether to use platform admin view'
@pytest.mark.parametrize('key_count, expected_row_text', [
(0, 'Security keys None registered Change security keys'),
(1, 'Security keys 1 registered Change security keys'),
(2, 'Security keys 2 registered Change security keys'),
])
def test_overview_page_shows_security_keys_if_user_they_can_use_webauthn(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
key_count,
expected_row_text,
):
client_request.login(platform_admin_user)
credentials = [webauthn_credential for _ in range(key_count)]
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=credentials,
)
page = client_request.get('main.user_profile')
security_keys_row = page.select_one('#security-keys')
assert ' '.join(security_keys_row.text.split()) == expected_row_text
def test_should_show_name_page(
client_request
):
page = client_request.get(('main.user_profile_name'))
assert page.select_one('h1').text.strip() == 'Change your name'
def test_should_redirect_after_name_change(
client_request,
mock_update_user_attribute,
):
client_request.post(
'main.user_profile_name',
_data={'new_name': 'New Name'},
_expected_status=302,
_expected_redirect=url_for('main.user_profile', _external=True),
)
assert mock_update_user_attribute.called is True
def test_should_show_email_page(
client_request,
):
page = client_request.get(
'main.user_profile_email'
)
assert page.select_one('h1').text.strip() == 'Change your email address'
def test_should_redirect_after_email_change(
client_request,
mock_login,
mock_email_is_not_already_in_use,
):
client_request.post(
'main.user_profile_email',
_data={'email_address': 'new_notify@notify.gov.uk'},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_email_authenticate',
_external=True,
)
)
assert mock_email_is_not_already_in_use.called
@pytest.mark.parametrize('email_address,error_message', [
('me@example.com', 'Enter a public sector email address or find out who can use Notify'),
('not_valid', 'Enter a valid email address') # 2 errors with email address, only first error shown
])
def test_should_show_errors_if_new_email_address_does_not_validate(
client_request,
mock_email_is_not_already_in_use,
mock_get_organisations,
email_address,
error_message,
):
page = client_request.post(
'main.user_profile_email',
_data={'email_address': email_address},
_expected_status=200,
)
assert normalize_spaces(page.find('span', class_='govuk-error-message').text) == f'Error: {error_message}'
# We only call API to check if the email address is already in use if there are no other errors
assert not mock_email_is_not_already_in_use.called
def test_should_show_authenticate_after_email_change(
client_request,
):
with client_request.session_transaction() as session:
session['new-email'] = 'new_notify@notify.gov.uk'
page = client_request.get('main.user_profile_email_authenticate')
assert 'Change your email address' in page.text
assert 'Confirm' in page.text
def test_should_render_change_email_continue_after_authenticate_email(
client_request,
mock_verify_password,
mock_send_change_email_verification,
):
with client_request.session_transaction() as session:
session['new-email'] = 'new_notify@notify.gov.uk'
page = client_request.post(
'main.user_profile_email_authenticate',
_data={'password': '12345'},
_expected_status=200,
)
assert 'Click the link in the email to confirm the change to your email address.' in page.text
def test_should_redirect_to_user_profile_when_user_confirms_email_link(
notify_admin,
logged_in_client,
api_user_active,
mock_update_user_attribute,
):
token = generate_token(payload=json.dumps({'user_id': api_user_active['id'], 'email': 'new_email@gov.uk'}),
secret=notify_admin.config['SECRET_KEY'], salt=notify_admin.config['DANGEROUS_SALT'])
response = logged_in_client.get(url_for_endpoint_with_token('main.user_profile_email_confirm',
token=token))
assert response.status_code == 302
assert response.location == url_for('main.user_profile', _external=True)
def test_should_show_mobile_number_page(
client_request,
):
page = client_request.get(('main.user_profile_mobile_number'))
assert 'Change your mobile number' in page.text
@pytest.mark.parametrize('phone_number_to_register_with', [
'+4407700900460',
'+1800-555-555',
])
def test_should_redirect_after_mobile_number_change(
client_request,
phone_number_to_register_with,
):
client_request.post(
'main.user_profile_mobile_number',
_data={'mobile_number': phone_number_to_register_with},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_mobile_number_authenticate',
_external=True,
)
)
with client_request.session_transaction() as session:
assert session['new-mob'] == phone_number_to_register_with
def test_should_show_authenticate_after_mobile_number_change(
client_request,
):
with client_request.session_transaction() as session:
session['new-mob'] = '+441234123123'
page = client_request.get(
'main.user_profile_mobile_number_authenticate',
)
assert 'Change your mobile number' in page.text
assert 'Confirm' in page.text
def test_should_redirect_after_mobile_number_authenticate(
client_request,
mock_verify_password,
mock_send_verify_code,
):
with client_request.session_transaction() as session:
session['new-mob'] = '+441234123123'
client_request.post(
'main.user_profile_mobile_number_authenticate',
_data={'password': '12345667'},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_mobile_number_confirm',
_external=True,
)
)
def test_should_show_confirm_after_mobile_number_change(
client_request,
):
with client_request.session_transaction() as session:
session['new-mob-password-confirmed'] = True
page = client_request.get(
'main.user_profile_mobile_number_confirm'
)
assert 'Change your mobile number' in page.text
assert 'Confirm' in page.text
@pytest.mark.parametrize('phone_number_to_register_with', [
'+4407700900460',
'+1800-555-555',
])
def test_should_redirect_after_mobile_number_confirm(
client_request,
mocker,
mock_update_user_attribute,
mock_check_verify_code,
phone_number_to_register_with,
):
user_before = create_api_user_active(with_unique_id=True)
user_after = create_api_user_active(with_unique_id=True)
user_before['current_session_id'] = str(uuid.UUID(int=1))
user_after['current_session_id'] = str(uuid.UUID(int=2))
# first time (login decorator) returns normally, second time (after 2FA) returns with the new session id
mocker.patch('app.user_api_client.get_user', side_effect=[user_before, user_after])
with client_request.session_transaction() as session:
session['new-mob-password-confirmed'] = True
session['new-mob'] = phone_number_to_register_with
session['current_session_id'] = user_before['current_session_id']
client_request.post(
'main.user_profile_mobile_number_confirm',
_data={'sms_code': '12345'},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile',
_external=True,
)
)
# make sure the current_session_id has changed to what the API returned
with client_request.session_transaction() as session:
assert session['current_session_id'] == user_after['current_session_id']
def test_should_show_password_page(
client_request,
):
page = client_request.get(('main.user_profile_password'))
assert page.select_one('h1').text.strip() == 'Change your password'
def test_should_redirect_after_password_change(
client_request,
mock_update_user_password,
mock_verify_password,
):
client_request.post(
'main.user_profile_password',
_data={
'new_password': 'the new password',
'old_password': 'the old password',
},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile',
_external=True,
),
)
def test_non_gov_user_cannot_see_change_email_link(
client_request,
api_nongov_user_active,
mock_get_organisations,
):
client_request.login(api_nongov_user_active)
page = client_request.get('main.user_profile')
assert not page.find('a', {'href': url_for('main.user_profile_email')})
assert page.select_one('h1').text.strip() == 'Your profile'
def test_non_gov_user_cannot_access_change_email_page(
client_request,
api_nongov_user_active,
mock_get_organisations,
):
client_request.login(api_nongov_user_active)
client_request.get('main.user_profile_email', _expected_status=403)
def test_normal_user_doesnt_see_disable_platform_admin(client_request):
client_request.get('main.user_profile_disable_platform_admin_view', _expected_status=403)
def test_platform_admin_can_see_disable_platform_admin_page(client_request, platform_admin_user):
client_request.login(platform_admin_user)
page = client_request.get('main.user_profile_disable_platform_admin_view')
assert page.select_one('h1').text.strip() == 'Use platform admin view'
assert page.select_one('input[checked]')['value'] == 'True'
def test_can_disable_platform_admin(client_request, platform_admin_user):
client_request.login(platform_admin_user)
with client_request.session_transaction() as session:
assert 'disable_platform_admin_view' not in session
client_request.post(
'main.user_profile_disable_platform_admin_view',
_data={'enabled': False},
_expected_status=302,
_expected_redirect=url_for('main.user_profile', _external=True),
)
with client_request.session_transaction() as session:
assert session['disable_platform_admin_view'] is True
def test_can_reenable_platform_admin(client_request, platform_admin_user):
client_request.login(platform_admin_user)
with client_request.session_transaction() as session:
session['disable_platform_admin_view'] = True
client_request.post(
'main.user_profile_disable_platform_admin_view',
_data={'enabled': True},
_expected_status=302,
_expected_redirect=url_for('main.user_profile', _external=True),
)
with client_request.session_transaction() as session:
assert session['disable_platform_admin_view'] is False
def test_user_doesnt_see_security_keys_unless_they_can_use_webauthn(
client_request,
platform_admin_user
):
platform_admin_user['can_use_webauthn'] = False
client_request.login(platform_admin_user)
client_request.get(
'.user_profile_security_keys',
_expected_status=403,
)
def test_should_show_security_keys_page(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
page = client_request.get('.user_profile_security_keys')
assert page.select_one('h1').text.strip() == 'Security keys'
credential_row = page.select('tr')[-1]
assert 'Test credential' in credential_row.text
assert "Manage" in credential_row.find('a').text
assert credential_row.find('a')["href"] == url_for(
'.user_profile_manage_security_key',
key_id=webauthn_credential['id']
)
register_button = page.select_one("[data-module='register-security-key']")
assert register_button.text.strip() == 'Register a key'
def test_get_key_from_list_of_keys(
mocker,
webauthn_credential,
webauthn_credential_2,
fake_uuid,
):
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential, webauthn_credential_2],
)
assert WebAuthnCredentials(fake_uuid).by_id(webauthn_credential["id"]) == WebAuthnCredential(webauthn_credential)
def test_should_show_manage_security_key_page(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
page = client_request.get('.user_profile_manage_security_key', key_id=webauthn_credential['id'])
assert page.select_one('h1').text.strip() == f'Manage ‘{webauthn_credential["name"]}’'
assert page.select_one('.govuk-back-link').text.strip() == 'Back'
assert page.select_one('.govuk-back-link')['href'] == url_for('.user_profile_security_keys')
assert page.select_one('#security_key_name')["value"] == webauthn_credential["name"]
def test_manage_security_key_page_404s_when_key_not_found(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
webauthn_credential_2
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential_2],
)
client_request.get(
'.user_profile_manage_security_key',
key_id=webauthn_credential['id'],
_expected_status=404,
)
@pytest.mark.parametrize('endpoint,method', [
(".user_profile_manage_security_key", "get"),
(".user_profile_manage_security_key", "post"),
(".user_profile_confirm_delete_security_key", "get"),
(".user_profile_confirm_delete_security_key", "post"),
(".user_profile_delete_security_key", "post"),
])
def test_cant_manage_security_keys_unless_can_use_webauthn(
client_request,
platform_admin_user,
webauthn_credential,
endpoint,
method
):
platform_admin_user['can_use_webauthn'] = False
client_request.login(platform_admin_user)
if method == "get":
client_request.get(
endpoint,
key_id=webauthn_credential['id'],
_expected_status=403,
)
else:
client_request.post(
endpoint,
key_id=webauthn_credential['id'],
_expected_status=403,
)
def test_should_redirect_after_change_of_security_key_name(
client_request,
platform_admin_user,
webauthn_credential,
mocker
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
mock_update = mocker.patch('app.user_api_client.update_webauthn_credential_name_for_user')
client_request.post(
'main.user_profile_manage_security_key',
key_id=webauthn_credential['id'],
_data={'security_key_name': "new name"},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_security_keys',
_external=True,
)
)
mock_update.assert_called_once_with(
credential_id=webauthn_credential['id'],
new_name_for_credential="new name",
user_id=platform_admin_user["id"]
)
def test_user_profile_manage_security_key_should_not_call_api_if_key_name_stays_the_same(
client_request,
platform_admin_user,
webauthn_credential,
mocker
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
mock_update = mocker.patch('app.user_api_client.update_webauthn_credential_name_for_user')
client_request.post(
'main.user_profile_manage_security_key',
key_id=webauthn_credential['id'],
_data={'security_key_name': webauthn_credential['name']},
_expected_status=302,
_expected_redirect=url_for(
'main.user_profile_security_keys',
_external=True,
)
)
assert not mock_update.called
def test_shows_delete_link_for_security_key(
mocker,
client_request,
platform_admin_user,
webauthn_credential,
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
page = client_request.get('.user_profile_manage_security_key', key_id=webauthn_credential['id'])
assert page.select_one('h1').text.strip() == f'Manage ‘{webauthn_credential["name"]}’'
link = page.select_one('.page-footer a')
assert normalize_spaces(link.text) == 'Delete'
assert link['href'] == url_for('.user_profile_confirm_delete_security_key', key_id=webauthn_credential['id'])
def test_confirm_delete_security_key(
client_request,
platform_admin_user,
webauthn_credential,
mocker
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
page = client_request.get(
'.user_profile_confirm_delete_security_key',
key_id=webauthn_credential['id'],
_test_page_title=False,
)
assert normalize_spaces(page.select_one('.banner-dangerous').text) == (
'Are you sure you want to delete this security key? '
'Yes, delete'
)
    assert 'action' not in page.select_one('.banner-dangerous form').attrs
assert page.select_one('.banner-dangerous form')['method'] == 'post'
def test_delete_security_key(
client_request,
platform_admin_user,
webauthn_credential,
mocker
):
client_request.login(platform_admin_user)
mock_delete = mocker.patch('app.user_api_client.delete_webauthn_credential_for_user')
client_request.post(
'.user_profile_delete_security_key',
key_id=webauthn_credential['id'],
_expected_redirect=url_for(
'.user_profile_security_keys',
_external=True,
)
)
mock_delete.assert_called_once_with(
credential_id=webauthn_credential['id'],
user_id=platform_admin_user["id"]
)
def test_delete_security_key_handles_last_credential_error(
client_request,
platform_admin_user,
webauthn_credential,
mocker,
):
client_request.login(platform_admin_user)
mocker.patch(
'app.models.webauthn_credential.WebAuthnCredentials.client_method',
return_value=[webauthn_credential],
)
mocker.patch(
'app.user_api_client.delete_webauthn_credential_for_user',
side_effect=HTTPError(
response={},
message='Cannot delete last remaining webauthn credential for user'
)
)
page = client_request.post(
'.user_profile_delete_security_key',
key_id=webauthn_credential['id'],
_follow_redirects=True
)
assert 'Manage ‘Test credential’' in page.find('h1').text
expected_message = "You cannot delete your last security key."
assert expected_message in page.find('div', class_="banner-dangerous").text
|
import logging
import uuid
from datetime import datetime, timedelta
from enum import Enum, auto
from json import JSONEncoder
from AllInOneInstagramBot.core.utils import get_value
logger = logging.getLogger(__name__)
class SessionState:
id = None
args = {}
my_username = None
my_posts_count = None
my_followers_count = None
my_following_count = None
totalInteractions = {}
successfulInteractions = {}
totalFollowed = {}
totalLikes = 0
totalComments = 0
totalPm = 0
totalWatched = 0
totalUnfollowed = 0
removedMassFollowers = []
totalScraped = 0
totalCrashes = 0
startTime = None
finishTime = None
def __init__(self, configs):
self.id = str(uuid.uuid4())
self.args = configs.args
self.my_username = None
self.my_posts_count = None
self.my_followers_count = None
self.my_following_count = None
self.totalInteractions = {}
self.successfulInteractions = {}
self.totalFollowed = {}
self.totalLikes = 0
self.totalComments = 0
self.totalPm = 0
self.totalWatched = 0
self.totalUnfollowed = 0
self.removedMassFollowers = []
self.totalScraped = {}
self.totalCrashes = 0
self.startTime = datetime.now()
self.finishTime = None
def add_interaction(self, source, succeed, followed, scraped):
if self.totalInteractions.get(source) is None:
self.totalInteractions[source] = 1
else:
self.totalInteractions[source] += 1
if self.successfulInteractions.get(source) is None:
self.successfulInteractions[source] = 1 if succeed else 0
else:
if succeed:
self.successfulInteractions[source] += 1
if self.totalFollowed.get(source) is None:
self.totalFollowed[source] = 1 if followed else 0
else:
if followed:
self.totalFollowed[source] += 1
if self.totalScraped.get(source) is None:
self.totalScraped[source] = 1 if scraped else 0
self.successfulInteractions[source] = 1 if scraped else 0
else:
if scraped:
self.totalScraped[source] += 1
self.successfulInteractions[source] += 1
def set_limits_session(
self,
):
"""set the limits for current session"""
self.args.current_likes_limit = get_value(
self.args.total_likes_limit, None, 300
)
self.args.current_follow_limit = get_value(
self.args.total_follows_limit, None, 50
)
self.args.current_unfollow_limit = get_value(
self.args.total_unfollows_limit, None, 50
)
self.args.current_comments_limit = get_value(
self.args.total_comments_limit, None, 10
)
self.args.current_pm_limit = get_value(self.args.total_pm_limit, None, 10)
self.args.current_watch_limit = get_value(
self.args.total_watches_limit, None, 50
)
self.args.current_success_limit = get_value(
self.args.total_successful_interactions_limit, None, 100
)
self.args.current_total_limit = get_value(
self.args.total_interactions_limit, None, 1000
)
self.args.current_scraped_limit = get_value(
self.args.total_scraped_limit, None, 200
)
self.args.current_crashes_limit = get_value(
self.args.total_crashes_limit, None, 5
)
def check_limit(self, limit_type=None, output=False):
"""Returns True if limit reached - else False"""
limit_type = SessionState.Limit.ALL if limit_type is None else limit_type
# check limits
total_likes = self.totalLikes >= int(self.args.current_likes_limit)
total_followed = sum(self.totalFollowed.values()) >= int(
self.args.current_follow_limit
)
total_unfollowed = self.totalUnfollowed >= int(self.args.current_unfollow_limit)
total_comments = self.totalComments >= int(self.args.current_comments_limit)
total_pm = self.totalPm >= int(self.args.current_pm_limit)
total_watched = self.totalWatched >= int(self.args.current_watch_limit)
total_successful = sum(self.successfulInteractions.values()) >= int(
self.args.current_success_limit
)
total_interactions = sum(self.totalInteractions.values()) >= int(
self.args.current_total_limit
)
total_scraped = sum(self.totalScraped.values()) >= int(
self.args.current_scraped_limit
)
total_crashes = self.totalCrashes >= int(self.args.current_crashes_limit)
session_info = [
"Checking session limits:",
f"- Total Likes:\t\t\t\t{"Limit Reached" if total_likes else "OK"} ({self.totalLikes}/{self.args.current_likes_limit})",
f"- Total Comments:\t\t\t\t{"Limit Reached" if total_comments else "OK"} ({self.totalComments}/{self.args.current_comments_limit})",
f"- Total PM:\t\t\t\t\t{"Limit Reached" if total_pm else "OK"} ({self.totalPm}/{self.args.current_pm_limit})",
f"- Total Followed:\t\t\t\t{"Limit Reached" if total_followed else "OK"} ({sum(self.totalFollowed.values())}/{self.args.current_follow_limit})",
f"- Total Unfollowed:\t\t\t\t{"Limit Reached" if total_unfollowed else "OK"} ({self.totalUnfollowed}/{self.args.current_unfollow_limit})",
f"- Total Watched:\t\t\t\t{"Limit Reached" if total_watched else "OK"} ({self.totalWatched}/{self.args.current_watch_limit})",
f"- Total Successful Interactions:\t\t{"Limit Reached" if total_successful else "OK"} ({sum(self.successfulInteractions.values())}/{self.args.current_success_limit})",
f"- Total Interactions:\t\t\t{"Limit Reached" if total_interactions else "OK"} ({sum(self.totalInteractions.values())}/{self.args.current_total_limit})",
f"- Total Crashes:\t\t\t\t{"Limit Reached" if total_crashes else "OK"} ({self.totalCrashes}/{self.args.current_crashes_limit})",
f"- Total Successful Scraped Users:\t\t{"Limit Reached" if total_scraped else "OK"} ({sum(self.totalScraped.values())}/{self.args.current_scraped_limit})",
]
if limit_type == SessionState.Limit.ALL:
if output is not None:
if output:
for line in session_info:
logger.info(line)
else:
for line in session_info:
logger.debug(line)
return (
total_likes
and self.args.end_if_likes_limit_reached
or total_followed
and self.args.end_if_follows_limit_reached
or total_watched
and self.args.end_if_watches_limit_reached
or total_comments
and self.args.end_if_comments_limit_reached
or total_pm
and self.args.end_if_pm_limit_reached,
total_unfollowed,
total_interactions or total_successful or total_scraped,
)
elif limit_type == SessionState.Limit.LIKES:
if output:
logger.info(session_info[1])
else:
logger.debug(session_info[1])
return total_likes
elif limit_type == SessionState.Limit.COMMENTS:
if output:
logger.info(session_info[2])
else:
logger.debug(session_info[2])
return total_comments
elif limit_type == SessionState.Limit.PM:
if output:
logger.info(session_info[3])
else:
logger.debug(session_info[3])
return total_pm
elif limit_type == SessionState.Limit.FOLLOWS:
if output:
logger.info(session_info[4])
else:
logger.debug(session_info[4])
return total_followed
elif limit_type == SessionState.Limit.UNFOLLOWS:
if output:
logger.info(session_info[5])
else:
logger.debug(session_info[5])
return total_unfollowed
elif limit_type == SessionState.Limit.WATCHES:
if output:
logger.info(session_info[6])
else:
logger.debug(session_info[6])
return total_watched
elif limit_type == SessionState.Limit.SUCCESS:
if output:
logger.info(session_info[7])
else:
logger.debug(session_info[7])
return total_successful
elif limit_type == SessionState.Limit.TOTAL:
if output:
logger.info(session_info[8])
else:
logger.debug(session_info[8])
return total_interactions
elif limit_type == SessionState.Limit.CRASHES:
if output:
logger.info(session_info[9])
else:
logger.debug(session_info[9])
return total_crashes
elif limit_type == SessionState.Limit.SCRAPED:
if output:
logger.info(session_info[10])
else:
logger.debug(session_info[10])
return total_scraped
@staticmethod
def inside_working_hours(working_hours, delta_sec):
def time_in_range(start, end, x):
if start <= end:
return start <= x <= end
else:
return start <= x or x <= end
in_range = False
time_left_list = []
current_time = datetime.now()
delta = timedelta(seconds=delta_sec)
for n in working_hours:
today = current_time.strftime("%Y-%m-%d")
inf_value = f"{n.split("-")[0]} {today}"
inf = datetime.strptime(inf_value, "%H.%M %Y-%m-%d") + delta
sup_value = f"{n.split("-")[1]} {today}"
sup = datetime.strptime(sup_value, "%H.%M %Y-%m-%d") + delta
if sup - inf + timedelta(minutes=1) == timedelta(
days=1
) or sup - inf + timedelta(minutes=1) == timedelta(days=0):
logger.debug("Whole day mode.")
return True, 0
if time_in_range(inf.time(), sup.time(), current_time.time()):
in_range = True
return in_range, 0
else:
time_left = inf - current_time
if time_left >= timedelta(0):
time_left_list.append(time_left)
else:
time_left_list.append(time_left + timedelta(days=1))
return (
in_range,
min(time_left_list) if len(time_left_list) > 1 else time_left_list[0],
)
def is_finished(self):
return self.finishTime is not None
class Limit(Enum):
ALL = auto()
LIKES = auto()
COMMENTS = auto()
PM = auto()
FOLLOWS = auto()
UNFOLLOWS = auto()
WATCHES = auto()
SUCCESS = auto()
TOTAL = auto()
SCRAPED = auto()
CRASHES = auto()
class SessionStateEncoder(JSONEncoder):
def default(self, session_state: SessionState):
return {
"id": session_state.id,
"total_interactions": sum(session_state.totalInteractions.values()),
"successful_interactions": sum(
session_state.successfulInteractions.values()
),
"total_followed": sum(session_state.totalFollowed.values()),
"total_likes": session_state.totalLikes,
"total_comments": session_state.totalComments,
"total_pm": session_state.totalPm,
"total_watched": session_state.totalWatched,
"total_unfollowed": session_state.totalUnfollowed,
"total_scraped": session_state.totalScraped,
"start_time": str(session_state.startTime),
"finish_time": str(session_state.finishTime),
"args": session_state.args.__dict__,
"profile": {
"posts": session_state.my_posts_count,
"followers": session_state.my_followers_count,
"following": session_state.my_following_count,
},
}
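# Illustrative usage sketch only; a stand-in argparse.Namespace replaces the bot's
# real config object, and the field names below are examples, not real options.
if __name__ == "__main__":
    from argparse import Namespace
    from json import dumps
    # working_hours entries are "%H.%M" ranges joined by "-", e.g. "8.30-18.00";
    # a range spanning the whole day (such as "00.00-23.59") short-circuits to (True, 0)
    print(SessionState.inside_working_hours(["8.30-18.00"], delta_sec=0))
    state = SessionState(Namespace(args=Namespace(username="example_account")))
    print(dumps(state, cls=SessionStateEncoder, indent=2))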
| import logging
import uuid
from datetime import datetime, timedelta
from enum import Enum, auto
from json import JSONEncoder
from AllInOneInstagramBot.core.utils import get_value
logger = logging.getLogger(__name__)
class SessionState:
id = None
args = {}
my_username = None
my_posts_count = None
my_followers_count = None
my_following_count = None
totalInteractions = {}
successfulInteractions = {}
totalFollowed = {}
totalLikes = 0
totalComments = 0
totalPm = 0
totalWatched = 0
totalUnfollowed = 0
removedMassFollowers = []
totalScraped = 0
totalCrashes = 0
startTime = None
finishTime = None
def __init__(self, configs):
self.id = str(uuid.uuid4())
self.args = configs.args
self.my_username = None
self.my_posts_count = None
self.my_followers_count = None
self.my_following_count = None
self.totalInteractions = {}
self.successfulInteractions = {}
self.totalFollowed = {}
self.totalLikes = 0
self.totalComments = 0
self.totalPm = 0
self.totalWatched = 0
self.totalUnfollowed = 0
self.removedMassFollowers = []
self.totalScraped = {}
self.totalCrashes = 0
self.startTime = datetime.now()
self.finishTime = None
def add_interaction(self, source, succeed, followed, scraped):
if self.totalInteractions.get(source) is None:
self.totalInteractions[source] = 1
else:
self.totalInteractions[source] += 1
if self.successfulInteractions.get(source) is None:
self.successfulInteractions[source] = 1 if succeed else 0
else:
if succeed:
self.successfulInteractions[source] += 1
if self.totalFollowed.get(source) is None:
self.totalFollowed[source] = 1 if followed else 0
else:
if followed:
self.totalFollowed[source] += 1
if self.totalScraped.get(source) is None:
self.totalScraped[source] = 1 if scraped else 0
self.successfulInteractions[source] = 1 if scraped else 0
else:
if scraped:
self.totalScraped[source] += 1
self.successfulInteractions[source] += 1
def set_limits_session(
self,
):
"""set the limits for current session"""
self.args.current_likes_limit = get_value(
self.args.total_likes_limit, None, 300
)
self.args.current_follow_limit = get_value(
self.args.total_follows_limit, None, 50
)
self.args.current_unfollow_limit = get_value(
self.args.total_unfollows_limit, None, 50
)
self.args.current_comments_limit = get_value(
self.args.total_comments_limit, None, 10
)
self.args.current_pm_limit = get_value(self.args.total_pm_limit, None, 10)
self.args.current_watch_limit = get_value(
self.args.total_watches_limit, None, 50
)
self.args.current_success_limit = get_value(
self.args.total_successful_interactions_limit, None, 100
)
self.args.current_total_limit = get_value(
self.args.total_interactions_limit, None, 1000
)
self.args.current_scraped_limit = get_value(
self.args.total_scraped_limit, None, 200
)
self.args.current_crashes_limit = get_value(
self.args.total_crashes_limit, None, 5
)
def check_limit(self, limit_type=None, output=False):
"""Returns True if limit reached - else False"""
limit_type = SessionState.Limit.ALL if limit_type is None else limit_type
# check limits
total_likes = self.totalLikes >= int(self.args.current_likes_limit)
total_followed = sum(self.totalFollowed.values()) >= int(
self.args.current_follow_limit
)
total_unfollowed = self.totalUnfollowed >= int(self.args.current_unfollow_limit)
total_comments = self.totalComments >= int(self.args.current_comments_limit)
total_pm = self.totalPm >= int(self.args.current_pm_limit)
total_watched = self.totalWatched >= int(self.args.current_watch_limit)
total_successful = sum(self.successfulInteractions.values()) >= int(
self.args.current_success_limit
)
total_interactions = sum(self.totalInteractions.values()) >= int(
self.args.current_total_limit
)
total_scraped = sum(self.totalScraped.values()) >= int(
self.args.current_scraped_limit
)
total_crashes = self.totalCrashes >= int(self.args.current_crashes_limit)
session_info = [
"Checking session limits:",
f"- Total Likes:\t\t\t\t{'Limit Reached' if total_likes else 'OK'} ({self.totalLikes}/{self.args.current_likes_limit})",
f"- Total Comments:\t\t\t\t{'Limit Reached' if total_comments else 'OK'} ({self.totalComments}/{self.args.current_comments_limit})",
f"- Total PM:\t\t\t\t\t{'Limit Reached' if total_pm else 'OK'} ({self.totalPm}/{self.args.current_pm_limit})",
f"- Total Followed:\t\t\t\t{'Limit Reached' if total_followed else 'OK'} ({sum(self.totalFollowed.values())}/{self.args.current_follow_limit})",
f"- Total Unfollowed:\t\t\t\t{'Limit Reached' if total_unfollowed else 'OK'} ({self.totalUnfollowed}/{self.args.current_unfollow_limit})",
f"- Total Watched:\t\t\t\t{'Limit Reached' if total_watched else 'OK'} ({self.totalWatched}/{self.args.current_watch_limit})",
f"- Total Successful Interactions:\t\t{'Limit Reached' if total_successful else 'OK'} ({sum(self.successfulInteractions.values())}/{self.args.current_success_limit})",
f"- Total Interactions:\t\t\t{'Limit Reached' if total_interactions else 'OK'} ({sum(self.totalInteractions.values())}/{self.args.current_total_limit})",
f"- Total Crashes:\t\t\t\t{'Limit Reached' if total_crashes else 'OK'} ({self.totalCrashes}/{self.args.current_crashes_limit})",
f"- Total Successful Scraped Users:\t\t{'Limit Reached' if total_scraped else 'OK'} ({sum(self.totalScraped.values())}/{self.args.current_scraped_limit})",
]
if limit_type == SessionState.Limit.ALL:
if output is not None:
if output:
for line in session_info:
logger.info(line)
else:
for line in session_info:
logger.debug(line)
return (
total_likes
and self.args.end_if_likes_limit_reached
or total_followed
and self.args.end_if_follows_limit_reached
or total_watched
and self.args.end_if_watches_limit_reached
or total_comments
and self.args.end_if_comments_limit_reached
or total_pm
and self.args.end_if_pm_limit_reached,
total_unfollowed,
total_interactions or total_successful or total_scraped,
)
elif limit_type == SessionState.Limit.LIKES:
if output:
logger.info(session_info[1])
else:
logger.debug(session_info[1])
return total_likes
elif limit_type == SessionState.Limit.COMMENTS:
if output:
logger.info(session_info[2])
else:
logger.debug(session_info[2])
return total_comments
elif limit_type == SessionState.Limit.PM:
if output:
logger.info(session_info[3])
else:
logger.debug(session_info[3])
return total_pm
elif limit_type == SessionState.Limit.FOLLOWS:
if output:
logger.info(session_info[4])
else:
logger.debug(session_info[4])
return total_followed
elif limit_type == SessionState.Limit.UNFOLLOWS:
if output:
logger.info(session_info[5])
else:
logger.debug(session_info[5])
return total_unfollowed
elif limit_type == SessionState.Limit.WATCHES:
if output:
logger.info(session_info[6])
else:
logger.debug(session_info[6])
return total_watched
elif limit_type == SessionState.Limit.SUCCESS:
if output:
logger.info(session_info[7])
else:
logger.debug(session_info[7])
return total_successful
elif limit_type == SessionState.Limit.TOTAL:
if output:
logger.info(session_info[8])
else:
logger.debug(session_info[8])
return total_interactions
elif limit_type == SessionState.Limit.CRASHES:
if output:
logger.info(session_info[9])
else:
logger.debug(session_info[9])
return total_crashes
elif limit_type == SessionState.Limit.SCRAPED:
if output:
logger.info(session_info[10])
else:
logger.debug(session_info[10])
return total_scraped
@staticmethod
def inside_working_hours(working_hours, delta_sec):
def time_in_range(start, end, x):
if start <= end:
return start <= x <= end
else:
return start <= x or x <= end
in_range = False
time_left_list = []
current_time = datetime.now()
delta = timedelta(seconds=delta_sec)
for n in working_hours:
today = current_time.strftime("%Y-%m-%d")
inf_value = f"{n.split('-')[0]} {today}"
inf = datetime.strptime(inf_value, "%H.%M %Y-%m-%d") + delta
sup_value = f"{n.split('-')[1]} {today}"
sup = datetime.strptime(sup_value, "%H.%M %Y-%m-%d") + delta
if sup - inf + timedelta(minutes=1) == timedelta(
days=1
) or sup - inf + timedelta(minutes=1) == timedelta(days=0):
logger.debug("Whole day mode.")
return True, 0
if time_in_range(inf.time(), sup.time(), current_time.time()):
in_range = True
return in_range, 0
else:
time_left = inf - current_time
if time_left >= timedelta(0):
time_left_list.append(time_left)
else:
time_left_list.append(time_left + timedelta(days=1))
return (
in_range,
min(time_left_list) if len(time_left_list) > 1 else time_left_list[0],
)
def is_finished(self):
return self.finishTime is not None
class Limit(Enum):
ALL = auto()
LIKES = auto()
COMMENTS = auto()
PM = auto()
FOLLOWS = auto()
UNFOLLOWS = auto()
WATCHES = auto()
SUCCESS = auto()
TOTAL = auto()
SCRAPED = auto()
CRASHES = auto()
class SessionStateEncoder(JSONEncoder):
def default(self, session_state: SessionState):
return {
"id": session_state.id,
"total_interactions": sum(session_state.totalInteractions.values()),
"successful_interactions": sum(
session_state.successfulInteractions.values()
),
"total_followed": sum(session_state.totalFollowed.values()),
"total_likes": session_state.totalLikes,
"total_comments": session_state.totalComments,
"total_pm": session_state.totalPm,
"total_watched": session_state.totalWatched,
"total_unfollowed": session_state.totalUnfollowed,
"total_scraped": session_state.totalScraped,
"start_time": str(session_state.startTime),
"finish_time": str(session_state.finishTime),
"args": session_state.args.__dict__,
"profile": {
"posts": session_state.my_posts_count,
"followers": session_state.my_followers_count,
"following": session_state.my_following_count,
},
}
|
"""Indy ledger implementation."""
import asyncio
import json
import logging
import tempfile
from datetime import datetime, date
from hashlib import sha256
from os import path
from time import time
from typing import Sequence, Tuple
import indy.ledger
import indy.pool
from indy.error import IndyError, ErrorCode
from ..config.base import BaseInjector, BaseProvider, BaseSettings
from ..cache.base import BaseCache
from ..indy.issuer import IndyIssuer, IndyIssuerError, DEFAULT_CRED_DEF_TAG
from ..indy.sdk.error import IndyErrorHandler
from ..messaging.credential_definitions.util import CRED_DEF_SENT_RECORD_TYPE
from ..messaging.schemas.util import SCHEMA_SENT_RECORD_TYPE
from ..storage.base import StorageRecord
from ..storage.indy import IndySdkStorage
from ..utils import sentinel
from ..wallet.base import DIDInfo
from ..wallet.error import WalletNotFoundError
from ..wallet.indy import IndySdkWallet
from ..wallet.util import full_verkey
from ..wallet.did_posture import DIDPosture
from .base import BaseLedger, Role
from .endpoint_type import EndpointType
from .error import (
BadLedgerRequestError,
ClosedPoolError,
LedgerConfigError,
LedgerError,
LedgerTransactionError,
)
from .util import TAA_ACCEPTED_RECORD_TYPE
LOGGER = logging.getLogger(__name__)
GENESIS_TRANSACTION_FILE = "indy_genesis_transactions.txt"
class IndySdkLedgerPoolProvider(BaseProvider):
"""Indy ledger pool provider which keys off the selected pool name."""
def provide(self, settings: BaseSettings, injector: BaseInjector):
"""Create and open the pool instance."""
pool_name = settings.get("ledger.pool_name", "default")
keepalive = int(settings.get("ledger.keepalive", 5))
read_only = bool(settings.get("ledger.read_only", False))
if read_only:
LOGGER.warning("Note: setting ledger to read-only mode")
genesis_transactions = settings.get("ledger.genesis_transactions")
cache = injector.inject(BaseCache, required=False)
ledger_pool = IndySdkLedgerPool(
pool_name,
keepalive=keepalive,
cache=cache,
genesis_transactions=genesis_transactions,
read_only=read_only,
)
return ledger_pool
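# Settings keys consumed above (all read via BaseSettings; values depend on the deployment):
#   ledger.pool_name             name of the pool ledger configuration, default "default"
#   ledger.keepalive             seconds to keep the pool handle open after its last use
#   ledger.read_only             refuse all ledger write operations
#   ledger.genesis_transactions  genesis transactions string, passed through to the pool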
class IndySdkLedgerPool:
"""Indy ledger manager class."""
def __init__(
self,
name: str,
*,
checked: bool = False,
keepalive: int = 0,
cache: BaseCache = None,
cache_duration: int = 600,
genesis_transactions: str = None,
read_only: bool = False,
):
"""
Initialize an IndySdkLedgerPool instance.
Args:
name: The Indy pool ledger configuration name
keepalive: How many seconds to keep the ledger open
cache: The cache instance to use
cache_duration: The TTL for ledger cache entries
genesis_transactions: The ledger genesis transaction as a string
read_only: Prevent any ledger write operations
"""
self.checked = checked
self.opened = False
self.ref_count = 0
self.ref_lock = asyncio.Lock()
self.keepalive = keepalive
self.close_task: asyncio.Future = None
self.cache = cache
self.cache_duration = cache_duration
self.genesis_transactions = genesis_transactions
self.handle = None
self.name = name
self.taa_cache = None
self.read_only = read_only
async def create_pool_config(
self, genesis_transactions: str, recreate: bool = False
):
"""Create the pool ledger configuration."""
# indy-sdk requires a file to pass the pool configuration
# the file path includes the pool name to avoid conflicts
txn_path = path.join(
tempfile.gettempdir(), f"{self.name}_{GENESIS_TRANSACTION_FILE}"
)
with open(txn_path, "w") as genesis_file:
genesis_file.write(genesis_transactions)
pool_config = json.dumps({"genesis_txn": txn_path})
if await self.check_pool_config():
if recreate:
LOGGER.debug("Removing existing ledger config")
await indy.pool.delete_pool_ledger_config(self.name)
else:
raise LedgerConfigError(
"Ledger pool configuration already exists: %s", self.name
)
LOGGER.debug("Creating pool ledger config")
with IndyErrorHandler(
"Exception creating pool ledger config", LedgerConfigError
):
await indy.pool.create_pool_ledger_config(self.name, pool_config)
async def check_pool_config(self) -> bool:
"""Check if a pool config has been created."""
pool_names = {cfg["pool"] for cfg in await indy.pool.list_pools()}
return self.name in pool_names
async def open(self):
"""Open the pool ledger, creating it if necessary."""
if self.genesis_transactions:
await self.create_pool_config(self.genesis_transactions, True)
self.genesis_transactions = None
self.checked = True
elif not self.checked:
if not await self.check_pool_config():
raise LedgerError("Ledger pool configuration has not been created")
self.checked = True
# We only support proto ver 2
with IndyErrorHandler(
"Exception setting ledger protocol version", LedgerConfigError
):
await indy.pool.set_protocol_version(2)
with IndyErrorHandler(
f"Exception opening pool ledger {self.name}", LedgerConfigError
):
self.handle = await indy.pool.open_pool_ledger(self.name, "{}")
self.opened = True
async def close(self):
"""Close the pool ledger."""
if self.opened:
exc = None
for attempt in range(3):
try:
await indy.pool.close_pool_ledger(self.handle)
except IndyError as err:
await asyncio.sleep(0.01)
exc = err
continue
self.handle = None
self.opened = False
exc = None
break
if exc:
LOGGER.error("Exception closing pool ledger")
self.ref_count += 1 # if we are here, we should have self.ref_lock
self.close_task = None
raise IndyErrorHandler.wrap_error(
exc, "Exception closing pool ledger", LedgerError
)
async def context_open(self):
"""Open the ledger if necessary and increase the number of active references."""
async with self.ref_lock:
if self.close_task:
self.close_task.cancel()
if not self.opened:
LOGGER.debug("Opening the pool ledger")
await self.open()
self.ref_count += 1
async def context_close(self):
"""Release the reference and schedule closing of the pool ledger."""
async def closer(timeout: int):
"""Close the pool ledger after a timeout."""
await asyncio.sleep(timeout)
async with self.ref_lock:
if not self.ref_count:
LOGGER.debug("Closing pool ledger after timeout")
await self.close()
async with self.ref_lock:
self.ref_count -= 1
if not self.ref_count:
if self.keepalive:
self.close_task = asyncio.ensure_future(closer(self.keepalive))
else:
await self.close()
class IndySdkLedger(BaseLedger):
"""Indy ledger class."""
BACKEND_NAME = "indy"
def __init__(
self,
pool: IndySdkLedgerPool,
wallet: IndySdkWallet,
):
"""
Initialize an IndySdkLedger instance.
Args:
pool: The pool instance handling the raw ledger connection
wallet: The IndySdkWallet instance
"""
self.pool = pool
self.wallet = wallet
@property
def pool_handle(self):
"""Accessor for the ledger pool handle."""
return self.pool.handle
@property
def pool_name(self) -> str:
"""Accessor for the ledger pool name."""
return self.pool.name
@property
def read_only(self) -> bool:
"""Accessor for the ledger read-only flag."""
return self.pool.read_only
async def __aenter__(self) -> "IndySdkLedger":
"""
Context manager entry.
Returns:
The current instance
"""
await super().__aenter__()
await self.pool.context_open()
return self
async def __aexit__(self, exc_type, exc, tb):
"""Context manager exit."""
await self.pool.context_close()
await super().__aexit__(exc_type, exc, tb)
async def _submit(
self,
request_json: str,
sign: bool = None,
taa_accept: bool = None,
sign_did: DIDInfo = sentinel,
) -> str:
"""
Sign and submit request to ledger.
Args:
request_json: The json string to submit
sign: whether or not to sign the request
taa_accept: whether to apply TAA acceptance to the (signed, write) request
sign_did: override the signing DID
"""
if not self.pool.handle:
raise ClosedPoolError(
f"Cannot sign and submit request to closed pool '{self.pool.name}'"
)
if sign is None or sign:
if sign_did is sentinel:
sign_did = await self.wallet.get_public_did()
if sign is None:
sign = bool(sign_did)
if taa_accept is None and sign:
taa_accept = True
if sign:
if not sign_did:
raise BadLedgerRequestError("Cannot sign request without a public DID")
if taa_accept:
acceptance = await self.get_latest_txn_author_acceptance()
if acceptance:
request_json = await (
indy.ledger.append_txn_author_agreement_acceptance_to_request(
request_json,
acceptance["text"],
acceptance["version"],
acceptance["digest"],
acceptance["mechanism"],
acceptance["time"],
)
)
submit_op = indy.ledger.sign_and_submit_request(
self.pool.handle, self.wallet.opened.handle, sign_did.did, request_json
)
else:
submit_op = indy.ledger.submit_request(self.pool.handle, request_json)
with IndyErrorHandler(
"Exception raised by ledger transaction", LedgerTransactionError
):
request_result_json = await submit_op
request_result = json.loads(request_result_json)
operation = request_result.get("op", "")
if operation in ("REQNACK", "REJECT"):
raise LedgerTransactionError(
f"Ledger rejected transaction request: {request_result["reason"]}"
)
elif operation == "REPLY":
return request_result_json
else:
raise LedgerTransactionError(
f"Unexpected operation code from ledger: {operation}"
)
async def create_and_send_schema(
self,
issuer: IndyIssuer,
schema_name: str,
schema_version: str,
attribute_names: Sequence[str],
) -> Tuple[str, dict]:
"""
Send schema to ledger.
Args:
issuer: The issuer instance creating the schema
schema_name: The schema name
schema_version: The schema version
attribute_names: A list of schema attributes
"""
public_info = await self.wallet.get_public_did()
if not public_info:
raise BadLedgerRequestError("Cannot publish schema without a public DID")
schema_info = await self.check_existing_schema(
public_info.did, schema_name, schema_version, attribute_names
)
if schema_info:
LOGGER.warning("Schema already exists on ledger. Returning details.")
schema_id, schema_def = schema_info
else:
if self.pool.read_only:
raise LedgerError(
"Error cannot write schema when ledger is in read only mode"
)
try:
schema_id, schema_json = await issuer.create_schema(
public_info.did,
schema_name,
schema_version,
attribute_names,
)
except IndyIssuerError as err:
raise LedgerError(err.message) from err
schema_def = json.loads(schema_json)
with IndyErrorHandler("Exception building schema request", LedgerError):
request_json = await indy.ledger.build_schema_request(
public_info.did, schema_json
)
try:
resp = await self._submit(request_json, True, sign_did=public_info)
try:
# parse sequence number out of response
seq_no = json.loads(resp)["result"]["txnMetadata"]["seqNo"]
schema_def["seqNo"] = seq_no
except KeyError as err:
raise LedgerError(
"Failed to parse schema sequence number from ledger response"
) from err
except LedgerTransactionError as e:
# Identify possible duplicate schema errors on indy-node < 1.9 and > 1.9
if "can have one and only one SCHEMA with name" in getattr(
e, "message", ""
) or "UnauthorizedClientRequest" in getattr(e, "message", ""):
# handle potential race condition if multiple agents are publishing
# the same schema simultaneously
schema_info = await self.check_existing_schema(
public_info.did, schema_name, schema_version, attribute_names
)
if schema_info:
LOGGER.warning(
"Schema already exists on ledger. Returning details."
" Error: %s",
e,
)
schema_id, schema_def = schema_info
else:
raise
schema_id_parts = schema_id.split(":")
schema_tags = {
"schema_id": schema_id,
"schema_issuer_did": public_info.did,
"schema_name": schema_id_parts[-2],
"schema_version": schema_id_parts[-1],
"epoch": str(int(time())),
}
record = StorageRecord(SCHEMA_SENT_RECORD_TYPE, schema_id, schema_tags)
storage = self.get_indy_storage()
await storage.add_record(record)
return schema_id, schema_def
async def check_existing_schema(
self,
public_did: str,
schema_name: str,
schema_version: str,
attribute_names: Sequence[str],
) -> Tuple[str, dict]:
"""Check if a schema has already been published."""
fetch_schema_id = f"{public_did}:2:{schema_name}:{schema_version}"
schema = await self.fetch_schema_by_id(fetch_schema_id)
if schema:
fetched_attrs = schema["attrNames"].copy()
fetched_attrs.sort()
cmp_attrs = list(attribute_names)
cmp_attrs.sort()
if fetched_attrs != cmp_attrs:
raise LedgerTransactionError(
"Schema already exists on ledger, but attributes do not match: "
+ f"{schema_name}:{schema_version} {fetched_attrs} != {cmp_attrs}"
)
return fetch_schema_id, schema
async def get_schema(self, schema_id: str) -> dict:
"""
Get a schema from the cache if available, otherwise fetch from the ledger.
Args:
schema_id: The schema id (or stringified sequence number) to retrieve
"""
if self.pool.cache:
result = await self.pool.cache.get(f"schema::{schema_id}")
if result:
return result
if schema_id.isdigit():
return await self.fetch_schema_by_seq_no(int(schema_id))
else:
return await self.fetch_schema_by_id(schema_id)
async def fetch_schema_by_id(self, schema_id: str) -> dict:
"""
Get schema from ledger.
Args:
schema_id: The schema id (or stringified sequence number) to retrieve
Returns:
Indy schema dict
"""
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building schema request", LedgerError):
request_json = await indy.ledger.build_get_schema_request(
public_did, schema_id
)
response_json = await self._submit(request_json, sign_did=public_info)
response = json.loads(response_json)
if not response["result"]["seqNo"]:
# schema not found
return None
with IndyErrorHandler("Exception parsing schema response", LedgerError):
_, parsed_schema_json = await indy.ledger.parse_get_schema_response(
response_json
)
parsed_response = json.loads(parsed_schema_json)
if parsed_response and self.pool.cache:
await self.pool.cache.set(
[f"schema::{schema_id}", f"schema::{response["result"]["seqNo"]}"],
parsed_response,
self.pool.cache_duration,
)
return parsed_response
async def fetch_schema_by_seq_no(self, seq_no: int):
"""
Fetch a schema by its sequence number.
Args:
seq_no: schema ledger sequence number
Returns:
Indy schema dict
"""
# get txn by sequence number, retrieve schema identifier components
request_json = await indy.ledger.build_get_txn_request(
None, None, seq_no=seq_no
)
response = json.loads(await self._submit(request_json))
# transaction data format assumes node protocol >= 1.4 (circa 2018-07)
data_txn = (response["result"].get("data", {}) or {}).get("txn", {})
if data_txn.get("type", None) == "101": # marks indy-sdk schema txn type
(origin_did, name, version) = (
data_txn["metadata"]["from"],
data_txn["data"]["data"]["name"],
data_txn["data"]["data"]["version"],
)
schema_id = f"{origin_did}:2:{name}:{version}"
return await self.get_schema(schema_id)
raise LedgerTransactionError(
f"Could not get schema from ledger for seq no {seq_no}"
)
async def create_and_send_credential_definition(
self,
issuer: IndyIssuer,
schema_id: str,
signature_type: str = None,
tag: str = None,
support_revocation: bool = False,
) -> Tuple[str, dict, bool]:
"""
Send credential definition to ledger and store relevant key matter in wallet.
Args:
issuer: The issuer instance to use for credential definition creation
schema_id: The schema id of the schema to create cred def for
signature_type: The signature type to use on the credential definition
tag: Optional tag to distinguish multiple credential definitions
support_revocation: Optional flag to enable revocation for this cred def
Returns:
Tuple with cred def id, cred def structure, and whether it's novel
"""
public_info = await self.wallet.get_public_did()
if not public_info:
raise BadLedgerRequestError(
"Cannot publish credential definition without a public DID"
)
schema = await self.get_schema(schema_id)
if not schema:
raise LedgerError(f"Ledger {self.pool.name} has no schema {schema_id}")
novel = False
# check if cred def is on ledger already
for test_tag in [tag] if tag else ["tag", DEFAULT_CRED_DEF_TAG]:
credential_definition_id = issuer.make_credential_definition_id(
public_info.did, schema, signature_type, test_tag
)
ledger_cred_def = await self.fetch_credential_definition(
credential_definition_id
)
if ledger_cred_def:
LOGGER.warning(
"Credential definition %s already exists on ledger %s",
credential_definition_id,
self.pool.name,
)
try:
if not await issuer.credential_definition_in_wallet(
credential_definition_id
):
raise LedgerError(
f"Credential definition {credential_definition_id} is on "
f"ledger {self.pool.name} but not in wallet "
f"{self.wallet.opened.name}"
)
except IndyIssuerError as err:
raise LedgerError(err.message) from err
credential_definition_json = json.dumps(ledger_cred_def)
break
else: # no such cred def on ledger
try:
if await issuer.credential_definition_in_wallet(
credential_definition_id
):
raise LedgerError(
f"Credential definition {credential_definition_id} is in "
f"wallet {self.wallet.opened.name} but not on ledger "
f"{self.pool.name}"
)
except IndyIssuerError as err:
raise LedgerError(err.message) from err
# Cred def is neither on ledger nor in wallet: create and send it
novel = True
try:
(
credential_definition_id,
credential_definition_json,
) = await issuer.create_and_store_credential_definition(
public_info.did,
schema,
signature_type,
tag,
support_revocation,
)
except IndyIssuerError as err:
raise LedgerError(err.message) from err
if self.pool.read_only:
raise LedgerError(
"Error cannot write cred def when ledger is in read only mode"
)
with IndyErrorHandler("Exception building cred def request", LedgerError):
request_json = await indy.ledger.build_cred_def_request(
public_info.did, credential_definition_json
)
await self._submit(request_json, True, sign_did=public_info)
# Add non-secrets record
storage = self.get_indy_storage()
schema_id_parts = schema_id.split(":")
cred_def_tags = {
"schema_id": schema_id,
"schema_issuer_did": schema_id_parts[0],
"schema_name": schema_id_parts[-2],
"schema_version": schema_id_parts[-1],
"issuer_did": public_info.did,
"cred_def_id": credential_definition_id,
"epoch": str(int(time())),
}
record = StorageRecord(
CRED_DEF_SENT_RECORD_TYPE, credential_definition_id, cred_def_tags
)
await storage.add_record(record)
return (credential_definition_id, json.loads(credential_definition_json), novel)
async def get_credential_definition(self, credential_definition_id: str) -> dict:
"""
Get a credential definition from the cache if available, otherwise the ledger.
Args:
credential_definition_id: The schema id of the schema to fetch cred def for
"""
if self.pool.cache:
result = await self.pool.cache.get(
f"credential_definition::{credential_definition_id}"
)
if result:
return result
return await self.fetch_credential_definition(credential_definition_id)
async def fetch_credential_definition(self, credential_definition_id: str) -> dict:
"""
Get a credential definition from the ledger by id.
Args:
credential_definition_id: The cred def id of the cred def to fetch
"""
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building cred def request", LedgerError):
request_json = await indy.ledger.build_get_cred_def_request(
public_did, credential_definition_id
)
response_json = await self._submit(request_json, sign_did=public_info)
with IndyErrorHandler("Exception parsing cred def response", LedgerError):
try:
(
_,
parsed_credential_definition_json,
) = await indy.ledger.parse_get_cred_def_response(response_json)
parsed_response = json.loads(parsed_credential_definition_json)
except IndyError as error:
if error.error_code == ErrorCode.LedgerNotFound:
parsed_response = None
else:
raise
if parsed_response and self.pool.cache:
await self.pool.cache.set(
f"credential_definition::{credential_definition_id}",
parsed_response,
self.pool.cache_duration,
)
return parsed_response
async def credential_definition_id2schema_id(self, credential_definition_id):
"""
From a credential definition, get the identifier for its schema.
Args:
credential_definition_id: The identifier of the credential definition
from which to identify a schema
"""
# scrape schema id or sequence number from cred def id
tokens = credential_definition_id.split(":")
if len(tokens) == 8: # node protocol >= 1.4: cred def id has 5 or 8 tokens
return ":".join(tokens[3:7]) # schema id spans 0-based positions 3-6
# get txn by sequence number, retrieve schema identifier components
seq_no = tokens[3]
return (await self.get_schema(seq_no))["id"]
async def get_key_for_did(self, did: str) -> str:
"""Fetch the verkey for a ledger DID.
Args:
did: The DID to look up on the ledger or in the cache
"""
nym = self.did_to_nym(did)
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building nym request", LedgerError):
request_json = await indy.ledger.build_get_nym_request(public_did, nym)
response_json = await self._submit(request_json, sign_did=public_info)
data_json = (json.loads(response_json))["result"]["data"]
return full_verkey(did, json.loads(data_json)["verkey"]) if data_json else None
async def get_all_endpoints_for_did(self, did: str) -> dict:
"""Fetch all endpoints for a ledger DID.
Args:
did: The DID to look up on the ledger or in the cache
"""
nym = self.did_to_nym(did)
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building attribute request", LedgerError):
request_json = await indy.ledger.build_get_attrib_request(
public_did, nym, "endpoint", None, None
)
response_json = await self._submit(request_json, sign_did=public_info)
data_json = json.loads(response_json)["result"]["data"]
if data_json:
endpoints = json.loads(data_json).get("endpoint", None)
else:
endpoints = None
return endpoints
async def get_endpoint_for_did(
self, did: str, endpoint_type: EndpointType = None
) -> str:
"""Fetch the endpoint for a ledger DID.
Args:
did: The DID to look up on the ledger or in the cache
endpoint_type: The type of the endpoint. If none given, returns all
"""
if not endpoint_type:
endpoint_type = EndpointType.ENDPOINT
nym = self.did_to_nym(did)
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building attribute request", LedgerError):
request_json = await indy.ledger.build_get_attrib_request(
public_did, nym, "endpoint", None, None
)
response_json = await self._submit(request_json, sign_did=public_info)
data_json = json.loads(response_json)["result"]["data"]
if data_json:
endpoint = json.loads(data_json).get("endpoint", None)
address = endpoint.get(endpoint_type.indy, None) if endpoint else None
else:
address = None
return address
async def update_endpoint_for_did(
self, did: str, endpoint: str, endpoint_type: EndpointType = None
) -> bool:
"""Check and update the endpoint on the ledger.
Args:
did: The ledger DID
endpoint: The endpoint address
endpoint_type: The type of the endpoint
"""
if not endpoint_type:
endpoint_type = EndpointType.ENDPOINT
all_exist_endpoints = await self.get_all_endpoints_for_did(did)
exist_endpoint_of_type = (
all_exist_endpoints.get(endpoint_type.indy, None)
if all_exist_endpoints
else None
)
if exist_endpoint_of_type != endpoint:
if self.pool.read_only:
raise LedgerError(
"Error cannot update endpoint when ledger is in read only mode"
)
nym = self.did_to_nym(did)
if all_exist_endpoints:
all_exist_endpoints[endpoint_type.indy] = endpoint
attr_json = json.dumps({"endpoint": all_exist_endpoints})
else:
attr_json = json.dumps({"endpoint": {endpoint_type.indy: endpoint}})
with IndyErrorHandler("Exception building attribute request", LedgerError):
request_json = await indy.ledger.build_attrib_request(
nym, nym, None, attr_json, None
)
await self._submit(request_json, True, True)
return True
return False
async def register_nym(
self, did: str, verkey: str, alias: str = None, role: str = None
):
"""
Register a nym on the ledger.
Args:
did: DID to register on the ledger.
verkey: The verification key of the keypair.
alias: Human-friendly alias to assign to the DID.
role: For permissioned ledgers, what role should the new DID have.
"""
if self.pool.read_only:
raise LedgerError(
"Error cannot register nym when ledger is in read only mode"
)
public_info = await self.wallet.get_public_did()
if not public_info:
raise WalletNotFoundError(
f"Cannot register NYM to ledger: wallet {self.wallet.opened.name} "
"has no public DID"
)
with IndyErrorHandler("Exception building nym request", LedgerError):
request_json = await indy.ledger.build_nym_request(
public_info.did, did, verkey, alias, role
)
await self._submit(request_json) # let ledger raise on insufficient privilege
try:
did_info = await self.wallet.get_local_did(did)
except WalletNotFoundError:
pass # registering another user's NYM
else:
metadata = {**did_info.metadata, **DIDPosture.POSTED.metadata}
await self.wallet.replace_local_did_metadata(did, metadata)
async def get_nym_role(self, did: str) -> Role:
"""
Return the role of the input public DID's NYM on the ledger.
Args:
did: DID to query for role on the ledger.
"""
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building get-nym request", LedgerError):
request_json = await indy.ledger.build_get_nym_request(public_did, did)
response_json = await self._submit(request_json)
response = json.loads(response_json)
nym_data = json.loads(response["result"]["data"])
if not nym_data:
raise BadLedgerRequestError(f"DID {did} is not public")
return Role.get(nym_data["role"])
def nym_to_did(self, nym: str) -> str:
"""Format a nym with the ledger's DID prefix."""
if nym:
# remove any existing prefix
nym = self.did_to_nym(nym)
return f"did:sov:{nym}"
async def rotate_public_did_keypair(self, next_seed: str = None) -> None:
"""
Rotate keypair for public DID: create new key, submit to ledger, update wallet.
Args:
next_seed: seed for incoming ed25519 keypair (default random)
"""
# generate new key
public_info = await self.wallet.get_public_did()
public_did = public_info.did
verkey = await self.wallet.rotate_did_keypair_start(public_did, next_seed)
# submit to ledger (retain role and alias)
nym = self.did_to_nym(public_did)
with IndyErrorHandler("Exception building nym request", LedgerError):
request_json = await indy.ledger.build_get_nym_request(public_did, nym)
response_json = await self._submit(request_json)
data = json.loads((json.loads(response_json))["result"]["data"])
if not data:
raise BadLedgerRequestError(
f"Ledger has no public DID for wallet {self.wallet.opened.name}"
)
seq_no = data["seqNo"]
with IndyErrorHandler("Exception building get-txn request", LedgerError):
txn_req_json = await indy.ledger.build_get_txn_request(None, None, seq_no)
txn_resp_json = await self._submit(txn_req_json)
txn_resp = json.loads(txn_resp_json)
txn_resp_data = txn_resp["result"]["data"]
if not txn_resp_data:
raise BadLedgerRequestError(
f"Bad or missing ledger NYM transaction for DID {public_did}"
)
txn_data_data = txn_resp_data["txn"]["data"]
role_token = Role.get(txn_data_data.get("role")).token()
alias = txn_data_data.get("alias")
        await self.register_nym(public_did, verkey, alias=alias, role=role_token)
# update wallet
await self.wallet.rotate_did_keypair_apply(public_did)
async def get_txn_author_agreement(self, reload: bool = False) -> dict:
"""Get the current transaction author agreement, fetching it if necessary."""
if not self.pool.taa_cache or reload:
self.pool.taa_cache = await self.fetch_txn_author_agreement()
return self.pool.taa_cache
async def fetch_txn_author_agreement(self) -> dict:
"""Fetch the current AML and TAA from the ledger."""
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
get_aml_req = await indy.ledger.build_get_acceptance_mechanisms_request(
public_did, None, None
)
response_json = await self._submit(get_aml_req, sign_did=public_info)
aml_found = (json.loads(response_json))["result"]["data"]
get_taa_req = await indy.ledger.build_get_txn_author_agreement_request(
public_did, None
)
response_json = await self._submit(get_taa_req, sign_did=public_info)
taa_found = (json.loads(response_json))["result"]["data"]
taa_required = bool(taa_found and taa_found["text"])
if taa_found:
taa_found["digest"] = self.taa_digest(
taa_found["version"], taa_found["text"]
)
return {
"aml_record": aml_found,
"taa_record": taa_found,
"taa_required": taa_required,
}
def get_indy_storage(self) -> IndySdkStorage:
"""Get an IndySdkStorage instance for the current wallet."""
return IndySdkStorage(self.wallet.opened)
def taa_rough_timestamp(self) -> int:
"""Get a timestamp accurate to the day.
Anything more accurate is a privacy concern.
"""
return int(datetime.combine(date.today(), datetime.min.time()).timestamp())
def taa_digest(self, version: str, text: str):
"""Generate the digest of a TAA record."""
if not version or not text:
raise ValueError("Bad input for TAA digest")
taa_plaintext = version + text
return sha256(taa_plaintext.encode("utf-8")).digest().hex()
async def accept_txn_author_agreement(
self, taa_record: dict, mechanism: str, accept_time: int = None
):
"""Save a new record recording the acceptance of the TAA."""
if not accept_time:
accept_time = self.taa_rough_timestamp()
acceptance = {
"text": taa_record["text"],
"version": taa_record["version"],
"digest": taa_record["digest"],
"mechanism": mechanism,
"time": accept_time,
}
record = StorageRecord(
TAA_ACCEPTED_RECORD_TYPE,
json.dumps(acceptance),
{"pool_name": self.pool.name},
)
storage = self.get_indy_storage()
await storage.add_record(record)
if self.pool.cache:
# TAA acceptance must be set for each wallet and pool combination
cache_key = (
TAA_ACCEPTED_RECORD_TYPE
+ "::"
+ self.wallet.opened.name
+ "::"
+ self.pool.name
)
await self.pool.cache.set(cache_key, acceptance, self.pool.cache_duration)
async def get_latest_txn_author_acceptance(self) -> dict:
"""Look up the latest TAA acceptance."""
cache_key = (
TAA_ACCEPTED_RECORD_TYPE
+ "::"
+ self.wallet.opened.name
+ "::"
+ self.pool.name
)
acceptance = self.pool.cache and await self.pool.cache.get(cache_key)
if not acceptance:
storage = self.get_indy_storage()
tag_filter = {"pool_name": self.pool.name}
found = await storage.find_all_records(TAA_ACCEPTED_RECORD_TYPE, tag_filter)
if found:
records = list(json.loads(record.value) for record in found)
records.sort(key=lambda v: v["time"], reverse=True)
acceptance = records[0]
else:
acceptance = {}
if self.pool.cache:
await self.pool.cache.set(
cache_key, acceptance, self.pool.cache_duration
)
return acceptance
async def get_revoc_reg_def(self, revoc_reg_id: str) -> dict:
"""Get revocation registry definition by ID; augment with ledger timestamp."""
public_info = await self.wallet.get_public_did()
try:
fetch_req = await indy.ledger.build_get_revoc_reg_def_request(
public_info and public_info.did, revoc_reg_id
)
response_json = await self._submit(fetch_req, sign_did=public_info)
(
found_id,
found_def_json,
) = await indy.ledger.parse_get_revoc_reg_def_response(response_json)
found_def = json.loads(found_def_json)
found_def["txnTime"] = json.loads(response_json)["result"]["txnTime"]
except IndyError as e:
LOGGER.error(
f"get_revoc_reg_def failed with revoc_reg_id={revoc_reg_id} - "
f"{e.error_code}: {getattr(e, "message", "[no message]")}"
)
raise e
assert found_id == revoc_reg_id
return found_def
async def get_revoc_reg_entry(self, revoc_reg_id: str, timestamp: int):
"""Get revocation registry entry by revocation registry ID and timestamp."""
public_info = await self.wallet.get_public_did()
with IndyErrorHandler("Exception fetching rev reg entry", LedgerError):
try:
fetch_req = await indy.ledger.build_get_revoc_reg_request(
public_info and public_info.did, revoc_reg_id, timestamp
)
response_json = await self._submit(fetch_req, sign_did=public_info)
(
found_id,
found_reg_json,
ledger_timestamp,
) = await indy.ledger.parse_get_revoc_reg_response(response_json)
except IndyError as e:
LOGGER.error(
f"get_revoc_reg_entry failed with revoc_reg_id={revoc_reg_id} - "
f"{e.error_code}: {getattr(e, "message", "[no message]")}"
)
raise e
assert found_id == revoc_reg_id
return json.loads(found_reg_json), ledger_timestamp
async def get_revoc_reg_delta(
self, revoc_reg_id: str, fro=0, to=None
    ) -> Tuple[dict, int]:
"""
Look up a revocation registry delta by ID.
        :param revoc_reg_id: revocation registry id
        :param fro: earliest EPOCH time of interest
        :param to: latest EPOCH time of interest
        :returns: delta response, delta timestamp
"""
if to is None:
to = int(time())
public_info = await self.wallet.get_public_did()
with IndyErrorHandler("Exception building rev reg delta request", LedgerError):
fetch_req = await indy.ledger.build_get_revoc_reg_delta_request(
public_info and public_info.did,
revoc_reg_id,
0 if fro == to else fro,
to,
)
response_json = await self._submit(fetch_req, sign_did=public_info)
with IndyErrorHandler(
(
"Exception parsing rev reg delta response "
"(interval ends before rev reg creation?)"
),
LedgerError,
):
(
found_id,
found_delta_json,
delta_timestamp,
) = await indy.ledger.parse_get_revoc_reg_delta_response(response_json)
assert found_id == revoc_reg_id
return json.loads(found_delta_json), delta_timestamp
async def send_revoc_reg_def(self, revoc_reg_def: dict, issuer_did: str = None):
"""Publish a revocation registry definition to the ledger."""
# NOTE - issuer DID could be extracted from the revoc_reg_def ID
if issuer_did:
did_info = await self.wallet.get_local_did(issuer_did)
else:
did_info = await self.wallet.get_public_did()
if not did_info:
raise LedgerTransactionError(
"No issuer DID found for revocation registry definition"
)
with IndyErrorHandler("Exception building rev reg def", LedgerError):
request_json = await indy.ledger.build_revoc_reg_def_request(
did_info.did, json.dumps(revoc_reg_def)
)
await self._submit(request_json, True, True, did_info)
async def send_revoc_reg_entry(
self,
revoc_reg_id: str,
revoc_def_type: str,
revoc_reg_entry: dict,
issuer_did: str = None,
):
"""Publish a revocation registry entry to the ledger."""
if issuer_did:
did_info = await self.wallet.get_local_did(issuer_did)
else:
did_info = await self.wallet.get_public_did()
if not did_info:
raise LedgerTransactionError(
"No issuer DID found for revocation registry entry"
)
with IndyErrorHandler("Exception building rev reg entry", LedgerError):
request_json = await indy.ledger.build_revoc_reg_entry_request(
did_info.did, revoc_reg_id, revoc_def_type, json.dumps(revoc_reg_entry)
)
await self._submit(request_json, True, True, did_info)
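# Standalone sketch with hypothetical identifiers (not part of this module): the token
# layout credential_definition_id2schema_id() relies on, and the TAA digest exactly as
# taa_digest() computes it (sha256 over version + text).
if __name__ == "__main__":
    cred_def_id = "WgWxqztrNooG92RXvxSTWv:3:CL:WgWxqztrNooG92RXvxSTWv:2:prefs:1.0:tag"
    tokens = cred_def_id.split(":")  # node protocol >= 1.4 form with 8 tokens
    print(":".join(tokens[3:7]))  # embedded schema id: WgWxqztrNooG92RXvxSTWv:2:prefs:1.0
    print(sha256(("1.0" + "Sample agreement text").encode("utf-8")).digest().hex())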
| """Indy ledger implementation."""
import asyncio
import json
import logging
import tempfile
from datetime import datetime, date
from hashlib import sha256
from os import path
from time import time
from typing import Sequence, Tuple
import indy.ledger
import indy.pool
from indy.error import IndyError, ErrorCode
from ..config.base import BaseInjector, BaseProvider, BaseSettings
from ..cache.base import BaseCache
from ..indy.issuer import IndyIssuer, IndyIssuerError, DEFAULT_CRED_DEF_TAG
from ..indy.sdk.error import IndyErrorHandler
from ..messaging.credential_definitions.util import CRED_DEF_SENT_RECORD_TYPE
from ..messaging.schemas.util import SCHEMA_SENT_RECORD_TYPE
from ..storage.base import StorageRecord
from ..storage.indy import IndySdkStorage
from ..utils import sentinel
from ..wallet.base import DIDInfo
from ..wallet.error import WalletNotFoundError
from ..wallet.indy import IndySdkWallet
from ..wallet.util import full_verkey
from ..wallet.did_posture import DIDPosture
from .base import BaseLedger, Role
from .endpoint_type import EndpointType
from .error import (
BadLedgerRequestError,
ClosedPoolError,
LedgerConfigError,
LedgerError,
LedgerTransactionError,
)
from .util import TAA_ACCEPTED_RECORD_TYPE
LOGGER = logging.getLogger(__name__)
GENESIS_TRANSACTION_FILE = "indy_genesis_transactions.txt"
class IndySdkLedgerPoolProvider(BaseProvider):
"""Indy ledger pool provider which keys off the selected pool name."""
def provide(self, settings: BaseSettings, injector: BaseInjector):
"""Create and open the pool instance."""
pool_name = settings.get("ledger.pool_name", "default")
keepalive = int(settings.get("ledger.keepalive", 5))
read_only = bool(settings.get("ledger.read_only", False))
if read_only:
LOGGER.warning("Note: setting ledger to read-only mode")
genesis_transactions = settings.get("ledger.genesis_transactions")
cache = injector.inject(BaseCache, required=False)
ledger_pool = IndySdkLedgerPool(
pool_name,
keepalive=keepalive,
cache=cache,
genesis_transactions=genesis_transactions,
read_only=read_only,
)
return ledger_pool
class IndySdkLedgerPool:
"""Indy ledger manager class."""
def __init__(
self,
name: str,
*,
checked: bool = False,
keepalive: int = 0,
cache: BaseCache = None,
cache_duration: int = 600,
genesis_transactions: str = None,
read_only: bool = False,
):
"""
Initialize an IndySdkLedgerPool instance.
Args:
name: The Indy pool ledger configuration name
keepalive: How many seconds to keep the ledger open
cache: The cache instance to use
cache_duration: The TTL for ledger cache entries
genesis_transactions: The ledger genesis transaction as a string
read_only: Prevent any ledger write operations
"""
self.checked = checked
self.opened = False
self.ref_count = 0
self.ref_lock = asyncio.Lock()
self.keepalive = keepalive
self.close_task: asyncio.Future = None
self.cache = cache
self.cache_duration = cache_duration
self.genesis_transactions = genesis_transactions
self.handle = None
self.name = name
self.taa_cache = None
self.read_only = read_only
async def create_pool_config(
self, genesis_transactions: str, recreate: bool = False
):
"""Create the pool ledger configuration."""
# indy-sdk requires a file to pass the pool configuration
# the file path includes the pool name to avoid conflicts
txn_path = path.join(
tempfile.gettempdir(), f"{self.name}_{GENESIS_TRANSACTION_FILE}"
)
with open(txn_path, "w") as genesis_file:
genesis_file.write(genesis_transactions)
pool_config = json.dumps({"genesis_txn": txn_path})
if await self.check_pool_config():
if recreate:
LOGGER.debug("Removing existing ledger config")
await indy.pool.delete_pool_ledger_config(self.name)
else:
raise LedgerConfigError(
"Ledger pool configuration already exists: %s", self.name
)
LOGGER.debug("Creating pool ledger config")
with IndyErrorHandler(
"Exception creating pool ledger config", LedgerConfigError
):
await indy.pool.create_pool_ledger_config(self.name, pool_config)
async def check_pool_config(self) -> bool:
"""Check if a pool config has been created."""
pool_names = {cfg["pool"] for cfg in await indy.pool.list_pools()}
return self.name in pool_names
async def open(self):
"""Open the pool ledger, creating it if necessary."""
if self.genesis_transactions:
await self.create_pool_config(self.genesis_transactions, True)
self.genesis_transactions = None
self.checked = True
elif not self.checked:
if not await self.check_pool_config():
raise LedgerError("Ledger pool configuration has not been created")
self.checked = True
# We only support proto ver 2
with IndyErrorHandler(
"Exception setting ledger protocol version", LedgerConfigError
):
await indy.pool.set_protocol_version(2)
with IndyErrorHandler(
f"Exception opening pool ledger {self.name}", LedgerConfigError
):
self.handle = await indy.pool.open_pool_ledger(self.name, "{}")
self.opened = True
async def close(self):
"""Close the pool ledger."""
if self.opened:
exc = None
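# Retry the close up to three times with a short pause (presumably to ride out
# transient indy-sdk errors) before wrapping and re-raising the failure.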
for attempt in range(3):
try:
await indy.pool.close_pool_ledger(self.handle)
except IndyError as err:
await asyncio.sleep(0.01)
exc = err
continue
self.handle = None
self.opened = False
exc = None
break
if exc:
LOGGER.error("Exception closing pool ledger")
self.ref_count += 1 # if we are here, we should have self.ref_lock
self.close_task = None
raise IndyErrorHandler.wrap_error(
exc, "Exception closing pool ledger", LedgerError
)
async def context_open(self):
"""Open the ledger if necessary and increase the number of active references."""
async with self.ref_lock:
if self.close_task:
self.close_task.cancel()
if not self.opened:
LOGGER.debug("Opening the pool ledger")
await self.open()
self.ref_count += 1
async def context_close(self):
"""Release the reference and schedule closing of the pool ledger."""
async def closer(timeout: int):
"""Close the pool ledger after a timeout."""
await asyncio.sleep(timeout)
async with self.ref_lock:
if not self.ref_count:
LOGGER.debug("Closing pool ledger after timeout")
await self.close()
async with self.ref_lock:
self.ref_count -= 1
if not self.ref_count:
if self.keepalive:
self.close_task = asyncio.ensure_future(closer(self.keepalive))
else:
await self.close()
class IndySdkLedger(BaseLedger):
"""Indy ledger class."""
BACKEND_NAME = "indy"
def __init__(
self,
pool: IndySdkLedgerPool,
wallet: IndySdkWallet,
):
"""
Initialize an IndySdkLedger instance.
Args:
pool: The pool instance handling the raw ledger connection
wallet: The IndySdkWallet instance
"""
self.pool = pool
self.wallet = wallet
@property
def pool_handle(self):
"""Accessor for the ledger pool handle."""
return self.pool.handle
@property
def pool_name(self) -> str:
"""Accessor for the ledger pool name."""
return self.pool.name
@property
def read_only(self) -> bool:
"""Accessor for the ledger read-only flag."""
return self.pool.read_only
async def __aenter__(self) -> "IndySdkLedger":
"""
Context manager entry.
Returns:
The current instance
"""
await super().__aenter__()
await self.pool.context_open()
return self
async def __aexit__(self, exc_type, exc, tb):
"""Context manager exit."""
await self.pool.context_close()
await super().__aexit__(exc_type, exc, tb)
async def _submit(
self,
request_json: str,
sign: bool = None,
taa_accept: bool = None,
sign_did: DIDInfo = sentinel,
) -> str:
"""
Sign and submit request to ledger.
Args:
request_json: The json string to submit
sign: whether or not to sign the request
taa_accept: whether to apply TAA acceptance to the (signed, write) request
sign_did: override the signing DID
"""
if not self.pool.handle:
raise ClosedPoolError(
f"Cannot sign and submit request to closed pool '{self.pool.name}'"
)
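# Flag defaulting below: when `sign` is unspecified, the request is signed iff a
# signing DID is available (the public DID unless `sign_did` is given), and TAA
# acceptance is appended to any signed request unless explicitly disabled.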
if sign is None or sign:
if sign_did is sentinel:
sign_did = await self.wallet.get_public_did()
if sign is None:
sign = bool(sign_did)
if taa_accept is None and sign:
taa_accept = True
if sign:
if not sign_did:
raise BadLedgerRequestError("Cannot sign request without a public DID")
if taa_accept:
acceptance = await self.get_latest_txn_author_acceptance()
if acceptance:
request_json = await (
indy.ledger.append_txn_author_agreement_acceptance_to_request(
request_json,
acceptance["text"],
acceptance["version"],
acceptance["digest"],
acceptance["mechanism"],
acceptance["time"],
)
)
submit_op = indy.ledger.sign_and_submit_request(
self.pool.handle, self.wallet.opened.handle, sign_did.did, request_json
)
else:
submit_op = indy.ledger.submit_request(self.pool.handle, request_json)
with IndyErrorHandler(
"Exception raised by ledger transaction", LedgerTransactionError
):
request_result_json = await submit_op
request_result = json.loads(request_result_json)
operation = request_result.get("op", "")
if operation in ("REQNACK", "REJECT"):
raise LedgerTransactionError(
f"Ledger rejected transaction request: {request_result['reason']}"
)
elif operation == "REPLY":
return request_result_json
else:
raise LedgerTransactionError(
f"Unexpected operation code from ledger: {operation}"
)
async def create_and_send_schema(
self,
issuer: IndyIssuer,
schema_name: str,
schema_version: str,
attribute_names: Sequence[str],
) -> Tuple[str, dict]:
"""
Send schema to ledger.
Args:
issuer: The issuer instance creating the schema
schema_name: The schema name
schema_version: The schema version
attribute_names: A list of schema attributes
"""
public_info = await self.wallet.get_public_did()
if not public_info:
raise BadLedgerRequestError("Cannot publish schema without a public DID")
schema_info = await self.check_existing_schema(
public_info.did, schema_name, schema_version, attribute_names
)
if schema_info:
LOGGER.warning("Schema already exists on ledger. Returning details.")
schema_id, schema_def = schema_info
else:
if self.pool.read_only:
raise LedgerError(
"Error cannot write schema when ledger is in read only mode"
)
try:
schema_id, schema_json = await issuer.create_schema(
public_info.did,
schema_name,
schema_version,
attribute_names,
)
except IndyIssuerError as err:
raise LedgerError(err.message) from err
schema_def = json.loads(schema_json)
with IndyErrorHandler("Exception building schema request", LedgerError):
request_json = await indy.ledger.build_schema_request(
public_info.did, schema_json
)
try:
resp = await self._submit(request_json, True, sign_did=public_info)
try:
# parse sequence number out of response
seq_no = json.loads(resp)["result"]["txnMetadata"]["seqNo"]
schema_def["seqNo"] = seq_no
except KeyError as err:
raise LedgerError(
"Failed to parse schema sequence number from ledger response"
) from err
except LedgerTransactionError as e:
# Identify possible duplicate schema errors on indy-node < 1.9 and > 1.9
if "can have one and only one SCHEMA with name" in getattr(
e, "message", ""
) or "UnauthorizedClientRequest" in getattr(e, "message", ""):
# handle potential race condition if multiple agents are publishing
# the same schema simultaneously
schema_info = await self.check_existing_schema(
public_info.did, schema_name, schema_version, attribute_names
)
if schema_info:
LOGGER.warning(
"Schema already exists on ledger. Returning details."
" Error: %s",
e,
)
schema_id, schema_def = schema_info
else:
raise
schema_id_parts = schema_id.split(":")
schema_tags = {
"schema_id": schema_id,
"schema_issuer_did": public_info.did,
"schema_name": schema_id_parts[-2],
"schema_version": schema_id_parts[-1],
"epoch": str(int(time())),
}
record = StorageRecord(SCHEMA_SENT_RECORD_TYPE, schema_id, schema_tags)
storage = self.get_indy_storage()
await storage.add_record(record)
return schema_id, schema_def
async def check_existing_schema(
self,
public_did: str,
schema_name: str,
schema_version: str,
attribute_names: Sequence[str],
) -> Tuple[str, dict]:
"""Check if a schema has already been published."""
fetch_schema_id = f"{public_did}:2:{schema_name}:{schema_version}"
schema = await self.fetch_schema_by_id(fetch_schema_id)
if schema:
fetched_attrs = schema["attrNames"].copy()
fetched_attrs.sort()
cmp_attrs = list(attribute_names)
cmp_attrs.sort()
if fetched_attrs != cmp_attrs:
raise LedgerTransactionError(
"Schema already exists on ledger, but attributes do not match: "
+ f"{schema_name}:{schema_version} {fetched_attrs} != {cmp_attrs}"
)
return fetch_schema_id, schema
async def get_schema(self, schema_id: str) -> dict:
"""
Get a schema from the cache if available, otherwise fetch from the ledger.
Args:
schema_id: The schema id (or stringified sequence number) to retrieve
"""
if self.pool.cache:
result = await self.pool.cache.get(f"schema::{schema_id}")
if result:
return result
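# A purely numeric schema_id is treated as a ledger transaction sequence number
# rather than a fully qualified schema identifier.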
if schema_id.isdigit():
return await self.fetch_schema_by_seq_no(int(schema_id))
else:
return await self.fetch_schema_by_id(schema_id)
async def fetch_schema_by_id(self, schema_id: str) -> dict:
"""
Get schema from ledger.
Args:
schema_id: The schema id (or stringified sequence number) to retrieve
Returns:
Indy schema dict
"""
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building schema request", LedgerError):
request_json = await indy.ledger.build_get_schema_request(
public_did, schema_id
)
response_json = await self._submit(request_json, sign_did=public_info)
response = json.loads(response_json)
if not response["result"]["seqNo"]:
# schema not found
return None
with IndyErrorHandler("Exception parsing schema response", LedgerError):
_, parsed_schema_json = await indy.ledger.parse_get_schema_response(
response_json
)
parsed_response = json.loads(parsed_schema_json)
if parsed_response and self.pool.cache:
await self.pool.cache.set(
[f"schema::{schema_id}", f"schema::{response['result']['seqNo']}"],
parsed_response,
self.pool.cache_duration,
)
return parsed_response
async def fetch_schema_by_seq_no(self, seq_no: int):
"""
Fetch a schema by its sequence number.
Args:
seq_no: schema ledger sequence number
Returns:
Indy schema dict
"""
# get txn by sequence number, retrieve schema identifier components
request_json = await indy.ledger.build_get_txn_request(
None, None, seq_no=seq_no
)
response = json.loads(await self._submit(request_json))
# transaction data format assumes node protocol >= 1.4 (circa 2018-07)
data_txn = (response["result"].get("data", {}) or {}).get("txn", {})
if data_txn.get("type", None) == "101": # marks indy-sdk schema txn type
(origin_did, name, version) = (
data_txn["metadata"]["from"],
data_txn["data"]["data"]["name"],
data_txn["data"]["data"]["version"],
)
schema_id = f"{origin_did}:2:{name}:{version}"
return await self.get_schema(schema_id)
raise LedgerTransactionError(
f"Could not get schema from ledger for seq no {seq_no}"
)
async def create_and_send_credential_definition(
self,
issuer: IndyIssuer,
schema_id: str,
signature_type: str = None,
tag: str = None,
support_revocation: bool = False,
) -> Tuple[str, dict, bool]:
"""
Send credential definition to ledger and store relevant key matter in wallet.
Args:
issuer: The issuer instance to use for credential definition creation
schema_id: The schema id of the schema to create cred def for
signature_type: The signature type to use on the credential definition
tag: Optional tag to distinguish multiple credential definitions
support_revocation: Optional flag to enable revocation for this cred def
Returns:
Tuple with cred def id, cred def structure, and whether it's novel
"""
public_info = await self.wallet.get_public_did()
if not public_info:
raise BadLedgerRequestError(
"Cannot publish credential definition without a public DID"
)
schema = await self.get_schema(schema_id)
if not schema:
raise LedgerError(f"Ledger {self.pool.name} has no schema {schema_id}")
novel = False
# check if cred def is on ledger already
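# When no tag is supplied, probe both the conventional "tag" value and the
# issuer's default tag for an existing definition.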
for test_tag in [tag] if tag else ["tag", DEFAULT_CRED_DEF_TAG]:
credential_definition_id = issuer.make_credential_definition_id(
public_info.did, schema, signature_type, test_tag
)
ledger_cred_def = await self.fetch_credential_definition(
credential_definition_id
)
if ledger_cred_def:
LOGGER.warning(
"Credential definition %s already exists on ledger %s",
credential_definition_id,
self.pool.name,
)
try:
if not await issuer.credential_definition_in_wallet(
credential_definition_id
):
raise LedgerError(
f"Credential definition {credential_definition_id} is on "
f"ledger {self.pool.name} but not in wallet "
f"{self.wallet.opened.name}"
)
except IndyIssuerError as err:
raise LedgerError(err.message) from err
credential_definition_json = json.dumps(ledger_cred_def)
break
else: # no such cred def on ledger
try:
if await issuer.credential_definition_in_wallet(
credential_definition_id
):
raise LedgerError(
f"Credential definition {credential_definition_id} is in "
f"wallet {self.wallet.opened.name} but not on ledger "
f"{self.pool.name}"
)
except IndyIssuerError as err:
raise LedgerError(err.message) from err
# Cred def is neither on ledger nor in wallet: create and send it
novel = True
try:
(
credential_definition_id,
credential_definition_json,
) = await issuer.create_and_store_credential_definition(
public_info.did,
schema,
signature_type,
tag,
support_revocation,
)
except IndyIssuerError as err:
raise LedgerError(err.message) from err
if self.pool.read_only:
raise LedgerError(
"Error cannot write cred def when ledger is in read only mode"
)
with IndyErrorHandler("Exception building cred def request", LedgerError):
request_json = await indy.ledger.build_cred_def_request(
public_info.did, credential_definition_json
)
await self._submit(request_json, True, sign_did=public_info)
# Add non-secrets record
storage = self.get_indy_storage()
schema_id_parts = schema_id.split(":")
cred_def_tags = {
"schema_id": schema_id,
"schema_issuer_did": schema_id_parts[0],
"schema_name": schema_id_parts[-2],
"schema_version": schema_id_parts[-1],
"issuer_did": public_info.did,
"cred_def_id": credential_definition_id,
"epoch": str(int(time())),
}
record = StorageRecord(
CRED_DEF_SENT_RECORD_TYPE, credential_definition_id, cred_def_tags
)
await storage.add_record(record)
return (credential_definition_id, json.loads(credential_definition_json), novel)
async def get_credential_definition(self, credential_definition_id: str) -> dict:
"""
Get a credential definition from the cache if available, otherwise the ledger.
Args:
credential_definition_id: The schema id of the schema to fetch cred def for
"""
if self.pool.cache:
result = await self.pool.cache.get(
f"credential_definition::{credential_definition_id}"
)
if result:
return result
return await self.fetch_credential_definition(credential_definition_id)
async def fetch_credential_definition(self, credential_definition_id: str) -> dict:
"""
Get a credential definition from the ledger by id.
Args:
credential_definition_id: The cred def id of the cred def to fetch
"""
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building cred def request", LedgerError):
request_json = await indy.ledger.build_get_cred_def_request(
public_did, credential_definition_id
)
response_json = await self._submit(request_json, sign_did=public_info)
with IndyErrorHandler("Exception parsing cred def response", LedgerError):
try:
(
_,
parsed_credential_definition_json,
) = await indy.ledger.parse_get_cred_def_response(response_json)
parsed_response = json.loads(parsed_credential_definition_json)
except IndyError as error:
if error.error_code == ErrorCode.LedgerNotFound:
parsed_response = None
else:
raise
if parsed_response and self.pool.cache:
await self.pool.cache.set(
f"credential_definition::{credential_definition_id}",
parsed_response,
self.pool.cache_duration,
)
return parsed_response
async def credential_definition_id2schema_id(self, credential_definition_id):
"""
From a credential definition, get the identifier for its schema.
Args:
credential_definition_id: The identifier of the credential definition
from which to identify a schema
"""
# scrape schema id or sequence number from cred def id
tokens = credential_definition_id.split(":")
if len(tokens) == 8: # node protocol >= 1.4: cred def id has 5 or 8 tokens
return ":".join(tokens[3:7]) # schema id spans 0-based positions 3-6
# get txn by sequence number, retrieve schema identifier components
seq_no = tokens[3]
return (await self.get_schema(seq_no))["id"]
async def get_key_for_did(self, did: str) -> str:
"""Fetch the verkey for a ledger DID.
Args:
did: The DID to look up on the ledger or in the cache
"""
nym = self.did_to_nym(did)
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building nym request", LedgerError):
request_json = await indy.ledger.build_get_nym_request(public_did, nym)
response_json = await self._submit(request_json, sign_did=public_info)
data_json = (json.loads(response_json))["result"]["data"]
return full_verkey(did, json.loads(data_json)["verkey"]) if data_json else None
async def get_all_endpoints_for_did(self, did: str) -> dict:
"""Fetch all endpoints for a ledger DID.
Args:
did: The DID to look up on the ledger or in the cache
"""
nym = self.did_to_nym(did)
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building attribute request", LedgerError):
request_json = await indy.ledger.build_get_attrib_request(
public_did, nym, "endpoint", None, None
)
response_json = await self._submit(request_json, sign_did=public_info)
data_json = json.loads(response_json)["result"]["data"]
if data_json:
endpoints = json.loads(data_json).get("endpoint", None)
else:
endpoints = None
return endpoints
async def get_endpoint_for_did(
self, did: str, endpoint_type: EndpointType = None
) -> str:
"""Fetch the endpoint for a ledger DID.
Args:
did: The DID to look up on the ledger or in the cache
endpoint_type: The type of the endpoint. If none given, returns all
"""
if not endpoint_type:
endpoint_type = EndpointType.ENDPOINT
nym = self.did_to_nym(did)
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building attribute request", LedgerError):
request_json = await indy.ledger.build_get_attrib_request(
public_did, nym, "endpoint", None, None
)
response_json = await self._submit(request_json, sign_did=public_info)
data_json = json.loads(response_json)["result"]["data"]
if data_json:
endpoint = json.loads(data_json).get("endpoint", None)
address = endpoint.get(endpoint_type.indy, None) if endpoint else None
else:
address = None
return address
async def update_endpoint_for_did(
self, did: str, endpoint: str, endpoint_type: EndpointType = None
) -> bool:
"""Check and update the endpoint on the ledger.
Args:
did: The ledger DID
endpoint: The endpoint address
endpoint_type: The type of the endpoint
"""
if not endpoint_type:
endpoint_type = EndpointType.ENDPOINT
all_exist_endpoints = await self.get_all_endpoints_for_did(did)
exist_endpoint_of_type = (
all_exist_endpoints.get(endpoint_type.indy, None)
if all_exist_endpoints
else None
)
if exist_endpoint_of_type != endpoint:
if self.pool.read_only:
raise LedgerError(
"Error cannot update endpoint when ledger is in read only mode"
)
nym = self.did_to_nym(did)
if all_exist_endpoints:
all_exist_endpoints[endpoint_type.indy] = endpoint
attr_json = json.dumps({"endpoint": all_exist_endpoints})
else:
attr_json = json.dumps({"endpoint": {endpoint_type.indy: endpoint}})
with IndyErrorHandler("Exception building attribute request", LedgerError):
request_json = await indy.ledger.build_attrib_request(
nym, nym, None, attr_json, None
)
await self._submit(request_json, True, True)
return True
return False
async def register_nym(
self, did: str, verkey: str, alias: str = None, role: str = None
):
"""
Register a nym on the ledger.
Args:
did: DID to register on the ledger.
verkey: The verification key of the keypair.
alias: Human-friendly alias to assign to the DID.
role: For permissioned ledgers, what role should the new DID have.
"""
if self.pool.read_only:
raise LedgerError(
"Error cannot register nym when ledger is in read only mode"
)
public_info = await self.wallet.get_public_did()
if not public_info:
raise WalletNotFoundError(
f"Cannot register NYM to ledger: wallet {self.wallet.opened.name} "
"has no public DID"
)
with IndyErrorHandler("Exception building nym request", LedgerError):
request_json = await indy.ledger.build_nym_request(
public_info.did, did, verkey, alias, role
)
await self._submit(request_json) # let ledger raise on insufficient privilege
try:
did_info = await self.wallet.get_local_did(did)
except WalletNotFoundError:
pass # registering another user's NYM
else:
metadata = {**did_info.metadata, **DIDPosture.POSTED.metadata}
await self.wallet.replace_local_did_metadata(did, metadata)
async def get_nym_role(self, did: str) -> Role:
"""
Return the role of the input public DID's NYM on the ledger.
Args:
did: DID to query for role on the ledger.
"""
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
with IndyErrorHandler("Exception building get-nym request", LedgerError):
request_json = await indy.ledger.build_get_nym_request(public_did, did)
response_json = await self._submit(request_json)
response = json.loads(response_json)
nym_data = json.loads(response["result"]["data"])
if not nym_data:
raise BadLedgerRequestError(f"DID {did} is not public")
return Role.get(nym_data["role"])
def nym_to_did(self, nym: str) -> str:
"""Format a nym with the ledger's DID prefix."""
if nym:
# remove any existing prefix
nym = self.did_to_nym(nym)
return f"did:sov:{nym}"
async def rotate_public_did_keypair(self, next_seed: str = None) -> None:
"""
Rotate keypair for public DID: create new key, submit to ledger, update wallet.
Args:
next_seed: seed for incoming ed25519 keypair (default random)
"""
# generate new key
public_info = await self.wallet.get_public_did()
public_did = public_info.did
verkey = await self.wallet.rotate_did_keypair_start(public_did, next_seed)
# submit to ledger (retain role and alias)
nym = self.did_to_nym(public_did)
with IndyErrorHandler("Exception building nym request", LedgerError):
request_json = await indy.ledger.build_get_nym_request(public_did, nym)
response_json = await self._submit(request_json)
data = json.loads((json.loads(response_json))["result"]["data"])
if not data:
raise BadLedgerRequestError(
f"Ledger has no public DID for wallet {self.wallet.opened.name}"
)
seq_no = data["seqNo"]
with IndyErrorHandler("Exception building get-txn request", LedgerError):
txn_req_json = await indy.ledger.build_get_txn_request(None, None, seq_no)
txn_resp_json = await self._submit(txn_req_json)
txn_resp = json.loads(txn_resp_json)
txn_resp_data = txn_resp["result"]["data"]
if not txn_resp_data:
raise BadLedgerRequestError(
f"Bad or missing ledger NYM transaction for DID {public_did}"
)
txn_data_data = txn_resp_data["txn"]["data"]
role_token = Role.get(txn_data_data.get("role")).token()
alias = txn_data_data.get("alias")
# pass alias/role as keywords to match register_nym(did, verkey, alias, role)
await self.register_nym(public_did, verkey, alias=alias, role=role_token)
# update wallet
await self.wallet.rotate_did_keypair_apply(public_did)
async def get_txn_author_agreement(self, reload: bool = False) -> dict:
"""Get the current transaction author agreement, fetching it if necessary."""
if not self.pool.taa_cache or reload:
self.pool.taa_cache = await self.fetch_txn_author_agreement()
return self.pool.taa_cache
async def fetch_txn_author_agreement(self) -> dict:
"""Fetch the current AML and TAA from the ledger."""
public_info = await self.wallet.get_public_did()
public_did = public_info.did if public_info else None
get_aml_req = await indy.ledger.build_get_acceptance_mechanisms_request(
public_did, None, None
)
response_json = await self._submit(get_aml_req, sign_did=public_info)
aml_found = (json.loads(response_json))["result"]["data"]
get_taa_req = await indy.ledger.build_get_txn_author_agreement_request(
public_did, None
)
response_json = await self._submit(get_taa_req, sign_did=public_info)
taa_found = (json.loads(response_json))["result"]["data"]
taa_required = bool(taa_found and taa_found["text"])
if taa_found:
taa_found["digest"] = self.taa_digest(
taa_found["version"], taa_found["text"]
)
return {
"aml_record": aml_found,
"taa_record": taa_found,
"taa_required": taa_required,
}
def get_indy_storage(self) -> IndySdkStorage:
"""Get an IndySdkStorage instance for the current wallet."""
return IndySdkStorage(self.wallet.opened)
def taa_rough_timestamp(self) -> int:
"""Get a timestamp accurate to the day.
Anything more accurate is a privacy concern.
"""
return int(datetime.combine(date.today(), datetime.min.time()).timestamp())
def taa_digest(self, version: str, text: str):
"""Generate the digest of a TAA record."""
if not version or not text:
raise ValueError("Bad input for TAA digest")
taa_plaintext = version + text
return sha256(taa_plaintext.encode("utf-8")).digest().hex()
async def accept_txn_author_agreement(
self, taa_record: dict, mechanism: str, accept_time: int = None
):
"""Save a new record recording the acceptance of the TAA."""
if not accept_time:
accept_time = self.taa_rough_timestamp()
acceptance = {
"text": taa_record["text"],
"version": taa_record["version"],
"digest": taa_record["digest"],
"mechanism": mechanism,
"time": accept_time,
}
record = StorageRecord(
TAA_ACCEPTED_RECORD_TYPE,
json.dumps(acceptance),
{"pool_name": self.pool.name},
)
storage = self.get_indy_storage()
await storage.add_record(record)
if self.pool.cache:
# TAA acceptance must be set for each wallet and pool combination
cache_key = (
TAA_ACCEPTED_RECORD_TYPE
+ "::"
+ self.wallet.opened.name
+ "::"
+ self.pool.name
)
await self.pool.cache.set(cache_key, acceptance, self.pool.cache_duration)
async def get_latest_txn_author_acceptance(self) -> dict:
"""Look up the latest TAA acceptance."""
cache_key = (
TAA_ACCEPTED_RECORD_TYPE
+ "::"
+ self.wallet.opened.name
+ "::"
+ self.pool.name
)
acceptance = self.pool.cache and await self.pool.cache.get(cache_key)
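# On a cache miss, fall back to the wallet's non-secrets storage and keep only
# the most recent acceptance record for this pool.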
if not acceptance:
storage = self.get_indy_storage()
tag_filter = {"pool_name": self.pool.name}
found = await storage.find_all_records(TAA_ACCEPTED_RECORD_TYPE, tag_filter)
if found:
records = list(json.loads(record.value) for record in found)
records.sort(key=lambda v: v["time"], reverse=True)
acceptance = records[0]
else:
acceptance = {}
if self.pool.cache:
await self.pool.cache.set(
cache_key, acceptance, self.pool.cache_duration
)
return acceptance
async def get_revoc_reg_def(self, revoc_reg_id: str) -> dict:
"""Get revocation registry definition by ID; augment with ledger timestamp."""
public_info = await self.wallet.get_public_did()
try:
fetch_req = await indy.ledger.build_get_revoc_reg_def_request(
public_info and public_info.did, revoc_reg_id
)
response_json = await self._submit(fetch_req, sign_did=public_info)
(
found_id,
found_def_json,
) = await indy.ledger.parse_get_revoc_reg_def_response(response_json)
found_def = json.loads(found_def_json)
found_def["txnTime"] = json.loads(response_json)["result"]["txnTime"]
except IndyError as e:
LOGGER.error(
f"get_revoc_reg_def failed with revoc_reg_id={revoc_reg_id} - "
f"{e.error_code}: {getattr(e, 'message', '[no message]')}"
)
raise e
assert found_id == revoc_reg_id
return found_def
async def get_revoc_reg_entry(self, revoc_reg_id: str, timestamp: int):
"""Get revocation registry entry by revocation registry ID and timestamp."""
public_info = await self.wallet.get_public_did()
with IndyErrorHandler("Exception fetching rev reg entry", LedgerError):
try:
fetch_req = await indy.ledger.build_get_revoc_reg_request(
public_info and public_info.did, revoc_reg_id, timestamp
)
response_json = await self._submit(fetch_req, sign_did=public_info)
(
found_id,
found_reg_json,
ledger_timestamp,
) = await indy.ledger.parse_get_revoc_reg_response(response_json)
except IndyError as e:
LOGGER.error(
f"get_revoc_reg_entry failed with revoc_reg_id={revoc_reg_id} - "
f"{e.error_code}: {getattr(e, 'message', '[no message]')}"
)
raise e
assert found_id == revoc_reg_id
return json.loads(found_reg_json), ledger_timestamp
async def get_revoc_reg_delta(
self, revoc_reg_id: str, fro=0, to=None
) -> Tuple[dict, int]:
"""
Look up a revocation registry delta by ID.
:param revoc_reg_id: revocation registry id
:param fro: earliest EPOCH time of interest
:param to: latest EPOCH time of interest
:returns: delta response, delta timestamp
"""
if to is None:
to = int(time())
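# Note: when the requested interval is empty (fro == to), the request built
# below falls back to epoch 0 as the start time.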
public_info = await self.wallet.get_public_did()
with IndyErrorHandler("Exception building rev reg delta request", LedgerError):
fetch_req = await indy.ledger.build_get_revoc_reg_delta_request(
public_info and public_info.did,
revoc_reg_id,
0 if fro == to else fro,
to,
)
response_json = await self._submit(fetch_req, sign_did=public_info)
with IndyErrorHandler(
(
"Exception parsing rev reg delta response "
"(interval ends before rev reg creation?)"
),
LedgerError,
):
(
found_id,
found_delta_json,
delta_timestamp,
) = await indy.ledger.parse_get_revoc_reg_delta_response(response_json)
assert found_id == revoc_reg_id
return json.loads(found_delta_json), delta_timestamp
async def send_revoc_reg_def(self, revoc_reg_def: dict, issuer_did: str = None):
"""Publish a revocation registry definition to the ledger."""
# NOTE - issuer DID could be extracted from the revoc_reg_def ID
if issuer_did:
did_info = await self.wallet.get_local_did(issuer_did)
else:
did_info = await self.wallet.get_public_did()
if not did_info:
raise LedgerTransactionError(
"No issuer DID found for revocation registry definition"
)
with IndyErrorHandler("Exception building rev reg def", LedgerError):
request_json = await indy.ledger.build_revoc_reg_def_request(
did_info.did, json.dumps(revoc_reg_def)
)
await self._submit(request_json, True, True, did_info)
async def send_revoc_reg_entry(
self,
revoc_reg_id: str,
revoc_def_type: str,
revoc_reg_entry: dict,
issuer_did: str = None,
):
"""Publish a revocation registry entry to the ledger."""
if issuer_did:
did_info = await self.wallet.get_local_did(issuer_did)
else:
did_info = await self.wallet.get_public_did()
if not did_info:
raise LedgerTransactionError(
"No issuer DID found for revocation registry entry"
)
with IndyErrorHandler("Exception building rev reg entry", LedgerError):
request_json = await indy.ledger.build_revoc_reg_entry_request(
did_info.did, revoc_reg_id, revoc_def_type, json.dumps(revoc_reg_entry)
)
await self._submit(request_json, True, True, did_info)
|
import asyncio
import aiohttp # type: ignore
import math
import os
import datetime
import re
# import boto3 # type: ignore
import json
import io
import argparse
import gzip
from pathlib import Path
import os
# from cryptography.hazmat.backends import default_backend
# import jwt
# import requests
import time
import requests
from typing import *
# BUCKET = os.getenv("bucket", "ossci-job-status")
# APP_ID = int(os.environ["app_id"])
# # The private key needs to maintain its newlines, get it via
# # $ cat key.pem | tr '\n' '|' | pbcopy
# PRIVATE_KEY = os.environ["private_key"].replace("|", "\n")
GH_PAT = os.environ["GH_PAT"]
def app_headers() -> Dict[str, str]:
headers = {
"Authorization": f"Bearer {GH_PAT}",
"Accept": "application/vnd.github.machine-man-preview+json",
}
return headers
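# NOTE: everything below in this function appears to be unreachable leftover from
# the GitHub-App (JWT) auth path; it references PRIVATE_KEY, APP_ID, jwt and
# default_backend, all commented out above, and is never reached because of the
# early return of the personal-access-token header.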
cert_bytes = PRIVATE_KEY.encode()
private_key = default_backend().load_pem_private_key(cert_bytes, None) # type: ignore
time_since_epoch_in_seconds = int(time.time())
payload = {
# issued at time
"iat": time_since_epoch_in_seconds,
# JWT expiration time (10 minute maximum)
"exp": time_since_epoch_in_seconds + (10 * 60),
# GitHub App's identifier
"iss": APP_ID,
}
actual_jwt = jwt.encode(payload, private_key, algorithm="RS256")
headers = {
"Authorization": f"Bearer {actual_jwt}",
"Accept": "application/vnd.github.machine-man-preview+json",
}
return headers
def jprint(obj: Any) -> None:
print(json.dumps(obj, indent=2))
def installation_id(user: str) -> int:
r_bytes = requests.get(
"https://api.github.com/app/installations", headers=app_headers()
)
r = json.loads(r_bytes.content.decode())
for item in r:
if item["account"]["login"] == user:
return int(item["id"])
raise RuntimeError(f"User {user} not found in {r}")
def user_token(user: str) -> str:
"""
Authorize this request with the GitHub app set by the 'app_id' and
'private_key' environment variables.
1. Get the installation ID for the user that has installed the app
2. Request a new token for that user
3. Return it so it can be used in future API requests
"""
# Hardcode the installation to PyTorch so we can always get a valid ID key
id = installation_id("pytorch")
url = f"https://api.github.com/app/installations/{id}/access_tokens"
r_bytes = requests.post(url, headers=app_headers())
r = json.loads(r_bytes.content.decode())
token = str(r["token"])
return token
# if "AWS_KEY_ID" in os.environ and "AWS_SECRET_KEY" in os.environ:
# # Use keys for local development
# session = boto3.Session(
# aws_access_key_id=os.environ.get("AWS_KEY_ID"),
# aws_secret_access_key=os.environ.get("AWS_SECRET_KEY"),
# )
# else:
# # In the Lambda, use permissions on the Lambda's role
# session = boto3.Session()
# s3 = session.resource("s3")
def compress_query(query: str) -> str:
query = query.replace("\n", "")
query = re.sub("\s+", " ", query)
return query
def head_commit_query(user: str, repo: str, branches: List[str]) -> str:
"""
Fetch the head commit for a list of branches
"""
def branch_part(branch: str, num: int) -> str:
return f"""
r{num}: repository(name: "{repo}", owner: "{user}") {{
ref(qualifiedName:"refs/heads/{branch}") {{
name
target {{
... on Commit {{
oid
}}
}}
}}
}}
"""
parts = [branch_part(branch, i) for i, branch in enumerate(branches)]
return "{" + "\n".join(parts) + "}"
def extract_gha(suites: List[Dict[str, Any]]) -> List[Dict[str, str]]:
jobs = []
for suite in suites:
suite = suite["node"]
if suite["workflowRun"] is None:
# If no jobs were triggered this will be empty
continue
workflow = suite["workflowRun"]["workflow"]["name"]
for run in suite["checkRuns"]["nodes"]:
conclusion = run["conclusion"]
if conclusion is None:
if run["status"].lower() == "queued":
conclusion = "queued"
elif run["status"].lower() == "in_progress":
conclusion = "pending"
else:
raise RuntimeError(f"unexpected run {run}")
jobs.append(
{
"name": f"{workflow} / {run["name"]}",
"status": conclusion.lower(),
"url": run["detailsUrl"],
}
)
return jobs
def extract_status(contexts: List[Dict[str, Any]]) -> List[Dict[str, str]]:
jobs = []
for context in contexts:
jobs.append(
{
"name": context["context"],
"status": context["state"].lower(),
"url": context["targetUrl"],
}
)
return jobs
def extract_jobs(raw_commits: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
commits = []
for raw_commit in raw_commits:
if raw_commit["status"] is None:
# Will be none if no non-GHA jobs were triggered
status = []
else:
status = extract_status(raw_commit["status"]["contexts"])
gha = extract_gha(raw_commit["checkSuites"]["edges"])
jobs = status + gha
if raw_commit["author"]["user"] is None:
author = raw_commit["author"]["name"]
else:
author = raw_commit["author"]["user"]["login"]
commits.append(
{
"sha": raw_commit["oid"],
"headline": raw_commit["messageHeadline"],
"body": raw_commit["messageBody"],
"author": author,
"date": raw_commit["authoredDate"],
"jobs": jobs,
}
)
return commits
class BranchHandler:
def __init__(
self,
gql: Any,
user: str,
repo: str,
name: str,
head: str,
history_size: int,
fetch_size: int,
):
self.gql = gql
self.user = user
self.repo = repo
self.name = name
self.head = head
self.fetch_size = fetch_size
self.history_size = history_size
def write_to_s3(self, data: Any) -> None:
dir = Path(__file__).resolve().parent.parent / "public" / "statuses"
data[0]["updated_at"] = datetime.datetime.now().timestamp()
file = dir / self.user / self.repo / f"{self.name.replace('/', '_')}.json"
if not file.parent.exists():
print("Not exists!")
file.parent.mkdir(parents=True, exist_ok=True)
print("Making")
with open(file, "w") as f:
f.write(json.dumps(data, indent=2))
# content = json.dumps(data, default=str)
# buf = io.BytesIO()
# gzipfile = gzip.GzipFile(fileobj=buf, mode="w")
# gzipfile.write(content.encode())
# gzipfile.close()
# bucket = s3.Bucket(BUCKET)
# prefix = f"v6/{self.user}/{self.repo}/{self.name.replace("/", "_")}.json"
# bucket.put_object(
# Key=prefix,
# Body=buf.getvalue(),
# ContentType="application/json",
# ContentEncoding="gzip",
# Expires="0",
# )
print(f"Wrote {len(data)} commits from {self.name} to {file}")
def query(self, offset: int) -> str:
after = ""
# The cursor for fetches are formatted like after: "<sha> <offset>", but
# the first commit isn't included, so shift all the offsets and don't
# use an "after" for the first batch
if offset > 0:
after = f', after: "{self.head} {offset - 1}"'
return f"""
{{
repository(name: "{self.repo}", owner: "{self.user}") {{
ref(qualifiedName:"refs/heads/{self.name}") {{
name
target {{
... on Commit {{
history(first:{self.fetch_size}{after}) {{
nodes {{
oid
messageBody
messageHeadline
author {{
name
user {{
login
}}
}}
authoredDate
checkSuites(first:100) {{
edges {{
node {{
checkRuns(first:100) {{
nodes {{
name
status
conclusion
detailsUrl
}}
}}
workflowRun {{
workflow {{
name
}}
}}
}}
}}
}}
status {{
contexts {{
context
state
targetUrl
}}
}}
}}
}}
}}
}}
}}
}}
}}
"""
def check_response(self, gql_response: Any) -> None:
# Just check that this path in the dict exists
gql_response["data"]["repository"]["ref"]["target"]["history"]["nodes"]
async def run(self) -> None:
"""
Fetch history for the branch (in batches) and merge them all together
"""
# GitHub's API errors out if you try to fetch too much data at once, so
# split up the 100 commits into batches of 'self.fetch_size'
fetches = math.ceil(self.history_size / self.fetch_size)
async def fetch(i: int) -> Any:
try:
return await self.gql.query(
self.query(offset=self.fetch_size * i), verify=self.check_response
)
except Exception as e:
print(
f"Error: {e}\nFailed to fetch {self.user}/{self.repo}/{self.name} on batch {i} / {fetches}"
)
return None
coros = [fetch(i) for i in range(fetches)]
result = await asyncio.gather(*coros)
raw_commits = []
print(f"Parsing results {self.name}")
# Merge all the batches
for r in result:
if r is None:
continue
try:
commits_batch = r["data"]["repository"]["ref"]["target"]["history"][
"nodes"
]
raw_commits += commits_batch
except Exception as e:
# Errors here are expected if the branch has less than HISTORY_SIZE
# commits (GitHub will just time out). There's no easy way to find
# this number ahead of time and avoid errors, but if we had that
# then we could delete this try-catch.
print(f"Error: Didn't find history in commit batch: {e}\n{r}")
# Pull out the data and format it
commits = extract_jobs(raw_commits)
print(f"Writing results for {self.name} to S3")
# Store gzip'ed data to S3
# print(len(commits))
# print(commits)
self.write_to_s3(commits)
class PRHandler(BranchHandler):
def __init__(
self,
gql: Any,
user: str,
repo: str,
history_size: int,
fetch_size: int,
):
# BranchHandler takes (gql, user, repo, name, head, history_size, fetch_size);
# PRs have no head commit to page from, so pass None for head.
super().__init__(gql, user, repo, "prs", None, history_size, fetch_size)
def query(self, offset: int) -> str:
return f"""
{{
repository(name: "{self.repo}", owner: "{self.user}") {{
pullRequests(last:50) {{
nodes {{
number
title
commits(last:100) {{
nodes {{
commit {{
oid
messageBody
messageHeadline
author {{
name
user {{
login
}}
}}
authoredDate
checkSuites(first:5) {{
edges {{
node {{
checkRuns(first:5) {{
nodes {{
name
status
conclusion
detailsUrl
}}
}}
workflowRun {{
workflow {{
name
}}
}}
}}
}}
}}
status {{
contexts {{
context
state
targetUrl
}}
}}
}}
}}
}}
}}
}}
}}
}}
"""
async def run(self) -> None:
query = self.query(offset=0)
async def fetch(i: int):
try:
return await self.gql.query(query)
except Exception as e:
print(
f"Error: {e}\nFailed to fetch {self.user}/{self.repo} PRs"
)
return None
results = [await fetch(0)]
# print(results)
commits_to_prs = {}
commits = []
for result in results:
prs = result["data"]["repository"]["pullRequests"]["nodes"]
for pr in prs:
for commit in pr["commits"]["nodes"]:
commit = commit["commit"]
commits_to_prs[commit["oid"]] = pr
commits.append(commit)
data = extract_jobs(commits)
for item in data:
pr = commits_to_prs[item["sha"]]
item["pr"] = pr["number"]
item["pr_title"] = pr["title"]
data = list(reversed(sorted(data, key=lambda x: x["date"])))
self.write_to_s3(data)
class GraphQL:
def __init__(self, session: aiohttp.ClientSession) -> None:
self.session = session
def log_rate_limit(self, headers: Any) -> None:
remaining = headers.get("X-RateLimit-Remaining")
used = headers.get("X-RateLimit-Used")
total = headers.get("X-RateLimit-Limit")
reset_timestamp = int(headers.get("X-RateLimit-Reset", 0)) # type: ignore
reset = datetime.datetime.fromtimestamp(reset_timestamp).strftime(
"%a, %d %b %Y %H:%M:%S"
)
print(
f"[rate limit] Used {used}, {remaining} / {total} remaining, reset at {reset}"
)
async def query(
self,
query: str,
verify: Optional[Callable[[Any], None]] = None,
retries: int = 1,
) -> Any:
"""
Run an authenticated GraphQL query
"""
# Remove unnecessary white space
query = compress_query(query)
if retries <= 0:
raise RuntimeError(f"Query {query[:100]} failed, no retries left")
url = "https://api.github.com/graphql"
try:
async with self.session.post(url, json={"query": query}) as resp:
self.log_rate_limit(resp.headers)
r = await resp.json()
if "data" not in r:
raise RuntimeError(r)
if verify is not None:
verify(r)
return r
except Exception as e:
print(
f"Retrying query {query[:100]}, remaining attempts: {retries - 1}\n{e}"
)
return await self.query(query, verify=verify, retries=retries - 1)
async def main(
user: str, repo: str, branches: List[str], history_size: int, fetch_size: int, prs: bool,
) -> None:
"""
Grab a list of all the head commits for each branch, then fetch all the jobs
for the last 'history_size' commits on that branch
"""
headers = {
"Authorization": f"token {GH_PAT}",
# "Authorization": "token {}".format(user_token(user)),
# "Accept": "application/vnd.github.machine-man-preview+json",
}
async with aiohttp.ClientSession(
headers=headers
) as aiosession:
gql = GraphQL(aiosession)
handlers = []
if prs:
handlers.append(PRHandler(gql, user, repo, history_size, fetch_size))
else:
print(f"Querying branches: {branches}")
heads = await gql.query(head_commit_query(user, repo, branches))
for head in heads["data"].values():
sha = head["ref"]["target"]["oid"]
branch = head["ref"]["name"]
handlers.append(
BranchHandler(gql, user, repo, branch, sha, history_size, fetch_size)
)
await asyncio.gather(*[h.run() for h in handlers])
def lambda_handler(event: Any, context: Any) -> None:
"""
'event' here is the payload configured from EventBridge (or set manually
via environment variables)
"""
data: Dict[str, Any] = {
"branches": None,
"user": None,
"repo": None,
"history_size": None,
"fetch_size": None,
"prs": False,
}
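# Environment variables take precedence over the event payload for every key below.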
for key in data.keys():
if key in os.environ:
data[key] = os.environ[key]
else:
data[key] = event[key]
if event["prs"]:
data["branches"] = "do not use"
if any(x is None for x in data.values()):
raise RuntimeError(
"Data missing from configuration, it must be set as an environment "
f"variable or as the input JSON payload in the Lambda event:\n{data}"
)
data["history_size"] = int(data["history_size"])
data["fetch_size"] = int(data["fetch_size"])
data["branches"] = data["branches"].split(",")
data["prs"] = data["prs"]
# return
asyncio.run(main(**data))
# if os.getenv("DEBUG", "0") == "1":
# # For local development
# lambda_handler(
# {
# "branches": "release/1.10",
# "user": "pytorch",
# "repo": "pytorch",
# "history_size": 100,
# "fetch_size": 10,
# },
# None,
# )
parser = argparse.ArgumentParser(description="Update JSON in S3 for a branch")
parser.add_argument("--branch")
parser.add_argument("--prs", action="store_true")
parser.add_argument("--repo", required=True)
parser.add_argument("--user", required=True)
parser.add_argument("--fetch_size", default=10)
parser.add_argument("--history_size", default=100)
args = parser.parse_args()
if not args.prs and args.branch is None:
raise RuntimeError("--prs or --branch <branch> must be used!")
lambda_handler(
{
"branches": args.branch,
"user": args.user,
"repo": args.repo,
"prs": args.prs,
"history_size": int(args.history_size),
"fetch_size": int(args.fetch_size),
},
None,
)
| import asyncio
import aiohttp # type: ignore
import math
import os
import datetime
import re
# import boto3 # type: ignore
import json
import io
import argparse
import gzip
from pathlib import Path
import os
# from cryptography.hazmat.backends import default_backend
# import jwt
# import requests
import time
import requests
from typing import *
# BUCKET = os.getenv("bucket", "ossci-job-status")
# APP_ID = int(os.environ["app_id"])
# # The private key needs to maintain its newlines, get it via
# # $ cat key.pem | tr '\n' '|' | pbcopy
# PRIVATE_KEY = os.environ["private_key"].replace("|", "\n")
GH_PAT = os.environ["GH_PAT"]
def app_headers() -> Dict[str, str]:
headers = {
"Authorization": f"Bearer {GH_PAT}",
"Accept": "application/vnd.github.machine-man-preview+json",
}
return headers
cert_bytes = PRIVATE_KEY.encode()
private_key = default_backend().load_pem_private_key(cert_bytes, None) # type: ignore
time_since_epoch_in_seconds = int(time.time())
payload = {
# issued at time
"iat": time_since_epoch_in_seconds,
# JWT expiration time (10 minute maximum)
"exp": time_since_epoch_in_seconds + (10 * 60),
# GitHub App's identifier
"iss": APP_ID,
}
actual_jwt = jwt.encode(payload, private_key, algorithm="RS256")
headers = {
"Authorization": f"Bearer {actual_jwt}",
"Accept": "application/vnd.github.machine-man-preview+json",
}
return headers
def jprint(obj: Any) -> None:
print(json.dumps(obj, indent=2))
def installation_id(user: str) -> int:
r_bytes = requests.get(
"https://api.github.com/app/installations", headers=app_headers()
)
r = json.loads(r_bytes.content.decode())
for item in r:
if item["account"]["login"] == user:
return int(item["id"])
raise RuntimeError(f"User {user} not found in {r}")
def user_token(user: str) -> str:
"""
Authorize this request with the GitHub app set by the 'app_id' and
'private_key' environment variables.
1. Get the installation ID for the user that has installed the app
2. Request a new token for that user
3. Return it so it can be used in future API requests
"""
# Hardcode the installation to PyTorch so we can always get a valid ID key
id = installation_id("pytorch")
url = f"https://api.github.com/app/installations/{id}/access_tokens"
r_bytes = requests.post(url, headers=app_headers())
r = json.loads(r_bytes.content.decode())
token = str(r["token"])
return token
# if "AWS_KEY_ID" in os.environ and "AWS_SECRET_KEY" in os.environ:
# # Use keys for local development
# session = boto3.Session(
# aws_access_key_id=os.environ.get("AWS_KEY_ID"),
# aws_secret_access_key=os.environ.get("AWS_SECRET_KEY"),
# )
# else:
# # In the Lambda, use permissions on the Lambda's role
# session = boto3.Session()
# s3 = session.resource("s3")
def compress_query(query: str) -> str:
query = query.replace("\n", "")
query = re.sub("\s+", " ", query)
return query
def head_commit_query(user: str, repo: str, branches: List[str]) -> str:
"""
Fetch the head commit for a list of branches
"""
def branch_part(branch: str, num: int) -> str:
return f"""
r{num}: repository(name: "{repo}", owner: "{user}") {{
ref(qualifiedName:"refs/heads/{branch}") {{
name
target {{
... on Commit {{
oid
}}
}}
}}
}}
"""
parts = [branch_part(branch, i) for i, branch in enumerate(branches)]
return "{" + "\n".join(parts) + "}"
def extract_gha(suites: List[Dict[str, Any]]) -> List[Dict[str, str]]:
jobs = []
for suite in suites:
suite = suite["node"]
if suite["workflowRun"] is None:
# If no jobs were triggered this will be empty
continue
workflow = suite["workflowRun"]["workflow"]["name"]
for run in suite["checkRuns"]["nodes"]:
conclusion = run["conclusion"]
if conclusion is None:
if run["status"].lower() == "queued":
conclusion = "queued"
elif run["status"].lower() == "in_progress":
conclusion = "pending"
else:
raise RuntimeError(f"unexpected run {run}")
jobs.append(
{
"name": f"{workflow} / {run['name']}",
"status": conclusion.lower(),
"url": run["detailsUrl"],
}
)
return jobs
def extract_status(contexts: List[Dict[str, Any]]) -> List[Dict[str, str]]:
jobs = []
for context in contexts:
jobs.append(
{
"name": context["context"],
"status": context["state"].lower(),
"url": context["targetUrl"],
}
)
return jobs
def extract_jobs(raw_commits: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
commits = []
for raw_commit in raw_commits:
if raw_commit["status"] is None:
# Will be none if no non-GHA jobs were triggered
status = []
else:
status = extract_status(raw_commit["status"]["contexts"])
gha = extract_gha(raw_commit["checkSuites"]["edges"])
jobs = status + gha
if raw_commit["author"]["user"] is None:
author = raw_commit["author"]["name"]
else:
author = raw_commit["author"]["user"]["login"]
commits.append(
{
"sha": raw_commit["oid"],
"headline": raw_commit["messageHeadline"],
"body": raw_commit["messageBody"],
"author": author,
"date": raw_commit["authoredDate"],
"jobs": jobs,
}
)
return commits
class BranchHandler:
def __init__(
self,
gql: Any,
user: str,
repo: str,
name: str,
head: str,
history_size: int,
fetch_size: int,
):
self.gql = gql
self.user = user
self.repo = repo
self.name = name
self.head = head
self.fetch_size = fetch_size
self.history_size = history_size
def write_to_s3(self, data: Any) -> None:
dir = Path(__file__).resolve().parent.parent / "public" / "statuses"
data[0]["updated_at"] = datetime.datetime.now().timestamp()
file = dir / self.user / self.repo / f"{self.name.replace('/', '_')}.json"
if not file.parent.exists():
print("Not exists!")
file.parent.mkdir(parents=True, exist_ok=True)
print("Making")
with open(file, "w") as f:
f.write(json.dumps(data, indent=2))
# content = json.dumps(data, default=str)
# buf = io.BytesIO()
# gzipfile = gzip.GzipFile(fileobj=buf, mode="w")
# gzipfile.write(content.encode())
# gzipfile.close()
# bucket = s3.Bucket(BUCKET)
# prefix = f"v6/{self.user}/{self.repo}/{self.name.replace('/', '_')}.json"
# bucket.put_object(
# Key=prefix,
# Body=buf.getvalue(),
# ContentType="application/json",
# ContentEncoding="gzip",
# Expires="0",
# )
print(f"Wrote {len(data)} commits from {self.name} to {file}")
def query(self, offset: int) -> str:
after = ""
# The cursor for fetches are formatted like after: "<sha> <offset>", but
# the first commit isn't included, so shift all the offsets and don't
# use an "after" for the first batch
if offset > 0:
after = f', after: "{self.head} {offset - 1}"'
return f"""
{{
repository(name: "{self.repo}", owner: "{self.user}") {{
ref(qualifiedName:"refs/heads/{self.name}") {{
name
target {{
... on Commit {{
history(first:{self.fetch_size}{after}) {{
nodes {{
oid
messageBody
messageHeadline
author {{
name
user {{
login
}}
}}
authoredDate
checkSuites(first:100) {{
edges {{
node {{
checkRuns(first:100) {{
nodes {{
name
status
conclusion
detailsUrl
}}
}}
workflowRun {{
workflow {{
name
}}
}}
}}
}}
}}
status {{
contexts {{
context
state
targetUrl
}}
}}
}}
}}
}}
}}
}}
}}
}}
"""
def check_response(self, gql_response: Any) -> None:
# Just check that this path in the dict exists
gql_response["data"]["repository"]["ref"]["target"]["history"]["nodes"]
async def run(self) -> None:
"""
Fetch history for the branch (in batches) and merge them all together
"""
# GitHub's API errors out if you try to fetch too much data at once, so
# split up the 100 commits into batches of 'self.fetch_size'
fetches = math.ceil(self.history_size / self.fetch_size)
async def fetch(i: int) -> Any:
try:
return await self.gql.query(
self.query(offset=self.fetch_size * i), verify=self.check_response
)
except Exception as e:
print(
f"Error: {e}\nFailed to fetch {self.user}/{self.repo}/{self.name} on batch {i} / {fetches}"
)
return None
coros = [fetch(i) for i in range(fetches)]
result = await asyncio.gather(*coros)
raw_commits = []
print(f"Parsing results {self.name}")
# Merge all the batches
for r in result:
if r is None:
continue
try:
commits_batch = r["data"]["repository"]["ref"]["target"]["history"][
"nodes"
]
raw_commits += commits_batch
except Exception as e:
# Errors here are expected if the branch has less than HISTORY_SIZE
# commits (GitHub will just time out). There's no easy way to find
# this number ahead of time and avoid errors, but if we had that
# then we could delete this try-catch.
print(f"Error: Didn't find history in commit batch: {e}\n{r}")
# Pull out the data and format it
commits = extract_jobs(raw_commits)
print(f"Writing results for {self.name} to S3")
# Store gzip'ed data to S3
# print(len(commits))
# print(commits)
self.write_to_s3(commits)
class PRHandler(BranchHandler):
def __init__(
self,
gql: Any,
user: str,
repo: str,
history_size: int,
fetch_size: int,
):
super().__init__(gql, user, repo, "prs", history_size, None, fetch_size)
def query(self, offset: int) -> str:
return f"""
{{
repository(name: "{self.repo}", owner: "{self.user}") {{
pullRequests(last:50) {{
nodes {{
number
title
commits(last:100) {{
nodes {{
commit {{
oid
messageBody
messageHeadline
author {{
name
user {{
login
}}
}}
authoredDate
checkSuites(first:5) {{
edges {{
node {{
checkRuns(first:5) {{
nodes {{
name
status
conclusion
detailsUrl
}}
}}
workflowRun {{
workflow {{
name
}}
}}
}}
}}
}}
status {{
contexts {{
context
state
targetUrl
}}
}}
}}
}}
}}
}}
}}
}}
}}
"""
async def run(self) -> None:
query = self.query(offset=0)
async def fetch(i: int):
try:
return await self.gql.query(query)
except Exception as e:
print(
f"Error: {e}\nFailed to fetch {self.user}/{self.repo} PRs"
)
return None
results = [await fetch(0)]
# print(results)
commits_to_prs = {}
commits = []
for result in results:
prs = result["data"]["repository"]["pullRequests"]["nodes"]
for pr in prs:
for commit in pr["commits"]["nodes"]:
commit = commit["commit"]
commits_to_prs[commit["oid"]] = pr
commits.append(commit)
data = extract_jobs(commits)
for item in data:
pr = commits_to_prs[item["sha"]]
item["pr"] = pr["number"]
item["pr_title"] = pr["title"]
data = list(reversed(sorted(data, key=lambda x: x["date"])))
self.write_to_s3(data)
class GraphQL:
def __init__(self, session: aiohttp.ClientSession) -> None:
self.session = session
def log_rate_limit(self, headers: Any) -> None:
remaining = headers.get("X-RateLimit-Remaining")
used = headers.get("X-RateLimit-Used")
total = headers.get("X-RateLimit-Limit")
reset_timestamp = int(headers.get("X-RateLimit-Reset", 0)) # type: ignore
reset = datetime.datetime.fromtimestamp(reset_timestamp).strftime(
"%a, %d %b %Y %H:%M:%S"
)
print(
f"[rate limit] Used {used}, {remaining} / {total} remaining, reset at {reset}"
)
async def query(
self,
query: str,
verify: Optional[Callable[[Any], None]] = None,
retries: int = 1,
) -> Any:
"""
Run an authenticated GraphQL query
"""
# Remove unnecessary white space
query = compress_query(query)
if retries <= 0:
raise RuntimeError(f"Query {query[:100]} failed, no retries left")
url = "https://api.github.com/graphql"
try:
async with self.session.post(url, json={"query": query}) as resp:
self.log_rate_limit(resp.headers)
r = await resp.json()
if "data" not in r:
raise RuntimeError(r)
if verify is not None:
verify(r)
return r
except Exception as e:
print(
f"Retrying query {query[:100]}, remaining attempts: {retries - 1}\n{e}"
)
return await self.query(query, verify=verify, retries=retries - 1)
async def main(
    user: str,
    repo: str,
    branches: List[str],
    history_size: int,
    fetch_size: int,
    prs: bool,
) -> None:
"""
Grab a list of all the head commits for each branch, then fetch all the jobs
for the last 'history_size' commits on that branch
"""
headers = {
"Authorization": f"token {GH_PAT}",
# "Authorization": "token {}".format(user_token(user)),
# "Accept": "application/vnd.github.machine-man-preview+json",
}
async with aiohttp.ClientSession(
headers=headers
) as aiosession:
gql = GraphQL(aiosession)
handlers = []
if prs:
handlers.append(PRHandler(gql, user, repo, history_size, fetch_size))
else:
print(f"Querying branches: {branches}")
heads = await gql.query(head_commit_query(user, repo, branches))
for head in heads["data"].values():
sha = head["ref"]["target"]["oid"]
branch = head["ref"]["name"]
handlers.append(
BranchHandler(gql, user, repo, branch, sha, history_size, fetch_size)
)
await asyncio.gather(*[h.run() for h in handlers])
def lambda_handler(event: Any, context: Any) -> None:
"""
'event' here is the payload configured from EventBridge (or set manually
via environment variables)
"""
data: Dict[str, Any] = {
"branches": None,
"user": None,
"repo": None,
"history_size": None,
"fetch_size": None,
"prs": False,
}
    for key in data.keys():
        if key in os.environ:
            data[key] = os.environ[key]
        elif key in event:
            data[key] = event[key]
    if event.get("prs"):
        # PR mode does not walk branch history, so fill in a placeholder value
        data["branches"] = "do not use"
    if any(x is None for x in data.values()):
        raise RuntimeError(
            "Data missing from configuration, it must be set as an environment "
            f"variable or as the input JSON payload in the Lambda event:\n{data}"
        )
    data["history_size"] = int(data["history_size"])
    data["fetch_size"] = int(data["fetch_size"])
    data["branches"] = data["branches"].split(",")
# return
asyncio.run(main(**data))
# if os.getenv("DEBUG", "0") == "1":
# # For local development
# lambda_handler(
# {
# "branches": "release/1.10",
# "user": "pytorch",
# "repo": "pytorch",
# "history_size": 100,
# "fetch_size": 10,
# },
# None,
# )
if __name__ == "__main__":
    # Command-line entry point for running the sync locally; AWS invokes lambda_handler directly
    parser = argparse.ArgumentParser(description="Update JSON in S3 for a branch")
    parser.add_argument("--branch")
    parser.add_argument("--prs", action="store_true")
    parser.add_argument("--repo", required=True)
    parser.add_argument("--user", required=True)
    parser.add_argument("--fetch_size", default=10)
    parser.add_argument("--history_size", default=100)
    args = parser.parse_args()
    if not args.prs and args.branch is None:
        raise RuntimeError("--prs or --branch <branch> must be used!")
    lambda_handler(
        {
            "branches": args.branch,
            "user": args.user,
            "repo": args.repo,
            "prs": args.prs,
            "history_size": int(args.history_size),
            "fetch_size": int(args.fetch_size),
        },
        None,
    )
|
import json
import shutil
import argparse
from tqdm import tqdm
from utils import get_current_time
from tools.supervisely_utils import *
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%d.%m.%Y %I:%M:%S',
filename='logs/{:s}.log'.format(Path(__file__).stem),
filemode='w',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(
input_dir: str,
class_groups: dict,
class_ids: dict,
df_project: pd.DataFrame,
output_dir: str,
) -> None:
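    # Copy the Supervisely project and, for each vessel-wall annotation, add a matching
    # lumen object obtained by filling the wall mask and subtracting the wall itself.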
    os.makedirs(output_dir, exist_ok=True)
shutil.copyfile(f'{input_dir}/meta.json', f'{output_dir}/meta.json')
shutil.copyfile(f'{input_dir}/obj_class_to_machine_color.json', f'{output_dir}/obj_class_to_machine_color.json')
datasets = list(set(df_project.dataset))
for dataset in datasets:
df_dataset = df_project[df_project.dataset == dataset]
if not os.path.isdir(os.path.join(output_dir, dataset)):
os.makedirs(os.path.join(output_dir, dataset, 'img'))
os.makedirs(os.path.join(output_dir, dataset, 'ann'))
for idx, row in tqdm(df_dataset.iterrows(), desc='Updating Supervisely dataset', unit=' ann'):
filename = row['filename']
img_path = row['img_path']
img_name = Path(img_path).name
shutil.copyfile(f'{img_path}', f'{output_dir}/{dataset}/img/{img_name}')
ann_path = row['ann_path']
ann_name = Path(ann_path).name
            with open(ann_path) as f:
                ann_data = json.load(f)
if len(ann_data['objects']) > 0:
new_objects = []
for obj in ann_data['objects']:
# Process walls
if obj['classTitle'] in class_groups:
wall_mask64 = obj['bitmap']['data']
wall_mask = base64_to_mask(wall_mask64)
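                        # Filling the holes in the wall mask gives the whole vessel area;
                        # subtracting the wall leaves only the inner cavity (lumen).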
filled_mask = binary_fill_holes(wall_mask.copy(), structure=None)
filled_mask = 255 * filled_mask.astype(np.uint8)
lumen_mask = filled_mask - wall_mask
# Check the size of a lumen mask
if cv2.countNonZero(lumen_mask) > 0:
# Append a wall object
new_objects.append(obj)
# Append a lumen object
lumen_mask = mask_to_base64(lumen_mask)
class_name = class_groups[obj['classTitle']]
new_objects.append(
{
'classId': class_ids[class_name],
'description': '',
'geometryType': 'bitmap',
'lablerLogin': 'Hunter_911',
'createdAt': get_current_time(),
'updatedAt': get_current_time(),
'tags': [],
'classTitle': class_name,
'bitmap': {
'data': lumen_mask,
'origin': [
int(obj['bitmap']['origin'][0]),
int(obj['bitmap']['origin'][1])
]
}
}
)
else:
                            logger.warning(f'The object {obj["classTitle"]} has no cavity, filename {filename}')
# Process lumens
elif obj['classTitle'] in class_ids:
continue
# Process other objects
else:
new_objects.append(obj)
ann_data['objects'] = new_objects
with open(f'{output_dir}/{dataset}/ann/{ann_name}', 'w') as outfile:
json.dump(ann_data, outfile)
else:
shutil.copyfile(f'{ann_path}', f'{output_dir}/{dataset}/ann/{ann_name}')
if __name__ == '__main__':
CLASS_GROUPS = {
'Capillary wall': 'Capillary lumen',
'Arteriole wall': 'Arteriole lumen',
'Venule wall': 'Venule lumen',
}
CLASS_IDS = {
'Capillary lumen': 9945624,
'Arteriole lumen': 9944173,
'Venule lumen': 9944174,
}
parser = argparse.ArgumentParser(description='Update mask dataset')
parser.add_argument('--project_dir', required=True, type=str)
parser.add_argument('--include_dirs', nargs='+', default=None, type=str)
parser.add_argument('--exclude_dirs', nargs='+', default=None, type=str)
parser.add_argument('--save_dir', required=True, type=str)
args = parser.parse_args()
df = read_sly_project(
project_dir=args.project_dir,
include_dirs=args.include_dirs,
exclude_dirs=args.exclude_dirs,
)
main(
input_dir=args.project_dir,
class_groups=CLASS_GROUPS,
class_ids=CLASS_IDS,
df_project=df,
output_dir=args.save_dir,
)
| import json
import shutil
import argparse
from tqdm import tqdm
from utils import get_current_time
from tools.supervisely_utils import *
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%d.%m.%Y %I:%M:%S',
filename='logs/{:s}.log'.format(Path(__file__).stem),
filemode='w',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main(
input_dir: str,
class_groups: dict,
class_ids: dict,
df_project: pd.DataFrame,
output_dir: str,
) -> None:
os.makedirs(output_dir) if not os.path.isdir(output_dir) else False
shutil.copyfile(f'{input_dir}/meta.json', f'{output_dir}/meta.json')
shutil.copyfile(f'{input_dir}/obj_class_to_machine_color.json', f'{output_dir}/obj_class_to_machine_color.json')
datasets = list(set(df_project.dataset))
for dataset in datasets:
df_dataset = df_project[df_project.dataset == dataset]
if not os.path.isdir(os.path.join(output_dir, dataset)):
os.makedirs(os.path.join(output_dir, dataset, 'img'))
os.makedirs(os.path.join(output_dir, dataset, 'ann'))
for idx, row in tqdm(df_dataset.iterrows(), desc='Updating Supervisely dataset', unit=' ann'):
filename = row['filename']
img_path = row['img_path']
img_name = Path(img_path).name
shutil.copyfile(f'{img_path}', f'{output_dir}/{dataset}/img/{img_name}')
ann_path = row['ann_path']
ann_name = Path(ann_path).name
f = open(ann_path)
ann_data = json.load(f)
if len(ann_data['objects']) > 0:
new_objects = []
for obj in ann_data['objects']:
# Process walls
if obj['classTitle'] in class_groups:
wall_mask64 = obj['bitmap']['data']
wall_mask = base64_to_mask(wall_mask64)
filled_mask = binary_fill_holes(wall_mask.copy(), structure=None)
filled_mask = 255 * filled_mask.astype(np.uint8)
lumen_mask = filled_mask - wall_mask
# Check the size of a lumen mask
if cv2.countNonZero(lumen_mask) > 0:
# Append a wall object
new_objects.append(obj)
# Append a lumen object
lumen_mask = mask_to_base64(lumen_mask)
class_name = class_groups[obj['classTitle']]
new_objects.append(
{
'classId': class_ids[class_name],
'description': '',
'geometryType': 'bitmap',
'lablerLogin': 'Hunter_911',
'createdAt': get_current_time(),
'updatedAt': get_current_time(),
'tags': [],
'classTitle': class_name,
'bitmap': {
'data': lumen_mask,
'origin': [
int(obj['bitmap']['origin'][0]),
int(obj['bitmap']['origin'][1])
]
}
}
)
else:
logger.warning(f'The object {obj["classTitle"]} has no cavity, filename {filename}')
# Process lumens
elif obj['classTitle'] in class_ids:
continue
# Process other objects
else:
new_objects.append(obj)
ann_data['objects'] = new_objects
with open(f'{output_dir}/{dataset}/ann/{ann_name}', 'w') as outfile:
json.dump(ann_data, outfile)
else:
shutil.copyfile(f'{ann_path}', f'{output_dir}/{dataset}/ann/{ann_name}')
if __name__ == '__main__':
CLASS_GROUPS = {
'Capillary wall': 'Capillary lumen',
'Arteriole wall': 'Arteriole lumen',
'Venule wall': 'Venule lumen',
}
CLASS_IDS = {
'Capillary lumen': 9945624,
'Arteriole lumen': 9944173,
'Venule lumen': 9944174,
}
parser = argparse.ArgumentParser(description='Update mask dataset')
parser.add_argument('--project_dir', required=True, type=str)
parser.add_argument('--include_dirs', nargs='+', default=None, type=str)
parser.add_argument('--exclude_dirs', nargs='+', default=None, type=str)
parser.add_argument('--save_dir', required=True, type=str)
args = parser.parse_args()
df = read_sly_project(
project_dir=args.project_dir,
include_dirs=args.include_dirs,
exclude_dirs=args.exclude_dirs,
)
main(
input_dir=args.project_dir,
class_groups=CLASS_GROUPS,
class_ids=CLASS_IDS,
df_project=df,
output_dir=args.save_dir,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import json
import textwrap
import unittest
from typing import Dict, List, Optional, Tuple
from unittest.mock import call, patch
from .. import UserError, errors
from ..ast import UnstableAST
from ..errors import (
Errors,
PartialErrorSuppression,
SkippingGeneratedFileException,
SkippingUnparseableFileException,
_map_line_to_start_of_range,
_get_unused_ignore_codes,
_line_ranges_spanned_by_format_strings,
_remove_unused_ignores,
_relocate_errors,
_suppress_errors,
)
unittest.util._MAX_LENGTH = 200
class ErrorsTest(unittest.TestCase):
def test_from_json(self) -> None:
self.assertEqual(
Errors.from_json('[{ "path": "test.py", "key": "value" }]'),
Errors([{"path": "test.py", "key": "value"}]),
)
with patch(
"sys.stdin.read", return_value='[{ "path": "test.py", "key": "value" }]'
):
self.assertEqual(
Errors.from_stdin(), Errors([{"path": "test.py", "key": "value"}])
)
self.assertEqual(
Errors.from_json(
json.dumps(
[
{"path": "test.py", "key": "value", "code": 1},
{"path": "test.py", "key": "value", "code": 2},
]
),
only_fix_error_code=1,
),
Errors([{"path": "test.py", "key": "value", "code": 1}]),
)
with patch(
"sys.stdin.read",
return_value=json.dumps(
[
{"path": "test.py", "key": "value", "code": 1},
{"path": "test.py", "key": "value", "code": 2},
]
),
):
self.assertEqual(
Errors.from_stdin(only_fix_error_code=1),
Errors([{"path": "test.py", "key": "value", "code": 1}]),
)
with self.assertRaises(UserError):
Errors.from_json('[{ "path": "test.py", "key": "value" }')
def test_paths_to_errors(self) -> None:
errors = Errors(
[
{"path": "test1.py", "key": "value", "code": 1},
{"path": "test2.py", "key": "value", "code": 2},
{"path": "test1.py", "key": "value", "code": 3},
]
)
self.assertEqual(
errors.paths_to_errors,
{
"test1.py": [
{"code": 1, "key": "value", "path": "test1.py"},
{"code": 3, "key": "value", "path": "test1.py"},
],
"test2.py": [{"code": 2, "key": "value", "path": "test2.py"}],
},
)
@patch.object(errors.Path, "read_text", return_value="")
@patch.object(errors.Path, "write_text")
def test_suppress(self, path_write_text, path_read_text) -> None:
# Test run on multiple files.
with patch(f"{errors.__name__}._suppress_errors", return_value="<transformed>"):
Errors(
[
{
"path": "path.py",
"line": 1,
"concise_description": "Error [1]: description",
},
{
"path": "other.py",
"line": 2,
"concise_description": "Error [2]: description",
},
]
).suppress()
path_read_text.assert_has_calls([call(), call()])
path_write_text.assert_has_calls(
[call("<transformed>"), call("<transformed>")]
)
with patch(f"{errors.__name__}._suppress_errors", side_effect=UnstableAST()):
with self.assertRaises(PartialErrorSuppression) as context:
Errors(
[
{
"path": "path.py",
"line": 1,
"concise_description": "Error [1]: description",
},
{
"path": "other.py",
"line": 2,
"concise_description": "Error [2]: description",
},
]
).suppress()
self.assertEqual(
set(context.exception.unsuppressed_paths), {"path.py", "other.py"}
)
def test_get_unused_ignore_codes(self) -> None:
self.assertEqual(
_get_unused_ignore_codes(
[
{
"code": "0",
"description": "The `pyre-ignore[1, 9]` or `pyre-fixme[1, 9]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
),
[1, 9],
)
self.assertEqual(
_get_unused_ignore_codes(
[
{
"code": "0",
"description": "The `pyre-ignore[1, 9]` or `pyre-fixme[1, 9]` "
+ "comment is not suppressing type errors, please remove it.",
},
{
"code": "0",
"description": "The `pyre-ignore[2]` or `pyre-fixme[2]` "
+ "comment is not suppressing type errors, please remove it.",
},
]
),
[1, 2, 9],
)
self.assertEqual(
_get_unused_ignore_codes(
[
{
"code": "1",
"description": "The `pyre-ignore[1, 9]` or `pyre-fixme[1, 9]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
),
[],
)
self.assertEqual(
_get_unused_ignore_codes(
[
{
"code": "1",
"description": "The `pyre-ignore[]` or `pyre-fixme[]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
),
[],
)
@patch.object(errors, "_get_unused_ignore_codes")
def test_remove_unused_ignores(self, get_unused_ignore_codes) -> None:
get_unused_ignore_codes.return_value = [1, 3, 4]
self.assertEqual(
_remove_unused_ignores("# pyre-fixme[1, 2, 3, 4]: Comment", []),
"# pyre-fixme[2]: Comment",
)
get_unused_ignore_codes.return_value = [1, 2, 3, 4]
self.assertEqual(
_remove_unused_ignores("# pyre-fixme[1, 2, 3, 4]: Comment", []), ""
)
get_unused_ignore_codes.return_value = [1]
self.assertEqual(
_remove_unused_ignores("# pyre-fixme[2, 3, 4]: Comment", []),
"# pyre-fixme[2, 3, 4]: Comment",
)
get_unused_ignore_codes.return_value = [1, 2]
self.assertEqual(_remove_unused_ignores("# pyre-fixme: Comment", []), "")
get_unused_ignore_codes.return_value = [1, 2]
self.assertEqual(
_remove_unused_ignores(
"# Unrelated comment. # pyre-fixme[1, 2]: Comment", []
),
"# Unrelated comment.",
)
get_unused_ignore_codes.return_value = [1, 3, 4]
self.assertEqual(
_remove_unused_ignores("# pyre-fixme [1, 2, 3, 4]: Comment", []),
"# pyre-fixme [2]: Comment",
)
def assertSuppressErrors(
self,
errors: Dict[int, List[Dict[str, str]]],
input: str,
expected_output: str,
*,
custom_comment: Optional[str] = None,
max_line_length: Optional[int] = None,
truncate: bool = False,
unsafe: bool = False,
) -> None:
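        # Fixtures spell the suppression marker as "FIXME" so this test file never contains
        # literal pyre-fixme comments; _normalize rewrites them before comparison.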
def _normalize(input: str) -> str:
return textwrap.dedent(input).strip().replace("FIXME", "pyre-fixme")
self.assertEqual(
_suppress_errors(
_normalize(input),
errors,
custom_comment,
max_line_length,
truncate,
unsafe,
),
_normalize(expected_output),
)
def test_suppress_errors(self) -> None:
self.assertSuppressErrors(
{},
"""
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
# Basic error suppression
self.assertSuppressErrors(
{1: [{"code": "1", "description": "description"}]},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: description
def foo() -> None: pass
""",
)
# Avoid duplicate error messages
self.assertSuppressErrors(
{
1: [
{"code": "1", "description": "description 1"},
{"code": "2", "description": "description duplicate"},
{"code": "2", "description": "description duplicate"},
{"code": "1", "description": "description 2"},
]
},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: description 1
# FIXME[2]: description duplicate
# FIXME[1]: description 2
def foo() -> None: pass
""",
)
# Indentation is correct.
self.assertSuppressErrors(
{2: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
pass
""",
"""
def foo() -> None:
# FIXME[1]: description
pass
""",
)
# Skip files with parse errors.
with self.assertRaises(SkippingUnparseableFileException):
_suppress_errors(
"input", {1: [{"code": "404", "description": "description"}]}
)
# Skip generated files.
with self.assertRaises(SkippingGeneratedFileException):
_suppress_errors("# @" "generated", {})
# Do not check for generated files with --unsafe.
try:
_suppress_errors(
"# @" "generated",
{},
custom_comment=None,
max_line_length=None,
truncate=False,
unsafe=True,
)
except SkippingGeneratedFileException:
self.fail("Unexpected `SkippingGeneratedFileException` exception.")
# Custom message.
self.assertSuppressErrors(
{1: [{"code": "1", "description": "description"}]},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: T1234
def foo() -> None: pass
""",
custom_comment="T1234",
)
# Existing Comment
self.assertSuppressErrors(
{2: [{"code": "1", "description": "description"}]},
"""
# comment
def foo() -> None: pass
""",
"""
# comment
# FIXME[1]: description
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
def foo() -> None: # FIXME[1]
# comment
pass
""",
"""
def foo() -> None:
# comment
pass
""",
)
# Multiple Errors
self.assertSuppressErrors(
{
1: [{"code": "1", "description": "description"}],
2: [{"code": "2", "description": "description"}],
},
"""
def foo() -> None:
pass
""",
"""
# FIXME[1]: description
def foo() -> None:
# FIXME[2]: description
pass
""",
)
self.assertSuppressErrors(
{
1: [
{"code": "1", "description": "description"},
{"code": "2", "description": "description"},
]
},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: description
# FIXME[2]: description
def foo() -> None: pass
""",
)
# Line length limit
self.assertSuppressErrors(
{1: [{"code": "1", "description": "description"}]},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]:
# description
def foo() -> None: pass
""",
max_line_length=20,
)
# Remove unused ignores.
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
# FIXME[0]: ignore
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
# FIXME[0]: ignore
# over multple lines
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
# FIXME[0]: ignore
# over multple lines
# FIXME[1]: description
def foo() -> None: pass
""",
"""
# FIXME[1]: description
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
def foo() -> None: pass # FIXME[0]: ignore
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [{"code": "0", "description": "description"}],
2: [{"code": "0", "description": "description"}],
},
"""
# FIXME[1]: ignore
# FIXME[2]: ignore
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{"code": "0", "description": "description"},
{"code": "2", "description": "new error"},
]
},
"""
def foo() -> None: pass # FIXME[1]
""",
"""
# FIXME[2]: new error
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{"code": "2", "description": "new error"},
{"code": "0", "description": "description"},
]
},
"""
def foo() -> None: pass # FIXME[1]
""",
"""
# FIXME[2]: new error
def foo() -> None: pass
""",
)
# Remove unused ignores by error code.
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1]` or `pyre-fixme[1]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
},
"""
def foo() -> None: pass # FIXME[1, 2]
""",
"""
def foo() -> None: pass # FIXME[2]
""",
)
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1, 3]` or `pyre-fixme[1, 3]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
},
"""
# FIXME[1, 2, 3]
# Continuation comment.
def foo() -> None: pass
""",
"""
# FIXME[2]
# Continuation comment.
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1, 3]` or `pyre-fixme[1, 3]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
},
"""
# FIXME[1, 2, 3]: Comment[Comment]
def foo() -> None: pass
""",
"""
# FIXME[2]: Comment[Comment]
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1, 3]` or `pyre-fixme[1, 3]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
},
"""
# FIXME[1, 3]
# Continuation comment.
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1, 3]` or `pyre-fixme[1, 3]` "
+ "comment is not suppressing type errors, please remove it.",
}
],
2: [{"code": "4", "description": "Description."}],
},
"""
# FIXME[1, 2, 3]
def foo() -> None: pass
""",
"""
# FIXME[2]
# FIXME[4]: Description.
def foo() -> None: pass
""",
)
# Truncate long comments.
self.assertSuppressErrors(
{1: [{"code": "1", "description": "description"}]},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: descr...
def foo() -> None: pass
""",
max_line_length=25,
truncate=True,
)
self.assertSuppressErrors(
{
1: [
{
"code": "1",
"description": "this description takes up over four lines \
of content when it is split, given the max line length",
}
]
},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: this ...
def foo() -> None: pass
""",
max_line_length=25,
)
# Line breaks without errors.
self.assertSuppressErrors(
{},
"""
def foo() -> None:
\"\"\"
Random line break that won't parse.
/!\\
Text.
\"\"\"
pass
""",
"""
def foo() -> None:
\"\"\"
Random line break that won't parse.
/!\\
Text.
\"\"\"
pass
""",
)
# Line breaks.
self.assertSuppressErrors(
{
3: [{"code": "1", "description": "description"}],
4: [{"code": "2", "description": "description"}],
},
"""
def foo() -> None:
x = something + \\
error() + \\
error() # unrelated comment
""",
"""
def foo() -> None:
x = (something +
# FIXME[1]: description
error() +
# FIXME[2]: description
error()) # unrelated comment
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
x, y, z = \\
error()
""",
"""
def foo() -> None:
x, y, z = (
# FIXME[1]: description
error())
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
del \\
error
""",
"""
def foo() -> None:
del (
# FIXME[1]: description
error)
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
assert \\
test
""",
"""
def foo() -> None:
assert (
# FIXME[1]: description
test)
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
assert test + \\
test2
""",
"""
def foo() -> None:
assert (test +
# FIXME[1]: description
test2)
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
raise \\
Exception()
""",
"""
def foo() -> None:
raise (
# FIXME[1]: description
Exception())
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
return a + \\
error
""",
"""
def foo() -> None:
return (a +
# FIXME[1]: description
error)
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
return \\
error
""",
"""
def foo() -> None:
return (
# FIXME[1]: description
error)
""",
)
def test_suppress_errors__long_class_name(self) -> None:
self.assertSuppressErrors(
{
1: [
{
"code": "1",
"description": "This is a \
really.long.class.name.exceeding.twenty.five.Characters",
}
]
},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: This ...
def foo() -> None: pass
""",
max_line_length=25,
)
def test_suppress_errors__manual_import(self) -> None:
self.assertSuppressErrors(
{
3: [{"code": "21", "description": "description"}],
4: [{"code": "21", "description": "description"}],
},
"""
from a import b
# @manual=//special:case
from a import c
from a import d
""",
"""
from a import b
# FIXME[21]: description
# @manual=//special:case
from a import c
# FIXME[21]: description
from a import d
""",
)
def test_suppress_errors__format_string(self) -> None:
self.assertSuppressErrors(
{
4: [
{
"code": "42",
"description": "Some error",
}
],
5: [
{
"code": "42",
"description": "Some error",
},
{
"code": "43",
"description": "Some error",
},
],
},
"""
def foo() -> None:
f\"\"\"
foo
{1 + "hello"}
{"world" + int("a")}
bar
\"\"\"
""",
"""
def foo() -> None:
# FIXME[42]: Some error
# FIXME[43]: Some error
f\"\"\"
foo
{1 + "hello"}
{"world" + int("a")}
bar
\"\"\"
""",
)
self.assertSuppressErrors(
{
4: [
{
"code": "42",
"description": "Some error 1",
},
{
"code": "42",
"description": "Some error 2",
},
],
},
"""
def foo() -> None:
f\"\"\"
foo
{1 + "hello"}
{"world" + int("a")}
bar
\"\"\"
""",
"""
def foo() -> None:
# FIXME[42]: Some error 1
# FIXME[42]: Some error 2
f\"\"\"
foo
{1 + "hello"}
{"world" + int("a")}
bar
\"\"\"
""",
)
def assertLinesSpanned(
self, source: str, expected_lines: List[Tuple[int, int]]
) -> None:
self.assertEqual(
list(
_line_ranges_spanned_by_format_strings(textwrap.dedent(source)).values()
),
expected_lines,
)
def test_lines_spanned_by_format_strings(self) -> None:
self.assertLinesSpanned(
"""
def foo() -> None:
f\"\"\"
foo
{1 + "hello"}
bar
\"\"\"
f\"\"\"
bar
\"\"\"
""",
[(3, 7), (9, 11)],
)
self.assertLinesSpanned(
"""
def foo() -> None:
f"{1 + "hello"}"
""",
[(3, 3)],
)
# Skip checking of format strings in case libcst barfs on the parsing.
self.assertLinesSpanned(
"""
def cannot_parse()
""",
[],
)
def test_map_line_to_start_of_range(self) -> None:
self.assertEqual(
_map_line_to_start_of_range([(3, 3), (3, 5), (9, 13)]),
{3: 3, 4: 3, 5: 3, 9: 9, 10: 9, 11: 9, 12: 9, 13: 9},
)
self.assertEqual(
_map_line_to_start_of_range([]),
{},
)
# Intervals shouldn't overlap, but if they do, we will prefer the earlier one.
self.assertEqual(
_map_line_to_start_of_range([(3, 5), (4, 6)]),
{3: 3, 4: 3, 5: 3, 6: 4},
)
def test_relocate_errors(self) -> None:
errors = {
1: [
{"code": "1", "description": "description"},
{"code": "2", "description": "description"},
],
2: [
{"code": "3", "description": "description"},
{"code": "4", "description": "description"},
],
3: [
{"code": "5", "description": "description"},
{"code": "6", "description": "description"},
],
}
self.assertEqual(
_relocate_errors(
errors,
{},
),
errors,
)
self.assertEqual(
_relocate_errors(
errors,
{2: 1, 3: 1},
),
{
1: [
{"code": "1", "description": "description"},
{"code": "2", "description": "description"},
{"code": "3", "description": "description"},
{"code": "4", "description": "description"},
{"code": "5", "description": "description"},
{"code": "6", "description": "description"},
],
},
)
self.assertEqual(
_relocate_errors(
errors,
{1: 1, 2: 2, 3: 2},
),
{
1: [
{"code": "1", "description": "description"},
{"code": "2", "description": "description"},
],
2: [
{"code": "3", "description": "description"},
{"code": "4", "description": "description"},
{"code": "5", "description": "description"},
{"code": "6", "description": "description"},
],
},
)
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import json
import textwrap
import unittest
from typing import Dict, List, Optional, Tuple
from unittest.mock import call, patch
from .. import UserError, errors
from ..ast import UnstableAST
from ..errors import (
Errors,
PartialErrorSuppression,
SkippingGeneratedFileException,
SkippingUnparseableFileException,
_map_line_to_start_of_range,
_get_unused_ignore_codes,
_line_ranges_spanned_by_format_strings,
_remove_unused_ignores,
_relocate_errors,
_suppress_errors,
)
unittest.util._MAX_LENGTH = 200
class ErrorsTest(unittest.TestCase):
def test_from_json(self) -> None:
self.assertEqual(
Errors.from_json('[{ "path": "test.py", "key": "value" }]'),
Errors([{"path": "test.py", "key": "value"}]),
)
with patch(
"sys.stdin.read", return_value='[{ "path": "test.py", "key": "value" }]'
):
self.assertEqual(
Errors.from_stdin(), Errors([{"path": "test.py", "key": "value"}])
)
self.assertEqual(
Errors.from_json(
json.dumps(
[
{"path": "test.py", "key": "value", "code": 1},
{"path": "test.py", "key": "value", "code": 2},
]
),
only_fix_error_code=1,
),
Errors([{"path": "test.py", "key": "value", "code": 1}]),
)
with patch(
"sys.stdin.read",
return_value=json.dumps(
[
{"path": "test.py", "key": "value", "code": 1},
{"path": "test.py", "key": "value", "code": 2},
]
),
):
self.assertEqual(
Errors.from_stdin(only_fix_error_code=1),
Errors([{"path": "test.py", "key": "value", "code": 1}]),
)
with self.assertRaises(UserError):
Errors.from_json('[{ "path": "test.py", "key": "value" }')
def test_paths_to_errors(self) -> None:
errors = Errors(
[
{"path": "test1.py", "key": "value", "code": 1},
{"path": "test2.py", "key": "value", "code": 2},
{"path": "test1.py", "key": "value", "code": 3},
]
)
self.assertEqual(
errors.paths_to_errors,
{
"test1.py": [
{"code": 1, "key": "value", "path": "test1.py"},
{"code": 3, "key": "value", "path": "test1.py"},
],
"test2.py": [{"code": 2, "key": "value", "path": "test2.py"}],
},
)
@patch.object(errors.Path, "read_text", return_value="")
@patch.object(errors.Path, "write_text")
def test_suppress(self, path_write_text, path_read_text) -> None:
# Test run on multiple files.
with patch(f"{errors.__name__}._suppress_errors", return_value="<transformed>"):
Errors(
[
{
"path": "path.py",
"line": 1,
"concise_description": "Error [1]: description",
},
{
"path": "other.py",
"line": 2,
"concise_description": "Error [2]: description",
},
]
).suppress()
path_read_text.assert_has_calls([call(), call()])
path_write_text.assert_has_calls(
[call("<transformed>"), call("<transformed>")]
)
with patch(f"{errors.__name__}._suppress_errors", side_effect=UnstableAST()):
with self.assertRaises(PartialErrorSuppression) as context:
Errors(
[
{
"path": "path.py",
"line": 1,
"concise_description": "Error [1]: description",
},
{
"path": "other.py",
"line": 2,
"concise_description": "Error [2]: description",
},
]
).suppress()
self.assertEqual(
set(context.exception.unsuppressed_paths), {"path.py", "other.py"}
)
def test_get_unused_ignore_codes(self) -> None:
self.assertEqual(
_get_unused_ignore_codes(
[
{
"code": "0",
"description": "The `pyre-ignore[1, 9]` or `pyre-fixme[1, 9]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
),
[1, 9],
)
self.assertEqual(
_get_unused_ignore_codes(
[
{
"code": "0",
"description": "The `pyre-ignore[1, 9]` or `pyre-fixme[1, 9]` "
+ "comment is not suppressing type errors, please remove it.",
},
{
"code": "0",
"description": "The `pyre-ignore[2]` or `pyre-fixme[2]` "
+ "comment is not suppressing type errors, please remove it.",
},
]
),
[1, 2, 9],
)
self.assertEqual(
_get_unused_ignore_codes(
[
{
"code": "1",
"description": "The `pyre-ignore[1, 9]` or `pyre-fixme[1, 9]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
),
[],
)
self.assertEqual(
_get_unused_ignore_codes(
[
{
"code": "1",
"description": "The `pyre-ignore[]` or `pyre-fixme[]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
),
[],
)
@patch.object(errors, "_get_unused_ignore_codes")
def test_remove_unused_ignores(self, get_unused_ignore_codes) -> None:
get_unused_ignore_codes.return_value = [1, 3, 4]
self.assertEqual(
_remove_unused_ignores("# pyre-fixme[1, 2, 3, 4]: Comment", []),
"# pyre-fixme[2]: Comment",
)
get_unused_ignore_codes.return_value = [1, 2, 3, 4]
self.assertEqual(
_remove_unused_ignores("# pyre-fixme[1, 2, 3, 4]: Comment", []), ""
)
get_unused_ignore_codes.return_value = [1]
self.assertEqual(
_remove_unused_ignores("# pyre-fixme[2, 3, 4]: Comment", []),
"# pyre-fixme[2, 3, 4]: Comment",
)
get_unused_ignore_codes.return_value = [1, 2]
self.assertEqual(_remove_unused_ignores("# pyre-fixme: Comment", []), "")
get_unused_ignore_codes.return_value = [1, 2]
self.assertEqual(
_remove_unused_ignores(
"# Unrelated comment. # pyre-fixme[1, 2]: Comment", []
),
"# Unrelated comment.",
)
get_unused_ignore_codes.return_value = [1, 3, 4]
self.assertEqual(
_remove_unused_ignores("# pyre-fixme [1, 2, 3, 4]: Comment", []),
"# pyre-fixme [2]: Comment",
)
def assertSuppressErrors(
self,
errors: Dict[int, List[Dict[str, str]]],
input: str,
expected_output: str,
*,
custom_comment: Optional[str] = None,
max_line_length: Optional[int] = None,
truncate: bool = False,
unsafe: bool = False,
) -> None:
def _normalize(input: str) -> str:
return textwrap.dedent(input).strip().replace("FIXME", "pyre-fixme")
self.assertEqual(
_suppress_errors(
_normalize(input),
errors,
custom_comment,
max_line_length,
truncate,
unsafe,
),
_normalize(expected_output),
)
def test_suppress_errors(self) -> None:
self.assertSuppressErrors(
{},
"""
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
# Basic error suppression
self.assertSuppressErrors(
{1: [{"code": "1", "description": "description"}]},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: description
def foo() -> None: pass
""",
)
# Avoid duplicate error messages
self.assertSuppressErrors(
{
1: [
{"code": "1", "description": "description 1"},
{"code": "2", "description": "description duplicate"},
{"code": "2", "description": "description duplicate"},
{"code": "1", "description": "description 2"},
]
},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: description 1
# FIXME[2]: description duplicate
# FIXME[1]: description 2
def foo() -> None: pass
""",
)
# Indentation is correct.
self.assertSuppressErrors(
{2: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
pass
""",
"""
def foo() -> None:
# FIXME[1]: description
pass
""",
)
# Skip files with parse errors.
with self.assertRaises(SkippingUnparseableFileException):
_suppress_errors(
"input", {1: [{"code": "404", "description": "description"}]}
)
# Skip generated files.
with self.assertRaises(SkippingGeneratedFileException):
_suppress_errors("# @" "generated", {})
# Do not check for generated files with --unsafe.
try:
_suppress_errors(
"# @" "generated",
{},
custom_comment=None,
max_line_length=None,
truncate=False,
unsafe=True,
)
except SkippingGeneratedFileException:
self.fail("Unexpected `SkippingGeneratedFileException` exception.")
# Custom message.
self.assertSuppressErrors(
{1: [{"code": "1", "description": "description"}]},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: T1234
def foo() -> None: pass
""",
custom_comment="T1234",
)
# Existing Comment
self.assertSuppressErrors(
{2: [{"code": "1", "description": "description"}]},
"""
# comment
def foo() -> None: pass
""",
"""
# comment
# FIXME[1]: description
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
def foo() -> None: # FIXME[1]
# comment
pass
""",
"""
def foo() -> None:
# comment
pass
""",
)
# Multiple Errors
self.assertSuppressErrors(
{
1: [{"code": "1", "description": "description"}],
2: [{"code": "2", "description": "description"}],
},
"""
def foo() -> None:
pass
""",
"""
# FIXME[1]: description
def foo() -> None:
# FIXME[2]: description
pass
""",
)
self.assertSuppressErrors(
{
1: [
{"code": "1", "description": "description"},
{"code": "2", "description": "description"},
]
},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: description
# FIXME[2]: description
def foo() -> None: pass
""",
)
# Line length limit
self.assertSuppressErrors(
{1: [{"code": "1", "description": "description"}]},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]:
# description
def foo() -> None: pass
""",
max_line_length=20,
)
# Remove unused ignores.
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
# FIXME[0]: ignore
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
# FIXME[0]: ignore
# over multple lines
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
# FIXME[0]: ignore
# over multple lines
# FIXME[1]: description
def foo() -> None: pass
""",
"""
# FIXME[1]: description
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{1: [{"code": "0", "description": "description"}]},
"""
def foo() -> None: pass # FIXME[0]: ignore
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [{"code": "0", "description": "description"}],
2: [{"code": "0", "description": "description"}],
},
"""
# FIXME[1]: ignore
# FIXME[2]: ignore
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{"code": "0", "description": "description"},
{"code": "2", "description": "new error"},
]
},
"""
def foo() -> None: pass # FIXME[1]
""",
"""
# FIXME[2]: new error
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{"code": "2", "description": "new error"},
{"code": "0", "description": "description"},
]
},
"""
def foo() -> None: pass # FIXME[1]
""",
"""
# FIXME[2]: new error
def foo() -> None: pass
""",
)
# Remove unused ignores by error code.
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1]` or `pyre-fixme[1]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
},
"""
def foo() -> None: pass # FIXME[1, 2]
""",
"""
def foo() -> None: pass # FIXME[2]
""",
)
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1, 3]` or `pyre-fixme[1, 3]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
},
"""
# FIXME[1, 2, 3]
# Continuation comment.
def foo() -> None: pass
""",
"""
# FIXME[2]
# Continuation comment.
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1, 3]` or `pyre-fixme[1, 3]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
},
"""
# FIXME[1, 2, 3]: Comment[Comment]
def foo() -> None: pass
""",
"""
# FIXME[2]: Comment[Comment]
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1, 3]` or `pyre-fixme[1, 3]` "
+ "comment is not suppressing type errors, please remove it.",
}
]
},
"""
# FIXME[1, 3]
# Continuation comment.
def foo() -> None: pass
""",
"""
def foo() -> None: pass
""",
)
self.assertSuppressErrors(
{
1: [
{
"code": "0",
"description": "The `pyre-ignore[1, 3]` or `pyre-fixme[1, 3]` "
+ "comment is not suppressing type errors, please remove it.",
}
],
2: [{"code": "4", "description": "Description."}],
},
"""
# FIXME[1, 2, 3]
def foo() -> None: pass
""",
"""
# FIXME[2]
# FIXME[4]: Description.
def foo() -> None: pass
""",
)
# Truncate long comments.
self.assertSuppressErrors(
{1: [{"code": "1", "description": "description"}]},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: descr...
def foo() -> None: pass
""",
max_line_length=25,
truncate=True,
)
self.assertSuppressErrors(
{
1: [
{
"code": "1",
"description": "this description takes up over four lines \
of content when it is split, given the max line length",
}
]
},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: this ...
def foo() -> None: pass
""",
max_line_length=25,
)
# Line breaks without errors.
self.assertSuppressErrors(
{},
"""
def foo() -> None:
\"\"\"
Random line break that won't parse.
/!\\
Text.
\"\"\"
pass
""",
"""
def foo() -> None:
\"\"\"
Random line break that won't parse.
/!\\
Text.
\"\"\"
pass
""",
)
# Line breaks.
self.assertSuppressErrors(
{
3: [{"code": "1", "description": "description"}],
4: [{"code": "2", "description": "description"}],
},
"""
def foo() -> None:
x = something + \\
error() + \\
error() # unrelated comment
""",
"""
def foo() -> None:
x = (something +
# FIXME[1]: description
error() +
# FIXME[2]: description
error()) # unrelated comment
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
x, y, z = \\
error()
""",
"""
def foo() -> None:
x, y, z = (
# FIXME[1]: description
error())
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
del \\
error
""",
"""
def foo() -> None:
del (
# FIXME[1]: description
error)
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
assert \\
test
""",
"""
def foo() -> None:
assert (
# FIXME[1]: description
test)
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
assert test + \\
test2
""",
"""
def foo() -> None:
assert (test +
# FIXME[1]: description
test2)
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
raise \\
Exception()
""",
"""
def foo() -> None:
raise (
# FIXME[1]: description
Exception())
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
return a + \\
error
""",
"""
def foo() -> None:
return (a +
# FIXME[1]: description
error)
""",
)
self.assertSuppressErrors(
{3: [{"code": "1", "description": "description"}]},
"""
def foo() -> None:
return \\
error
""",
"""
def foo() -> None:
return (
# FIXME[1]: description
error)
""",
)
def test_suppress_errors__long_class_name(self) -> None:
self.assertSuppressErrors(
{
1: [
{
"code": "1",
"description": "This is a \
really.long.class.name.exceeding.twenty.five.Characters",
}
]
},
"""
def foo() -> None: pass
""",
"""
# FIXME[1]: This ...
def foo() -> None: pass
""",
max_line_length=25,
)
def test_suppress_errors__manual_import(self) -> None:
self.assertSuppressErrors(
{
3: [{"code": "21", "description": "description"}],
4: [{"code": "21", "description": "description"}],
},
"""
from a import b
# @manual=//special:case
from a import c
from a import d
""",
"""
from a import b
# FIXME[21]: description
# @manual=//special:case
from a import c
# FIXME[21]: description
from a import d
""",
)
def test_suppress_errors__format_string(self) -> None:
self.assertSuppressErrors(
{
4: [
{
"code": "42",
"description": "Some error",
}
],
5: [
{
"code": "42",
"description": "Some error",
},
{
"code": "43",
"description": "Some error",
},
],
},
"""
def foo() -> None:
f\"\"\"
foo
{1 + "hello"}
{"world" + int("a")}
bar
\"\"\"
""",
"""
def foo() -> None:
# FIXME[42]: Some error
# FIXME[43]: Some error
f\"\"\"
foo
{1 + "hello"}
{"world" + int("a")}
bar
\"\"\"
""",
)
self.assertSuppressErrors(
{
4: [
{
"code": "42",
"description": "Some error 1",
},
{
"code": "42",
"description": "Some error 2",
},
],
},
"""
def foo() -> None:
f\"\"\"
foo
{1 + "hello"}
{"world" + int("a")}
bar
\"\"\"
""",
"""
def foo() -> None:
# FIXME[42]: Some error 1
# FIXME[42]: Some error 2
f\"\"\"
foo
{1 + "hello"}
{"world" + int("a")}
bar
\"\"\"
""",
)
def assertLinesSpanned(
self, source: str, expected_lines: List[Tuple[int, int]]
) -> None:
self.assertEqual(
list(
_line_ranges_spanned_by_format_strings(textwrap.dedent(source)).values()
),
expected_lines,
)
def test_lines_spanned_by_format_strings(self) -> None:
self.assertLinesSpanned(
"""
def foo() -> None:
f\"\"\"
foo
{1 + "hello"}
bar
\"\"\"
f\"\"\"
bar
\"\"\"
""",
[(3, 7), (9, 11)],
)
self.assertLinesSpanned(
"""
def foo() -> None:
f"{1 + "hello"}"
""",
[(3, 3)],
)
# Skip checking of format strings in case libcst barfs on the parsing.
self.assertLinesSpanned(
"""
def cannot_parse()
""",
[],
)
def test_map_line_to_start_of_range(self) -> None:
self.assertEqual(
_map_line_to_start_of_range([(3, 3), (3, 5), (9, 13)]),
{3: 3, 4: 3, 5: 3, 9: 9, 10: 9, 11: 9, 12: 9, 13: 9},
)
self.assertEqual(
_map_line_to_start_of_range([]),
{},
)
# Intervals shouldn't overlap, but if they do, we will prefer the earlier one.
self.assertEqual(
_map_line_to_start_of_range([(3, 5), (4, 6)]),
{3: 3, 4: 3, 5: 3, 6: 4},
)
def test_relocate_errors(self) -> None:
errors = {
1: [
{"code": "1", "description": "description"},
{"code": "2", "description": "description"},
],
2: [
{"code": "3", "description": "description"},
{"code": "4", "description": "description"},
],
3: [
{"code": "5", "description": "description"},
{"code": "6", "description": "description"},
],
}
self.assertEqual(
_relocate_errors(
errors,
{},
),
errors,
)
self.assertEqual(
_relocate_errors(
errors,
{2: 1, 3: 1},
),
{
1: [
{"code": "1", "description": "description"},
{"code": "2", "description": "description"},
{"code": "3", "description": "description"},
{"code": "4", "description": "description"},
{"code": "5", "description": "description"},
{"code": "6", "description": "description"},
],
},
)
self.assertEqual(
_relocate_errors(
errors,
{1: 1, 2: 2, 3: 2},
),
{
1: [
{"code": "1", "description": "description"},
{"code": "2", "description": "description"},
],
2: [
{"code": "3", "description": "description"},
{"code": "4", "description": "description"},
{"code": "5", "description": "description"},
{"code": "6", "description": "description"},
],
},
)
|
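# Lightweight AST node classes for generating C source code: each node knows how to
# render itself (via __str__/__repr__) at its current indentation level.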
class Ast:
def __init__(self):
self.nodes = []
def __repr__(self):
return ''.join([str(n) for n in self.nodes])
class Node:
def __init__(self, node_type, **kwargs):
self.node_type = node_type
self.indent = 0
class Comment(Node):
def __init__(self, text, **kwargs):
super().__init__('comment', **kwargs)
self.text = text
def __repr__(self):
indent = ' ' * self.indent
text = self.text.replace('\n', f'\n{indent}')
return f'/*{text}*/'
class Include(Node):
def __init__(self, hname, **kwargs):
super().__init__('include', **kwargs)
if '<' in hname and '>' in hname:
self.hname = hname
else:
self.hname = f'"{hname}"'
def __repr__(self):
return f'#include {self.hname}\n'
class Ifndef(Node):
def __init__(self, symbol, **kwargs):
super().__init__('ifndef', **kwargs)
self.symbol = symbol
def __repr__(self):
return f'#ifndef {self.symbol}\n'
class Define(Node):
def __init__(self, symbol, **kwargs):
super().__init__('define', **kwargs)
self.symbol = symbol
def __repr__(self):
return f'#define {self.symbol}\n'
class Endif(Node):
def __init__(self, **kwargs):
super().__init__('endif', **kwargs)
def __repr__(self):
return f'#endif\n'
class Block(Node):
def __init__(self, **kwargs):
super().__init__('block', **kwargs)
self.nodes = []
def __repr__(self):
return f'{{\n{self.nodes}\n}}'
class If(Node):
def __init__(self, condition=None, **kwargs):
super().__init__('if', **kwargs)
self.condition = condition
self.true_nodes = []
self.false_nodes = []
def add_true(self, node):
self.true_nodes.append(node)
def add_false(self, node):
self.false_nodes.append(node)
def __str__(self):
true_str = ''
for n in self.true_nodes:
n.indent = self.indent + 4
true_str += str(n)
false_str = ''
for n in self.false_nodes:
n.indent = self.indent + 4
false_str += str(n)
indent = ' ' * self.indent
true_expr = f'{indent}if({self.condition})\n{indent}{{\n{true_str}{indent}}}\n'
if self.false_nodes:
else_expr = f'{indent}else\n{indent}{{\n{false_str}{indent}}}\n'
return true_expr + else_expr
else:
return true_expr
class Switch(Node):
def __init__(self, variable, **kwargs):
super().__init__('switch', **kwargs)
self.variable = variable
self.cases = []
self.default = []
def __str__(self):
cases = []
for n in self.cases:
n.indent = self.indent + 4
cases.append(str(n))
cases_str = '\n'.join(cases)
if self.default:
default = []
for d in self.default:
d.indent = self.indent + 8
default.append(str(d))
default_str = ' ' * (self.indent + 4) + 'default:\n' + ''.join(default)
else:
default_str = ''
indent = ' ' * self.indent
return f'{indent}switch({self.variable})\n{indent}{{\n{cases_str}\n{default_str}{indent}}}\n'
def add_case(self, case):
self.cases.append(case)
def add_default(self, node):
self.default.append(node)
class Break(Node):
def __init__(self, **kwargs):
super().__init__('break', **kwargs)
def __str__(self):
indent = ' ' * self.indent
return f'{indent}break;\n'
class Decl(Node):
def __init__(self, type_name, name, value, **kwargs):
super().__init__('decl', **kwargs)
self.type_name = type_name
self.name = name
self.value = value
def __str__(self):
indent = ' ' * self.indent
return f'{indent}{self.type_name} {self.name}={self.value};\n'
class Enum(Node):
def __init__(self, name, items, **kwargs):
super().__init__('enum', **kwargs)
self.name = name
self.items = items
def __str__(self):
indent = ' ' * self.indent
enum = f'{indent}typedef enum {self.name}\n{indent}{{\n'
for name, value in self.items.items():
enum += f'{indent} {name} = {value},\n'
enum += f'{indent}}} {self.name};\n'
return enum
class Assignment(Node):
def __init__(self, name, value, **kwargs):
super().__init__('decl', **kwargs)
self.name = name
self.value = value
def __str__(self):
indent = ' ' * self.indent
return f'{indent}{self.name} = {self.value};\n'
class Blank(Node):
def __init__(self, **kwargs):
        super().__init__('blank', **kwargs)
def __str__(self):
return f'\n'
class Case(Node):
def __init__(self, label, **kwargs):
super().__init__('case', **kwargs)
self.label = label
self.nodes = []
def __str__(self):
nodes_str = ''
for n in self.nodes:
n.indent = self.indent + 4
nodes_str += str(n)
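        # Auto-append a break unless the case already ends in a break or return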
if self.nodes[-1].node_type not in ['break', 'return']:
            nodes_str += f'{" " * (self.indent + 4)}break;\n'
indent = ' ' * self.indent
return f'{indent}case {self.label}:\n{nodes_str}'
def add(self, node):
self.nodes.append(node)
class FuncDeclaration(Node):
def __init__(self, name, ftype='void *', params='void *self, void *e', spec='', **kwargs):
super().__init__('func_decl', **kwargs)
self.name = name
self.ftype = ftype
self.params = params
self.spec = spec
def __str__(self):
indent = ' ' * self.indent
if self.spec:
spec = self.spec + ' '
else:
spec = ''
return f'\n{indent}{spec}{self.ftype} {self.name}({self.params});'
__repr__ = __str__
class Func(Node):
def __init__(self, name, ftype='void *', params='void *self, void *e', spec='', **kwargs):
super().__init__('func', **kwargs)
self.name = name
self.ftype = ftype
self.params = params
self.nodes = []
self.spec = spec
def add(self, node):
self.nodes.append(node)
def declaration(self):
return FuncDeclaration(self.name, self.ftype, self.params, self.spec)
def __str__(self):
nodes_str = ''
for n in self.nodes:
n.indent = self.indent + 4
nodes_str += str(n)
indent = ' ' * self.indent
if self.spec:
spec = self.spec + ' '
else:
spec = ''
return f'\n{indent}{spec}{self.ftype} {self.name}({self.params})\n{indent}{{\n{nodes_str}{indent}}}\n'
__repr__ = __str__
class Call(Node):
def __init__(self, func_name, params, standalone=True, **kwargs):
super().__init__('call', **kwargs)
self.func_name = func_name
self.params = params
self.eol = ';\n' if standalone else ''
def __str__(self):
indent = ' ' * self.indent
return f'{indent}{self.func_name}({self.params}){self.eol}'
class Expr(Node):
def __init__(self, expr, **kwargs):
super().__init__('expr', **kwargs)
self.expr = expr.format(**kwargs)
def __str__(self):
indent = ' ' * self.indent
return f'{indent}{self.expr}\n'
class Return(Node):
def __init__(self, value, **kwargs):
super().__init__('return', **kwargs)
self.value = value
def __str__(self):
indent = ' ' * self.indent
return f'{indent}return {self.value};\n' | class Ast:
def __init__(self):
self.nodes = []
def __repr__(self):
return ''.join([str(n) for n in self.nodes])
class Node:
def __init__(self, node_type, **kwargs):
self.node_type = node_type
self.indent = 0
class Comment(Node):
def __init__(self, text, **kwargs):
super().__init__('comment', **kwargs)
self.text = text
def __repr__(self):
indent = ' ' * self.indent
text = self.text.replace('\n', f'\n{indent}')
return f'/*{text}*/'
class Include(Node):
def __init__(self, hname, **kwargs):
super().__init__('include', **kwargs)
if '<' in hname and '>' in hname:
self.hname = hname
else:
self.hname = f'"{hname}"'
def __repr__(self):
return f'#include {self.hname}\n'
class Ifndef(Node):
def __init__(self, symbol, **kwargs):
super().__init__('ifndef', **kwargs)
self.symbol = symbol
def __repr__(self):
return f'#ifndef {self.symbol}\n'
class Define(Node):
def __init__(self, symbol, **kwargs):
super().__init__('define', **kwargs)
self.symbol = symbol
def __repr__(self):
return f'#define {self.symbol}\n'
class Endif(Node):
def __init__(self, **kwargs):
super().__init__('endif', **kwargs)
def __repr__(self):
return f'#endif\n'
class Block(Node):
def __init__(self, **kwargs):
super().__init__('block', **kwargs)
self.nodes = []
def __repr__(self):
return f'{{\n{self.nodes}\n}}'
class If(Node):
def __init__(self, condition=None, **kwargs):
super().__init__('if', **kwargs)
self.condition = condition
self.true_nodes = []
self.false_nodes = []
def add_true(self, node):
self.true_nodes.append(node)
def add_false(self, node):
self.false_nodes.append(node)
def __str__(self):
true_str = ''
for n in self.true_nodes:
n.indent = self.indent + 4
true_str += str(n)
false_str = ''
for n in self.false_nodes:
n.indent = self.indent + 4
false_str += str(n)
indent = ' ' * self.indent
true_expr = f'{indent}if({self.condition})\n{indent}{{\n{true_str}{indent}}}\n'
if self.false_nodes:
else_expr = f'{indent}else\n{indent}{{\n{false_str}{indent}}}\n'
return true_expr + else_expr
else:
return true_expr
class Switch(Node):
def __init__(self, variable, **kwargs):
super().__init__('switch', **kwargs)
self.variable = variable
self.cases = []
self.default = []
def __str__(self):
cases = []
for n in self.cases:
n.indent = self.indent + 4
cases.append(str(n))
cases_str = '\n'.join(cases)
if self.default:
default = []
for d in self.default:
d.indent = self.indent + 8
default.append(str(d))
default_str = ' ' * (self.indent + 4) + 'default:\n' + ''.join(default)
else:
default_str = ''
indent = ' ' * self.indent
return f'{indent}switch({self.variable})\n{indent}{{\n{cases_str}\n{default_str}{indent}}}\n'
def add_case(self, case):
self.cases.append(case)
def add_default(self, node):
self.default.append(node)
class Break(Node):
def __init__(self, **kwargs):
super().__init__('break', **kwargs)
def __str__(self):
indent = ' ' * self.indent
return f'{indent}break;\n'
class Decl(Node):
def __init__(self, type_name, name, value, **kwargs):
super().__init__('decl', **kwargs)
self.type_name = type_name
self.name = name
self.value = value
def __str__(self):
indent = ' ' * self.indent
return f'{indent}{self.type_name} {self.name}={self.value};\n'
class Enum(Node):
def __init__(self, name, items, **kwargs):
super().__init__('enum', **kwargs)
self.name = name
self.items = items
def __str__(self):
indent = ' ' * self.indent
enum = f'{indent}typedef enum {self.name}\n{indent}{{\n'
for name, value in self.items.items():
enum += f'{indent} {name} = {value},\n'
enum += f'{indent}}} {self.name};\n'
return enum
class Assignment(Node):
def __init__(self, name, value, **kwargs):
super().__init__('decl', **kwargs)
self.name = name
self.value = value
def __str__(self):
indent = ' ' * self.indent
return f'{indent}{self.name} = {self.value};\n'
class Blank(Node):
def __init__(self, **kwargs):
super().__init__('break', **kwargs)
def __str__(self):
return f'\n'
class Case(Node):
def __init__(self, label, **kwargs):
super().__init__('case', **kwargs)
self.label = label
self.nodes = []
def __str__(self):
nodes_str = ''
for n in self.nodes:
n.indent = self.indent + 4
nodes_str += str(n)
if self.nodes[-1].node_type not in ['break', 'return']:
nodes_str += f'{" " * (self.indent + 4)}break;\n'
indent = ' ' * self.indent
return f'{indent}case {self.label}:\n{nodes_str}'
def add(self, node):
self.nodes.append(node)
class FuncDeclaration(Node):
def __init__(self, name, ftype='void *', params='void *self, void *e', spec='', **kwargs):
super().__init__('func_decl', **kwargs)
self.name = name
self.ftype = ftype
self.params = params
self.spec = spec
def __str__(self):
indent = ' ' * self.indent
if self.spec:
spec = self.spec + ' '
else:
spec = ''
return f'\n{indent}{spec}{self.ftype} {self.name}({self.params});'
__repr__ = __str__
class Func(Node):
def __init__(self, name, ftype='void *', params='void *self, void *e', spec='', **kwargs):
super().__init__('func', **kwargs)
self.name = name
self.ftype = ftype
self.params = params
self.nodes = []
self.spec = spec
def add(self, node):
self.nodes.append(node)
def declaration(self):
return FuncDeclaration(self.name, self.ftype, self.params, self.spec)
def __str__(self):
nodes_str = ''
for n in self.nodes:
n.indent = self.indent + 4
nodes_str += str(n)
indent = ' ' * self.indent
if self.spec:
spec = self.spec + ' '
else:
spec = ''
return f'\n{indent}{spec}{self.ftype} {self.name}({self.params})\n{indent}{{\n{nodes_str}{indent}}}\n'
__repr__ = __str__
class Call(Node):
def __init__(self, func_name, params, standalone=True, **kwargs):
super().__init__('call', **kwargs)
self.func_name = func_name
self.params = params
self.eol = ';\n' if standalone else ''
def __str__(self):
indent = ' ' * self.indent
return f'{indent}{self.func_name}({self.params}){self.eol}'
class Expr(Node):
def __init__(self, expr, **kwargs):
super().__init__('expr', **kwargs)
self.expr = expr.format(**kwargs)
def __str__(self):
indent = ' ' * self.indent
return f'{indent}{self.expr}\n'
class Return(Node):
def __init__(self, value, **kwargs):
super().__init__('return', **kwargs)
self.value = value
def __str__(self):
indent = ' ' * self.indent
return f'{indent}return {self.value};\n' |
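# Minimal usage sketch, assuming the Node base class defined above simply
# records the node_type string and an `indent` attribute defaulting to 0
# (the subclasses here rely on both). The emitted C code is illustrative only.
if __name__ == '__main__':
    handler = Func('handle_event', ftype='void *', params='void *self, int event', spec='static')
    switch = Switch('event')
    start_case = Case('EVENT_START')
    start_case.add(Call('start', 'self'))   # renders as `start(self);` and gets an automatic break
    switch.add_case(start_case)
    switch.add_default(Return('NULL'))
    handler.add(switch)
    handler.add(Return('self'))
    print(handler.declaration())            # forward declaration
    print(handler)                          # full function definition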
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cartesian:
#
# Cartesian plots
# ===============
#
# This section documents features used for modifying Cartesian *x* and *y*
# axis settings, including axis scales, tick locations, and tick label
# formatting. It also documents a handy "dual axes" feature.
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_locators:
#
# Tick locations
# --------------
#
# Matplotlib `tick locators
# <https://matplotlib.org/stable/gallery/ticks_and_spines/tick-locators.html>`__
# select sensible tick locations based on the axis data limits. In ProPlot, you can
# change the tick locator using the `~proplot.axes.CartesianAxes.format` keyword
# arguments `xlocator`, `ylocator`, `xminorlocator`, and `yminorlocator` (or their
# aliases, `xticks`, `yticks`, `xminorticks`, and `yminorticks`). This is powered by
# the `~proplot.constructor.Locator` :ref:`constructor function <why_constructor>`.
#
# These keyword arguments can be used to apply built-in matplotlib
# `~matplotlib.ticker.Locator`\ s by their "registered" names (e.g.
# ``xlocator='log'``), to draw ticks every ``N`` data values with
# `~matplotlib.ticker.MultipleLocator` (e.g., ``xlocator=2``), or to tick the
# specific locations in a list using `~matplotlib.ticker.FixedLocator` (just
# like `~matplotlib.axes.Axes.set_xticks` and
# `~matplotlib.axes.Axes.set_yticks`). See
# `~proplot.axes.CartesianAxes.format` and `~proplot.constructor.Locator` for
# details.
#
# To generate lists of tick locations, we recommend using ProPlot's
# `~proplot.utils.arange` function -- it’s basically an *endpoint-inclusive*
# version of `numpy.arange`, which is usually what you'll want in this
# context.
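# %%
# A quick sketch of the endpoint-inclusive behavior described above, and of
# passing the resulting array as an explicit tick list.
import proplot as pplt
import numpy as np
print(np.arange(0, 100, 25))    # stops before 100
print(pplt.arange(0, 100, 25))  # includes 100
fig, ax = pplt.subplots(refwidth=5, refaspect=(8, 1))
ax.format(xlim=(0, 100), xlocator=pplt.arange(0, 100, 25), title='Ticks from pplt.arange')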
# %%
import proplot as pplt
import numpy as np
state = np.random.RandomState(51423)
pplt.rc.update(
facecolor=pplt.scale_luminance('powderblue', 1.15),
linewidth=1, fontsize=10,
color='dark blue', suptitlecolor='dark blue',
titleloc='upper center', titlecolor='dark blue', titleborder=False,
)
fig, axs = pplt.subplots(nrows=8, refwidth=5, refaspect=(8, 1), share=0)
axs.format(suptitle='Tick locators demo')
# Step size for tick locations
axs[0].format(
xlim=(0, 200), xminorlocator=10, xlocator=30,
title='MultipleLocator'
)
# Specific list of locations
axs[1].format(
xlim=(0, 10), xminorlocator=0.1,
xlocator=[0, 0.3, 0.8, 1.6, 4.4, 8, 8.8, 10],
title='FixedLocator',
)
# Ticks at numpy.linspace(xmin, xmax, N)
axs[2].format(
xlim=(0, 10), xlocator=('linear', 21),
title='LinearLocator',
)
# Logarithmic locator, used automatically for log scale plots
axs[3].format(
xlim=(1, 100), xlocator='log', xminorlocator='logminor',
title='LogLocator',
)
# Maximum number of ticks, but at "nice" locations
axs[4].format(
xlim=(1, 7), xlocator=('maxn', 11),
title='MaxNLocator',
)
# Index locator, only draws ticks where data is plotted
axs[5].plot(np.arange(10) - 5, state.rand(10), alpha=0)
axs[5].format(
xlim=(0, 6), ylim=(0, 1), xlocator='index',
xformatter=[r'$\alpha$', r'$\beta$', r'$\gamma$', r'$\delta$', r'$\epsilon$'],
title='IndexLocator',
)
pplt.rc.reset()
# Hide all ticks
axs[6].format(
xlim=(-10, 10), xlocator='null',
title='NullLocator',
)
# Tick locations that cleanly divide 60 minute/60 second intervals
axs[7].format(
xlim=(0, 2), xlocator='dms', xformatter='dms',
title='Degree-Minute-Second Locator (requires cartopy)',
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_formatters:
#
# Tick formatting
# ---------------
#
# Matplotlib `tick formatters
# <https://matplotlib.org/stable/gallery/ticks_and_spines/tick-formatters.html>`__
# convert floating point numbers to nicely-formatted tick labels. In ProPlot, you can
# change the tick formatter using the `~proplot.axes.CartesianAxes.format` keyword
# arguments `xformatter` and `yformatter` (or their aliases, `xticklabels` and
# `yticklabels`). This is powered by the `~proplot.constructor.Formatter`
# :ref:`constructor function <why_constructor>`.
#
# These keyword arguments can be used to apply built-in matplotlib
# `~matplotlib.ticker.Formatter`\ s by their "registered" names (e.g.
# ``xformatter='log'``), to apply a ``%``-style format directive with
# `~matplotlib.ticker.FormatStrFormatter` (e.g., ``xformatter='%.0f'``), or
# to apply custom tick labels with `~matplotlib.ticker.FixedFormatter` (just
# like `~matplotlib.axes.Axes.set_xticklabels` and
# `~matplotlib.axes.Axes.set_yticklabels`). They can also be used
# to apply one of ProPlot's new tick formatters -- for example,
# ``xformatter='deglat'`` to label ticks as the geographic latitude,
# ``xformatter='pi'`` to label ticks as fractions of :math:`\pi`,
# or ``xformatter='sci'`` to label ticks with scientific notation.
# See `~proplot.axes.CartesianAxes.format` and
# `~proplot.constructor.Formatter` for details.
#
# ProPlot also changes the default tick formatter to
# `~proplot.ticker.AutoFormatter`. This class trims trailing zeros by
# default, can be used to *omit tick labels* outside of some data range, and
# can add arbitrary prefixes and suffixes to each label. See
# `~proplot.ticker.AutoFormatter` for details. To disable the trailing
# zero-trimming feature, set :rcraw:`formatter.zerotrim` to ``False``.
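# %%
# A minimal sketch: build the locator and formatter objects explicitly with the
# `~proplot.constructor.Locator` and `~proplot.constructor.Formatter` constructor
# functions (assumed to be available as ``pplt.Locator`` and ``pplt.Formatter``)
# instead of passing shorthands to ``format``.
import proplot as pplt
locator = pplt.Locator('maxn', 5)        # MaxNLocator allowing at most 5 ticks
formatter = pplt.Formatter('sigfig', 3)  # labels rounded to 3 significant digits
fig, ax = pplt.subplots(refwidth=5, refaspect=(8, 1))
ax.format(xlim=(0, 0.12345), xlocator=locator, xformatter=formatter, title='Constructor functions')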
# %%
import proplot as pplt
pplt.rc.linewidth = 2
pplt.rc.fontsize = 11
locator = [0, 0.25, 0.5, 0.75, 1]
fig, axs = pplt.subplots(ncols=2, nrows=2, refwidth=1.5, share=0)
# Formatter comparison
axs[0].format(
xformatter='scalar', yformatter='scalar', title='Matplotlib formatter'
)
axs[1].format(yticklabelloc='both', title='ProPlot formatter')
axs[:2].format(xlocator=locator, ylocator=locator)
# Limiting the tick range
axs[2].format(
title='Omitting tick labels', ticklen=5, xlim=(0, 5), ylim=(0, 5),
xtickrange=(0, 2), ytickrange=(0, 2), xlocator=1, ylocator=1
)
# Setting the wrap range
axs[3].format(
title='Wrapping the tick range', ticklen=5, xlim=(0, 7), ylim=(0, 6),
xwraprange=(0, 5), ywraprange=(0, 3), xlocator=1, ylocator=1
)
axs.format(
ytickloc='both', yticklabelloc='both',
titlepad='0.5em', suptitle='Default formatters demo'
)
pplt.rc.reset()
# %%
import proplot as pplt
import numpy as np
pplt.rc.update(
linewidth=1.2, fontsize=10, facecolor='gray0', figurefacecolor='gray2',
color='gray8', gridcolor='gray8', titlecolor='gray8', suptitlecolor='gray8',
titleloc='upper center', titleborder=False,
)
fig, axs = pplt.subplots(nrows=9, refwidth=5, refaspect=(8, 1), share=0)
# Scientific notation
axs[0].format(xlim=(0, 1e20), xformatter='sci', title='SciFormatter')
# N significant figures for ticks at specific values
axs[1].format(
xlim=(0, 20), xlocator=(0.0034, 3.233, 9.2, 15.2344, 7.2343, 19.58),
xformatter=('sigfig', 2), title='SigFigFormatter', # 2 significant digits
)
# Fraction formatters
axs[2].format(
xlim=(0, 3 * np.pi), xlocator=np.pi / 4, xformatter='pi', title='FracFormatter',
)
axs[3].format(
xlim=(0, 2 * np.e), xlocator=np.e / 2, xticklabels='e', title='FracFormatter',
)
# Geographic formatters
axs[4].format(
xlim=(-90, 90), xlocator=30, xformatter='deglat', title='Latitude Formatter'
)
axs[5].format(
xlim=(0, 360), xlocator=60, xformatter='deglon', title='Longitude Formatter'
)
# User input labels
axs[6].format(
xlim=(-1.01, 1), xlocator=0.5,
xticklabels=['a', 'b', 'c', 'd', 'e'], title='FixedFormatter',
)
# Custom style labels
axs[7].format(
xlim=(0, 0.001), xlocator=0.0001, xformatter='%.E', title='FormatStrFormatter',
)
axs[8].format(
xlim=(0, 100), xtickminor=False, xlocator=20,
xformatter='{x:.1f}', title='StrMethodFormatter',
)
axs.format(ylocator='null', suptitle='Tick formatters demo')
pplt.rc.reset()
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_datetime:
#
# Datetime ticks
# --------------
#
# ProPlot can also be used to customize the tick locations and tick label
# format of "datetime" axes.
# To draw ticks on some particular time unit, just use a unit string (e.g.,
# ``xlocator='month'``). To draw ticks every ``N`` time units, just use a (unit, N)
# tuple (e.g., ``xlocator=('day', 5)``). For `% style formatting
# <https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`__
# of datetime tick labels, just use a string containing ``'%'`` (e.g.
# ``xformatter='%Y-%m-%d'``).
# See `~proplot.axes.CartesianAxes.format`, `~proplot.constructor.Locator`,
# and `~proplot.constructor.Formatter` for details.
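# %%
# A minimal sketch of the (unit, N) locator tuple and the '%'-style date
# formatter described above.
import proplot as pplt
import numpy as np
fig, ax = pplt.subplots(refwidth=6, refaspect=(8, 1))
ax.format(
    xlim=(np.datetime64('2021-01-01'), np.datetime64('2021-02-01')),
    xlocator=('day', 5), xformatter='%Y-%m-%d', xrotation=0,
    title='Ticks every 5 days',
)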
# %%
import proplot as pplt
import numpy as np
pplt.rc.update(
linewidth=1.2, fontsize=10, ticklenratio=0.7,
figurefacecolor='w', facecolor='pastel blue',
titleloc='upper center', titleborder=False,
)
fig, axs = pplt.subplots(nrows=5, refwidth=6, refaspect=(8, 1), share=0)
axs[:4].format(xrotation=0) # no rotation for these examples
# Default date locator
# This is enabled if you plot datetime data or set datetime limits
axs[0].format(
xlim=(np.datetime64('2000-01-01'), np.datetime64('2001-01-02')),
title='Auto date locator and formatter'
)
# Concise date formatter introduced in matplotlib 3.1
axs[1].format(
xlim=(np.datetime64('2000-01-01'), np.datetime64('2001-01-01')),
xformatter='concise', title='Concise date formatter',
)
# Minor ticks every year, major every 10 years
axs[2].format(
xlim=(np.datetime64('2000-01-01'), np.datetime64('2050-01-01')),
xlocator=('year', 10), xformatter='\'%y', title='Ticks every N units',
)
# Major ticks every 2 hours, minor ticks every 10 minutes
axs[3].format(
xlim=(np.datetime64('2000-01-01T00:00:00'), np.datetime64('2000-01-01T12:00:00')),
xlocator=('hour', range(0, 24, 2)), xminorlocator=('minute', range(0, 60, 10)),
xformatter='T%H:%M:%S', title='Ticks at specific intervals',
)
# Month and year labels, with default tick label rotation
axs[4].format(
xlim=(np.datetime64('2000-01-01'), np.datetime64('2008-01-01')),
xlocator='year', xminorlocator='month', # minor ticks every month
xformatter='%b %Y', title='Ticks with default rotation',
)
axs.format(
ylocator='null', suptitle='Datetime locators and formatters demo'
)
pplt.rc.reset()
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_scales:
#
# Axis scale changes
# ------------------
#
# "Axis scales" like ``'linear'`` and ``'log'`` control the *x* and *y* axis
# coordinate system. To change the axis scale, simply pass e.g.
# ``xscale='log'`` or ``yscale='log'`` to `~proplot.axes.Axes.format`. This
# is powered by the `~proplot.constructor.Scale`
# :ref:`constructor function <why_constructor>`.
#
# ProPlot also makes several changes to the axis scale API:
#
# * The `~proplot.ticker.AutoFormatter` formatter is now used for all axis scales
# by default, including ``'log'`` and ``'symlog'``. Matplotlib's behavior can
# be restored by passing e.g. ``xformatter='log'`` or ``yformatter='log'`` to
# `~proplot.axes.CartesianAxes.format`.
# * To make its behavior consistent with `~proplot.constructor.Locator` and
# `~proplot.constructor.Formatter`, the `~proplot.constructor.Scale`
# constructor function returns instances of `~matplotlib.scale.ScaleBase`,
# and `~matplotlib.axes.Axes.set_xscale` and
# `~matplotlib.axes.Axes.set_yscale` now accept these class instances in
# addition to "registered" names like ``'log'``.
# * While matplotlib axis scales must be instantiated with an
# `~matplotlib.axis.Axis` instance (for backwards compatibility reasons),
# ProPlot axis scales can be instantiated without the axis instance
# (e.g., ``pplt.LogScale()`` instead of ``pplt.LogScale(ax.xaxis)``).
# * The default `subs` for the ``'symlog'`` axis scale is now ``np.arange(1, 10)``,
# and the default `linthresh` is now ``1``. Also the ``'log'`` and ``'symlog'``
# axis scales now accept the keywords `base`, `linthresh`, `linscale`, and
# `subs` rather than keywords with trailing ``x`` or ``y``.
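# %%
# A minimal sketch: instantiate a scale class without an Axis instance and use
# the new ``base`` keyword, assuming ``format`` accepts scale instances just
# like `~matplotlib.axes.Axes.set_yscale` does.
import proplot as pplt
import numpy as np
fig, ax = pplt.subplots(refwidth=2)
ax.plot(np.linspace(0, 1, 100), 2 ** np.linspace(0, 10, 100), lw=3)
ax.format(yscale=pplt.LogScale(base=2), ylabel='log base 2 scale')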
# %%
import proplot as pplt
import numpy as np
N = 200
lw = 3
pplt.rc.update({
'linewidth': 1, 'ticklabelweight': 'bold', 'axeslabelweight': 'bold'
})
fig, axs = pplt.subplots(ncols=2, nrows=2, refwidth=1.8, share=0)
axs.format(suptitle='Axis scales demo', ytickminor=True)
# Linear and log scales
axs[0].format(yscale='linear', ylabel='linear scale')
axs[1].format(ylim=(1e-3, 1e3), yscale='log', ylabel='log scale')
axs[:2].plot(np.linspace(0, 1, N), np.linspace(0, 1000, N), lw=lw)
# Symlog scale
ax = axs[2]
ax.format(yscale='symlog', ylabel='symlog scale')
ax.plot(np.linspace(0, 1, N), np.linspace(-1000, 1000, N), lw=lw)
# Logit scale
ax = axs[3]
ax.format(yscale='logit', ylabel='logit scale')
ax.plot(np.linspace(0, 1, N), np.linspace(0.01, 0.99, N), lw=lw)
pplt.rc.reset()
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_scales_new:
#
# Special axis scales
# -------------------
#
# ProPlot introduces several new axis scales. The ``'cutoff'`` scale (see
# `~proplot.scale.CutoffScale`) is useful when the statistical distribution
# of your data is very unusual. The ``'sine'`` scale (see
# `~proplot.scale.SineLatitudeScale`) scales the axis with a sine function,
# resulting in an *area weighted* spherical latitude coordinate, and the
# ``'mercator'`` scale (see `~proplot.scale.MercatorLatitudeScale`) scales
# the axis with the Mercator projection latitude coordinate. The
# ``'inverse'`` scale (see `~proplot.scale.InverseScale`) can be useful when
# working with spectral data, especially with
# :ref:`"dual" unit axes <ug_dual>`.
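# %%
# A minimal sketch of the 'sine' scale: plotting the sine of latitude against
# latitude gives a straight line on a sine-scaled (area-weighted) axis.
import proplot as pplt
import numpy as np
fig, ax = pplt.subplots(refwidth=2)
lat = np.linspace(-85, 85, 100)
ax.plot(np.sin(np.deg2rad(lat)), lat, lw=3)
ax.format(
    yscale='sine', ylim=(-85, 85), ylocator=20, yformatter='deg',
    xlabel='sine of latitude', ylabel='latitude',
)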
# %%
import proplot as pplt
import numpy as np
fig, axs = pplt.subplots(nrows=4, refaspect=(5, 1), figwidth=6, sharex=False)
ax = axs[0]
# Sample data
x = np.linspace(0, 4 * np.pi, 100)
dy = np.linspace(-1, 1, 5)
y1 = np.sin(x)
y2 = np.cos(x)
state = np.random.RandomState(51423)
data = state.rand(len(dy) - 1, len(x) - 1)
# Loop through various cutoff scale options
titles = ('Zoom out of left', 'Zoom into left', 'Discrete jump', 'Fast jump')
args = (
(np.pi, 3), # speed up
(3 * np.pi, 1 / 3), # slow down
(np.pi, np.inf, 3 * np.pi), # discrete jump
(np.pi, 5, 3 * np.pi) # fast jump
)
locators = (
np.pi / 3,
np.pi / 3,
np.pi * np.append(np.linspace(0, 1, 4), np.linspace(3, 4, 4)),
np.pi * np.append(np.linspace(0, 1, 4), np.linspace(3, 4, 4)),
)
for ax, iargs, title, locator in zip(axs, args, titles, locators):
ax.pcolormesh(x, dy, data, cmap='grays', cmap_kw={'right': 0.8})
for y, color in zip((y1, y2), ('coral', 'sky blue')):
ax.plot(x, y, lw=4, color=color)
ax.format(
xscale=('cutoff', *iargs), title=title,
xlim=(0, 4 * np.pi), ylabel='wave amplitude',
xformatter='pi', xlocator=locator,
xtickminor=False, xgrid=True, ygrid=False, suptitle='Cutoff axis scales demo'
)
# %%
import proplot as pplt
import numpy as np
# Create figure
pplt.rc.reset()
state = np.random.RandomState(51423)
colors = ('coral', 'sky blue')
fig, axs = pplt.subplots(nrows=2, ncols=3, refwidth=1.7, share=0, order='F')
axs.format(
toplabels=('Geographic scales', 'Exponential scales', 'Power scales'),
)
# Geographic scales
n = 20
x = np.linspace(-180, 180, n)
y1 = np.linspace(-85, 85, n)
y2 = np.linspace(-85, 85, n)
data = state.rand(len(x) - 1, len(y2) - 1)
for ax, scale, color in zip(axs[:2], ('sine', 'mercator'), colors):
ax.plot(x, y1, '-', color=color, lw=4)
ax.pcolormesh(x, y2, data, cmap='grays', cmap_kw={'right': 0.8})
ax.format(
title=scale.title() + ' y-axis', yscale=scale, ytickloc='left',
yformatter='deg', grid=False, ylocator=20,
xscale='linear', xlim=None, ylim=(-85, 85)
)
# Exp scales
x = np.linspace(0, 1, 50)
y = 10 * x
data = state.rand(len(y) - 1, len(x) - 1)
for ax, a, c, color in zip(axs[2:4], (np.e, 2), (0.5, 2), colors):
ax.pcolormesh(x, y, data, cmap='grays', cmap_kw={'right': 0.8})
ax.plot(x, y, lw=4, color=color)
ax.format(
ylim=(0.1, 10), yscale=('exp', a, c),
title=f"${(a, "e")[a == np.e]}^{{{(c, "")[c == 1]}x}}$"
)
# Power scales
for ax, power, color in zip(axs[4:], (2, 1 / 4), colors):
ax.pcolormesh(x, y, data, cmap='grays', cmap_kw={'right': 0.8})
ax.plot(x, y, lw=4, color=color)
ax.format(
ylim=(0.1, 10), yscale=('power', power),
title=f'$x^{{{power}}}$'
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_dual:
#
# Dual unit scales
# ----------------
#
# The `~proplot.axes.CartesianAxes.dualx` and
# `~proplot.axes.CartesianAxes.dualy` methods can be used to draw duplicate
# *x* and *y* axes meant to represent *alternate units* in the same
# coordinate range as the "parent" axis. This feature is powered by the
# `~proplot.scale.FuncScale` class.
#
# `~proplot.axes.CartesianAxes.dualx` and `~proplot.axes.CartesianAxes.dualy`
# accept either (1) a single linear forward function, (2) a pair of arbitrary
# forward and inverse functions, or (3) a scale name or scale class instance.
# In the latter case, the scale's transforms are used for the forward and
# inverse functions, and the scale's default locators and formatters are used
# for the default `~proplot.scale.FuncScale` locators and formatters.
#
# In the below examples, we generate dual axes with each of these three methods. Note
# that the "parent" axis scale is now arbitrary -- in the first example shown below,
# we create a `~proplot.axes.CartesianAxes.dualx` axis for an axis scaled by the
# `symlog scale <https://matplotlib.org/stable/gallery/scales/symlog_demo.html>`__.
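# %%
# A minimal sketch of option (2) above: passing a pair of forward and inverse
# functions to ``dualx`` (the forward function maps parent to child coordinates).
import proplot as pplt
import numpy as np
fig, ax = pplt.subplots(refwidth=4, refaspect=(3, 1))
ax.format(xlim=(1, 10), xlabel='x')
ax.dualx((np.square, np.sqrt), label='x squared')  # forward, then inverse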
# %%
import proplot as pplt
pplt.rc.update({'grid.alpha': 0.4, 'linewidth': 1, 'grid.linewidth': 1})
c1 = pplt.scale_luminance('cerulean', 0.5)
c2 = pplt.scale_luminance('red', 0.5)
fig, axs = pplt.subplots(
[[1, 1, 2, 2], [0, 3, 3, 0]],
share=0, refaspect=2.2, refwidth=3
)
axs.format(
suptitle='Duplicate axes with custom transformations',
xcolor=c1, gridcolor=c1,
ylocator=[], yformatter=[]
)
# Meters and kilometers
ax = axs[0]
ax.format(xlim=(0, 5000), xlabel='meters')
ax.dualx(
lambda x: x * 1e-3,
label='kilometers', grid=True, color=c2, gridcolor=c2
)
# Kelvin and Celsius
ax = axs[1]
ax.format(xlim=(200, 300), xlabel='temperature (K)')
ax.dualx(
lambda x: x - 273.15,
label='temperature (\N{DEGREE SIGN}C)', grid=True, color=c2, gridcolor=c2
)
# With symlog parent
ax = axs[2]
ax.format(xlim=(-100, 100), xscale='symlog', xlabel='MegaJoules')
ax.dualx(
lambda x: x * 1e6,
label='Joules', formatter='log', grid=True, color=c2, gridcolor=c2
)
pplt.rc.reset()
# %%
import proplot as pplt
pplt.rc.update({'grid.alpha': 0.4, 'linewidth': 1, 'grid.linewidth': 1})
c1 = pplt.scale_luminance('cerulean', 0.5)
c2 = pplt.scale_luminance('red', 0.5)
fig, axs = pplt.subplots(ncols=2, share=0, refaspect=0.4, refwidth=1.8)
axs.format(suptitle='Duplicate axes with special transformations')
# Pressure as the linear scale, height on opposite axis (scale height 7km)
ax = axs[0]
ax.format(
xformatter='null', ylabel='pressure (hPa)',
ylim=(1000, 10), xlocator=[], ycolor=c1, gridcolor=c1
)
ax.dualy(
'height', label='height (km)', ticks=2.5, color=c2, gridcolor=c2, grid=True
)
# Height as the linear scale, pressure on opposite axis (scale height 7km)
ax = axs[1] # span
ax.format(
xformatter='null', ylabel='height (km)', ylim=(0, 20), xlocator='null',
grid=True, gridcolor=c2, ycolor=c2
)
ax.dualy(
'pressure', label='pressure (hPa)', locator=100, color=c1, gridcolor=c1, grid=True,
)
pplt.rc.reset()
# %%
import proplot as pplt
import numpy as np
pplt.rc.margin = 0
c1 = pplt.scale_luminance('cerulean', 0.5)
c2 = pplt.scale_luminance('red', 0.5)
fig, ax = pplt.subplots(refaspect=(3, 1), figwidth=6)
# Sample data
cutoff = 1 / 5
x = np.linspace(0.01, 0.5, 1000) # in wavenumber days
response = (np.tanh(-((x - cutoff) / 0.03)) + 1) / 2 # response func
ax.axvline(cutoff, lw=2, ls='-', color=c2)
ax.fill_between([cutoff - 0.03, cutoff + 0.03], 0, 1, color=c2, alpha=0.3)
ax.plot(x, response, color=c1, lw=2)
# Add inverse scale to top
ax.format(
xlabel='wavenumber (days$^{-1}$)', ylabel='response', grid=False,
title='Imaginary response function',
suptitle='Duplicate axes with wavenumber and period',
)
ax = ax.dualx(
'inverse', locator='log', locator_kw={'subs': (1, 2, 5)}, label='period (days)'
)
pplt.rc.reset()
| # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cartesian:
#
# Cartesian plots
# ===============
#
# This section documents features used for modifying Cartesian *x* and *y*
# axis settings, including axis scales, tick locations, and tick label
# formatting. It also documents a handy "dual axes" feature.
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_locators:
#
# Tick locations
# --------------
#
# Matplotlib `tick locators
# <https://matplotlib.org/stable/gallery/ticks_and_spines/tick-locators.html>`__
# select sensible tick locations based on the axis data limits. In ProPlot, you can
# change the tick locator using the `~proplot.axes.CartesianAxes.format` keyword
# arguments `xlocator`, `ylocator`, `xminorlocator`, and `yminorlocator` (or their
# aliases, `xticks`, `yticks`, `xminorticks`, and `yminorticks`). This is powered by
# the `~proplot.constructor.Locator` :ref:`constructor function <why_constructor>`.
#
# These keyword arguments can be used to apply built-in matplotlib
# `~matplotlib.ticker.Locator`\ s by their "registered" names (e.g.
# ``xlocator='log'``), to draw ticks every ``N`` data values with
# `~matplotlib.ticker.MultipleLocator` (e.g., ``xlocator=2``), or to tick the
# specific locations in a list using `~matplotlib.ticker.FixedLocator` (just
# like `~matplotlib.axes.Axes.set_xticks` and
# `~matplotlib.axes.Axes.set_yticks`). See
# `~proplot.axes.CartesianAxes.format` and `~proplot.constructor.Locator` for
# details.
#
# To generate lists of tick locations, we recommend using ProPlot's
# `~proplot.utils.arange` function -- it’s basically an *endpoint-inclusive*
# version of `numpy.arange`, which is usually what you'll want in this
# context.
# %%
import proplot as pplt
import numpy as np
state = np.random.RandomState(51423)
pplt.rc.update(
facecolor=pplt.scale_luminance('powderblue', 1.15),
linewidth=1, fontsize=10,
color='dark blue', suptitlecolor='dark blue',
titleloc='upper center', titlecolor='dark blue', titleborder=False,
)
fig, axs = pplt.subplots(nrows=8, refwidth=5, refaspect=(8, 1), share=0)
axs.format(suptitle='Tick locators demo')
# Step size for tick locations
axs[0].format(
xlim=(0, 200), xminorlocator=10, xlocator=30,
title='MultipleLocator'
)
# Specific list of locations
axs[1].format(
xlim=(0, 10), xminorlocator=0.1,
xlocator=[0, 0.3, 0.8, 1.6, 4.4, 8, 8.8, 10],
title='FixedLocator',
)
# Ticks at numpy.linspace(xmin, xmax, N)
axs[2].format(
xlim=(0, 10), xlocator=('linear', 21),
title='LinearLocator',
)
# Logarithmic locator, used automatically for log scale plots
axs[3].format(
xlim=(1, 100), xlocator='log', xminorlocator='logminor',
title='LogLocator',
)
# Maximum number of ticks, but at "nice" locations
axs[4].format(
xlim=(1, 7), xlocator=('maxn', 11),
title='MaxNLocator',
)
# Index locator, only draws ticks where data is plotted
axs[5].plot(np.arange(10) - 5, state.rand(10), alpha=0)
axs[5].format(
xlim=(0, 6), ylim=(0, 1), xlocator='index',
xformatter=[r'$\alpha$', r'$\beta$', r'$\gamma$', r'$\delta$', r'$\epsilon$'],
title='IndexLocator',
)
pplt.rc.reset()
# Hide all ticks
axs[6].format(
xlim=(-10, 10), xlocator='null',
title='NullLocator',
)
# Tick locations that cleanly divide 60 minute/60 second intervals
axs[7].format(
xlim=(0, 2), xlocator='dms', xformatter='dms',
title='Degree-Minute-Second Locator (requires cartopy)',
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_formatters:
#
# Tick formatting
# ---------------
#
# Matplotlib `tick formatters
# <https://matplotlib.org/stable/gallery/ticks_and_spines/tick-formatters.html>`__
# convert floating point numbers to nicely-formatted tick labels. In ProPlot, you can
# change the tick formatter using the `~proplot.axes.CartesianAxes.format` keyword
# arguments `xformatter` and `yformatter` (or their aliases, `xticklabels` and
# `yticklabels`). This is powered by the `~proplot.constructor.Formatter`
# :ref:`constructor function <why_constructor>`.
#
# These keyword arguments can be used to apply built-in matplotlib
# `~matplotlib.ticker.Formatter`\ s by their "registered" names (e.g.
# ``xformatter='log'``), to apply a ``%``-style format directive with
# `~matplotlib.ticker.FormatStrFormatter` (e.g., ``xformatter='%.0f'``), or
# to apply custom tick labels with `~matplotlib.ticker.FixedFormatter` (just
# like `~matplotlib.axes.Axes.set_xticklabels` and
# `~matplotlib.axes.Axes.set_yticklabels`). They can also be used
# to apply one of ProPlot's new tick formatters -- for example,
# ``xformatter='deglat'`` to label ticks as the geographic latitude,
# ``xformatter='pi'`` to label ticks as fractions of :math:`\pi`,
# or ``xformatter='sci'`` to label ticks with scientific notation.
# See `~proplot.axes.CartesianAxes.format` and
# `~proplot.constructor.Formatter` for details.
#
# ProPlot also changes the default tick formatter to
# `~proplot.ticker.AutoFormatter`. This class trims trailing zeros by
# default, can be used to *omit tick labels* outside of some data range, and
# can add arbitrary prefixes and suffixes to each label. See
# `~proplot.ticker.AutoFormatter` for details. To disable the trailing
# zero-trimming feature, set :rcraw:`formatter.zerotrim` to ``False``.
# %%
import proplot as pplt
pplt.rc.linewidth = 2
pplt.rc.fontsize = 11
locator = [0, 0.25, 0.5, 0.75, 1]
fig, axs = pplt.subplots(ncols=2, nrows=2, refwidth=1.5, share=0)
# Formatter comparison
axs[0].format(
xformatter='scalar', yformatter='scalar', title='Matplotlib formatter'
)
axs[1].format(yticklabelloc='both', title='ProPlot formatter')
axs[:2].format(xlocator=locator, ylocator=locator)
# Limiting the tick range
axs[2].format(
title='Omitting tick labels', ticklen=5, xlim=(0, 5), ylim=(0, 5),
xtickrange=(0, 2), ytickrange=(0, 2), xlocator=1, ylocator=1
)
# Setting the wrap range
axs[3].format(
title='Wrapping the tick range', ticklen=5, xlim=(0, 7), ylim=(0, 6),
xwraprange=(0, 5), ywraprange=(0, 3), xlocator=1, ylocator=1
)
axs.format(
ytickloc='both', yticklabelloc='both',
titlepad='0.5em', suptitle='Default formatters demo'
)
pplt.rc.reset()
# %%
import proplot as pplt
import numpy as np
pplt.rc.update(
linewidth=1.2, fontsize=10, facecolor='gray0', figurefacecolor='gray2',
color='gray8', gridcolor='gray8', titlecolor='gray8', suptitlecolor='gray8',
titleloc='upper center', titleborder=False,
)
fig, axs = pplt.subplots(nrows=9, refwidth=5, refaspect=(8, 1), share=0)
# Scientific notation
axs[0].format(xlim=(0, 1e20), xformatter='sci', title='SciFormatter')
# N significant figures for ticks at specific values
axs[1].format(
xlim=(0, 20), xlocator=(0.0034, 3.233, 9.2, 15.2344, 7.2343, 19.58),
xformatter=('sigfig', 2), title='SigFigFormatter', # 2 significant digits
)
# Fraction formatters
axs[2].format(
xlim=(0, 3 * np.pi), xlocator=np.pi / 4, xformatter='pi', title='FracFormatter',
)
axs[3].format(
xlim=(0, 2 * np.e), xlocator=np.e / 2, xticklabels='e', title='FracFormatter',
)
# Geographic formatters
axs[4].format(
xlim=(-90, 90), xlocator=30, xformatter='deglat', title='Latitude Formatter'
)
axs[5].format(
xlim=(0, 360), xlocator=60, xformatter='deglon', title='Longitude Formatter'
)
# User input labels
axs[6].format(
xlim=(-1.01, 1), xlocator=0.5,
xticklabels=['a', 'b', 'c', 'd', 'e'], title='FixedFormatter',
)
# Custom style labels
axs[7].format(
xlim=(0, 0.001), xlocator=0.0001, xformatter='%.E', title='FormatStrFormatter',
)
axs[8].format(
xlim=(0, 100), xtickminor=False, xlocator=20,
xformatter='{x:.1f}', title='StrMethodFormatter',
)
axs.format(ylocator='null', suptitle='Tick formatters demo')
pplt.rc.reset()
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_datetime:
#
# Datetime ticks
# --------------
#
# ProPlot can also be used to customize the tick locations and tick label
# format of "datetime" axes.
# To draw ticks on some particular time unit, just use a unit string (e.g.,
# ``xlocator='month'``). To draw ticks every ``N`` time units, just use a (unit, N)
# tuple (e.g., ``xlocator=('day', 5)``). For `% style formatting
# <https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`__
# of datetime tick labels, just use a string containing ``'%'`` (e.g.
# ``xformatter='%Y-%m-%d'``).
# See `~proplot.axes.CartesianAxes.format`, `~proplot.constructor.Locator`,
# and `~proplot.constructor.Formatter` for details.
# %%
import proplot as pplt
import numpy as np
pplt.rc.update(
linewidth=1.2, fontsize=10, ticklenratio=0.7,
figurefacecolor='w', facecolor='pastel blue',
titleloc='upper center', titleborder=False,
)
fig, axs = pplt.subplots(nrows=5, refwidth=6, refaspect=(8, 1), share=0)
axs[:4].format(xrotation=0) # no rotation for these examples
# Default date locator
# This is enabled if you plot datetime data or set datetime limits
axs[0].format(
xlim=(np.datetime64('2000-01-01'), np.datetime64('2001-01-02')),
title='Auto date locator and formatter'
)
# Concise date formatter introduced in matplotlib 3.1
axs[1].format(
xlim=(np.datetime64('2000-01-01'), np.datetime64('2001-01-01')),
xformatter='concise', title='Concise date formatter',
)
# Minor ticks every year, major every 10 years
axs[2].format(
xlim=(np.datetime64('2000-01-01'), np.datetime64('2050-01-01')),
xlocator=('year', 10), xformatter='\'%y', title='Ticks every N units',
)
# Major ticks every 2 hours, minor ticks every 10 minutes
axs[3].format(
xlim=(np.datetime64('2000-01-01T00:00:00'), np.datetime64('2000-01-01T12:00:00')),
xlocator=('hour', range(0, 24, 2)), xminorlocator=('minute', range(0, 60, 10)),
xformatter='T%H:%M:%S', title='Ticks at specific intervals',
)
# Month and year labels, with default tick label rotation
axs[4].format(
xlim=(np.datetime64('2000-01-01'), np.datetime64('2008-01-01')),
xlocator='year', xminorlocator='month', # minor ticks every month
xformatter='%b %Y', title='Ticks with default rotation',
)
axs.format(
ylocator='null', suptitle='Datetime locators and formatters demo'
)
pplt.rc.reset()
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_scales:
#
# Axis scale changes
# ------------------
#
# "Axis scales" like ``'linear'`` and ``'log'`` control the *x* and *y* axis
# coordinate system. To change the axis scale, simply pass e.g.
# ``xscale='log'`` or ``yscale='log'`` to `~proplot.axes.Axes.format`. This
# is powered by the `~proplot.constructor.Scale`
# :ref:`constructor function <why_constructor>`.
#
# ProPlot also makes several changes to the axis scale API:
#
# * The `~proplot.ticker.AutoFormatter` formatter is now used for all axis scales
# by default, including ``'log'`` and ``'symlog'``. Matplotlib's behavior can
# be restored by passing e.g. ``xformatter='log'`` or ``yformatter='log'`` to
# `~proplot.axes.CartesianAxes.format`.
# * To make its behavior consistent with `~proplot.constructor.Locator` and
# `~proplot.constructor.Formatter`, the `~proplot.constructor.Scale`
# constructor function returns instances of `~matplotlib.scale.ScaleBase`,
# and `~matplotlib.axes.Axes.set_xscale` and
# `~matplotlib.axes.Axes.set_yscale` now accept these class instances in
# addition to "registered" names like ``'log'``.
# * While matplotlib axis scales must be instantiated with an
# `~matplotlib.axis.Axis` instance (for backwards compatibility reasons),
# ProPlot axis scales can be instantiated without the axis instance
# (e.g., ``pplt.LogScale()`` instead of ``pplt.LogScale(ax.xaxis)``).
# * The default `subs` for the ``'symlog'`` axis scale is now ``np.arange(1, 10)``,
# and the default `linthresh` is now ``1``. Also the ``'log'`` and ``'symlog'``
# axis scales now accept the keywords `base`, `linthresh`, `linscale`, and
# `subs` rather than keywords with trailing ``x`` or ``y``.
# %%
import proplot as pplt
import numpy as np
N = 200
lw = 3
pplt.rc.update({
'linewidth': 1, 'ticklabelweight': 'bold', 'axeslabelweight': 'bold'
})
fig, axs = pplt.subplots(ncols=2, nrows=2, refwidth=1.8, share=0)
axs.format(suptitle='Axis scales demo', ytickminor=True)
# Linear and log scales
axs[0].format(yscale='linear', ylabel='linear scale')
axs[1].format(ylim=(1e-3, 1e3), yscale='log', ylabel='log scale')
axs[:2].plot(np.linspace(0, 1, N), np.linspace(0, 1000, N), lw=lw)
# Symlog scale
ax = axs[2]
ax.format(yscale='symlog', ylabel='symlog scale')
ax.plot(np.linspace(0, 1, N), np.linspace(-1000, 1000, N), lw=lw)
# Logit scale
ax = axs[3]
ax.format(yscale='logit', ylabel='logit scale')
ax.plot(np.linspace(0, 1, N), np.linspace(0.01, 0.99, N), lw=lw)
pplt.rc.reset()
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_scales_new:
#
# Special axis scales
# -------------------
#
# ProPlot introduces several new axis scales. The ``'cutoff'`` scale (see
# `~proplot.scale.CutoffScale`) is useful when the statistical distribution
# of your data is very unusual. The ``'sine'`` scale (see
# `~proplot.scale.SineLatitudeScale`) scales the axis with a sine function,
# resulting in an *area weighted* spherical latitude coordinate, and the
# ``'mercator'`` scale (see `~proplot.scale.MercatorLatitudeScale`) scales
# the axis with the Mercator projection latitude coordinate. The
# ``'inverse'`` scale (see `~proplot.scale.InverseScale`) can be useful when
# working with spectral data, especially with
# :ref:`"dual" unit axes <ug_dual>`.
# %%
import proplot as pplt
import numpy as np
fig, axs = pplt.subplots(nrows=4, refaspect=(5, 1), figwidth=6, sharex=False)
ax = axs[0]
# Sample data
x = np.linspace(0, 4 * np.pi, 100)
dy = np.linspace(-1, 1, 5)
y1 = np.sin(x)
y2 = np.cos(x)
state = np.random.RandomState(51423)
data = state.rand(len(dy) - 1, len(x) - 1)
# Loop through various cutoff scale options
titles = ('Zoom out of left', 'Zoom into left', 'Discrete jump', 'Fast jump')
args = (
(np.pi, 3), # speed up
(3 * np.pi, 1 / 3), # slow down
(np.pi, np.inf, 3 * np.pi), # discrete jump
(np.pi, 5, 3 * np.pi) # fast jump
)
locators = (
np.pi / 3,
np.pi / 3,
np.pi * np.append(np.linspace(0, 1, 4), np.linspace(3, 4, 4)),
np.pi * np.append(np.linspace(0, 1, 4), np.linspace(3, 4, 4)),
)
for ax, iargs, title, locator in zip(axs, args, titles, locators):
ax.pcolormesh(x, dy, data, cmap='grays', cmap_kw={'right': 0.8})
for y, color in zip((y1, y2), ('coral', 'sky blue')):
ax.plot(x, y, lw=4, color=color)
ax.format(
xscale=('cutoff', *iargs), title=title,
xlim=(0, 4 * np.pi), ylabel='wave amplitude',
xformatter='pi', xlocator=locator,
xtickminor=False, xgrid=True, ygrid=False, suptitle='Cutoff axis scales demo'
)
# %%
import proplot as pplt
import numpy as np
# Create figure
pplt.rc.reset()
state = np.random.RandomState(51423)
colors = ('coral', 'sky blue')
fig, axs = pplt.subplots(nrows=2, ncols=3, refwidth=1.7, share=0, order='F')
axs.format(
toplabels=('Geographic scales', 'Exponential scales', 'Power scales'),
)
# Geographic scales
n = 20
x = np.linspace(-180, 180, n)
y1 = np.linspace(-85, 85, n)
y2 = np.linspace(-85, 85, n)
data = state.rand(len(x) - 1, len(y2) - 1)
for ax, scale, color in zip(axs[:2], ('sine', 'mercator'), colors):
ax.plot(x, y1, '-', color=color, lw=4)
ax.pcolormesh(x, y2, data, cmap='grays', cmap_kw={'right': 0.8})
ax.format(
title=scale.title() + ' y-axis', yscale=scale, ytickloc='left',
yformatter='deg', grid=False, ylocator=20,
xscale='linear', xlim=None, ylim=(-85, 85)
)
# Exp scales
x = np.linspace(0, 1, 50)
y = 10 * x
data = state.rand(len(y) - 1, len(x) - 1)
for ax, a, c, color in zip(axs[2:4], (np.e, 2), (0.5, 2), colors):
ax.pcolormesh(x, y, data, cmap='grays', cmap_kw={'right': 0.8})
ax.plot(x, y, lw=4, color=color)
ax.format(
ylim=(0.1, 10), yscale=('exp', a, c),
title=f"${(a, 'e')[a == np.e]}^{{{(c, '')[c == 1]}x}}$"
)
# Power scales
for ax, power, color in zip(axs[4:], (2, 1 / 4), colors):
ax.pcolormesh(x, y, data, cmap='grays', cmap_kw={'right': 0.8})
ax.plot(x, y, lw=4, color=color)
ax.format(
ylim=(0.1, 10), yscale=('power', power),
title=f'$x^{{{power}}}$'
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_dual:
#
# Dual unit scales
# ----------------
#
# The `~proplot.axes.CartesianAxes.dualx` and
# `~proplot.axes.CartesianAxes.dualy` methods can be used to draw duplicate
# *x* and *y* axes meant to represent *alternate units* in the same
# coordinate range as the "parent" axis. This feature is powered by the
# `~proplot.scale.FuncScale` class.
#
# `~proplot.axes.CartesianAxes.dualx` and `~proplot.axes.CartesianAxes.dualy`
# accept either (1) a single linear forward function, (2) a pair of arbitrary
# forward and inverse functions, or (3) a scale name or scale class instance.
# In the latter case, the scale's transforms are used for the forward and
# inverse functions, and the scale's default locators and formatters are used
# for the default `~proplot.scale.FuncScale` locators and formatters.
#
# In the below examples, we generate dual axes with each of these three methods. Note
# that the "parent" axis scale is now arbitrary -- in the first example shown below,
# we create a `~proplot.axes.CartesianAxes.dualx` axis for an axis scaled by the
# `symlog scale <https://matplotlib.org/stable/gallery/scales/symlog_demo.html>`__.
# %%
import proplot as pplt
pplt.rc.update({'grid.alpha': 0.4, 'linewidth': 1, 'grid.linewidth': 1})
c1 = pplt.scale_luminance('cerulean', 0.5)
c2 = pplt.scale_luminance('red', 0.5)
fig, axs = pplt.subplots(
[[1, 1, 2, 2], [0, 3, 3, 0]],
share=0, refaspect=2.2, refwidth=3
)
axs.format(
suptitle='Duplicate axes with custom transformations',
xcolor=c1, gridcolor=c1,
ylocator=[], yformatter=[]
)
# Meters and kilometers
ax = axs[0]
ax.format(xlim=(0, 5000), xlabel='meters')
ax.dualx(
lambda x: x * 1e-3,
label='kilometers', grid=True, color=c2, gridcolor=c2
)
# Kelvin and Celsius
ax = axs[1]
ax.format(xlim=(200, 300), xlabel='temperature (K)')
ax.dualx(
lambda x: x - 273.15,
label='temperature (\N{DEGREE SIGN}C)', grid=True, color=c2, gridcolor=c2
)
# With symlog parent
ax = axs[2]
ax.format(xlim=(-100, 100), xscale='symlog', xlabel='MegaJoules')
ax.dualx(
lambda x: x * 1e6,
label='Joules', formatter='log', grid=True, color=c2, gridcolor=c2
)
pplt.rc.reset()
# %%
import proplot as pplt
pplt.rc.update({'grid.alpha': 0.4, 'linewidth': 1, 'grid.linewidth': 1})
c1 = pplt.scale_luminance('cerulean', 0.5)
c2 = pplt.scale_luminance('red', 0.5)
fig, axs = pplt.subplots(ncols=2, share=0, refaspect=0.4, refwidth=1.8)
axs.format(suptitle='Duplicate axes with special transformations')
# Pressure as the linear scale, height on opposite axis (scale height 7km)
ax = axs[0]
ax.format(
xformatter='null', ylabel='pressure (hPa)',
ylim=(1000, 10), xlocator=[], ycolor=c1, gridcolor=c1
)
ax.dualy(
'height', label='height (km)', ticks=2.5, color=c2, gridcolor=c2, grid=True
)
# Height as the linear scale, pressure on opposite axis (scale height 7km)
ax = axs[1] # span
ax.format(
xformatter='null', ylabel='height (km)', ylim=(0, 20), xlocator='null',
grid=True, gridcolor=c2, ycolor=c2
)
ax.dualy(
'pressure', label='pressure (hPa)', locator=100, color=c1, gridcolor=c1, grid=True,
)
pplt.rc.reset()
# %%
import proplot as pplt
import numpy as np
pplt.rc.margin = 0
c1 = pplt.scale_luminance('cerulean', 0.5)
c2 = pplt.scale_luminance('red', 0.5)
fig, ax = pplt.subplots(refaspect=(3, 1), figwidth=6)
# Sample data
cutoff = 1 / 5
x = np.linspace(0.01, 0.5, 1000) # in wavenumber days
response = (np.tanh(-((x - cutoff) / 0.03)) + 1) / 2 # response func
ax.axvline(cutoff, lw=2, ls='-', color=c2)
ax.fill_between([cutoff - 0.03, cutoff + 0.03], 0, 1, color=c2, alpha=0.3)
ax.plot(x, response, color=c1, lw=2)
# Add inverse scale to top
ax.format(
xlabel='wavenumber (days$^{-1}$)', ylabel='response', grid=False,
title='Imaginary response function',
suptitle='Duplicate axes with wavenumber and period',
)
ax = ax.dualx(
'inverse', locator='log', locator_kw={'subs': (1, 2, 5)}, label='period (days)'
)
pplt.rc.reset()
|
import configparser
import os
import secrets
from pathlib import Path
import click
import click_spinner
import shortuuid
from git import Repo
from git.exc import InvalidGitRepositoryError
from .github import GitHub
CONFIG_FILE = 'config.ini'
QQQ = 'qqq'
@click.group()
def cli():
"""
QQQ allows you to easily share your currently checked-out git branch with
other people via GitHub.
How to use QQQ:\n
1. Obtain a personal access token from GitHub with the full `repo` permission.\n
2. Use `qqq login` to save your GitHub access token to the QQQ config file.\n
3. `cd` to your local git repository and run `qqq send` to share the currently
checked-out branch with other GitHub users.
"""
pass
@cli.command()
@click.option('-u', '--user', 'user', help='Your GitHub username.')
@click.option('-t', '--token', 'token', help='Your GitHub personal access token.')
def login(user, token):
"""Save your GitHub access token."""
app_dir = click.get_app_dir(QQQ)
config_path = f'{app_dir}/{CONFIG_FILE}'
# Verify user
with click_spinner.spinner():
if not GitHub.verify_token(user, token):
click.echo(click.style('Invalid GitHub username or token!', fg='red'))
raise click.Abort
# Check if file already exists
if Path(config_path).is_file():
# File exists, prompt to overwrite
click.confirm(f'{click.format_filename(config_path)} already exists, update?', abort=True)
# Create config object
cp = configparser.ConfigParser()
cp['auth'] = {
'user': user,
'token': token
}
# Make sure the qqq dir exists
if not Path(app_dir).is_dir():
click.echo(f'Creating directory {click.format_filename(app_dir)}...')
Path(app_dir).mkdir(parents=True, exist_ok=True)
# Write to config file
with open(config_path, 'w') as config_file:
cp.write(config_file)
click.echo(f'Updated config file located at:\t{click.format_filename(config_path)}')
@cli.command()
@click.argument('github_username')
@click.option('-a', '--admins', multiple=True, required=False, help='GitHub users to invite as admin collaborators.')
def send(github_username, admins):
"""Share your local branch with other GitHub users."""
config_path = f'{click.get_app_dir(QQQ)}/{CONFIG_FILE}'
# Create the repo object
try:
repo = Repo(os.getcwd())
except InvalidGitRepositoryError:
click.echo(click.style('Please use qqq from within a valid git repository.', fg='red'))
raise click.Abort
if repo.bare:
# Confirm the user wants to use an empty repo
click.confirm('Repository appears to be bare, continue?', abort=True)
# Make sure config file exists
if not Path(config_path).is_file():
        click.echo(click.style('Config file does not exist. Run `qqq login`.', fg='red'))
raise click.Abort
# Read the config file
cp = configparser.ConfigParser()
try:
cp.read(config_path)
auth_user = cp.get('auth', 'user')
auth_token = cp.get('auth', 'token')
except configparser.Error:
click.echo(click.style('Malformed configuration file.', fg='red'))
raise click.Abort
gh = GitHub(auth_user, auth_token)
# Verify user exists on GitHub
user = gh.get_user(github_username)
if user is None:
click.echo(f'Could not find GitHub user {github_username}.')
raise click.Abort
# Generate new repo name
repo_name = f'{github_username}-{shortuuid.uuid()}'
# Ask user for branch name
branch_name = click.prompt('Enter the branch name on the remote repository', default='master')
# Confirm with user
click.echo(f'Preparing to send the current branch to {github_username}...')
_repo = f''
_msg = f'''Are you sure you want to send the current branch to {user["login"]} ({user["name"]})? This will:
\t1. Take the current `{repo.active_branch}` branch and force push to {auth_user}/{repo_name} on GitHub (private)
\t2. Invite {github_username} as a collaborator\n'''
if admins:
_msg += f'\t3. Invite {', '.join([str(a) for a in admins])} as {'an ' if len(admins) == 1 else ''}' \
f'admin collaborator{'s' if len(admins) > 1 else ''}\n'
click.confirm(click.style(_msg, fg='cyan'), abort=True)
click.echo(f'Creating repo on GitHub and inviting {user['login']}...')
with click_spinner.spinner():
# Create repo on GitHub
new_repo = gh.create_repo(repo_name)
if new_repo is None:
click.echo(click.style('Failed to create repository on GitHub.', fg='red'))
raise click.Abort
# Push the current branch to the new repo
_tmp_remote_name = secrets.token_urlsafe()
_tmp_remote_url = f'https://{auth_token}:x-oauth-basic@github.com/{auth_user}/{repo_name}.git'
new_remote = repo.create_remote(_tmp_remote_name, _tmp_remote_url)
new_remote.push(f'{repo.head.ref}:{branch_name}')
repo.delete_remote(_tmp_remote_name)
if not gh.add_collaborator(repo_name, user["login"]):
click.echo(click.style(f'Error inviting {user['login']}.', fg='red'))
# Invite admin collaborators
for admin_username in admins:
au = gh.get_user(admin_username) # Verify the admin collaborator's GitHub account
if au:
click.confirm(click.style(f'Are you sure you want to invite {au['login']} as an admin?', fg='cyan'))
click.echo(f'Inviting admin {au['login']} ({au['name']})...')
with click_spinner.spinner():
if not gh.add_collaborator(repo_name, admin_username, admin=True):
click.echo(click.style(f'Error inviting {au['login']}.', fg='red'))
else:
click.echo(click.style(f'Could not find {admin_username}.', fg='red'))
click.echo('Done!')
@cli.command()
def whoami():
"""Get the logged-in GitHub user."""
app_dir = click.get_app_dir(QQQ)
config_path = f'{app_dir}/{CONFIG_FILE}'
    if not Path(config_path).is_file():
        click.echo('Config file does not exist! Run `qqq login` first.')
        raise click.Abort
cp = configparser.ConfigParser()
cp.read(config_path)
click.echo(cp['auth']['user'])
| import configparser
import os
import secrets
from pathlib import Path
import click
import click_spinner
import shortuuid
from git import Repo
from git.exc import InvalidGitRepositoryError
from .github import GitHub
CONFIG_FILE = 'config.ini'
QQQ = 'qqq'
@click.group()
def cli():
"""
QQQ allows you to easily share your currently checked-out git branch with
other people via GitHub.
How to use QQQ:\n
1. Obtain a personal access token from GitHub with the full `repo` permission.\n
2. Use `qqq login` to save your GitHub access token to the QQQ config file.\n
3. `cd` to your local git repository and run `qqq send` to share the currently
checked-out branch with other GitHub users.
"""
pass
@cli.command()
@click.option('-u', '--user', 'user', help='Your GitHub username.')
@click.option('-t', '--token', 'token', help='Your GitHub personal access token.')
def login(user, token):
"""Save your GitHub access token."""
app_dir = click.get_app_dir(QQQ)
config_path = f'{app_dir}/{CONFIG_FILE}'
# Verify user
with click_spinner.spinner():
if not GitHub.verify_token(user, token):
click.echo(click.style('Invalid GitHub username or token!', fg='red'))
raise click.Abort
# Check if file already exists
if Path(config_path).is_file():
# File exists, prompt to overwrite
click.confirm(f'{click.format_filename(config_path)} already exists, update?', abort=True)
# Create config object
cp = configparser.ConfigParser()
cp['auth'] = {
'user': user,
'token': token
}
# Make sure the qqq dir exists
if not Path(app_dir).is_dir():
click.echo(f'Creating directory {click.format_filename(app_dir)}...')
Path(app_dir).mkdir(parents=True, exist_ok=True)
# Write to config file
with open(config_path, 'w') as config_file:
cp.write(config_file)
click.echo(f'Updated config file located at:\t{click.format_filename(config_path)}')
@cli.command()
@click.argument('github_username')
@click.option('-a', '--admins', multiple=True, required=False, help='GitHub users to invite as admin collaborators.')
def send(github_username, admins):
"""Share your local branch with other GitHub users."""
config_path = f'{click.get_app_dir(QQQ)}/{CONFIG_FILE}'
# Create the repo object
try:
repo = Repo(os.getcwd())
except InvalidGitRepositoryError:
click.echo(click.style('Please use qqq from within a valid git repository.', fg='red'))
raise click.Abort
if repo.bare:
# Confirm the user wants to use an empty repo
click.confirm('Repository appears to be bare, continue?', abort=True)
# Make sure config file exists
if not Path(config_path).is_file():
        click.echo(click.style('Config file does not exist. Run `qqq login`.', fg='red'))
raise click.Abort
# Read the config file
cp = configparser.ConfigParser()
try:
cp.read(config_path)
auth_user = cp.get('auth', 'user')
auth_token = cp.get('auth', 'token')
except configparser.Error:
click.echo(click.style('Malformed configuration file.', fg='red'))
raise click.Abort
gh = GitHub(auth_user, auth_token)
# Verify user exists on GitHub
user = gh.get_user(github_username)
if user is None:
click.echo(f'Could not find GitHub user {github_username}.')
raise click.Abort
# Generate new repo name
repo_name = f'{github_username}-{shortuuid.uuid()}'
# Ask user for branch name
branch_name = click.prompt('Enter the branch name on the remote repository', default='master')
# Confirm with user
click.echo(f'Preparing to send the current branch to {github_username}...')
_repo = f''
_msg = f'''Are you sure you want to send the current branch to {user["login"]} ({user["name"]})? This will:
\t1. Take the current `{repo.active_branch}` branch and force push to {auth_user}/{repo_name} on GitHub (private)
\t2. Invite {github_username} as a collaborator\n'''
if admins:
_msg += f'\t3. Invite {", ".join([str(a) for a in admins])} as {"an " if len(admins) == 1 else ""}' \
f'admin collaborator{"s" if len(admins) > 1 else ""}\n'
click.confirm(click.style(_msg, fg='cyan'), abort=True)
click.echo(f'Creating repo on GitHub and inviting {user["login"]}...')
with click_spinner.spinner():
# Create repo on GitHub
new_repo = gh.create_repo(repo_name)
if new_repo is None:
click.echo(click.style('Failed to create repository on GitHub.', fg='red'))
raise click.Abort
# Push the current branch to the new repo
_tmp_remote_name = secrets.token_urlsafe()
_tmp_remote_url = f'https://{auth_token}:x-oauth-basic@github.com/{auth_user}/{repo_name}.git'
new_remote = repo.create_remote(_tmp_remote_name, _tmp_remote_url)
new_remote.push(f'{repo.head.ref}:{branch_name}')
repo.delete_remote(_tmp_remote_name)
if not gh.add_collaborator(repo_name, user["login"]):
click.echo(click.style(f'Error inviting {user["login"]}.', fg='red'))
# Invite admin collaborators
for admin_username in admins:
au = gh.get_user(admin_username) # Verify the admin collaborator's GitHub account
if au:
click.confirm(click.style(f'Are you sure you want to invite {au["login"]} as an admin?', fg='cyan'))
click.echo(f'Inviting admin {au["login"]} ({au["name"]})...')
with click_spinner.spinner():
if not gh.add_collaborator(repo_name, admin_username, admin=True):
click.echo(click.style(f'Error inviting {au["login"]}.', fg='red'))
else:
click.echo(click.style(f'Could not find {admin_username}.', fg='red'))
click.echo('Done!')
@cli.command()
def whoami():
"""Get the logged-in GitHub user."""
app_dir = click.get_app_dir(QQQ)
config_path = f'{app_dir}/{CONFIG_FILE}'
    if not Path(config_path).is_file():
        click.echo('Config file does not exist! Run `qqq login` first.')
        raise click.Abort
cp = configparser.ConfigParser()
cp.read(config_path)
click.echo(cp['auth']['user'])
|
from datetime import datetime, timedelta
from typing import Optional, Iterator
import databases
import enum
import jwt
import sqlalchemy
import uvicorn
from databases.backends.postgres import Record
from email_validator import EmailNotValidError, validate_email as ve
from fastapi import FastAPI, HTTPException, Depends
from decouple import config
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, validator
from passlib.context import CryptContext
from starlette.requests import Request
DATABASE_URL = (
f"postgresql://{config("DB_USER")}:"
f"{config("DB_PASSWORD")}@localhost:5433/clothes"
)
database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()
app = FastAPI()
pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')
class UserRole(enum.Enum):
super_admin = "super admin"
admin = "admin"
user = "user"
class ColorEnum(enum.Enum):
pink = "pink"
black = "black"
white = "white"
yellow = "yellow"
class SizeEnum(enum.Enum):
xs = "xs"
s = "s"
m = "m"
ll = "l"
xl = "xl"
xxl = "xxl"
users = sqlalchemy.Table(
"users",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("email", sqlalchemy.String(120), unique=True),
sqlalchemy.Column("password", sqlalchemy.String(255)),
sqlalchemy.Column("full_name", sqlalchemy.String(200)),
sqlalchemy.Column("phone", sqlalchemy.String(13)),
sqlalchemy.Column(
"created_at",
sqlalchemy.DateTime,
nullable=False,
server_default=sqlalchemy.func.now(),
),
sqlalchemy.Column(
"last_modified_at",
sqlalchemy.DateTime,
nullable=False,
server_default=sqlalchemy.func.now(),
onupdate=sqlalchemy.func.now(),
),
sqlalchemy.Column(
"role",
sqlalchemy.Enum(UserRole),
nullable=False,
server_default=UserRole.user.name,
),
)
clothes = sqlalchemy.Table(
"clothes",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("name", sqlalchemy.String(120)),
sqlalchemy.Column("color", sqlalchemy.Enum(ColorEnum), nullable=False),
sqlalchemy.Column("size", sqlalchemy.Enum(SizeEnum), nullable=False),
sqlalchemy.Column("photo_url", sqlalchemy.String(255)),
sqlalchemy.Column(
"created_at",
sqlalchemy.DateTime,
nullable=False,
server_default=sqlalchemy.func.now(),
),
sqlalchemy.Column(
"last_modified_at",
sqlalchemy.DateTime,
nullable=False,
server_default=sqlalchemy.func.now(),
onupdate=sqlalchemy.func.now(),
),
)
def is_admin(request: Request) -> None:
user = request.state.user
if not user or user["role"] not in (UserRole.admin, UserRole.super_admin):
raise HTTPException(
403, "You do not have permissions for this resource"
)
def create_access_token(user: Record) -> Optional[str]:
try:
payload = {
"sub": user["id"],
"exp": datetime.utcnow() + timedelta(minutes=120),
}
return jwt.encode(payload, config("JWT_SECRET"), algorithm="HS256")
except Exception:
raise
class EmailField(str):
@classmethod
def __get_validators__(cls) -> Iterator:
yield cls.validate
@classmethod
def validate(cls, value: str) -> str:
try:
ve(value)
return value
except EmailNotValidError:
raise ValueError("Email is not valid")
class BaseUser(BaseModel):
email: EmailField
full_name: Optional[str]
@validator("full_name")
def validate_full_name(cls, value: str):
try:
assert len(value.split()) == 2
return value
except Exception:
raise ValueError("You should provide at least two names")
class UserSignIn(BaseUser):
password: str
class UserSignOut(BaseUser):
phone: Optional[str]
created_at: datetime
last_modified_at: datetime
class ClothesBase(BaseModel):
name: str
size: SizeEnum
color: ColorEnum
class ClothesIn(ClothesBase):
pass
class ClothesOut(ClothesBase):
id: int
created_at: datetime
last_modified_at: datetime
class CustomHTTPBearer(HTTPBearer):
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
res = await super().__call__(request)
try:
payload = jwt.decode(
res.credentials, config("JWT_SECRET"), algorithms=["HS256"]
)
user = await database.fetch_one(
users.select().where(users.c.id == payload["sub"])
)
request.state.user = user
return payload
except jwt.ExpiredSignatureError:
raise HTTPException(401, "Token is expired")
except jwt.InvalidTokenError:
raise HTTPException(401, "Invalid token")
oauth2_scheme = CustomHTTPBearer()
@app.on_event("startup")
async def startup() -> None:
await database.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
await database.disconnect()
@app.get("/clothes", dependencies=[Depends(oauth2_scheme)])
async def get_all_clothes() -> list:
return await database.fetch_all(clothes.select())
@app.post(
"/clothes",
response_model=ClothesOut,
dependencies=[Depends(oauth2_scheme), Depends(is_admin)],
status_code=201,
)
async def create_clothes(clothes_data: ClothesIn) -> Record:
id_ = await database.execute(clothes.insert().values(**clothes_data.dict()))
return await database.fetch_one(clothes.select().where(clothes.c.id == id_))
@app.post("/register")
async def create_user(user: UserSignIn) -> dict:
user.password = pwd_context.hash(user.password)
q = users.insert().values(**user.dict())
id_ = await database.execute(q)
created_user = await database.fetch_one(
users.select().where(users.c.id == id_)
)
token = create_access_token(created_user)
return {"token": token}
if __name__ == "__main__":
uvicorn.run(app)
| from datetime import datetime, timedelta
from typing import Optional, Iterator
import databases
import enum
import jwt
import sqlalchemy
import uvicorn
from databases.backends.postgres import Record
from email_validator import EmailNotValidError, validate_email as ve
from fastapi import FastAPI, HTTPException, Depends
from decouple import config
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, validator
from passlib.context import CryptContext
from starlette.requests import Request
DATABASE_URL = (
f"postgresql://{config('DB_USER')}:"
f"{config('DB_PASSWORD')}@localhost:5433/clothes"
)
database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()
app = FastAPI()
pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')
class UserRole(enum.Enum):
super_admin = "super admin"
admin = "admin"
user = "user"
class ColorEnum(enum.Enum):
pink = "pink"
black = "black"
white = "white"
yellow = "yellow"
class SizeEnum(enum.Enum):
xs = "xs"
s = "s"
m = "m"
ll = "l"
xl = "xl"
xxl = "xxl"
users = sqlalchemy.Table(
"users",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("email", sqlalchemy.String(120), unique=True),
sqlalchemy.Column("password", sqlalchemy.String(255)),
sqlalchemy.Column("full_name", sqlalchemy.String(200)),
sqlalchemy.Column("phone", sqlalchemy.String(13)),
sqlalchemy.Column(
"created_at",
sqlalchemy.DateTime,
nullable=False,
server_default=sqlalchemy.func.now(),
),
sqlalchemy.Column(
"last_modified_at",
sqlalchemy.DateTime,
nullable=False,
server_default=sqlalchemy.func.now(),
onupdate=sqlalchemy.func.now(),
),
sqlalchemy.Column(
"role",
sqlalchemy.Enum(UserRole),
nullable=False,
server_default=UserRole.user.name,
),
)
clothes = sqlalchemy.Table(
"clothes",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("name", sqlalchemy.String(120)),
sqlalchemy.Column("color", sqlalchemy.Enum(ColorEnum), nullable=False),
sqlalchemy.Column("size", sqlalchemy.Enum(SizeEnum), nullable=False),
sqlalchemy.Column("photo_url", sqlalchemy.String(255)),
sqlalchemy.Column(
"created_at",
sqlalchemy.DateTime,
nullable=False,
server_default=sqlalchemy.func.now(),
),
sqlalchemy.Column(
"last_modified_at",
sqlalchemy.DateTime,
nullable=False,
server_default=sqlalchemy.func.now(),
onupdate=sqlalchemy.func.now(),
),
)
def is_admin(request: Request) -> None:
user = request.state.user
if not user or user["role"] not in (UserRole.admin, UserRole.super_admin):
raise HTTPException(
403, "You do not have permissions for this resource"
)
def create_access_token(user: Record) -> Optional[str]:
try:
payload = {
"sub": user["id"],
"exp": datetime.utcnow() + timedelta(minutes=120),
}
return jwt.encode(payload, config("JWT_SECRET"), algorithm="HS256")
except Exception:
raise
class EmailField(str):
@classmethod
def __get_validators__(cls) -> Iterator:
yield cls.validate
@classmethod
def validate(cls, value: str) -> str:
try:
ve(value)
return value
except EmailNotValidError:
raise ValueError("Email is not valid")
class BaseUser(BaseModel):
email: EmailField
full_name: Optional[str]
@validator("full_name")
def validate_full_name(cls, value: str):
try:
assert len(value.split()) == 2
return value
except Exception:
raise ValueError("You should provide at least two names")
class UserSignIn(BaseUser):
password: str
class UserSignOut(BaseUser):
phone: Optional[str]
created_at: datetime
last_modified_at: datetime
class ClothesBase(BaseModel):
name: str
size: SizeEnum
color: ColorEnum
class ClothesIn(ClothesBase):
pass
class ClothesOut(ClothesBase):
id: int
created_at: datetime
last_modified_at: datetime
class CustomHTTPBearer(HTTPBearer):
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
res = await super().__call__(request)
try:
payload = jwt.decode(
res.credentials, config("JWT_SECRET"), algorithms=["HS256"]
)
user = await database.fetch_one(
users.select().where(users.c.id == payload["sub"])
)
request.state.user = user
return payload
except jwt.ExpiredSignatureError:
raise HTTPException(401, "Token is expired")
except jwt.InvalidTokenError:
raise HTTPException(401, "Invalid token")
oauth2_scheme = CustomHTTPBearer()
@app.on_event("startup")
async def startup() -> None:
await database.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
await database.disconnect()
@app.get("/clothes", dependencies=[Depends(oauth2_scheme)])
async def get_all_clothes() -> list:
return await database.fetch_all(clothes.select())
@app.post(
"/clothes",
response_model=ClothesOut,
dependencies=[Depends(oauth2_scheme), Depends(is_admin)],
status_code=201,
)
async def create_clothes(clothes_data: ClothesIn) -> Record:
id_ = await database.execute(clothes.insert().values(**clothes_data.dict()))
return await database.fetch_one(clothes.select().where(clothes.c.id == id_))
@app.post("/register")
async def create_user(user: UserSignIn) -> dict:
user.password = pwd_context.hash(user.password)
q = users.insert().values(**user.dict())
id_ = await database.execute(q)
created_user = await database.fetch_one(
users.select().where(users.c.id == id_)
)
token = create_access_token(created_user)
return {"token": token}
if __name__ == "__main__":
uvicorn.run(app)
|
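# A minimal client sketch for the clothes API defined above. This is illustrative
# only: the base URL, port, and credentials are assumptions, and it presumes the
# service is running with valid DB_* and JWT_SECRET settings.
import requests

BASE_URL = "http://localhost:8000"  # assumed host/port for the FastAPI app

# Register a user; the endpoint returns {"token": <JWT>}.
signup = requests.post(f"{BASE_URL}/register", json={
    "email": "jane.doe@example.com",   # hypothetical credentials
    "full_name": "Jane Doe",
    "password": "s3cret-pass",
})
token = signup.json()["token"]

# Protected endpoints expect an "Authorization: Bearer <token>" header,
# which CustomHTTPBearer validates before the route handler runs.
clothes = requests.get(f"{BASE_URL}/clothes",
                       headers={"Authorization": f"Bearer {token}"})
print(clothes.json())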
import streamlit as st
import time
import requests
def main():
st.set_page_config( # Alternate names: setup_page, page, layout
layout="wide", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed"
page_title="The Big Language Model Workshop", # String or None. Strings get appended with "• Streamlit".
page_icon=None, # String, anything supported by st.image, or None.
)
st.title("The Big Language Model Workshop")
"""This app enables you to interact with large language models in a friendly way!"""
ex_names = [
"In a shocking finding, scientists discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English.",
"The ancient people of Arcadia achieved oustanding cultural and technological developments. Below we summarise some of the highlights of the Acadian society.",
"""Tweet: "I hate it when my phone battery dies."
Sentiment: Negative
###
Tweet: My day has been 👍.
Sentiment: Positive
###
Tweet: This is the link to the article.
Sentiment: Neutral
###
Tweet: This new movie started strange but in the end it was awesome.
Sentiment:""",
"""Q: Fetch the departments that have less than five people in it.\n
A: SELECT DEPARTMENT, COUNT(WORKER_ID) as "Number of Workers" FROM Worker GROUP BY DEPARTMENT HAVING COUNT(WORKER_ID) < 5;\n
###\n
Q: Show all departments along with the number of people in each department\n
A: SELECT DEPARTMENT, COUNT(DEPARTMENT) as "Number of Workers" FROM Worker GROUP BY DEPARTMENT;\n
###\n
Q: Show the last record of the Worker table\n
A: SELECT * FROM Worker ORDER BY LAST_NAME DESC LIMIT 1;\n
###\n
Q: Fetch the three max salaries from the Worker table;\n
A:""",
]
example = st.selectbox("Choose an example prompt from this selector", ex_names)
inp = st.text_area(
"Or write your own prompt here!", example, max_chars=2000, height=150
)
try:
rec = ex_names.index(inp)
except ValueError:
rec = 0
with st.beta_expander("Generation options..."):
length = st.slider(
"Choose the length of the generated texts (in tokens)",
2,
1024,
512 if rec < 2 else 50,
10,
)
temp = st.slider(
"Choose the temperature (higher - more random, lower - more repetitive). For the code generation or sentence classification promps it's recommended to use a lower value, like 0.35",
0.0,
1.5,
1.0 if rec < 2 else 0.35,
0.05,
)
response = None
with st.form(key="inputs"):
submit_button = st.form_submit_button(label="Generate!")
if submit_button:
payload = {
"context": inp,
"token_max_length": length,
"temperature": temp,
"top_p": 0.9,
}
query = requests.post("http://localhost:5000/generate", params=payload)
response = query.json()
st.markdown(response["prompt"] + response["text"])
st.text(f"Generation done in {response["compute_time"]:.3} s.")
if False:
col1, col2, *rest = st.beta_columns([1, 1, 10, 10])
def on_click_good():
response["rate"] = "good"
print(response)
def on_click_bad():
response["rate"] = "bad"
print(response)
col1.form_submit_button("👍", on_click=on_click_good)
col2.form_submit_button("👎", on_click=on_click_bad)
st.text("App baked with ❤️ by @vicgalle")
if __name__ == "__main__":
main()
| import streamlit as st
import time
import requests
def main():
st.set_page_config( # Alternate names: setup_page, page, layout
layout="wide", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="auto", # Can be "auto", "expanded", "collapsed"
page_title="The Big Language Model Workshop", # String or None. Strings get appended with "• Streamlit".
page_icon=None, # String, anything supported by st.image, or None.
)
st.title("The Big Language Model Workshop")
"""This app enables you to interact with large language models in a friendly way!"""
ex_names = [
"In a shocking finding, scientists discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English.",
"The ancient people of Arcadia achieved oustanding cultural and technological developments. Below we summarise some of the highlights of the Acadian society.",
"""Tweet: "I hate it when my phone battery dies."
Sentiment: Negative
###
Tweet: My day has been 👍.
Sentiment: Positive
###
Tweet: This is the link to the article.
Sentiment: Neutral
###
Tweet: This new movie started strange but in the end it was awesome.
Sentiment:""",
"""Q: Fetch the departments that have less than five people in it.\n
A: SELECT DEPARTMENT, COUNT(WORKER_ID) as "Number of Workers" FROM Worker GROUP BY DEPARTMENT HAVING COUNT(WORKER_ID) < 5;\n
###\n
Q: Show all departments along with the number of people in each department\n
A: SELECT DEPARTMENT, COUNT(DEPARTMENT) as "Number of Workers" FROM Worker GROUP BY DEPARTMENT;\n
###\n
Q: Show the last record of the Worker table\n
A: SELECT * FROM Worker ORDER BY LAST_NAME DESC LIMIT 1;\n
###\n
Q: Fetch the three max salaries from the Worker table;\n
A:""",
]
example = st.selectbox("Choose an example prompt from this selector", ex_names)
inp = st.text_area(
"Or write your own prompt here!", example, max_chars=2000, height=150
)
try:
rec = ex_names.index(inp)
except ValueError:
rec = 0
with st.beta_expander("Generation options..."):
length = st.slider(
"Choose the length of the generated texts (in tokens)",
2,
1024,
512 if rec < 2 else 50,
10,
)
temp = st.slider(
"Choose the temperature (higher - more random, lower - more repetitive). For the code generation or sentence classification promps it's recommended to use a lower value, like 0.35",
0.0,
1.5,
1.0 if rec < 2 else 0.35,
0.05,
)
response = None
with st.form(key="inputs"):
submit_button = st.form_submit_button(label="Generate!")
if submit_button:
payload = {
"context": inp,
"token_max_length": length,
"temperature": temp,
"top_p": 0.9,
}
query = requests.post("http://localhost:5000/generate", params=payload)
response = query.json()
st.markdown(response["prompt"] + response["text"])
st.text(f"Generation done in {response['compute_time']:.3} s.")
if False:
col1, col2, *rest = st.beta_columns([1, 1, 10, 10])
def on_click_good():
response["rate"] = "good"
print(response)
def on_click_bad():
response["rate"] = "bad"
print(response)
col1.form_submit_button("👍", on_click=on_click_good)
col2.form_submit_button("👎", on_click=on_click_bad)
st.text("App baked with ❤️ by @vicgalle")
if __name__ == "__main__":
main()
|
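# Illustrative sketch of the request contract used by the Streamlit app above.
# The URL and parameter names come from the app itself; the generation backend
# is assumed to be a separate service listening on localhost:5000.
import requests

payload = {
    "context": "Once upon a time",  # prompt text (hypothetical)
    "token_max_length": 50,
    "temperature": 0.35,
    "top_p": 0.9,
}
resp = requests.post("http://localhost:5000/generate", params=payload)
data = resp.json()
# The app expects at least these keys in the JSON response:
print(data["prompt"], data["text"], data["compute_time"])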
from telegram import InlineKeyboardButton
def get_products_keyboard(products):
keyboard = []
for product in products:
keyboard.append(
[
InlineKeyboardButton(product['description'], callback_data=product['id'])
]
)
return keyboard
def get_purchase_options_keyboard(product):
purchase_options = (1, 5, 10)
keyboard = []
purchase_option_button = []
for purchase_option in purchase_options:
purchase_option_button.append(
InlineKeyboardButton(f'{purchase_option} кг', callback_data=f'{product['id']},{purchase_option}')
)
keyboard.append(purchase_option_button)
return keyboard
def get_cart_button():
return InlineKeyboardButton('Корзина', callback_data='cart')
def get_menu_button():
return InlineKeyboardButton('В меню', callback_data='back')
def get_text_and_buttons_for_cart(products):
cart_text = ' '
keyboard = []
for product in products:
product_price = product['meta']['display_price']['with_tax']
cart_text = f"""\
{cart_text}
{product['description']}
{product_price['unit']['formatted']}
{product["quantity"]}кг на сумму {product_price["value"]["formatted"]}\
"""
keyboard.append([InlineKeyboardButton(f'Убрать из корзины {product['description']}',
callback_data=product['id'])])
return keyboard, cart_text
| from telegram import InlineKeyboardButton
def get_products_keyboard(products):
keyboard = []
for product in products:
keyboard.append(
[
InlineKeyboardButton(product['description'], callback_data=product['id'])
]
)
return keyboard
def get_purchase_options_keyboard(product):
purchase_options = (1, 5, 10)
keyboard = []
purchase_option_button = []
for purchase_option in purchase_options:
purchase_option_button.append(
InlineKeyboardButton(f'{purchase_option} кг', callback_data=f'{product["id"]},{purchase_option}')
)
keyboard.append(purchase_option_button)
return keyboard
def get_cart_button():
return InlineKeyboardButton('Корзина', callback_data='cart')
def get_menu_button():
return InlineKeyboardButton('В меню', callback_data='back')
def get_text_and_buttons_for_cart(products):
cart_text = ' '
keyboard = []
for product in products:
product_price = product['meta']['display_price']['with_tax']
cart_text = f"""\
{cart_text}
{product['description']}
{product_price['unit']['formatted']}
{product["quantity"]}кг на сумму {product_price["value"]["formatted"]}\
"""
keyboard.append([InlineKeyboardButton(f'Убрать из корзины {product["description"]}',
callback_data=product['id'])])
return keyboard, cart_text
|
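# Illustrative usage sketch for the keyboard helpers above, assuming
# python-telegram-bot is installed and products are dicts with at least
# 'id' and 'description' keys (the shape used throughout the module).
from telegram import InlineKeyboardMarkup

products = [{"id": "42", "description": "Sample product"}]  # hypothetical data
keyboard = get_products_keyboard(products)
keyboard.append([get_cart_button(), get_menu_button()])
reply_markup = InlineKeyboardMarkup(keyboard)
# reply_markup can then be passed to bot.send_message(..., reply_markup=reply_markup).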
# coding: utf-8
import os
import re
import sys
import time
import datetime
import numpy as np
from collections import defaultdict
def flatten_dual(lst):
return [e for sublist in lst for e in sublist]
def get_varname(var, scope_=globals()):
for varname,val in scope_.items():
if id(val)==id(var):
return varname
def disp_var_globals(*varnames, head_=True, align_=True, scope_=globals()):
"""
def func():
a = "hoge"
b = 123
c = [1,"1"]
disp_var_globals("a","b","c",scope=locals())
func()
>>> a: hoge
>>> b: 123
>>> c: [1, '1']
"""
if head_: print(f"#=== VARIABLE INFO ===")
digit = max([len(e) for e in varnames]) if align_ else 1
for var in varnames:
print(f"{var:<{digit}}: {scope_.get(var)}")
def disp_val_globals(*values, head_=True, align_=True, scope_=globals()):
"""
def func():
a = "hoge"
b = 123
c = [1,"1"]
        disp_val_globals(a,b,c,scope_=locals())
func()
>>> a: hoge
>>> b: 123
>>> c: [1, '1']
"""
if head_: print(f"#=== VARIABLE INFO ===")
names = [get_varname(val, scope_=scope_) for val in values]
digit = max([len(e) for e in names]) if align_ else 1
for name,val in zip(names, values):
print(f"{name:<{digit}}: {val}")
def disp_val_shapes(*values, head_=True, align_=True, scope_=globals()):
if head_: print(f"#=== ARRAY SHAPES ===")
names = [get_varname(val, scope_=scope_) for val in values]
digit = max([len(e) for e in names]) + 6 if align_ else 1
for name,val in zip(names, values):
print(f"{name+".shape":<{digit}}: {val.shape}")
_UID_PREFIXES = defaultdict(int)
def get_uid(prefix=""):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
class priColor:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
PURPLE = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
    RETURN = '\033[07m' # reverse video
    ACCENT = '\033[01m' # bold / emphasis
    FLASH = '\033[05m' # blink
    RED_FLASH = '\033[05;41m' # red background + blink
END = '\033[0m'
@staticmethod
def color(value, color=None):
if color is None:
return str(value)
else:
color = color.upper()
handleKeyError(priColor.__dict__.keys(), color=color)
return f"{priColor.__dict__[color.upper()]}{value}{priColor.END}"
def handleKeyError(lst, msg_="", **kwargs):
k,v = kwargs.popitem()
if v not in lst:
lst = ', '.join([f"'{e}'" for e in lst])
raise KeyError(f"Please chose the argment `{k}` from {lst}.\n\033[32m{msg_}\033[0m")
def handleTypeError(types, msg_="", **kwargs):
type2str = lambda t: re.sub(r"<class '(.*?)'>", r"\033[34m\1\033[0m", str(t))
k,v = kwargs.popitem()
if not any([isinstance(v,t) for t in types]):
str_true_types = ', '.join([type2str(t) for t in types])
srt_false_type = type2str(type(v))
if len(types)==1:
err_msg = f"must be {str_true_types}"
else:
err_msg = f"must be one of {str_true_types}"
raise TypeError(f"`{k}` {err_msg}, not {srt_false_type}.\n\033[32m{msg_}\033[0m")
def urlDecorate(url, addDate=True):
""" Decorate URL like Wget. (Add datetime information and coloring url to blue.) """
now = datetime.datetime.now().strftime("--%Y-%m-%d %H:%M:%S-- ") if addDate else ""
return now + priColor.color(url, color="BLUE")
def measure_complexity(func, *args, repetitions_=10, **kwargs):
times=0
metrics=[]
if "random_state" in kwargs:
base_seed = kwargs.get("random_state")
for i in range(repetitions_):
kwargs["random_state"] = base_seed+i
s = time.time()
ret = func(*args, **kwargs)
times += time.time()-s
metrics.append(ret)
else:
for _ in range(repetitions_):
s = time.time()
ret = func(*args, **kwargs)
times += time.time()-s
metrics.append(ret)
if metrics[0] is None:
return times/repetitions_
else:
return (times/repetitions_, metrics)
def has_not_attrs(obj, *names):
return [name for name in names if not hasattr(obj, name)]
def has_all_attrs(obj, *names):
return sum([1 for name in names if not hasattr(obj, name)])==0
def handleRandomState(seed):
""" Turn `np.random.RandomState` """
if seed is None:
return np.random.mtrand._rand
if isinstance(seed, np.random.RandomState):
return seed
if isinstance(seed, int):
return np.random.RandomState(seed)
raise ValueError(f"Could not conver {seed} to numpy.random.RandomState instance.")
def fout_args(*args, sep="\t"):
return sep.join([str(e) for e in args])+"\n"
f_aligns = ["<", ">", "=", "^"]
f_signs = ["+", "-", " ", ""]
f_grouping_options = ["_", ",", ""]
f_types = ["b", "c", "d", "e", "E", "f", "F", "g", "G", "n", "o", "s", "x", "X", "%"]
def format_spec_create(width=0, align=">", sign="", zero_padding=False,
grouping_option="", fmt=""):
"""
Create a function which returns a formatted text.
~~~~~
* Source Code : https://github.com/python/cpython/blob/3.8/Lib/string.py
* Documentation: https://docs.python.org/3/library/string.html#format-specification-mini-language
format_spec = [[fill]align][sign][#][0][width][grouping_option][.precision][type]
=========================
@params align : [[fill]align]
@params sign : [sign]
@params zero_padding : [0]
@params width : [width]
@params grouping_option : [grouping_option]
@params fmt : [.precision][type]
@return lambda : <function __main__.<lambda>(fill)>
"""
handleKeyError(lst=f_aligns, align=align)
handleKeyError(lst=f_signs, sign=sign)
handleKeyError(lst=f_grouping_options, grouping_option=grouping_option)
if len(fmt)>0:
handleKeyError(lst=f_types, fmt=fmt[-1])
zero = "0" if zero_padding else ""
handleTypeError(types=[int], width=width)
return lambda fill : f"{fill:{align}{sign}{zero}{width}{grouping_option}{fmt}}"
def print_func_create(width=0, align=">", sign="", zero_padding=False,
grouping_option="", fmt="", color="black",
left_side_bar="", right_side_bar="",
left_margin=0, right_margin=0, end="\n"):
"""
Create a function which prints a formatted text.
Please see also the function `format_spec_create`.
==============================
@params color : string color
@params left(right)_side_bar : (str)
@params left(right)_margin : (int)
@params end : string appended after the last value, default a newline.
@return lambda : <function __main__.<lambda>(fill)>
"""
format_spec = format_spec_create(width, align=align, sign=sign,
zero_padding=zero_padding,
grouping_option=grouping_option, fmt=fmt)
def print_func(fill):
info = f"{left_side_bar}{" "*left_margin}"
info += priColor.color(format_spec(fill), color=color)
info += f"{" "*right_margin}{right_side_bar}"
print(info, end=end)
return print_func
class Table():
def __init__(self):
self.cols = {}
self.table_width = 1
self.head = None
def _disp_title(self):
for colname, options in self.cols.items():
if "print_values" not in options:
continue
print_func = options.get("print_title")
print_func(colname)
print("|")
def _disp_border(self, table_width=None, mark="="):
table_width = self.table_width if table_width is None else table_width
print(mark*table_width)
def _disp_values(self, head=None):
head = self.head if head is None else head
for i in range(head):
for colname, options in self.cols.items():
if "print_values" not in options:
continue
print_func = options.get("print_values")
values = options.get("values")
print_func(values[i])
print("|")
def show(self, head=None, table_width=None, mark="="):
self._disp_title()
self._disp_border(table_width=table_width, mark=mark)
self._disp_values(head=head)
def set_cols(self, colname, values, width=None, align=">", sign="",
zero_padding=False, grouping_option="", fmt="", color="black",
left_margin=0, right_margin=0):
title_width = len(str(colname))
if width is None:
format_spec = format_spec_create(
width=0, align=align, sign=sign, zero_padding=zero_padding,
grouping_option=grouping_option, fmt=fmt
)
width = len(max([format_spec(v) for v in values], key=len))
width = max(width, title_width)
self.table_width += width + left_margin + right_margin + 1
print_values = print_func_create(
width=width, align=align, sign=sign, zero_padding=zero_padding,
grouping_option=grouping_option, fmt=fmt, color=color,
left_side_bar="|", right_side_bar="", end="",
left_margin=left_margin, right_margin=right_margin,
)
print_title = print_func_create(
width=width, align="^", sign="", zero_padding=False,
grouping_option="", fmt="", color="ACCENT",
left_side_bar="|", right_side_bar="", end="",
left_margin=left_margin, right_margin=right_margin,
)
self.cols.update({colname: dict(
print_values=print_values, print_title=print_title, values=values
)})
if self.head is None:
self.head = len(values)
| # coding: utf-8
import os
import re
import sys
import time
import datetime
import numpy as np
from collections import defaultdict
def flatten_dual(lst):
return [e for sublist in lst for e in sublist]
def get_varname(var, scope_=globals()):
for varname,val in scope_.items():
if id(val)==id(var):
return varname
def disp_var_globals(*varnames, head_=True, align_=True, scope_=globals()):
"""
def func():
a = "hoge"
b = 123
c = [1,"1"]
disp_var_globals("a","b","c",scope=locals())
func()
>>> a: hoge
>>> b: 123
>>> c: [1, '1']
"""
if head_: print(f"#=== VARIABLE INFO ===")
digit = max([len(e) for e in varnames]) if align_ else 1
for var in varnames:
print(f"{var:<{digit}}: {scope_.get(var)}")
def disp_val_globals(*values, head_=True, align_=True, scope_=globals()):
"""
def func():
a = "hoge"
b = 123
c = [1,"1"]
        disp_val_globals(a,b,c,scope_=locals())
func()
>>> a: hoge
>>> b: 123
>>> c: [1, '1']
"""
if head_: print(f"#=== VARIABLE INFO ===")
names = [get_varname(val, scope_=scope_) for val in values]
digit = max([len(e) for e in names]) if align_ else 1
for name,val in zip(names, values):
print(f"{name:<{digit}}: {val}")
def disp_val_shapes(*values, head_=True, align_=True, scope_=globals()):
if head_: print(f"#=== ARRAY SHAPES ===")
names = [get_varname(val, scope_=scope_) for val in values]
digit = max([len(e) for e in names]) + 6 if align_ else 1
for name,val in zip(names, values):
print(f"{name+'.shape':<{digit}}: {val.shape}")
_UID_PREFIXES = defaultdict(int)
def get_uid(prefix=""):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
class priColor:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
PURPLE = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
    RETURN = '\033[07m' # reverse video
    ACCENT = '\033[01m' # bold / emphasis
    FLASH = '\033[05m' # blink
    RED_FLASH = '\033[05;41m' # red background + blink
END = '\033[0m'
@staticmethod
def color(value, color=None):
if color is None:
return str(value)
else:
color = color.upper()
handleKeyError(priColor.__dict__.keys(), color=color)
return f"{priColor.__dict__[color.upper()]}{value}{priColor.END}"
def handleKeyError(lst, msg_="", **kwargs):
k,v = kwargs.popitem()
if v not in lst:
lst = ', '.join([f"'{e}'" for e in lst])
raise KeyError(f"Please chose the argment `{k}` from {lst}.\n\033[32m{msg_}\033[0m")
def handleTypeError(types, msg_="", **kwargs):
type2str = lambda t: re.sub(r"<class '(.*?)'>", r"\033[34m\1\033[0m", str(t))
k,v = kwargs.popitem()
if not any([isinstance(v,t) for t in types]):
str_true_types = ', '.join([type2str(t) for t in types])
srt_false_type = type2str(type(v))
if len(types)==1:
err_msg = f"must be {str_true_types}"
else:
err_msg = f"must be one of {str_true_types}"
raise TypeError(f"`{k}` {err_msg}, not {srt_false_type}.\n\033[32m{msg_}\033[0m")
def urlDecorate(url, addDate=True):
""" Decorate URL like Wget. (Add datetime information and coloring url to blue.) """
now = datetime.datetime.now().strftime("--%Y-%m-%d %H:%M:%S-- ") if addDate else ""
return now + priColor.color(url, color="BLUE")
def measure_complexity(func, *args, repetitions_=10, **kwargs):
times=0
metrics=[]
if "random_state" in kwargs:
base_seed = kwargs.get("random_state")
for i in range(repetitions_):
kwargs["random_state"] = base_seed+i
s = time.time()
ret = func(*args, **kwargs)
times += time.time()-s
metrics.append(ret)
else:
for _ in range(repetitions_):
s = time.time()
ret = func(*args, **kwargs)
times += time.time()-s
metrics.append(ret)
if metrics[0] is None:
return times/repetitions_
else:
return (times/repetitions_, metrics)
def has_not_attrs(obj, *names):
return [name for name in names if not hasattr(obj, name)]
def has_all_attrs(obj, *names):
return sum([1 for name in names if not hasattr(obj, name)])==0
def handleRandomState(seed):
""" Turn `np.random.RandomState` """
if seed is None:
return np.random.mtrand._rand
if isinstance(seed, np.random.RandomState):
return seed
if isinstance(seed, int):
return np.random.RandomState(seed)
raise ValueError(f"Could not conver {seed} to numpy.random.RandomState instance.")
def fout_args(*args, sep="\t"):
return sep.join([str(e) for e in args])+"\n"
f_aligns = ["<", ">", "=", "^"]
f_signs = ["+", "-", " ", ""]
f_grouping_options = ["_", ",", ""]
f_types = ["b", "c", "d", "e", "E", "f", "F", "g", "G", "n", "o", "s", "x", "X", "%"]
def format_spec_create(width=0, align=">", sign="", zero_padding=False,
grouping_option="", fmt=""):
"""
Create a function which returns a formatted text.
~~~~~
* Source Code : https://github.com/python/cpython/blob/3.8/Lib/string.py
* Documentation: https://docs.python.org/3/library/string.html#format-specification-mini-language
format_spec = [[fill]align][sign][#][0][width][grouping_option][.precision][type]
=========================
@params align : [[fill]align]
@params sign : [sign]
@params zero_padding : [0]
@params width : [width]
@params grouping_option : [grouping_option]
@params fmt : [.precision][type]
@return lambda : <function __main__.<lambda>(fill)>
"""
handleKeyError(lst=f_aligns, align=align)
handleKeyError(lst=f_signs, sign=sign)
handleKeyError(lst=f_grouping_options, grouping_option=grouping_option)
if len(fmt)>0:
handleKeyError(lst=f_types, fmt=fmt[-1])
zero = "0" if zero_padding else ""
handleTypeError(types=[int], width=width)
return lambda fill : f"{fill:{align}{sign}{zero}{width}{grouping_option}{fmt}}"
def print_func_create(width=0, align=">", sign="", zero_padding=False,
grouping_option="", fmt="", color="black",
left_side_bar="", right_side_bar="",
left_margin=0, right_margin=0, end="\n"):
"""
Create a function which prints a formatted text.
Please see also the function `format_spec_create`.
==============================
@params color : string color
@params left(right)_side_bar : (str)
@params left(right)_margin : (int)
@params end : string appended after the last value, default a newline.
@return lambda : <function __main__.<lambda>(fill)>
"""
format_spec = format_spec_create(width, align=align, sign=sign,
zero_padding=zero_padding,
grouping_option=grouping_option, fmt=fmt)
def print_func(fill):
info = f"{left_side_bar}{' '*left_margin}"
info += priColor.color(format_spec(fill), color=color)
info += f"{' '*right_margin}{right_side_bar}"
print(info, end=end)
return print_func
class Table():
def __init__(self):
self.cols = {}
self.table_width = 1
self.head = None
def _disp_title(self):
for colname, options in self.cols.items():
if "print_values" not in options:
continue
print_func = options.get("print_title")
print_func(colname)
print("|")
def _disp_border(self, table_width=None, mark="="):
table_width = self.table_width if table_width is None else table_width
print(mark*table_width)
def _disp_values(self, head=None):
head = self.head if head is None else head
for i in range(head):
for colname, options in self.cols.items():
if "print_values" not in options:
continue
print_func = options.get("print_values")
values = options.get("values")
print_func(values[i])
print("|")
def show(self, head=None, table_width=None, mark="="):
self._disp_title()
self._disp_border(table_width=table_width, mark=mark)
self._disp_values(head=head)
def set_cols(self, colname, values, width=None, align=">", sign="",
zero_padding=False, grouping_option="", fmt="", color="black",
left_margin=0, right_margin=0):
title_width = len(str(colname))
if width is None:
format_spec = format_spec_create(
width=0, align=align, sign=sign, zero_padding=zero_padding,
grouping_option=grouping_option, fmt=fmt
)
width = len(max([format_spec(v) for v in values], key=len))
width = max(width, title_width)
self.table_width += width + left_margin + right_margin + 1
print_values = print_func_create(
width=width, align=align, sign=sign, zero_padding=zero_padding,
grouping_option=grouping_option, fmt=fmt, color=color,
left_side_bar="|", right_side_bar="", end="",
left_margin=left_margin, right_margin=right_margin,
)
print_title = print_func_create(
width=width, align="^", sign="", zero_padding=False,
grouping_option="", fmt="", color="ACCENT",
left_side_bar="|", right_side_bar="", end="",
left_margin=left_margin, right_margin=right_margin,
)
self.cols.update({colname: dict(
print_values=print_values, print_title=print_title, values=values
)})
if self.head is None:
self.head = len(values)
|
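# Illustrative usage sketch for the Table helper defined above: register two
# columns and print them with aligned, colored output. The data is hypothetical.
table = Table()
table.set_cols("name", ["alice", "bob"], align="<", color="GREEN")
table.set_cols("score", [95, 7], fmt="d", color="BLUE")
table.show()
# show() prints a centered header row, a '=' border, then one aligned row per value.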
import requests
import json
import os
import urllib3
from wakeonlan import send_magic_packet
# REFERENCE: https://github.com/exiva/Vizio_SmartCast_API
class VizioController(object):
def __init__(self):
# Vizio's API is completely insecure, but it's also local only, so...
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.auth_token = os.environ['VIZIO_AUTH_TOKEN']
self.ip = os.environ['VIZIO_IP_ADDRESS']
self.port = os.environ['VIZIO_PORT']
self.mac = os.environ['VIZIO_MAC_ADDRESS']
self.power_keys = {'off': 0, 'on': 1, 'toggle': 2}
self.headers = {
'Content-Type': 'application/json',
'AUTH': self.auth_token
}
def _build_url(self, parts: list) -> str:
return f'https://{self.ip}:{self.port}/{'/'.join(parts)}'
def _call(self, method: str, parts: list, body={}) -> requests.Response:
try:
if method == 'GET':
response = requests.get(
url=self._build_url(parts),
headers=self.headers,
verify=False
)
elif method == 'PUT':
response = requests.put(
url=self._build_url(parts),
headers=self.headers,
data=json.dumps(body),
verify=False
)
return response
except requests.exceptions.ConnectionError as e:
print("ERROR: Couldn't connect to Vizio TV")
            print(e)  # surface the underlying connection error
return None
def _get_power_state(self) -> requests.Response:
return self._call('GET', ['state', 'device', 'power_mode'])
def _power_key(self, state: str) -> requests.Response:
body = {
'KEYLIST': [{
'CODESET': 11,
'CODE': self.power_keys[state],
'ACTION': 'KEYPRESS'
}]
}
return self._call('PUT', ['key_command', ''], body)
def turn_on(self):
send_magic_packet(self.mac)
self._power_key(state='on')
def turn_off(self):
self._power_key(state='off')
def toggle_power(self):
self._power_key(state='toggle')
def _get_all_input_names(self) -> list:
response = self._call(
'GET',
['menu_native', 'dynamic', 'tv_settings', 'devices', 'name_input']
)
if response and response.status_code == 200:
return [item['NAME'] for item in response.json()['ITEMS']]
else:
return []
def _get_current_input(self) -> dict:
response = self._call('GET', [
'menu_native', 'dynamic', 'tv_settings', 'devices', 'current_input'
])
if response.status_code == 200:
input = response.json()['ITEMS'][0]
return {'value': input['VALUE'], 'hash': input['HASHVAL']}
else:
return {}
def switch_input(self, input_name: str) -> requests.Response:
if input_name not in self._get_all_input_names():
return None
current = self._get_current_input()
if 'hash' not in current.keys():
return None
return self._call(
method='PUT',
parts=[
'menu_native',
'dynamic',
'tv_settings',
'devices',
'current_input'
],
body={
'REQUEST': 'MODIFY',
'VALUE': input_name,
'HASHVAL': current['hash']
}
)
| import requests
import json
import os
import urllib3
from wakeonlan import send_magic_packet
# REFERENCE: https://github.com/exiva/Vizio_SmartCast_API
class VizioController(object):
def __init__(self):
# Vizio's API is completely insecure, but it's also local only, so...
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.auth_token = os.environ['VIZIO_AUTH_TOKEN']
self.ip = os.environ['VIZIO_IP_ADDRESS']
self.port = os.environ['VIZIO_PORT']
self.mac = os.environ['VIZIO_MAC_ADDRESS']
self.power_keys = {'off': 0, 'on': 1, 'toggle': 2}
self.headers = {
'Content-Type': 'application/json',
'AUTH': self.auth_token
}
def _build_url(self, parts: list) -> str:
return f'https://{self.ip}:{self.port}/{"/".join(parts)}'
def _call(self, method: str, parts: list, body={}) -> requests.Response:
try:
if method == 'GET':
response = requests.get(
url=self._build_url(parts),
headers=self.headers,
verify=False
)
elif method == 'PUT':
response = requests.put(
url=self._build_url(parts),
headers=self.headers,
data=json.dumps(body),
verify=False
)
return response
except requests.exceptions.ConnectionError as e:
print("ERROR: Couldn't connect to Vizio TV")
            print(e)  # surface the underlying connection error
return None
def _get_power_state(self) -> requests.Response:
return self._call('GET', ['state', 'device', 'power_mode'])
def _power_key(self, state: str) -> requests.Response:
body = {
'KEYLIST': [{
'CODESET': 11,
'CODE': self.power_keys[state],
'ACTION': 'KEYPRESS'
}]
}
return self._call('PUT', ['key_command', ''], body)
def turn_on(self):
send_magic_packet(self.mac)
self._power_key(state='on')
def turn_off(self):
self._power_key(state='off')
def toggle_power(self):
self._power_key(state='toggle')
def _get_all_input_names(self) -> list:
response = self._call(
'GET',
['menu_native', 'dynamic', 'tv_settings', 'devices', 'name_input']
)
if response and response.status_code == 200:
return [item['NAME'] for item in response.json()['ITEMS']]
else:
return []
def _get_current_input(self) -> dict:
response = self._call('GET', [
'menu_native', 'dynamic', 'tv_settings', 'devices', 'current_input'
])
if response.status_code == 200:
input = response.json()['ITEMS'][0]
return {'value': input['VALUE'], 'hash': input['HASHVAL']}
else:
return {}
def switch_input(self, input_name: str) -> requests.Response:
if input_name not in self._get_all_input_names():
return None
current = self._get_current_input()
if 'hash' not in current.keys():
return None
return self._call(
method='PUT',
parts=[
'menu_native',
'dynamic',
'tv_settings',
'devices',
'current_input'
],
body={
'REQUEST': 'MODIFY',
'VALUE': input_name,
'HASHVAL': current['hash']
}
)
|
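# Illustrative usage sketch for VizioController, assuming the four VIZIO_*
# environment variables are set for a SmartCast TV on the local network.
# The values below are hypothetical placeholders.
import os

os.environ.setdefault("VIZIO_AUTH_TOKEN", "Zexample")
os.environ.setdefault("VIZIO_IP_ADDRESS", "192.168.1.50")
os.environ.setdefault("VIZIO_PORT", "7345")
os.environ.setdefault("VIZIO_MAC_ADDRESS", "aa:bb:cc:dd:ee:ff")

tv = VizioController()
tv.turn_on()               # wake-on-LAN packet followed by the SmartCast 'on' key
tv.switch_input("HDMI-1")  # no-op unless the TV reports an input with this name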
from utils import config, files
class Source:
def __init__(self, name, roles=True):
"""
Template for source classes.
The source should implement the methods described below, with the correct return values.
Parameters:
- name (str): Name of source. Gets used in file path. Should be all lowercase, letters only.
- roles (bool): If source categorizes item sets (builds) by role
"""
self.name = name
self.roles = roles
def get_champions(self):
"""
        - Returns a list of champions in dictionary format
- Returned dictionary format:
{
"name": str,
"display_name": str,
"id": str,
"roles": list [OPTIONAL]
}
- Roles doesn't need to be included if source does not categorize item sets by role
"""
raise NotImplementedError
def get_items(self, champion, role):
"""
Parameters:
- champion (dict): dict returned from get_champions
- OPTIONAL: role (str): role to get items from, if self.roles == True
Returns:
- Dictionary with items for this champion and role
- Returned dictionary format:
{
"frequent": {
"full": list,
"starters": list
},
"highest": {
"full": list,
"starters": list
}
}
"""
raise NotImplementedError
def get_skill_order(self, champion, role):
"""
Parameters:
- champion (dict): dict returned from get_champions
- OPTIONAL: role (str): role to get skill order from, if self.roles == True
Returns:
- Dictionary with skill order for this champion and role
- Returned dictionary format:
        {
            "frequent": list,
            "highest": list
        }
        - List example (length 18): ["Q", "W", "E", ..., "R", "E", "E"]
"""
raise NotImplementedError
def get_version(self):
"""
Returns:
- Current version of the source in string format, e.g. "10.14" or "2020.10.20"
"""
raise NotImplementedError
def get_item_sets(self, champion):
""" Gets item sets with items and skill order for every role (if supported) for a champion """
item_sets = []
# roles supported, create an item set per role
if self.roles:
for counter, role in enumerate(champion["roles"]):
try:
items = self.get_items(champion, role)
skill_order = self.get_skill_order(champion, role)
item_set = {
"role": role,
# add sort_rank if roles are supported, higher sort rank equals higher position in the item set list in-game
"sort_rank": 10 - counter,
"frequent": {
"full": items["frequent"]["full"],
"starters": items["frequent"]["starters"],
"skill_order": skill_order["frequent"]
},
"highest": {
"full": items["highest"]["full"],
"starters": items["highest"]["starters"],
"skill_order": skill_order["highest"]
}
}
item_sets.append(item_set)
except:
print(
f"ERROR: Build for {champion["display_name"]} {role} not found on {self.name}")
# roles not supported, create only one item set
else:
try:
items = self.get_items(champion)
skill_order = self.get_skill_order(champion)
item_set = {
"frequent": {
"full": items["frequent"]["full"],
"starters": items["frequent"]["starters"],
"skill_order": skill_order["frequent"]
},
"highest": {
"full": items["highest"]["full"],
"starters": items["highest"]["starters"],
"skill_order": skill_order["highest"]
}
}
item_sets.append(item_set)
except:
print(
f"ERROR: Build for {champion["display_name"]} not found on {self.name}")
return item_sets
def import_item_sets(self):
""" Imports all item sets for all champions and roles """
# first remove old item sets
self.delete_item_sets()
champions = self.get_champions()
version = self.get_version()
config.save(self.name, version)
for champion in champions:
print(
f"Importing {champion["display_name"]}'s item sets from {self.name}...")
item_sets = self.get_item_sets(champion)
for item_set in item_sets:
files.save(champion, item_set, version, self.name, self.roles)
def delete_item_sets(self):
""" Deletes all item sets for all champions and roles generated by import_item_sets """
print(f"Deleting item sets from {self.name}")
config.save(self.name, None)
try:
champions = self.get_champions()
except:
print(
f"ERROR: Could not get the champion list from {self.name}. This likely happened because of a bad connection to {self.name}, or because {self.name} changed something on their site.")
return
for champion in champions:
files.delete(champion, self.name)
| from utils import config, files
class Source:
def __init__(self, name, roles=True):
"""
Template for source classes.
The source should implement the methods described below, with the correct return values.
Parameters:
- name (str): Name of source. Gets used in file path. Should be all lowercase, letters only.
- roles (bool): If source categorizes item sets (builds) by role
"""
self.name = name
self.roles = roles
def get_champions(self):
"""
        - Returns a list of champions in dictionary format
- Returned dictionary format:
{
"name": str,
"display_name": str,
"id": str,
"roles": list [OPTIONAL]
}
- Roles doesn't need to be included if source does not categorize item sets by role
"""
raise NotImplementedError
def get_items(self, champion, role):
"""
Parameters:
- champion (dict): dict returned from get_champions
- OPTIONAL: role (str): role to get items from, if self.roles == True
Returns:
- Dictionary with items for this champion and role
- Returned dictionary format:
{
"frequent": {
"full": list,
"starters": list
},
"highest": {
"full": list,
"starters": list
}
}
"""
raise NotImplementedError
def get_skill_order(self, champion, role):
"""
Parameters:
- champion (dict): dict returned from get_champions
- OPTIONAL: role (str): role to get skill order from, if self.roles == True
Returns:
- Dictionary with skill order for this champion and role
- Returned dictionary format:
        {
            "frequent": list,
            "highest": list
        }
        - List example (length 18): ["Q", "W", "E", ..., "R", "E", "E"]
"""
raise NotImplementedError
def get_version(self):
"""
Returns:
- Current version of the source in string format, e.g. "10.14" or "2020.10.20"
"""
raise NotImplementedError
def get_item_sets(self, champion):
""" Gets item sets with items and skill order for every role (if supported) for a champion """
item_sets = []
# roles supported, create an item set per role
if self.roles:
for counter, role in enumerate(champion["roles"]):
try:
items = self.get_items(champion, role)
skill_order = self.get_skill_order(champion, role)
item_set = {
"role": role,
# add sort_rank if roles are supported, higher sort rank equals higher position in the item set list in-game
"sort_rank": 10 - counter,
"frequent": {
"full": items["frequent"]["full"],
"starters": items["frequent"]["starters"],
"skill_order": skill_order["frequent"]
},
"highest": {
"full": items["highest"]["full"],
"starters": items["highest"]["starters"],
"skill_order": skill_order["highest"]
}
}
item_sets.append(item_set)
except:
print(
f"ERROR: Build for {champion['display_name']} {role} not found on {self.name}")
# roles not supported, create only one item set
else:
try:
items = self.get_items(champion)
skill_order = self.get_skill_order(champion)
item_set = {
"frequent": {
"full": items["frequent"]["full"],
"starters": items["frequent"]["starters"],
"skill_order": skill_order["frequent"]
},
"highest": {
"full": items["highest"]["full"],
"starters": items["highest"]["starters"],
"skill_order": skill_order["highest"]
}
}
item_sets.append(item_set)
except:
print(
f"ERROR: Build for {champion['display_name']} not found on {self.name}")
return item_sets
def import_item_sets(self):
""" Imports all item sets for all champions and roles """
# first remove old item sets
self.delete_item_sets()
champions = self.get_champions()
version = self.get_version()
config.save(self.name, version)
for champion in champions:
print(
f"Importing {champion['display_name']}'s item sets from {self.name}...")
item_sets = self.get_item_sets(champion)
for item_set in item_sets:
files.save(champion, item_set, version, self.name, self.roles)
def delete_item_sets(self):
""" Deletes all item sets for all champions and roles generated by import_item_sets """
print(f"Deleting item sets from {self.name}")
config.save(self.name, None)
try:
champions = self.get_champions()
except:
print(
f"ERROR: Could not get the champion list from {self.name}. This likely happened because of a bad connection to {self.name}, or because {self.name} changed something on their site.")
return
for champion in champions:
files.delete(champion, self.name)
|
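# Illustrative sketch of a minimal Source subclass following the template above.
# The hard-coded return values only demonstrate the documented shapes; a real
# source would fetch them from a build site.
class DummySource(Source):
    def __init__(self):
        super().__init__(name="dummy", roles=False)

    def get_champions(self):
        return [{"name": "annie", "display_name": "Annie", "id": "1"}]

    def get_items(self, champion, role=None):
        items = {"full": ["1001"], "starters": ["1001"]}
        return {"frequent": items, "highest": items}

    def get_skill_order(self, champion, role=None):
        order = ["Q", "W", "E"] * 6  # 18 entries, one per level
        return {"frequent": order, "highest": order}

    def get_version(self):
        return "0.0.1"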
import subprocess
import sys
from abc import ABC
from collections import OrderedDict
from hashlib import sha256
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
from packaging import version
import base64
import fnmatch
import functools
import inspect
import json
import os
import pytest
import time
import threading
import traceback
from .utils import ShellCommand, namespace_manifest
from ambassador.utils import parse_bool
from yaml.scanner import ScannerError as YAMLScanError
from multi import multi
from .parser import dump, load, Tag
from tests.manifests import httpbin_manifests, websocket_echo_server_manifests, cleartext_host_manifest
from tests.kubeutils import apply_kube_artifacts
import yaml as pyyaml
pyyaml_loader: Any = pyyaml.SafeLoader
pyyaml_dumper: Any = pyyaml.SafeDumper
try:
pyyaml_loader = pyyaml.CSafeLoader
pyyaml_dumper = pyyaml.CSafeDumper
except AttributeError:
pass
# Run mode can be local (don't do any Envoy stuff), envoy (only do Envoy stuff),
# or all (allow both). Default is all.
RUN_MODE = os.environ.get('KAT_RUN_MODE', 'all').lower()
# We may have a SOURCE_ROOT override from the environment
SOURCE_ROOT = os.environ.get('SOURCE_ROOT', '')
# Figure out if we're running in Edge Stack or what.
if os.path.exists("/buildroot/apro.version"):
# We let /buildroot/apro.version remain a source of truth to minimize the
# chances that we break anything that currently uses the builder shell.
EDGE_STACK = True
else:
# If we do not see concrete evidence of running in an apro builder shell,
# then try to decide if the user wants us to assume we're running Edge Stack
# from an environment variable. And if that isn't set, just assume OSS.
EDGE_STACK = parse_bool(os.environ.get('EDGE_STACK', 'false'))
if EDGE_STACK:
# Hey look, we're running inside Edge Stack!
print("RUNNING IN EDGE STACK")
# SOURCE_ROOT is optional, and we assume that if it isn't set, the user is
# running in a build shell and we should look for sources in the usual location.
if not SOURCE_ROOT:
SOURCE_ROOT = "/buildroot/apro"
GOLD_ROOT = os.path.join(SOURCE_ROOT, "tests/pytest/gold")
MANIFEST_ROOT = os.path.join(SOURCE_ROOT, "tests/pytest/manifests")
else:
# We're either not running in Edge Stack or we're not sure, so just assume OSS.
print("RUNNING IN OSS")
# SOURCE_ROOT is optional, and we assume that if it isn't set, the user is
# running in a build shell and we should look for sources in the usual location.
if not SOURCE_ROOT:
SOURCE_ROOT = "/buildroot/ambassador"
GOLD_ROOT = os.path.join(SOURCE_ROOT, "python/tests/gold")
MANIFEST_ROOT = os.path.join(SOURCE_ROOT, "python/tests/integration/manifests")
def load_manifest(manifest_name: str) -> str:
return open(os.path.join(MANIFEST_ROOT, f"{manifest_name.lower()}.yaml"), "r").read()
class TestImage:
def __init__(self, *args, **kwargs) -> None:
self.images: Dict[str, str] = {}
default_registry = os.environ.get('TEST_SERVICE_REGISTRY', 'docker.io/datawire/test_services')
default_version = os.environ.get('TEST_SERVICE_VERSION', '0.0.3')
for svc in ['auth', 'auth-tls', 'ratelimit', 'shadow', 'stats']:
key = svc.replace('-', '_').upper()
image = os.environ.get(f'TEST_SERVICE_{key}', f'{default_registry}:test-{svc}-{default_version}')
self.images[svc] = image
def __getitem__(self, key: str) -> str:
return self.images[key]
GLOBAL_TEST_IMAGE = TestImage()
def run(cmd):
status = os.system(cmd)
if status != 0:
raise RuntimeError("command failed[%s]: %s" % (status, cmd))
def kube_version_json():
result = subprocess.Popen('kubectl version -o json', stdout=subprocess.PIPE, shell=True)
stdout, _ = result.communicate()
return json.loads(stdout)
def strip_version(ver: str):
"""
strip_version is needed to strip a major/minor version of non-standard symbols. For example, when working with GKE,
`kubectl version` returns a minor version like '14+', which is not semver or any standard version, for that matter.
So we handle exceptions like that here.
:param ver: version string
:return: stripped version
"""
try:
return int(ver)
except ValueError as e:
# GKE returns weird versions with '+' in the end
if ver[-1] == '+':
return int(ver[:-1])
# If we still have not taken care of this, raise the error
raise ValueError(e)
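# For illustration: strip_version("14") and strip_version("14+") both return 14;
# the latter is the GKE-style minor version mentioned in the docstring above.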
def kube_server_version(version_json=None):
if not version_json:
version_json = kube_version_json()
server_json = version_json.get('serverVersion', {})
if server_json:
server_major = strip_version(server_json.get('major', None))
server_minor = strip_version(server_json.get('minor', None))
return f"{server_major}.{server_minor}"
else:
return None
def kube_client_version(version_json=None):
if not version_json:
version_json = kube_version_json()
client_json = version_json.get('clientVersion', {})
if client_json:
client_major = strip_version(client_json.get('major', None))
client_minor = strip_version(client_json.get('minor', None))
return f"{client_major}.{client_minor}"
else:
return None
def is_kube_server_client_compatible(debug_desc: str, requested_server_version: str, requested_client_version: str) -> bool:
is_cluster_compatible = True
kube_json = kube_version_json()
server_version = kube_server_version(kube_json)
client_version = kube_client_version(kube_json)
if server_version:
if version.parse(server_version) < version.parse(requested_server_version):
print(f"server version {server_version} is incompatible with {debug_desc}")
is_cluster_compatible = False
else:
print(f"server version {server_version} is compatible with {debug_desc}")
else:
print("could not determine Kubernetes server version?")
if client_version:
if version.parse(client_version) < version.parse(requested_client_version):
print(f"client version {client_version} is incompatible with {debug_desc}")
is_cluster_compatible = False
else:
print(f"client version {client_version} is compatible with {debug_desc}")
else:
print("could not determine Kubernetes client version?")
return is_cluster_compatible
def is_ingress_class_compatible() -> bool:
return is_kube_server_client_compatible('IngressClass', '1.18', '1.14')
def is_knative_compatible() -> bool:
# Skip KNative immediately for run_mode local.
if RUN_MODE == 'local':
return False
return is_kube_server_client_compatible('Knative', '1.14', '1.14')
def get_digest(data: str) -> str:
s = sha256()
s.update(data.encode('utf-8'))
return s.hexdigest()
def has_changed(data: str, path: str) -> Tuple[bool, str]:
cur_size = len(data.strip()) if data else 0
cur_hash = get_digest(data)
# print(f'has_changed: data size {cur_size} - {cur_hash}')
prev_data = None
changed = True
reason = f'no {path} present'
if os.path.exists(path):
with open(path) as f:
prev_data = f.read()
prev_size = len(prev_data.strip()) if prev_data else 0
prev_hash = None
if prev_data:
prev_hash = get_digest(prev_data)
# print(f'has_changed: prev_data size {prev_size} - {prev_hash}')
if data:
if data != prev_data:
reason = f'different data in {path}'
else:
changed = False
reason = f'same data in {path}'
if changed:
# print(f'has_changed: updating {path}')
with open(path, "w") as f:
f.write(data)
# For now, we always have to reapply with split testing.
if not changed:
changed = True
reason = 'always reapply for split test'
return (changed, reason)
COUNTERS: Dict[Type, int] = {}
SANITIZATIONS = OrderedDict((
("://", "SCHEME"),
(":", "COLON"),
(" ", "SPACE"),
("/t", "TAB"),
(".", "DOT"),
("?", "QMARK"),
("/", "SLASH"),
))
def sanitize(obj):
if isinstance(obj, str):
for k, v in SANITIZATIONS.items():
if obj.startswith(k):
obj = obj.replace(k, v + "-")
elif obj.endswith(k):
obj = obj.replace(k, "-" + v)
else:
obj = obj.replace(k, "-" + v + "-")
return obj
elif isinstance(obj, dict):
if 'value' in obj:
return obj['value']
else:
return "-".join("%s-%s" % (sanitize(k), sanitize(v)) for k, v in sorted(obj.items()))
else:
cls = obj.__class__
count = COUNTERS.get(cls, 0)
COUNTERS[cls] = count + 1
if count == 0:
return cls.__name__
else:
return "%s-%s" % (cls.__name__, count)
def abstract_test(cls: type):
cls.abstract_test = True # type: ignore
return cls
def get_nodes(node_type: type):
if not inspect.isabstract(node_type) and not node_type.__dict__.get("abstract_test", False):
yield node_type
for sc in node_type.__subclasses__():
if not sc.__dict__.get("skip_variant", False):
for ssc in get_nodes(sc):
yield ssc
def variants(cls, *args, **kwargs) -> Tuple[Any]:
return tuple(a for n in get_nodes(cls) for a in n.variants(*args, **kwargs)) # type: ignore
class Name(str):
def __new__(cls, value, namespace=None):
s = super().__new__(cls, value)
s.namespace = namespace
return s
@property
def k8s(self):
return self.replace(".", "-").lower()
@property
def fqdn(self):
r = self.k8s
if self.namespace and (self.namespace != 'default'):
r += '.' + self.namespace
return r
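# For illustration: Name("my.test", namespace="kat") has .k8s == "my-test" and
# .fqdn == "my-test.kat"; with namespace "default" the namespace suffix is omitted.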
class NodeLocal(threading.local):
def __init__(self):
self.current = None
_local = NodeLocal()
def _argprocess(o):
if isinstance(o, Node):
return o.clone()
elif isinstance(o, tuple):
return tuple(_argprocess(i) for i in o)
elif isinstance(o, list):
return [_argprocess(i) for i in o]
elif isinstance(o, dict):
return {_argprocess(k): _argprocess(v) for k, v in o.items()}
else:
return o
class Node(ABC):
parent: 'Node'
children: List['Node']
name: Name
ambassador_id: str
namespace: str = None # type: ignore
is_ambassador = False
local_result: Optional[Dict[str, str]] = None
def __init__(self, *args, **kwargs) -> None:
# If self.skip is set to true, this node is skipped
self.skip_node = False
self.xfail = None
self.test_image = GLOBAL_TEST_IMAGE
name = kwargs.pop("name", None)
if 'namespace' in kwargs:
self.namespace = kwargs.pop('namespace', None)
_clone: Node = kwargs.pop("_clone", None)
if _clone:
args = _clone._args # type: ignore
kwargs = _clone._kwargs # type: ignore
if name:
name = Name("-".join((_clone.name, name)))
else:
name = _clone.name
self._args = _clone._args # type: ignore
self._kwargs = _clone._kwargs # type: ignore
else:
self._args = args
self._kwargs = kwargs
if name:
name = Name("-".join((self.__class__.__name__, name)))
else:
name = Name(self.__class__.__name__)
saved = _local.current
self.parent = _local.current
if not self.namespace:
if self.parent and self.parent.namespace:
# We have no namespace assigned, but our parent does have a namespace
# defined. Copy the namespace down from our parent.
self.namespace = self.parent.namespace
else:
self.namespace = "default"
_local.current = self
self.children = []
if self.parent is not None:
self.parent.children.append(self)
try:
init = getattr(self, "init", lambda *a, **kw: None)
init(*_argprocess(args), **_argprocess(kwargs))
finally:
_local.current = saved
self.name = Name(self.format(name or self.__class__.__name__))
names = {} # type: ignore
for c in self.children:
assert c.name not in names, ("test %s of type %s has duplicate children: %s of type %s, %s" %
(self.name, self.__class__.__name__, c.name, c.__class__.__name__,
names[c.name].__class__.__name__))
names[c.name] = c
def clone(self, name=None):
return self.__class__(_clone=self, name=name)
def find_local_result(self, stop_at_first_ambassador: bool=False) -> Optional[Dict[str, str]]:
test_name = self.format('{self.path.k8s}')
# print(f"{test_name} {type(self)} FIND_LOCAL_RESULT")
end_result: Optional[Dict[str, str]] = None
n: Optional[Node] = self
while n:
node_name = n.format('{self.path.k8s}')
parent = n.parent
parent_name = parent.format('{self.path.k8s}') if parent else "-none-"
end_result = getattr(n, 'local_result', None)
result_str = end_result['result'] if end_result else '-none-'
# print(f"{test_name}: {"ambassador" if n.is_ambassador else "node"} {node_name}, parent {parent_name}, local_result = {result_str}")
if end_result is not None:
break
if n.is_ambassador and stop_at_first_ambassador:
# This is an Ambassador: don't continue past it.
break
n = n.parent
return end_result
def check_local(self, gold_root: str, k8s_yaml_path: str) -> Tuple[bool, bool]:
testname = self.format('{self.path.k8s}')
if self.xfail:
# XFail early -- but still return True, True so that we don't try to run Envoy on it.
self.local_result = {
'result': 'xfail',
'reason': self.xfail
}
# print(f"==== XFAIL: {testname} local: {self.xfail}")
return (True, True)
if not self.is_ambassador:
# print(f"{testname} ({type(self)}) is not an Ambassador")
return (False, False)
if not self.ambassador_id:
print(f"{testname} ({type(self)}) is an Ambassador but has no ambassador_id?")
return (False, False)
ambassador_namespace = getattr(self, 'namespace', 'default')
ambassador_single_namespace = getattr(self, 'single_namespace', False)
no_local_mode: bool = getattr(self, 'no_local_mode', False)
skip_local_reason: Optional[str] = getattr(self, 'skip_local_instead_of_xfail', None)
# print(f"{testname}: ns {ambassador_namespace} ({"single" if ambassador_single_namespace else "multi"})")
gold_path = os.path.join(gold_root, testname)
if os.path.isdir(gold_path) and not no_local_mode:
# print(f"==== {testname} running locally from {gold_path}")
# Yeah, I know, brutal hack.
#
# XXX (Flynn) This code isn't used and we don't know if it works. If you try
# it, bring it up-to-date with the environment created in abstract_tests.py
envstuff = ["env", f"AMBASSADOR_NAMESPACE={ambassador_namespace}"]
cmd = ["mockery", "--debug", k8s_yaml_path,
"-w", "python /ambassador/watch_hook.py",
"--kat", self.ambassador_id,
"--diff", gold_path]
if ambassador_single_namespace:
envstuff.append("AMBASSADOR_SINGLE_NAMESPACE=yes")
cmd += ["-n", ambassador_namespace]
if not getattr(self, 'allow_edge_stack_redirect', False):
envstuff.append("AMBASSADOR_NO_TLS_REDIRECT=yes")
cmd = envstuff + cmd
w = ShellCommand(*cmd)
if w.status():
print(f"==== GOOD: {testname} local against {gold_path}")
self.local_result = {'result': "pass"}
else:
print(f"==== FAIL: {testname} local against {gold_path}")
self.local_result = {
'result': 'fail',
'stdout': w.stdout,
'stderr': w.stderr
}
return (True, True)
else:
# If we have a local reason, has a parent already subsumed us?
#
# XXX The way KAT works, our parent will have always run earlier than us, so
# it's not clear if we can ever not have been subsumed.
if skip_local_reason:
local_result = self.find_local_result()
if local_result:
self.local_result = {
'result': 'skip',
'reason': f"subsumed by {skip_local_reason} -- {local_result["result"]}"
}
# print(f"==== {self.local_result["result"].upper()} {testname} {self.local_result["reason"]}")
return (True, True)
# OK, we weren't already subsumed. If we're in local mode, we'll skip or xfail
# depending on skip_local_reason.
if RUN_MODE == "local":
if skip_local_reason:
self.local_result = {
'result': 'skip',
# 'reason': f"subsumed by {skip_local_reason} without result in local mode"
}
print(f"==== {self.local_result["result"].upper()} {testname} {self.local_result["reason"]}")
return (True, True)
else:
# XFail -- but still return True, True so that we don't try to run Envoy on it.
self.local_result = {
'result': 'xfail',
'reason': f"missing local cache {gold_path}"
}
# print(f"==== {self.local_result["result"].upper()} {testname} {self.local_result["reason"]}")
return (True, True)
# If here, we're not in local mode. Allow Envoy to run.
self.local_result = None
# print(f"==== IGNORE {testname} no local cache")
return (True, False)
def has_local_result(self) -> bool:
return bool(self.local_result)
@classmethod
def variants(cls):
yield cls()
@property
def path(self) -> str:
return self.relpath(None)
def relpath(self, ancestor):
if self.parent is ancestor:
return Name(self.name, namespace=self.namespace)
else:
return Name(self.parent.relpath(ancestor) + "." + self.name, namespace=self.namespace)
@property
def traversal(self):
yield self
for c in self.children:
for d in c.traversal:
yield d
@property
def ancestors(self):
yield self
if self.parent is not None:
for a in self.parent.ancestors:
yield a
@property
def depth(self):
if self.parent is None:
return 0
else:
return self.parent.depth + 1
def format(self, st, **kwargs):
serviceAccountExtra = ''
if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
serviceAccountExtra = """
imagePullSecrets:
- name: dev-image-pull-secret
"""
return st.format(self=self, environ=os.environ, serviceAccountExtra=serviceAccountExtra, **kwargs)
def get_fqdn(self, name: str) -> str:
if self.namespace and (self.namespace != 'default'):
return f'{name}.{self.namespace}'
else:
return name
@functools.lru_cache()
def matches(self, pattern):
if fnmatch.fnmatch(self.path, "*%s*" % pattern):
return True
for c in self.children:
if c.matches(pattern):
return True
return False
def requirements(self):
yield from ()
# log_kube_artifacts writes various logs about our underlying Kubernetes objects to
# a place where the artifact publisher can find them. See run-tests.sh.
def log_kube_artifacts(self):
if not getattr(self, 'already_logged', False):
self.already_logged = True
print(f'logging kube artifacts for {self.path.k8s}')
sys.stdout.flush()
DEV = os.environ.get("AMBASSADOR_DEV", "0").lower() in ("1", "yes", "true")
log_path = f'/tmp/kat-logs-{self.path.k8s}'
if DEV:
os.system(f'docker logs {self.path.k8s} >{log_path} 2>&1')
else:
os.system(f'kubectl logs -n {self.namespace} {self.path.k8s} >{log_path} 2>&1')
event_path = f'/tmp/kat-events-{self.path.k8s}'
fs1 = f'involvedObject.name={self.path.k8s}'
fs2 = f'involvedObject.namespace={self.namespace}'
cmd = f'kubectl get events -o json --field-selector "{fs1}" --field-selector "{fs2}"'
os.system(f'echo ==== "{cmd}" >{event_path}')
os.system(f'{cmd} >>{event_path} 2>&1')
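# Test is the Node subclass that pytest actually drives. Subclasses override
# config() (Ambassador configuration to attach to a Service annotation), manifests()
# (raw Kubernetes YAML to apply), queries() (requests for the kat client to make),
# and check() (assertions over self.results).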
class Test(Node):
results: Sequence['Result']
__test__ = False
def config(self):
yield from ()
def manifests(self):
return None
def queries(self):
yield from ()
def check(self):
pass
def handle_local_result(self) -> bool:
test_name = self.format('{self.path.k8s}')
# print(f"{test_name} {type(self)} HANDLE_LOCAL_RESULT")
end_result = self.find_local_result()
if end_result is not None:
result_type = end_result['result']
if result_type == 'pass':
pass
elif result_type == 'skip':
pytest.skip(end_result['reason'])
elif result_type == 'fail':
sys.stdout.write(end_result['stdout'])
if os.environ.get('KAT_VERBOSE', None):
sys.stderr.write(end_result['stderr'])
pytest.fail("local check failed")
elif result_type == 'xfail':
pytest.xfail(end_result['reason'])
return True
return False
@property
def ambassador_id(self):
if self.parent is None:
return self.name.k8s
else:
return self.parent.ambassador_id
@multi
def encode_body(obj):
yield type(obj)
@encode_body.when(bytes) # type: ignore
def encode_body(b):
return base64.encodebytes(b).decode("utf-8")
@encode_body.when(str) # type: ignore
def encode_body(s):
return encode_body(s.encode("utf-8"))
@encode_body.default # type: ignore
def encode_body(obj):
return encode_body(json.dumps(obj))
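# Query describes a single request for the Go kat client to make: URL, method,
# headers, body, TLS constraints, expected status, and so on. as_json() produces the
# wire format handed to the client; Result (below) wraps what comes back.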
class Query:
def __init__(self, url, expected=None, method="GET", headers=None, messages=None, insecure=False, skip=None,
xfail=None, phase=1, debug=False, sni=False, error=None, client_crt=None, client_key=None,
client_cert_required=False, ca_cert=None, grpc_type=None, cookies=None, ignore_result=False, body=None,
minTLSv="", maxTLSv="", cipherSuites=[], ecdhCurves=[]):
self.method = method
self.url = url
self.headers = headers
self.body = body
self.cookies = cookies
self.messages = messages
self.insecure = insecure
self.minTLSv = minTLSv
self.maxTLSv = maxTLSv
self.cipherSuites = cipherSuites
self.ecdhCurves = ecdhCurves
if expected is None:
if url.lower().startswith("ws:"):
self.expected = 101
else:
self.expected = 200
else:
self.expected = expected
self.skip = skip
self.xfail = xfail
self.ignore_result = ignore_result
self.phase = phase
self.parent = None
self.result = None
self.debug = debug
self.sni = sni
self.error = error
self.client_cert_required = client_cert_required
self.client_cert = client_crt
self.client_key = client_key
self.ca_cert = ca_cert
assert grpc_type in (None, "real", "bridge", "web"), grpc_type
self.grpc_type = grpc_type
def as_json(self):
result = {
"test": self.parent.path, "id": id(self),
"url": self.url,
"insecure": self.insecure
}
if self.sni:
result["sni"] = self.sni
if self.method:
result["method"] = self.method
if self.maxTLSv:
result["maxTLSv"] = self.maxTLSv
if self.minTLSv:
result["minTLSv"] = self.minTLSv
if self.cipherSuites:
result["cipherSuites"] = self.cipherSuites
if self.ecdhCurves:
result["ecdhCurves"] = self.ecdhCurves
if self.headers:
result["headers"] = self.headers
if self.body is not None:
result["body"] = encode_body(self.body)
if self.cookies:
result["cookies"] = self.cookies
if self.messages is not None:
result["messages"] = self.messages
if self.client_cert is not None:
result["client_cert"] = self.client_cert
if self.client_key is not None:
result["client_key"] = self.client_key
if self.ca_cert is not None:
result["ca_cert"] = self.ca_cert
if self.client_cert_required:
result["client_cert_required"] = self.client_cert_required
if self.grpc_type:
result["grpc_type"] = self.grpc_type
return result
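# Result pairs a Query with the client's response: status, headers, the body
# (base64-encoded in transit), any error string, and, when the backend echoed
# structured JSON, a BackendResult describing what the upstream test service saw.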
class Result:
def __init__(self, query, res):
self.query = query
query.result = self
self.parent = query.parent
self.status = res.get("status")
self.headers = res.get("headers")
self.messages = res.get("messages")
self.tls = res.get("tls")
if "body" in res:
self.body = base64.decodebytes(bytes(res["body"], "ASCII"))
else:
self.body = None
self.text = res.get("text")
self.json = res.get("json")
self.backend = BackendResult(self.json) if self.json else None
self.error = res.get("error")
def __repr__(self):
return str(self.as_dict())
def check(self):
if self.query.skip:
pytest.skip(self.query.skip)
if self.query.xfail:
pytest.xfail(self.query.xfail)
if not self.query.ignore_result:
if self.query.error is not None:
found = False
errors = self.query.error
if isinstance(self.query.error, str):
errors = [ self.query.error ]
if self.error is not None:
for error in errors:
if error in self.error:
found = True
break
assert found, "{}: expected error to contain any of {}; got {} instead".format(
self.query.url, ", ".join([ "'%s'" % x for x in errors ]),
("'%s'" % self.error) if self.error else "no error"
)
else:
if self.query.expected != self.status:
self.parent.log_kube_artifacts()
assert self.query.expected == self.status, \
"%s: expected status code %s, got %s instead with error %s" % (
self.query.url, self.query.expected, self.status, self.error)
def as_dict(self) -> Dict[str, Any]:
od = {
'query': self.query.as_json(),
'status': self.status,
'error': self.error,
'headers': self.headers,
}
if self.backend and self.backend.name:
od['backend'] = self.backend.as_dict()
else:
od['json'] = self.json
od['text'] = self.text
return od
# 'RENDERED': {
# 'client': {
# 'request': self.query.as_json(),
# 'response': {
# 'status': self.status,
# 'error': self.error,
# 'headers': self.headers
# }
# },
# 'upstream': {
# 'name': self.backend.name,
# 'request': {
# 'headers': self.backend.request.headers,
# 'url': {
# 'fragment': self.backend.request.url.fragment,
# 'host': self.backend.request.url.host,
# 'opaque': self.backend.request.url.opaque,
# 'path': self.backend.request.url.path,
# 'query': self.backend.request.url.query,
# 'rawQuery': self.backend.request.url.rawQuery,
# 'scheme': self.backend.request.url.scheme,
# 'username': self.backend.request.url.username,
# 'password': self.backend.request.url.password,
# },
# 'host': self.backend.request.host,
# 'tls': {
# 'enabled': self.backend.request.tls.enabled,
# 'server_name': self.backend.request.tls.server_name,
# 'version': self.backend.request.tls.version,
# 'negotiated_protocol': self.backend.request.tls.negotiated_protocol,
# },
# },
# 'response': {
# 'headers': self.backend.response.headers
# }
# }
# }
class BackendURL:
def __init__(self, fragment=None, host=None, opaque=None, path=None, query=None, rawQuery=None,
scheme=None, username=None, password=None):
self.fragment = fragment
self.host = host
self.opaque = opaque
self.path = path
self.query = query
self.rawQuery = rawQuery
self.scheme = scheme
self.username = username
self.password = password
def as_dict(self) -> Dict['str', Any]:
return {
'fragment': self.fragment,
'host': self.host,
'opaque': self.opaque,
'path': self.path,
'query': self.query,
'rawQuery': self.rawQuery,
'scheme': self.scheme,
'username': self.username,
'password': self.password,
}
class BackendRequest:
def __init__(self, req):
self.url = BackendURL(**req.get("url"))
self.headers = req.get("headers", {})
self.host = req.get("host", None)
self.tls = BackendTLS(req.get("tls", {}))
def as_dict(self) -> Dict[str, Any]:
od = {
'headers': self.headers,
'host': self.host,
}
if self.url:
od['url'] = self.url.as_dict()
if self.tls:
od['tls'] = self.tls.as_dict()
return od
class BackendTLS:
def __init__(self, tls):
self.enabled = tls["enabled"]
self.server_name = tls.get("server-name")
self.version = tls.get("version")
self.negotiated_protocol = tls.get("negotiated-protocol")
self.negotiated_protocol_version = tls.get("negotiated-protocol-version")
def as_dict(self) -> Dict[str, Any]:
return {
'enabled': self.enabled,
'server_name': self.server_name,
'version': self.version,
'negotiated_protocol': self.negotiated_protocol,
'negotiated_protocol_version': self.negotiated_protocol_version,
}
class BackendResponse:
def __init__(self, resp):
self.headers = resp.get("headers", {})
def as_dict(self) -> Dict[str, Any]:
return { 'headers': self.headers }
def dictify(obj):
if getattr(obj, "as_dict", None):
return obj.as_dict()
else:
return obj
class BackendResult:
def __init__(self, bres):
self.name = "raw"
self.request = None
self.response = bres
if isinstance(bres, dict):
self.name = bres.get("backend")
self.request = BackendRequest(bres["request"]) if "request" in bres else None
self.response = BackendResponse(bres["response"]) if "response" in bres else None
def as_dict(self) -> Dict[str, Any]:
od = {
'name': self.name
}
if self.request:
od['request'] = dictify(self.request)
if self.response:
od['response'] = dictify(self.response)
return od
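# label() stamps every manifest with a 'scope' label so that the later
# "kubectl apply --prune -l scope=..." and "kubectl delete ... -l scope=..." calls
# only touch objects created by this test run.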
def label(yaml, scope):
for obj in yaml:
md = obj["metadata"]
if "labels" not in md:
md["labels"] = {}
obj["metadata"]["labels"]["scope"] = scope
return yaml
CLIENT_GO = "kat_client"
def run_queries(name: str, queries: Sequence[Query]) -> Sequence[Result]:
jsonified = []
byid = {}
for q in queries:
jsonified.append(q.as_json())
byid[id(q)] = q
path_urls = f'/tmp/kat-client-{name}-urls.json'
path_results = f'/tmp/kat-client-{name}-results.json'
path_log = f'/tmp/kat-client-{name}.log'
with open(path_urls, 'w') as f:
json.dump(jsonified, f)
# run(f"{CLIENT_GO} -input {path_urls} -output {path_results} 2> {path_log}")
res = ShellCommand.run('Running queries',
f"kubectl exec -n default -i kat /work/kat_client < '{path_urls}' > '{path_results}' 2> '{path_log}'",
shell=True)
if not res:
ret = [Result(q, {"error":"Command execution error"}) for q in queries]
return ret
with open(path_results, 'r') as f:
content = f.read()
try:
json_results = json.loads(content)
except Exception as e:
ret = [Result(q, {"error":"Could not parse JSON content after running KAT queries"}) for q in queries]
return ret
results = []
for r in json_results:
res = r["result"]
q = byid[r["id"]]
results.append(Result(q, res))
return results
# yuck
DOCTEST = False
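# Superpod packs many simple HTTP backends into one pod per namespace, handing out
# one cleartext port (from 8080) and one TLS port (from 8443) per logical service.
# allocate() reserves the ports; get_manifest_list() renders the combined pod with
# one BACKEND_<port> environment variable per service.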
class Superpod:
def __init__(self, namespace: str) -> None:
self.namespace = namespace
self.next_clear = 8080
self.next_tls = 8443
self.service_names: Dict[int, str] = {}
self.name = 'superpod-%s' % (self.namespace or 'default')
def allocate(self, service_name) -> List[int]:
ports = [ self.next_clear, self.next_tls ]
self.service_names[self.next_clear] = service_name
self.service_names[self.next_tls] = service_name
self.next_clear += 1
self.next_tls += 1
return ports
def get_manifest_list(self) -> List[Dict[str, Any]]:
SUPERPOD_POD = load_manifest("superpod_pod")
manifest = load('superpod', SUPERPOD_POD.format(environ=os.environ), Tag.MAPPING)
assert len(manifest) == 1, "SUPERPOD manifest must have exactly one object"
m = manifest[0]
template = m['spec']['template']
ports: List[Dict[str, int]] = []
envs: List[Dict[str, Union[str, int]]] = template['spec']['containers'][0]['env']
for p in sorted(self.service_names.keys()):
ports.append({ 'containerPort': p })
envs.append({ 'name': f'BACKEND_{p}', 'value': self.service_names[p] })
template['spec']['containers'][0]['ports'] = ports
if 'metadata' not in m:
m['metadata'] = {}
metadata = m['metadata']
metadata['name'] = self.name
m['spec']['selector']['matchLabels']['backend'] = self.name
template['metadata']['labels']['backend'] = self.name
if self.namespace:
# Fix up the namespace.
if 'namespace' not in metadata:
metadata['namespace'] = self.namespace
return list(manifest)
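# Runner glues the Node/Test tree into pytest: it expands variants(), builds a single
# parametrized test function over all Test nodes, applies the Kubernetes manifests
# once per session, waits for requirements, runs the queries in phases, and then lets
# each Test check() its results.
#
# A rough usage sketch (the class name and URL are illustrative only, not defined in
# this file):
#
#     class ExampleTest(Test):                        # hypothetical subclass
#         def queries(self):
#             yield Query("http://example-service/")  # made-up URL
#         def check(self):
#             assert self.results[0].status == 200
#
#     main = Runner(ExampleTest)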
class Runner:
def __init__(self, *classes, scope=None):
self.scope = scope or "-".join(c.__name__ for c in classes)
self.roots = tuple(v for c in classes for v in variants(c))
self.nodes = [n for r in self.roots for n in r.traversal if not n.skip_node]
self.tests = [n for n in self.nodes if isinstance(n, Test)]
self.ids = [t.path for t in self.tests]
self.done = False
self.skip_nonlocal_tests = False
self.ids_to_strip: Dict[str, bool] = {}
self.names_to_ignore: Dict[str, bool] = {}
@pytest.mark.parametrize("t", self.tests, ids=self.ids)
def test(request, capsys, t):
if t.xfail:
pytest.xfail(t.xfail)
else:
selected = set(item.callspec.getparam('t') for item in request.session.items if item.function == test)
with capsys.disabled():
self.setup(selected)
if not t.handle_local_result():
# XXX: should aggregate the result of url checks
i = 0
for r in t.results:
try:
r.check()
except AssertionError as e:
# Add some context so that you can tell which query is failing.
e.args = (f"query[{i}]: {e.args[0]}", *e.args[1:])
raise
i += 1
t.check()
self.__func__ = test
self.__test__ = True
def __call__(self):
assert False, "this is here for py.test discovery purposes only"
def setup(self, selected):
if not self.done:
if not DOCTEST:
print()
expanded_up = set(selected)
for s in selected:
for n in s.ancestors:
if not n.xfail:
expanded_up.add(n)
expanded = set(expanded_up)
for s in selected:
for n in s.traversal:
if not n.xfail:
expanded.add(n)
try:
self._setup_k8s(expanded)
if self.skip_nonlocal_tests:
self.done = True
return
for t in self.tests:
if t.has_local_result():
# print(f"{t.name}: SKIP due to local result")
continue
if t in expanded_up:
pre_query: Callable = getattr(t, "pre_query", None)
if pre_query:
pre_query()
self._query(expanded_up)
except:
traceback.print_exc()
pytest.exit("setup failed")
finally:
self.done = True
def get_manifests_and_namespace(self, selected) -> Tuple[Any, str]:
manifests: OrderedDict[Any, list] = OrderedDict() # type: ignore
superpods: Dict[str, Superpod] = {}
for n in (n for n in self.nodes if n in selected and not n.xfail):
manifest = None
nsp = None
ambassador_id = None
# print('manifesting for {n.path}')
# Walk up the parent chain to find our namespace and ambassador_id.
cur = n
while cur:
if not nsp:
nsp = getattr(cur, 'namespace', None)
# print(f'... {cur.name} has namespace {nsp}')
if not ambassador_id:
ambassador_id = getattr(cur, 'ambassador_id', None)
# print(f'... {cur.name} has ambassador_id {ambassador_id}')
if nsp and ambassador_id:
# print(f'... good for namespace and ambassador_id')
break
cur = cur.parent
# OK. Does this node want to use a superpod?
if getattr(n, 'use_superpod', False):
# Yup. OK. Do we already have a superpod for this namespace?
superpod = superpods.get(nsp, None) # type: ignore
if not superpod:
# We don't have one, so we need to create one.
superpod = Superpod(nsp) # type: ignore
superpods[nsp] = superpod # type: ignore
# print(f'superpodifying {n.name}')
# Next up: use the BACKEND_SERVICE manifest as a template...
BACKEND_SERVICE = load_manifest("backend_service")
yaml = n.format(BACKEND_SERVICE)
manifest = load(n.path, yaml, Tag.MAPPING)
assert len(manifest) == 1, "BACKEND_SERVICE manifest must have exactly one object"
m = manifest[0]
# Update the manifest's selector...
m['spec']['selector']['backend'] = superpod.name
# ...and labels if needed...
if ambassador_id:
m['metadata']['labels'] = { 'kat-ambassador-id': ambassador_id }
# ...and target ports.
superpod_ports = superpod.allocate(n.path.k8s)
m['spec']['ports'][0]['targetPort'] = superpod_ports[0]
m['spec']['ports'][1]['targetPort'] = superpod_ports[1]
else:
# The non-superpod case...
yaml = n.manifests()
if yaml is not None:
add_cleartext_host = getattr(n, 'edge_stack_cleartext_host', False)
is_plain_test = n.path.k8s.startswith("plain-")
if EDGE_STACK and n.is_ambassador and add_cleartext_host and not is_plain_test:
# print(f"{n.path.k8s} adding Host")
host_yaml = cleartext_host_manifest % nsp
yaml += host_yaml
yaml = n.format(yaml)
try:
manifest = load(n.path, yaml, Tag.MAPPING)
except Exception as e:
print(f'parse failure! {e}')
print(yaml)
if manifest:
# print(manifest)
# Make sure namespaces and labels are properly set.
for m in manifest:
if 'metadata' not in m:
m['metadata'] = {}
metadata = m['metadata']
if 'labels' not in metadata:
metadata['labels'] = {}
if ambassador_id:
metadata['labels']['kat-ambassador-id'] = ambassador_id
if nsp:
if 'namespace' not in metadata:
metadata['namespace'] = nsp
# ...and, finally, save the manifest list.
manifests[n] = list(manifest)
for superpod in superpods.values():
manifests[superpod] = superpod.get_manifest_list()
return manifests, str(nsp)
def do_local_checks(self, selected, fname) -> bool:
if RUN_MODE == 'envoy':
print("Local mode not allowed, continuing to Envoy mode")
return False
all_valid = True
self.ids_to_strip = {}
# This feels a bit wrong?
self.names_to_ignore = {}
for n in (n for n in self.nodes if n in selected):
local_possible, local_checked = n.check_local(GOLD_ROOT, fname)
if local_possible:
if local_checked:
self.ids_to_strip[n.ambassador_id] = True
else:
all_valid = False
return all_valid
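# _setup_k8s assembles the manifests for the selected tests, merges each test's
# Ambassador config into the right Service annotation, first tries to satisfy tests
# from local gold results (skipping the cluster entirely if everything passes
# locally), then applies CRDs, the kat client pod, a dummy pod, and the scope-pruned
# test manifests before waiting on requirements.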
def _setup_k8s(self, selected):
# First up, get the full manifest and save it to disk.
manifests, namespace = self.get_manifests_and_namespace(selected)
configs = OrderedDict()
for n in (n for n in self.nodes if n in selected and not n.xfail):
configs[n] = []
for cfg in n.config():
if isinstance(cfg, str):
parent_config = configs[n.parent][0][1][0]
try:
for o in load(n.path, cfg, Tag.MAPPING):
parent_config.merge(o)
except YAMLScanError as e:
raise Exception("Parse Error: %s, input text:\n%s" % (e, cfg))
else:
target = cfg[0]
try:
yaml = load(n.path, cfg[1], Tag.MAPPING)
if n.ambassador_id:
for obj in yaml:
if "ambassador_id" not in obj:
obj["ambassador_id"] = [n.ambassador_id]
configs[n].append((target, yaml))
except YAMLScanError as e:
raise Exception("Parse Error: %s, input text:\n%s" % (e, cfg[1]))
for tgt_cfgs in configs.values():
for target, cfg in tgt_cfgs:
for t in target.traversal:
if t in manifests:
k8s_yaml = manifests[t]
for item in k8s_yaml:
if item["kind"].lower() == "service":
md = item["metadata"]
if "annotations" not in md:
md["annotations"] = {}
anns = md["annotations"]
if "getambassador.io/config" in anns:
anns["getambassador.io/config"] += "\n" + dump(cfg)
else:
anns["getambassador.io/config"] = dump(cfg)
break
else:
continue
break
else:
assert False, "no service found for target: %s" % target.path
yaml = ""
for v in manifests.values():
yaml += dump(label(v, self.scope)) + "\n"
fname = "/tmp/k8s-%s.yaml" % self.scope
self.applied_manifests = False
# Always apply at this point, since we're doing the multi-run thing.
manifest_changed, manifest_reason = has_changed(yaml, fname)
# OK. Try running local stuff.
if self.do_local_checks(selected, fname):
# Everything that could run locally did. Good enough.
self.skip_nonlocal_tests = True
return True
# Something didn't work out quite right.
print(f'Continuing with Kube tests...')
# print(f"ids_to_strip {self.ids_to_strip}")
# XXX It is _so stupid_ that we're reparsing the whole manifest here.
xxx_crap = pyyaml.load_all(open(fname, "r").read(), Loader=pyyaml_loader)
# Strip things we don't need from the manifest.
trimmed_manifests = []
trimmed = 0
kept = 0
for obj in xxx_crap:
keep = True
kind = '-nokind-'
name = '-noname-'
metadata: Dict[str, Any] = {}
labels: Dict[str, str] = {}
id_to_check: Optional[str] = None
if 'kind' in obj:
kind = obj['kind']
if 'metadata' in obj:
metadata = obj['metadata']
if 'name' in metadata:
name = metadata['name']
if 'labels' in metadata:
labels = metadata['labels']
if 'kat-ambassador-id' in labels:
id_to_check = labels['kat-ambassador-id']
# print(f"metadata {metadata} id_to_check {id_to_check} obj {obj}")
# Keep namespaces, just in case.
if kind == 'Namespace':
keep = True
else:
if id_to_check and (id_to_check in self.ids_to_strip):
keep = False
# print(f"...drop {kind} {name} (ID {id_to_check})")
self.names_to_ignore[name] = True
if keep:
kept += 1
trimmed_manifests.append(obj)
else:
trimmed += 1
if trimmed:
print(f"After trimming: kept {kept}, trimmed {trimmed}")
yaml = pyyaml.dump_all(trimmed_manifests, Dumper=pyyaml_dumper)
fname = "/tmp/k8s-%s-trimmed.yaml" % self.scope
self.applied_manifests = False
# Always apply at this point, since we're doing the multi-run thing.
manifest_changed, manifest_reason = has_changed(yaml, fname)
# First up: CRDs.
CRDS = load_manifest("crds")
input_crds = CRDS
if is_knative_compatible():
KNATIVE_SERVING_CRDS = load_manifest("knative_serving_crds")
input_crds += KNATIVE_SERVING_CRDS
# Strip out all of the schema validation, so that we can test with broken CRDs.
# (KAT isn't really in the business of testing to be sure that Kubernetes can
# run the K8s validators...)
crds = pyyaml.load_all(input_crds, Loader=pyyaml_loader)
# Collect the CRDs with schema validation stripped in stripped_crds, because
# pyyaml.load_all actually returns something more complex than a simple list,
# so it doesn't reserialize well after being modified.
stripped_crds = []
for crd in crds:
# Guard against empty CRDs (the KNative files have some blank lines at
# the end).
if not crd:
continue
crd["spec"].pop("validation", None)
stripped_crds.append(crd)
final_crds = pyyaml.dump_all(stripped_crds, Dumper=pyyaml_dumper)
changed, reason = has_changed(final_crds, "/tmp/k8s-CRDs.yaml")
if changed:
print(f'CRDS changed ({reason}), applying.')
if not ShellCommand.run_with_retry(
'Apply CRDs',
'kubectl', 'apply', '-f', '/tmp/k8s-CRDs.yaml',
retries=5, sleep_seconds=10):
raise RuntimeError("Failed applying CRDs")
tries_left = 10
while os.system('kubectl get crd mappings.getambassador.io > /dev/null 2>&1') != 0:
tries_left -= 1
if tries_left <= 0:
raise RuntimeError("CRDs never became available")
print("sleeping for CRDs... (%d)" % tries_left)
time.sleep(5)
else:
print(f'CRDS unchanged {reason}, skipping apply.')
# Next up: the KAT pod.
KAT_CLIENT_POD = load_manifest("kat_client_pod")
if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
KAT_CLIENT_POD = namespace_manifest("default") + KAT_CLIENT_POD
changed, reason = has_changed(KAT_CLIENT_POD.format(environ=os.environ), "/tmp/k8s-kat-pod.yaml")
if changed:
print(f'KAT pod definition changed ({reason}), applying')
if not ShellCommand.run_with_retry('Apply KAT pod',
'kubectl', 'apply', '-f' , '/tmp/k8s-kat-pod.yaml', '-n', 'default',
retries=5, sleep_seconds=10):
raise RuntimeError('Could not apply manifest for KAT pod')
tries_left = 3
time.sleep(1)
while True:
if ShellCommand.run("wait for KAT pod",
'kubectl', '-n', 'default', 'wait', '--timeout=30s', '--for=condition=Ready', 'pod', 'kat'):
print("KAT pod ready")
break
tries_left -= 1
if tries_left <= 0:
raise RuntimeError("KAT pod never became available")
print("sleeping for KAT pod... (%d)" % tries_left)
time.sleep(5)
else:
print(f'KAT pod definition unchanged {reason}, skipping apply.')
# Use a dummy pod to get around the !*@&#$!*@&# DockerHub rate limit.
# XXX Better: switch to GCR.
dummy_pod = load_manifest("dummy_pod")
if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
dummy_pod = namespace_manifest("default") + dummy_pod
changed, reason = has_changed(dummy_pod.format(environ=os.environ), "/tmp/k8s-dummy-pod.yaml")
if changed:
print(f'Dummy pod definition changed ({reason}), applying')
if not ShellCommand.run_with_retry('Apply dummy pod',
'kubectl', 'apply', '-f' , '/tmp/k8s-dummy-pod.yaml', '-n', 'default',
retries=5, sleep_seconds=10):
raise RuntimeError('Could not apply manifest for dummy pod')
tries_left = 3
time.sleep(1)
while True:
if ShellCommand.run("wait for dummy pod",
'kubectl', '-n', 'default', 'wait', '--timeout=30s', '--for=condition=Ready', 'pod', 'dummy-pod'):
print("Dummy pod ready")
break
tries_left -= 1
if tries_left <= 0:
raise RuntimeError("Dummy pod never became available")
print("sleeping for dummy pod... (%d)" % tries_left)
time.sleep(5)
else:
print(f'Dummy pod definition unchanged {reason}, skipping apply.')
# # Clear out old stuff.
if os.environ.get("DEV_CLEAN_K8S_RESOURCES", False):
print("Clearing cluster...")
ShellCommand.run('clear old Kubernetes namespaces',
'kubectl', 'delete', 'namespaces', '-l', 'scope=AmbassadorTest',
verbose=True)
ShellCommand.run('clear old Kubernetes pods etc.',
'kubectl', 'delete', 'all', '-l', 'scope=AmbassadorTest', '--all-namespaces',
verbose=True)
# XXX: better prune selector label
if manifest_changed:
print(f"manifest changed ({manifest_reason}), applying...")
if not ShellCommand.run_with_retry('Applying k8s manifests',
'kubectl', 'apply', '--prune', '-l', 'scope=%s' % self.scope, '-f', fname,
retries=5, sleep_seconds=10):
raise RuntimeError('Could not apply manifests')
self.applied_manifests = True
# Finally, install httpbin and the websocket-echo-server.
apply_kube_artifacts(namespace, httpbin_manifests)
apply_kube_artifacts(namespace, websocket_echo_server_manifests)
for n in self.nodes:
if n in selected and not n.xfail:
action = getattr(n, "post_manifest", None)
if action:
action()
self._wait(selected)
print("Waiting 5s after requirements, just because...")
time.sleep(5)
@staticmethod
def _req_str(kind, req) -> str:
printable = req
if kind == 'url':
printable = req.url
return printable
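# _wait polls two kinds of requirements, pods becoming Ready and URLs returning their
# expected status, with a retry delay capped at 10 seconds, for up to KAT_REQ_LIMIT
# seconds (default 600). If the limit is exceeded it logs kube artifacts for the
# holdouts and fails.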
def _wait(self, selected):
requirements = []
for node in selected:
if node.xfail:
continue
node_name = node.format("{self.path.k8s}")
ambassador_id = getattr(node, 'ambassador_id', None)
# print(f"{node_name} {ambassador_id}")
if node.has_local_result():
# print(f"{node_name} has local result, skipping")
continue
if ambassador_id and ambassador_id in self.ids_to_strip:
# print(f"{node_name} has id {ambassador_id}, stripping")
continue
if node_name in self.names_to_ignore:
# print(f"{node_name} marked to ignore, stripping")
continue
# if RUN_MODE != "envoy":
# print(f"{node_name}: including in nonlocal tests")
for kind, req in node.requirements():
# print(f"{node_name} add req ({node_name}, {kind}, {self._req_str(kind, req)})")
requirements.append((node, kind, req))
homogenous = {}
for node, kind, name in requirements:
if kind not in homogenous:
homogenous[kind] = []
homogenous[kind].append((node, name))
kinds = [ "pod", "url" ]
delay = 5
start = time.time()
limit = int(os.environ.get("KAT_REQ_LIMIT", "600"))
print("Starting requirements check (limit %ds)... " % limit)
holdouts = {}
while time.time() - start < limit:
for kind in kinds:
if kind not in homogenous:
continue
reqs = homogenous[kind]
print("Checking %s %s requirements... " % (len(reqs), kind), end="")
# print("\n")
# for node, req in reqs:
# print(f"...{node.format("{self.path.k8s}")} - {self._req_str(kind, req)}")
sys.stdout.flush()
is_ready, _holdouts = self._ready(kind, reqs)
if not is_ready:
holdouts[kind] = _holdouts
delay = int(min(delay*2, 10))
print("sleeping %ss..." % delay)
sys.stdout.flush()
time.sleep(delay)
else:
print("satisfied.")
sys.stdout.flush()
kinds.remove(kind)
break
else:
return
print("requirements not satisfied in %s seconds:" % limit)
for kind in kinds:
_holdouts = holdouts.get(kind, [])
if _holdouts:
print(f' {kind}:')
for node, text in _holdouts:
print(f' {node.path.k8s} ({text})')
node.log_kube_artifacts()
assert False, "requirements not satisfied in %s seconds" % limit
@multi
def _ready(self, kind, _):
return kind
@_ready.when("pod") # type: ignore
def _ready(self, _, requirements):
pods = self._pods(self.scope)
not_ready = []
for node, name in requirements:
if not pods.get(name, False):
not_ready.append((node, name))
if not_ready:
print("%d not ready (%s), " % (len(not_ready), name), end="")
return (False, not_ready)
return (True, None)
@_ready.when("url") # type: ignore
def _ready(self, _, requirements):
queries = []
for node, q in requirements:
q.insecure = True
q.parent = node
queries.append(q)
# print("URL Reqs:")
# print("\n".join([ f'{q.parent.name}: {q.url}' for q in queries ]))
result = run_queries("reqcheck", queries)
not_ready = [r for r in result if r.status != r.query.expected]
if not_ready:
first = not_ready[0]
print("%d not ready (%s: %s) " % (len(not_ready), first.query.url, first.status or first.error), end="")
return (False, [ (x.query.parent, "%s -- %s" % (x.query.url, x.status or x.error)) for x in not_ready ])
else:
return (True, None)
def _pods(self, scope=None):
scope_for_path = scope if scope else 'global'
label_for_scope = f'-l scope={scope}' if scope else ''
fname = f'/tmp/pods-{scope_for_path}.json'
if not ShellCommand.run_with_retry('Getting pods',
f'kubectl get pod {label_for_scope} --all-namespaces -o json > {fname}',
shell=True, retries=5, sleep_seconds=10):
raise RuntimeError('Could not get pods')
with open(fname) as f:
raw_pods = json.load(f)
pods = {}
for p in raw_pods["items"]:
name = p["metadata"]["name"]
cstats = p["status"].get("containerStatuses", [])
all_ready = True
for status in cstats:
ready = status.get('ready', False)
if not ready:
all_ready = False
# print(f"pod {name} is not ready: {status.get('state', 'unknown state')}")
pods[name] = all_ready
return pods
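# _query collects every pending Query from the selected tests (skipping tests that
# already have a local result or whose ambassador_id was stripped), groups them by
# phase, and runs each phase through run_queries(), sleeping KAT_PHASE_DELAY seconds
# (default 10) between phases so earlier configuration has time to settle.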
def _query(self, selected) -> None:
queries = []
for t in self.tests:
t_name = t.format('{self.path.k8s}')
if t in selected:
t.pending = []
t.queried = []
t.results = []
else:
continue
if t.has_local_result():
# print(f"{t_name}: SKIP QUERY due to local result")
continue
ambassador_id = getattr(t, 'ambassador_id', None)
if ambassador_id and ambassador_id in self.ids_to_strip:
# print(f"{t_name}: SKIP QUERY due to ambassador_id {ambassador_id}")
continue
# print(f"{t_name}: INCLUDE QUERY")
for q in t.queries():
q.parent = t
t.pending.append(q)
queries.append(q)
phases = sorted(set([q.phase for q in queries]))
first = True
for phase in phases:
if not first:
phase_delay = int(os.environ.get("KAT_PHASE_DELAY", 10))
print("Waiting for {} seconds before starting phase {}...".format(phase_delay, phase))
time.sleep(phase_delay)
first = False
phase_queries = [q for q in queries if q.phase == phase]
print("Querying %s urls in phase %s..." % (len(phase_queries), phase), end="")
sys.stdout.flush()
results = run_queries(f'phase{phase}', phase_queries)
print(" done.")
for r in results:
t = r.parent
t.queried.append(r.query)
if getattr(t, "debug", False) or getattr(r.query, "debug", False):
print("%s result: %s" % (t.name, json.dumps(r.as_dict(), sort_keys=True, indent=4)))
t.results.append(r)
t.pending.remove(r.query)
| import subprocess
import sys
from abc import ABC
from collections import OrderedDict
from hashlib import sha256
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
from packaging import version
import base64
import fnmatch
import functools
import inspect
import json
import os
import pytest
import time
import threading
import traceback
from .utils import ShellCommand, namespace_manifest
from ambassador.utils import parse_bool
from yaml.scanner import ScannerError as YAMLScanError
from multi import multi
from .parser import dump, load, Tag
from tests.manifests import httpbin_manifests, websocket_echo_server_manifests, cleartext_host_manifest
from tests.kubeutils import apply_kube_artifacts
import yaml as pyyaml
pyyaml_loader: Any = pyyaml.SafeLoader
pyyaml_dumper: Any = pyyaml.SafeDumper
try:
pyyaml_loader = pyyaml.CSafeLoader
pyyaml_dumper = pyyaml.CSafeDumper
except AttributeError:
pass
# Run mode can be local (don't do any Envoy stuff), envoy (only do Envoy stuff),
# or all (allow both). Default is all.
RUN_MODE = os.environ.get('KAT_RUN_MODE', 'all').lower()
# We may have a SOURCE_ROOT override from the environment
SOURCE_ROOT = os.environ.get('SOURCE_ROOT', '')
# Figure out if we're running in Edge Stack or what.
if os.path.exists("/buildroot/apro.version"):
# We let /buildroot/apro.version remain a source of truth to minimize the
# chances that we break anything that currently uses the builder shell.
EDGE_STACK = True
else:
# If we do not see concrete evidence of running in an apro builder shell,
# then try to decide if the user wants us to assume we're running Edge Stack
# from an environment variable. And if that isn't set, just assume OSS.
EDGE_STACK = parse_bool(os.environ.get('EDGE_STACK', 'false'))
if EDGE_STACK:
# Hey look, we're running inside Edge Stack!
print("RUNNING IN EDGE STACK")
# SOURCE_ROOT is optional, and we assume that if it isn't set, the user is
# running in a build shell and we should look for sources in the usual location.
if not SOURCE_ROOT:
SOURCE_ROOT = "/buildroot/apro"
GOLD_ROOT = os.path.join(SOURCE_ROOT, "tests/pytest/gold")
MANIFEST_ROOT = os.path.join(SOURCE_ROOT, "tests/pytest/manifests")
else:
# We're either not running in Edge Stack or we're not sure, so just assume OSS.
print("RUNNING IN OSS")
# SOURCE_ROOT is optional, and we assume that if it isn't set, the user is
# running in a build shell and we should look for sources in the usual location.
if not SOURCE_ROOT:
SOURCE_ROOT = "/buildroot/ambassador"
GOLD_ROOT = os.path.join(SOURCE_ROOT, "python/tests/gold")
MANIFEST_ROOT = os.path.join(SOURCE_ROOT, "python/tests/integration/manifests")
def load_manifest(manifest_name: str) -> str:
return open(os.path.join(MANIFEST_ROOT, f"{manifest_name.lower()}.yaml"), "r").read()
class TestImage:
def __init__(self, *args, **kwargs) -> None:
self.images: Dict[str, str] = {}
default_registry = os.environ.get('TEST_SERVICE_REGISTRY', 'docker.io/datawire/test_services')
default_version = os.environ.get('TEST_SERVICE_VERSION', '0.0.3')
for svc in ['auth', 'auth-tls', 'ratelimit', 'shadow', 'stats']:
key = svc.replace('-', '_').upper()
image = os.environ.get(f'TEST_SERVICE_{key}', f'{default_registry}:test-{svc}-{default_version}')
self.images[svc] = image
def __getitem__(self, key: str) -> str:
return self.images[key]
GLOBAL_TEST_IMAGE = TestImage()
def run(cmd):
status = os.system(cmd)
if status != 0:
raise RuntimeError("command failed[%s]: %s" % (status, cmd))
def kube_version_json():
result = subprocess.Popen('kubectl version -o json', stdout=subprocess.PIPE, shell=True)
stdout, _ = result.communicate()
return json.loads(stdout)
def strip_version(ver: str):
"""
strip_version is needed to strip a major/minor version of non-standard symbols. For example, when working with GKE,
`kubectl version` returns a minor version like '14+', which is not semver or any standard version, for that matter.
So we handle exceptions like that here.
:param ver: version string
:return: stripped version
"""
try:
return int(ver)
except ValueError as e:
# GKE returns weird versions with '+' in the end
if ver[-1] == '+':
return int(ver[:-1])
# If we still have not taken care of this, raise the error
raise ValueError(e)
def kube_server_version(version_json=None):
if not version_json:
version_json = kube_version_json()
server_json = version_json.get('serverVersion', {})
if server_json:
server_major = strip_version(server_json.get('major', None))
server_minor = strip_version(server_json.get('minor', None))
return f"{server_major}.{server_minor}"
else:
return None
def kube_client_version(version_json=None):
if not version_json:
version_json = kube_version_json()
client_json = version_json.get('clientVersion', {})
if client_json:
client_major = strip_version(client_json.get('major', None))
client_minor = strip_version(client_json.get('minor', None))
return f"{client_major}.{client_minor}"
else:
return None
def is_kube_server_client_compatible(debug_desc: str, requested_server_version: str, requested_client_version: str) -> bool:
is_cluster_compatible = True
kube_json = kube_version_json()
server_version = kube_server_version(kube_json)
client_version = kube_client_version(kube_json)
if server_version:
if version.parse(server_version) < version.parse(requested_server_version):
print(f"server version {server_version} is incompatible with {debug_desc}")
is_cluster_compatible = False
else:
print(f"server version {server_version} is compatible with {debug_desc}")
else:
print("could not determine Kubernetes server version?")
if client_version:
if version.parse(client_version) < version.parse(requested_client_version):
print(f"client version {client_version} is incompatible with {debug_desc}")
is_cluster_compatible = False
else:
print(f"client version {client_version} is compatible with {debug_desc}")
else:
print("could not determine Kubernetes client version?")
return is_cluster_compatible
def is_ingress_class_compatible() -> bool:
return is_kube_server_client_compatible('IngressClass', '1.18', '1.14')
def is_knative_compatible() -> bool:
# Skip KNative immediately for run_mode local.
if RUN_MODE == 'local':
return False
return is_kube_server_client_compatible('Knative', '1.14', '1.14')
def get_digest(data: str) -> str:
s = sha256()
s.update(data.encode('utf-8'))
return s.hexdigest()
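# has_changed() caches the previous rendering of a piece of data at the given path
# and reports whether the new data differs (writing the new data when it does). Note
# the override at the end: with split testing, it currently always reports "changed"
# so manifests are reapplied on every run.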
def has_changed(data: str, path: str) -> Tuple[bool, str]:
cur_size = len(data.strip()) if data else 0
cur_hash = get_digest(data)
# print(f'has_changed: data size {cur_size} - {cur_hash}')
prev_data = None
changed = True
reason = f'no {path} present'
if os.path.exists(path):
with open(path) as f:
prev_data = f.read()
prev_size = len(prev_data.strip()) if prev_data else 0
prev_hash = None
if prev_data:
prev_hash = get_digest(prev_data)
# print(f'has_changed: prev_data size {prev_size} - {prev_hash}')
if data:
if data != prev_data:
reason = f'different data in {path}'
else:
changed = False
reason = f'same data in {path}'
if changed:
# print(f'has_changed: updating {path}')
with open(path, "w") as f:
f.write(data)
# For now, we always have to reapply with split testing.
if not changed:
changed = True
reason = 'always reapply for split test'
return (changed, reason)
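# sanitize() turns arbitrary parameter values (strings, dicts, other objects) into
# readable, identifier-safe fragments, using SANITIZATIONS to replace punctuation
# and COUNTERS to disambiguate repeated instances of the same class.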
COUNTERS: Dict[Type, int] = {}
SANITIZATIONS = OrderedDict((
("://", "SCHEME"),
(":", "COLON"),
(" ", "SPACE"),
("/t", "TAB"),
(".", "DOT"),
("?", "QMARK"),
("/", "SLASH"),
))
def sanitize(obj):
if isinstance(obj, str):
for k, v in SANITIZATIONS.items():
if obj.startswith(k):
obj = obj.replace(k, v + "-")
elif obj.endswith(k):
obj = obj.replace(k, "-" + v)
else:
obj = obj.replace(k, "-" + v + "-")
return obj
elif isinstance(obj, dict):
if 'value' in obj:
return obj['value']
else:
return "-".join("%s-%s" % (sanitize(k), sanitize(v)) for k, v in sorted(obj.items()))
else:
cls = obj.__class__
count = COUNTERS.get(cls, 0)
COUNTERS[cls] = count + 1
if count == 0:
return cls.__name__
else:
return "%s-%s" % (cls.__name__, count)
def abstract_test(cls: type):
cls.abstract_test = True # type: ignore
return cls
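# get_nodes()/variants() walk the subclass tree of a test class, skipping abstract
# classes and anything marked skip_variant, and expand each concrete class into its
# variants; Runner (later in this module) uses this to build the full test tree from
# a handful of root classes.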
def get_nodes(node_type: type):
if not inspect.isabstract(node_type) and not node_type.__dict__.get("abstract_test", False):
yield node_type
for sc in node_type.__subclasses__():
if not sc.__dict__.get("skip_variant", False):
for ssc in get_nodes(sc):
yield ssc
def variants(cls, *args, **kwargs) -> Tuple[Any]:
return tuple(a for n in get_nodes(cls) for a in n.variants(*args, **kwargs)) # type: ignore
class Name(str):
def __new__(cls, value, namespace=None):
s = super().__new__(cls, value)
s.namespace = namespace
return s
@property
def k8s(self):
return self.replace(".", "-").lower()
@property
def fqdn(self):
r = self.k8s
if self.namespace and (self.namespace != 'default'):
r += '.' + self.namespace
return r
class NodeLocal(threading.local):
def __init__(self):
self.current = None
_local = NodeLocal()
def _argprocess(o):
if isinstance(o, Node):
return o.clone()
elif isinstance(o, tuple):
return tuple(_argprocess(i) for i in o)
elif isinstance(o, list):
return [_argprocess(i) for i in o]
elif isinstance(o, dict):
return {_argprocess(k): _argprocess(v) for k, v in o.items()}
else:
return o
class Node(ABC):
parent: 'Node'
children: List['Node']
name: Name
ambassador_id: str
namespace: str = None # type: ignore
is_ambassador = False
local_result: Optional[Dict[str, str]] = None
def __init__(self, *args, **kwargs) -> None:
# If self.skip is set to true, this node is skipped
self.skip_node = False
self.xfail = None
self.test_image = GLOBAL_TEST_IMAGE
name = kwargs.pop("name", None)
if 'namespace' in kwargs:
self.namespace = kwargs.pop('namespace', None)
_clone: Node = kwargs.pop("_clone", None)
if _clone:
args = _clone._args # type: ignore
kwargs = _clone._kwargs # type: ignore
if name:
name = Name("-".join((_clone.name, name)))
else:
name = _clone.name
self._args = _clone._args # type: ignore
self._kwargs = _clone._kwargs # type: ignore
else:
self._args = args
self._kwargs = kwargs
if name:
name = Name("-".join((self.__class__.__name__, name)))
else:
name = Name(self.__class__.__name__)
saved = _local.current
self.parent = _local.current
if not self.namespace:
if self.parent and self.parent.namespace:
# We have no namespace assigned, but our parent does have a namespace
# defined. Copy the namespace down from our parent.
self.namespace = self.parent.namespace
else:
self.namespace = "default"
_local.current = self
self.children = []
if self.parent is not None:
self.parent.children.append(self)
try:
init = getattr(self, "init", lambda *a, **kw: None)
init(*_argprocess(args), **_argprocess(kwargs))
finally:
_local.current = saved
self.name = Name(self.format(name or self.__class__.__name__))
names = {} # type: ignore
for c in self.children:
assert c.name not in names, ("test %s of type %s has duplicate children: %s of type %s, %s" %
(self.name, self.__class__.__name__, c.name, c.__class__.__name__,
names[c.name].__class__.__name__))
names[c.name] = c
def clone(self, name=None):
return self.__class__(_clone=self, name=name)
def find_local_result(self, stop_at_first_ambassador: bool=False) -> Optional[Dict[str, str]]:
test_name = self.format('{self.path.k8s}')
# print(f"{test_name} {type(self)} FIND_LOCAL_RESULT")
end_result: Optional[Dict[str, str]] = None
n: Optional[Node] = self
while n:
node_name = n.format('{self.path.k8s}')
parent = n.parent
parent_name = parent.format('{self.path.k8s}') if parent else "-none-"
end_result = getattr(n, 'local_result', None)
result_str = end_result['result'] if end_result else '-none-'
# print(f"{test_name}: {'ambassador' if n.is_ambassador else 'node'} {node_name}, parent {parent_name}, local_result = {result_str}")
if end_result is not None:
break
if n.is_ambassador and stop_at_first_ambassador:
# This is an Ambassador: don't continue past it.
break
n = n.parent
return end_result
def check_local(self, gold_root: str, k8s_yaml_path: str) -> Tuple[bool, bool]:
testname = self.format('{self.path.k8s}')
if self.xfail:
# XFail early -- but still return True, True so that we don't try to run Envoy on it.
self.local_result = {
'result': 'xfail',
'reason': self.xfail
}
# print(f"==== XFAIL: {testname} local: {self.xfail}")
return (True, True)
if not self.is_ambassador:
# print(f"{testname} ({type(self)}) is not an Ambassador")
return (False, False)
if not self.ambassador_id:
print(f"{testname} ({type(self)}) is an Ambassador but has no ambassador_id?")
return (False, False)
ambassador_namespace = getattr(self, 'namespace', 'default')
ambassador_single_namespace = getattr(self, 'single_namespace', False)
no_local_mode: bool = getattr(self, 'no_local_mode', False)
skip_local_reason: Optional[str] = getattr(self, 'skip_local_instead_of_xfail', None)
# print(f"{testname}: ns {ambassador_namespace} ({'single' if ambassador_single_namespace else 'multi'})")
gold_path = os.path.join(gold_root, testname)
if os.path.isdir(gold_path) and not no_local_mode:
# print(f"==== {testname} running locally from {gold_path}")
# Yeah, I know, brutal hack.
#
# XXX (Flynn) This code isn't used and we don't know if it works. If you try
# it, bring it up-to-date with the environment created in abstract_tests.py
envstuff = ["env", f"AMBASSADOR_NAMESPACE={ambassador_namespace}"]
cmd = ["mockery", "--debug", k8s_yaml_path,
"-w", "python /ambassador/watch_hook.py",
"--kat", self.ambassador_id,
"--diff", gold_path]
if ambassador_single_namespace:
envstuff.append("AMBASSADOR_SINGLE_NAMESPACE=yes")
cmd += ["-n", ambassador_namespace]
if not getattr(self, 'allow_edge_stack_redirect', False):
envstuff.append("AMBASSADOR_NO_TLS_REDIRECT=yes")
cmd = envstuff + cmd
w = ShellCommand(*cmd)
if w.status():
print(f"==== GOOD: {testname} local against {gold_path}")
self.local_result = {'result': "pass"}
else:
print(f"==== FAIL: {testname} local against {gold_path}")
self.local_result = {
'result': 'fail',
'stdout': w.stdout,
'stderr': w.stderr
}
return (True, True)
else:
# If we have a local reason, has a parent already subsumed us?
#
# XXX The way KAT works, our parent will have always run earlier than us, so
# it's not clear if we can ever not have been subsumed.
if skip_local_reason:
local_result = self.find_local_result()
if local_result:
self.local_result = {
'result': 'skip',
'reason': f"subsumed by {skip_local_reason} -- {local_result['result']}"
}
# print(f"==== {self.local_result['result'].upper()} {testname} {self.local_result['reason']}")
return (True, True)
# OK, we weren't already subsumed. If we're in local mode, we'll skip or xfail
# depending on skip_local_reason.
if RUN_MODE == "local":
if skip_local_reason:
self.local_result = {
'result': 'skip',
# 'reason': f"subsumed by {skip_local_reason} without result in local mode"
}
print(f"==== {self.local_result['result'].upper()} {testname} {self.local_result['reason']}")
return (True, True)
else:
# XFail -- but still return True, True so that we don't try to run Envoy on it.
self.local_result = {
'result': 'xfail',
'reason': f"missing local cache {gold_path}"
}
# print(f"==== {self.local_result['result'].upper()} {testname} {self.local_result['reason']}")
return (True, True)
# If here, we're not in local mode. Allow Envoy to run.
self.local_result = None
# print(f"==== IGNORE {testname} no local cache")
return (True, False)
def has_local_result(self) -> bool:
return bool(self.local_result)
@classmethod
def variants(cls):
yield cls()
@property
def path(self) -> str:
return self.relpath(None)
def relpath(self, ancestor):
if self.parent is ancestor:
return Name(self.name, namespace=self.namespace)
else:
return Name(self.parent.relpath(ancestor) + "." + self.name, namespace=self.namespace)
@property
def traversal(self):
yield self
for c in self.children:
for d in c.traversal:
yield d
@property
def ancestors(self):
yield self
if self.parent is not None:
for a in self.parent.ancestors:
yield a
@property
def depth(self):
if self.parent is None:
return 0
else:
return self.parent.depth + 1
def format(self, st, **kwargs):
serviceAccountExtra = ''
if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
serviceAccountExtra = """
imagePullSecrets:
- name: dev-image-pull-secret
"""
return st.format(self=self, environ=os.environ, serviceAccountExtra=serviceAccountExtra, **kwargs)
def get_fqdn(self, name: str) -> str:
if self.namespace and (self.namespace != 'default'):
return f'{name}.{self.namespace}'
else:
return name
@functools.lru_cache()
def matches(self, pattern):
if fnmatch.fnmatch(self.path, "*%s*" % pattern):
return True
for c in self.children:
if c.matches(pattern):
return True
return False
def requirements(self):
yield from ()
# log_kube_artifacts writes various logs about our underlying Kubernetes objects to
# a place where the artifact publisher can find them. See run-tests.sh.
def log_kube_artifacts(self):
if not getattr(self, 'already_logged', False):
self.already_logged = True
print(f'logging kube artifacts for {self.path.k8s}')
sys.stdout.flush()
DEV = os.environ.get("AMBASSADOR_DEV", "0").lower() in ("1", "yes", "true")
log_path = f'/tmp/kat-logs-{self.path.k8s}'
if DEV:
os.system(f'docker logs {self.path.k8s} >{log_path} 2>&1')
else:
os.system(f'kubectl logs -n {self.namespace} {self.path.k8s} >{log_path} 2>&1')
event_path = f'/tmp/kat-events-{self.path.k8s}'
fs1 = f'involvedObject.name={self.path.k8s}'
fs2 = f'involvedObject.namespace={self.namespace}'
cmd = f'kubectl get events -o json --field-selector "{fs1}" --field-selector "{fs2}"'
os.system(f'echo ==== "{cmd}" >{event_path}')
os.system(f'{cmd} >>{event_path} 2>&1')
class Test(Node):
results: Sequence['Result']
__test__ = False
def config(self):
yield from ()
def manifests(self):
return None
def queries(self):
yield from ()
def check(self):
pass
def handle_local_result(self) -> bool:
test_name = self.format('{self.path.k8s}')
# print(f"{test_name} {type(self)} HANDLE_LOCAL_RESULT")
end_result = self.find_local_result()
if end_result is not None:
result_type = end_result['result']
if result_type == 'pass':
pass
elif result_type == 'skip':
pytest.skip(end_result['reason'])
elif result_type == 'fail':
sys.stdout.write(end_result['stdout'])
if os.environ.get('KAT_VERBOSE', None):
sys.stderr.write(end_result['stderr'])
pytest.fail("local check failed")
elif result_type == 'xfail':
pytest.xfail(end_result['reason'])
return True
return False
@property
def ambassador_id(self):
if self.parent is None:
return self.name.k8s
else:
return self.parent.ambassador_id
@multi
def encode_body(obj):
yield type(obj)
@encode_body.when(bytes) # type: ignore
def encode_body(b):
return base64.encodebytes(b).decode("utf-8")
@encode_body.when(str) # type: ignore
def encode_body(s):
return encode_body(s.encode("utf-8"))
@encode_body.default # type: ignore
def encode_body(obj):
return encode_body(json.dumps(obj))
class Query:
def __init__(self, url, expected=None, method="GET", headers=None, messages=None, insecure=False, skip=None,
xfail=None, phase=1, debug=False, sni=False, error=None, client_crt=None, client_key=None,
client_cert_required=False, ca_cert=None, grpc_type=None, cookies=None, ignore_result=False, body=None,
minTLSv="", maxTLSv="", cipherSuites=[], ecdhCurves=[]):
self.method = method
self.url = url
self.headers = headers
self.body = body
self.cookies = cookies
self.messages = messages
self.insecure = insecure
self.minTLSv = minTLSv
self.maxTLSv = maxTLSv
self.cipherSuites = cipherSuites
self.ecdhCurves = ecdhCurves
if expected is None:
if url.lower().startswith("ws:"):
self.expected = 101
else:
self.expected = 200
else:
self.expected = expected
self.skip = skip
self.xfail = xfail
self.ignore_result = ignore_result
self.phase = phase
self.parent = None
self.result = None
self.debug = debug
self.sni = sni
self.error = error
self.client_cert_required = client_cert_required
self.client_cert = client_crt
self.client_key = client_key
self.ca_cert = ca_cert
assert grpc_type in (None, "real", "bridge", "web"), grpc_type
self.grpc_type = grpc_type
def as_json(self):
result = {
"test": self.parent.path, "id": id(self),
"url": self.url,
"insecure": self.insecure
}
if self.sni:
result["sni"] = self.sni
if self.method:
result["method"] = self.method
if self.method:
result["maxTLSv"] = self.maxTLSv
if self.method:
result["minTLSv"] = self.minTLSv
if self.cipherSuites:
result["cipherSuites"] = self.cipherSuites
if self.ecdhCurves:
result["ecdhCurves"] = self.ecdhCurves
if self.headers:
result["headers"] = self.headers
if self.body is not None:
result["body"] = encode_body(self.body)
if self.cookies:
result["cookies"] = self.cookies
if self.messages is not None:
result["messages"] = self.messages
if self.client_cert is not None:
result["client_cert"] = self.client_cert
if self.client_key is not None:
result["client_key"] = self.client_key
if self.ca_cert is not None:
result["ca_cert"] = self.ca_cert
if self.client_cert_required:
result["client_cert_required"] = self.client_cert_required
if self.grpc_type:
result["grpc_type"] = self.grpc_type
return result
class Result:
def __init__(self, query, res):
self.query = query
query.result = self
self.parent = query.parent
self.status = res.get("status")
self.headers = res.get("headers")
self.messages = res.get("messages")
self.tls = res.get("tls")
if "body" in res:
self.body = base64.decodebytes(bytes(res["body"], "ASCII"))
else:
self.body = None
self.text = res.get("text")
self.json = res.get("json")
self.backend = BackendResult(self.json) if self.json else None
self.error = res.get("error")
def __repr__(self):
return str(self.as_dict())
def check(self):
if self.query.skip:
pytest.skip(self.query.skip)
if self.query.xfail:
pytest.xfail(self.query.xfail)
if not self.query.ignore_result:
if self.query.error is not None:
found = False
errors = self.query.error
if isinstance(self.query.error, str):
errors = [ self.query.error ]
if self.error is not None:
for error in errors:
if error in self.error:
found = True
break
assert found, "{}: expected error to contain any of {}; got {} instead".format(
self.query.url, ", ".join([ "'%s'" % x for x in errors ]),
("'%s'" % self.error) if self.error else "no error"
)
else:
if self.query.expected != self.status:
self.parent.log_kube_artifacts()
assert self.query.expected == self.status, \
"%s: expected status code %s, got %s instead with error %s" % (
self.query.url, self.query.expected, self.status, self.error)
def as_dict(self) -> Dict[str, Any]:
od = {
'query': self.query.as_json(),
'status': self.status,
'error': self.error,
'headers': self.headers,
}
if self.backend and self.backend.name:
od['backend'] = self.backend.as_dict()
else:
od['json'] = self.json
od['text'] = self.text
return od
# 'RENDERED': {
# 'client': {
# 'request': self.query.as_json(),
# 'response': {
# 'status': self.status,
# 'error': self.error,
# 'headers': self.headers
# }
# },
# 'upstream': {
# 'name': self.backend.name,
# 'request': {
# 'headers': self.backend.request.headers,
# 'url': {
# 'fragment': self.backend.request.url.fragment,
# 'host': self.backend.request.url.host,
# 'opaque': self.backend.request.url.opaque,
# 'path': self.backend.request.url.path,
# 'query': self.backend.request.url.query,
# 'rawQuery': self.backend.request.url.rawQuery,
# 'scheme': self.backend.request.url.scheme,
# 'username': self.backend.request.url.username,
# 'password': self.backend.request.url.password,
# },
# 'host': self.backend.request.host,
# 'tls': {
# 'enabled': self.backend.request.tls.enabled,
# 'server_name': self.backend.request.tls.server_name,
# 'version': self.backend.request.tls.version,
# 'negotiated_protocol': self.backend.request.tls.negotiated_protocol,
# },
# },
# 'response': {
# 'headers': self.backend.response.headers
# }
# }
# }
class BackendURL:
def __init__(self, fragment=None, host=None, opaque=None, path=None, query=None, rawQuery=None,
scheme=None, username=None, password=None):
self.fragment = fragment
self.host = host
self.opaque = opaque
self.path = path
self.query = query
self.rawQuery = rawQuery
self.scheme = scheme
self.username = username
self.password = password
def as_dict(self) -> Dict['str', Any]:
return {
'fragment': self.fragment,
'host': self.host,
'opaque': self.opaque,
'path': self.path,
'query': self.query,
'rawQuery': self.rawQuery,
'scheme': self.scheme,
'username': self.username,
'password': self.password,
}
class BackendRequest:
def __init__(self, req):
self.url = BackendURL(**req.get("url"))
self.headers = req.get("headers", {})
self.host = req.get("host", None)
self.tls = BackendTLS(req.get("tls", {}))
def as_dict(self) -> Dict[str, Any]:
od = {
'headers': self.headers,
'host': self.host,
}
if self.url:
od['url'] = self.url.as_dict()
if self.tls:
od['tls'] = self.tls.as_dict()
return od
class BackendTLS:
def __init__(self, tls):
self.enabled = tls["enabled"]
self.server_name = tls.get("server-name")
self.version = tls.get("version")
self.negotiated_protocol = tls.get("negotiated-protocol")
self.negotiated_protocol_version = tls.get("negotiated-protocol-version")
def as_dict(self) -> Dict[str, Any]:
return {
'enabled': self.enabled,
'server_name': self.server_name,
'version': self.version,
'negotiated_protocol': self.negotiated_protocol,
'negotiated_protocol_version': self.negotiated_protocol_version,
}
class BackendResponse:
def __init__(self, resp):
self.headers = resp.get("headers", {})
def as_dict(self) -> Dict[str, Any]:
return { 'headers': self.headers }
def dictify(obj):
if getattr(obj, "as_dict", None):
return obj.as_dict()
else:
return obj
class BackendResult:
def __init__(self, bres):
self.name = "raw"
self.request = None
self.response = bres
if isinstance(bres, dict):
self.name = bres.get("backend")
self.request = BackendRequest(bres["request"]) if "request" in bres else None
self.response = BackendResponse(bres["response"]) if "response" in bres else None
def as_dict(self) -> Dict[str, Any]:
od = {
'name': self.name
}
if self.request:
od['request'] = dictify(self.request)
if self.response:
od['response'] = dictify(self.response)
return od
def label(yaml, scope):
for obj in yaml:
md = obj["metadata"]
if "labels" not in md:
md["labels"] = {}
obj["metadata"]["labels"]["scope"] = scope
return yaml
CLIENT_GO = "kat_client"
def run_queries(name: str, queries: Sequence[Query]) -> Sequence[Result]:
jsonified = []
byid = {}
for q in queries:
jsonified.append(q.as_json())
byid[id(q)] = q
path_urls = f'/tmp/kat-client-{name}-urls.json'
path_results = f'/tmp/kat-client-{name}-results.json'
path_log = f'/tmp/kat-client-{name}.log'
with open(path_urls, 'w') as f:
json.dump(jsonified, f)
# run(f"{CLIENT_GO} -input {path_urls} -output {path_results} 2> {path_log}")
res = ShellCommand.run('Running queries',
f"kubectl exec -n default -i kat /work/kat_client < '{path_urls}' > '{path_results}' 2> '{path_log}'",
shell=True)
if not res:
ret = [Result(q, {"error":"Command execution error"}) for q in queries]
return ret
with open(path_results, 'r') as f:
content = f.read()
    try:
        json_results = json.loads(content)
    except Exception as e:
        return [Result(q, {"error": f"Could not parse JSON content after running KAT queries: {e}"}) for q in queries]
results = []
for r in json_results:
res = r["result"]
q = byid[r["id"]]
results.append(Result(q, res))
return results
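# Hedged usage sketch (editorial addition): run_queries() serializes each Query,
# execs the in-cluster kat client via kubectl, and pairs every JSON result back
# with its Query by id(). The backend URL below is hypothetical, and test_node
# stands in for the kat Test that owns the query (as_json() reads parent.path).
def _example_run_queries(test_node):
    q = Query("http://example-backend.default/", expected=200)
    q.parent = test_node
    for r in run_queries("example", [q]):
        print(r.query.url, r.status, r.error)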
# yuck
DOCTEST = False
class Superpod:
def __init__(self, namespace: str) -> None:
self.namespace = namespace
self.next_clear = 8080
self.next_tls = 8443
self.service_names: Dict[int, str] = {}
self.name = 'superpod-%s' % (self.namespace or 'default')
def allocate(self, service_name) -> List[int]:
ports = [ self.next_clear, self.next_tls ]
self.service_names[self.next_clear] = service_name
self.service_names[self.next_tls] = service_name
self.next_clear += 1
self.next_tls += 1
return ports
def get_manifest_list(self) -> List[Dict[str, Any]]:
SUPERPOD_POD = load_manifest("superpod_pod")
manifest = load('superpod', SUPERPOD_POD.format(environ=os.environ), Tag.MAPPING)
assert len(manifest) == 1, "SUPERPOD manifest must have exactly one object"
m = manifest[0]
template = m['spec']['template']
ports: List[Dict[str, int]] = []
envs: List[Dict[str, Union[str, int]]] = template['spec']['containers'][0]['env']
for p in sorted(self.service_names.keys()):
ports.append({ 'containerPort': p })
envs.append({ 'name': f'BACKEND_{p}', 'value': self.service_names[p] })
template['spec']['containers'][0]['ports'] = ports
if 'metadata' not in m:
m['metadata'] = {}
metadata = m['metadata']
metadata['name'] = self.name
m['spec']['selector']['matchLabels']['backend'] = self.name
template['metadata']['labels']['backend'] = self.name
if self.namespace:
# Fix up the namespace.
if 'namespace' not in metadata:
metadata['namespace'] = self.namespace
return list(manifest)
class Runner:
def __init__(self, *classes, scope=None):
self.scope = scope or "-".join(c.__name__ for c in classes)
self.roots = tuple(v for c in classes for v in variants(c))
self.nodes = [n for r in self.roots for n in r.traversal if not n.skip_node]
self.tests = [n for n in self.nodes if isinstance(n, Test)]
self.ids = [t.path for t in self.tests]
self.done = False
self.skip_nonlocal_tests = False
self.ids_to_strip: Dict[str, bool] = {}
self.names_to_ignore: Dict[str, bool] = {}
@pytest.mark.parametrize("t", self.tests, ids=self.ids)
def test(request, capsys, t):
if t.xfail:
pytest.xfail(t.xfail)
else:
selected = set(item.callspec.getparam('t') for item in request.session.items if item.function == test)
with capsys.disabled():
self.setup(selected)
if not t.handle_local_result():
# XXX: should aggregate the result of url checks
i = 0
for r in t.results:
try:
r.check()
except AssertionError as e:
# Add some context so that you can tell which query is failing.
e.args = (f"query[{i}]: {e.args[0]}", *e.args[1:])
raise
i += 1
t.check()
self.__func__ = test
self.__test__ = True
def __call__(self):
assert False, "this is here for py.test discovery purposes only"
def setup(self, selected):
if not self.done:
if not DOCTEST:
print()
expanded_up = set(selected)
for s in selected:
for n in s.ancestors:
if not n.xfail:
expanded_up.add(n)
expanded = set(expanded_up)
for s in selected:
for n in s.traversal:
if not n.xfail:
expanded.add(n)
try:
self._setup_k8s(expanded)
if self.skip_nonlocal_tests:
self.done = True
return
for t in self.tests:
if t.has_local_result():
# print(f"{t.name}: SKIP due to local result")
continue
if t in expanded_up:
pre_query: Callable = getattr(t, "pre_query", None)
if pre_query:
pre_query()
self._query(expanded_up)
except:
traceback.print_exc()
pytest.exit("setup failed")
finally:
self.done = True
def get_manifests_and_namespace(self, selected) -> Tuple[Any, str]:
manifests: OrderedDict[Any, list] = OrderedDict() # type: ignore
superpods: Dict[str, Superpod] = {}
for n in (n for n in self.nodes if n in selected and not n.xfail):
manifest = None
nsp = None
ambassador_id = None
# print('manifesting for {n.path}')
# Walk up the parent chain to find our namespace and ambassador_id.
cur = n
while cur:
if not nsp:
nsp = getattr(cur, 'namespace', None)
# print(f'... {cur.name} has namespace {nsp}')
if not ambassador_id:
ambassador_id = getattr(cur, 'ambassador_id', None)
# print(f'... {cur.name} has ambassador_id {ambassador_id}')
if nsp and ambassador_id:
# print(f'... good for namespace and ambassador_id')
break
cur = cur.parent
# OK. Does this node want to use a superpod?
if getattr(n, 'use_superpod', False):
# Yup. OK. Do we already have a superpod for this namespace?
superpod = superpods.get(nsp, None) # type: ignore
if not superpod:
# We don't have one, so we need to create one.
superpod = Superpod(nsp) # type: ignore
superpods[nsp] = superpod # type: ignore
# print(f'superpodifying {n.name}')
# Next up: use the BACKEND_SERVICE manifest as a template...
BACKEND_SERVICE = load_manifest("backend_service")
yaml = n.format(BACKEND_SERVICE)
manifest = load(n.path, yaml, Tag.MAPPING)
assert len(manifest) == 1, "BACKEND_SERVICE manifest must have exactly one object"
m = manifest[0]
# Update the manifest's selector...
m['spec']['selector']['backend'] = superpod.name
# ...and labels if needed...
if ambassador_id:
m['metadata']['labels'] = { 'kat-ambassador-id': ambassador_id }
# ...and target ports.
superpod_ports = superpod.allocate(n.path.k8s)
m['spec']['ports'][0]['targetPort'] = superpod_ports[0]
m['spec']['ports'][1]['targetPort'] = superpod_ports[1]
else:
# The non-superpod case...
yaml = n.manifests()
if yaml is not None:
add_cleartext_host = getattr(n, 'edge_stack_cleartext_host', False)
is_plain_test = n.path.k8s.startswith("plain-")
if EDGE_STACK and n.is_ambassador and add_cleartext_host and not is_plain_test:
# print(f"{n.path.k8s} adding Host")
host_yaml = cleartext_host_manifest % nsp
yaml += host_yaml
yaml = n.format(yaml)
try:
manifest = load(n.path, yaml, Tag.MAPPING)
except Exception as e:
print(f'parse failure! {e}')
print(yaml)
if manifest:
# print(manifest)
# Make sure namespaces and labels are properly set.
for m in manifest:
if 'metadata' not in m:
m['metadata'] = {}
metadata = m['metadata']
if 'labels' not in metadata:
metadata['labels'] = {}
if ambassador_id:
metadata['labels']['kat-ambassador-id'] = ambassador_id
if nsp:
if 'namespace' not in metadata:
metadata['namespace'] = nsp
# ...and, finally, save the manifest list.
manifests[n] = list(manifest)
for superpod in superpods.values():
manifests[superpod] = superpod.get_manifest_list()
return manifests, str(nsp)
def do_local_checks(self, selected, fname) -> bool:
if RUN_MODE == 'envoy':
print("Local mode not allowed, continuing to Envoy mode")
return False
all_valid = True
self.ids_to_strip = {}
# This feels a bit wrong?
self.names_to_ignore = {}
for n in (n for n in self.nodes if n in selected):
local_possible, local_checked = n.check_local(GOLD_ROOT, fname)
if local_possible:
if local_checked:
self.ids_to_strip[n.ambassador_id] = True
else:
all_valid = False
return all_valid
def _setup_k8s(self, selected):
# First up, get the full manifest and save it to disk.
manifests, namespace = self.get_manifests_and_namespace(selected)
configs = OrderedDict()
for n in (n for n in self.nodes if n in selected and not n.xfail):
configs[n] = []
for cfg in n.config():
if isinstance(cfg, str):
parent_config = configs[n.parent][0][1][0]
try:
for o in load(n.path, cfg, Tag.MAPPING):
parent_config.merge(o)
except YAMLScanError as e:
raise Exception("Parse Error: %s, input text:\n%s" % (e, cfg))
else:
target = cfg[0]
try:
yaml = load(n.path, cfg[1], Tag.MAPPING)
if n.ambassador_id:
for obj in yaml:
if "ambassador_id" not in obj:
obj["ambassador_id"] = [n.ambassador_id]
configs[n].append((target, yaml))
except YAMLScanError as e:
raise Exception("Parse Error: %s, input text:\n%s" % (e, cfg[1]))
for tgt_cfgs in configs.values():
for target, cfg in tgt_cfgs:
for t in target.traversal:
if t in manifests:
k8s_yaml = manifests[t]
for item in k8s_yaml:
if item["kind"].lower() == "service":
md = item["metadata"]
if "annotations" not in md:
md["annotations"] = {}
anns = md["annotations"]
if "getambassador.io/config" in anns:
anns["getambassador.io/config"] += "\n" + dump(cfg)
else:
anns["getambassador.io/config"] = dump(cfg)
break
else:
continue
break
else:
assert False, "no service found for target: %s" % target.path
yaml = ""
for v in manifests.values():
yaml += dump(label(v, self.scope)) + "\n"
fname = "/tmp/k8s-%s.yaml" % self.scope
self.applied_manifests = False
# Always apply at this point, since we're doing the multi-run thing.
manifest_changed, manifest_reason = has_changed(yaml, fname)
# OK. Try running local stuff.
if self.do_local_checks(selected, fname):
# Everything that could run locally did. Good enough.
self.skip_nonlocal_tests = True
return True
# Something didn't work out quite right.
print(f'Continuing with Kube tests...')
# print(f"ids_to_strip {self.ids_to_strip}")
# XXX It is _so stupid_ that we're reparsing the whole manifest here.
xxx_crap = pyyaml.load_all(open(fname, "r").read(), Loader=pyyaml_loader)
# Strip things we don't need from the manifest.
trimmed_manifests = []
trimmed = 0
kept = 0
for obj in xxx_crap:
keep = True
kind = '-nokind-'
name = '-noname-'
metadata: Dict[str, Any] = {}
labels: Dict[str, str] = {}
id_to_check: Optional[str] = None
if 'kind' in obj:
kind = obj['kind']
if 'metadata' in obj:
metadata = obj['metadata']
if 'name' in metadata:
name = metadata['name']
if 'labels' in metadata:
labels = metadata['labels']
if 'kat-ambassador-id' in labels:
id_to_check = labels['kat-ambassador-id']
# print(f"metadata {metadata} id_to_check {id_to_check} obj {obj}")
# Keep namespaces, just in case.
if kind == 'Namespace':
keep = True
else:
if id_to_check and (id_to_check in self.ids_to_strip):
keep = False
# print(f"...drop {kind} {name} (ID {id_to_check})")
self.names_to_ignore[name] = True
if keep:
kept += 1
trimmed_manifests.append(obj)
else:
trimmed += 1
if trimmed:
print(f"After trimming: kept {kept}, trimmed {trimmed}")
yaml = pyyaml.dump_all(trimmed_manifests, Dumper=pyyaml_dumper)
fname = "/tmp/k8s-%s-trimmed.yaml" % self.scope
self.applied_manifests = False
# Always apply at this point, since we're doing the multi-run thing.
manifest_changed, manifest_reason = has_changed(yaml, fname)
# First up: CRDs.
CRDS = load_manifest("crds")
input_crds = CRDS
if is_knative_compatible():
KNATIVE_SERVING_CRDS = load_manifest("knative_serving_crds")
input_crds += KNATIVE_SERVING_CRDS
# Strip out all of the schema validation, so that we can test with broken CRDs.
# (KAT isn't really in the business of testing to be sure that Kubernetes can
# run the K8s validators...)
crds = pyyaml.load_all(input_crds, Loader=pyyaml_loader)
# Collect the CRDs with schema validation stripped in stripped_crds, because
# pyyaml.load_all actually returns something more complex than a simple list,
# so it doesn't reserialize well after being modified.
stripped_crds = []
for crd in crds:
# Guard against empty CRDs (the KNative files have some blank lines at
# the end).
if not crd:
continue
crd["spec"].pop("validation", None)
stripped_crds.append(crd)
final_crds = pyyaml.dump_all(stripped_crds, Dumper=pyyaml_dumper)
changed, reason = has_changed(final_crds, "/tmp/k8s-CRDs.yaml")
if changed:
print(f'CRDS changed ({reason}), applying.')
if not ShellCommand.run_with_retry(
'Apply CRDs',
'kubectl', 'apply', '-f', '/tmp/k8s-CRDs.yaml',
retries=5, sleep_seconds=10):
raise RuntimeError("Failed applying CRDs")
tries_left = 10
while os.system('kubectl get crd mappings.getambassador.io > /dev/null 2>&1') != 0:
tries_left -= 1
if tries_left <= 0:
raise RuntimeError("CRDs never became available")
print("sleeping for CRDs... (%d)" % tries_left)
time.sleep(5)
else:
print(f'CRDS unchanged {reason}, skipping apply.')
# Next up: the KAT pod.
KAT_CLIENT_POD = load_manifest("kat_client_pod")
if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
KAT_CLIENT_POD = namespace_manifest("default") + KAT_CLIENT_POD
changed, reason = has_changed(KAT_CLIENT_POD.format(environ=os.environ), "/tmp/k8s-kat-pod.yaml")
if changed:
print(f'KAT pod definition changed ({reason}), applying')
if not ShellCommand.run_with_retry('Apply KAT pod',
'kubectl', 'apply', '-f' , '/tmp/k8s-kat-pod.yaml', '-n', 'default',
retries=5, sleep_seconds=10):
raise RuntimeError('Could not apply manifest for KAT pod')
tries_left = 3
time.sleep(1)
while True:
if ShellCommand.run("wait for KAT pod",
'kubectl', '-n', 'default', 'wait', '--timeout=30s', '--for=condition=Ready', 'pod', 'kat'):
print("KAT pod ready")
break
tries_left -= 1
if tries_left <= 0:
raise RuntimeError("KAT pod never became available")
print("sleeping for KAT pod... (%d)" % tries_left)
time.sleep(5)
else:
print(f'KAT pod definition unchanged {reason}, skipping apply.')
# Use a dummy pod to get around the !*@&#$!*@&# DockerHub rate limit.
# XXX Better: switch to GCR.
dummy_pod = load_manifest("dummy_pod")
if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
dummy_pod = namespace_manifest("default") + dummy_pod
changed, reason = has_changed(dummy_pod.format(environ=os.environ), "/tmp/k8s-dummy-pod.yaml")
if changed:
print(f'Dummy pod definition changed ({reason}), applying')
if not ShellCommand.run_with_retry('Apply dummy pod',
'kubectl', 'apply', '-f' , '/tmp/k8s-dummy-pod.yaml', '-n', 'default',
retries=5, sleep_seconds=10):
raise RuntimeError('Could not apply manifest for dummy pod')
tries_left = 3
time.sleep(1)
while True:
if ShellCommand.run("wait for dummy pod",
'kubectl', '-n', 'default', 'wait', '--timeout=30s', '--for=condition=Ready', 'pod', 'dummy-pod'):
print("Dummy pod ready")
break
tries_left -= 1
if tries_left <= 0:
raise RuntimeError("Dummy pod never became available")
print("sleeping for dummy pod... (%d)" % tries_left)
time.sleep(5)
else:
print(f'Dummy pod definition unchanged {reason}, skipping apply.')
# # Clear out old stuff.
if os.environ.get("DEV_CLEAN_K8S_RESOURCES", False):
print("Clearing cluster...")
ShellCommand.run('clear old Kubernetes namespaces',
'kubectl', 'delete', 'namespaces', '-l', 'scope=AmbassadorTest',
verbose=True)
ShellCommand.run('clear old Kubernetes pods etc.',
'kubectl', 'delete', 'all', '-l', 'scope=AmbassadorTest', '--all-namespaces',
verbose=True)
# XXX: better prune selector label
if manifest_changed:
print(f"manifest changed ({manifest_reason}), applying...")
if not ShellCommand.run_with_retry('Applying k8s manifests',
'kubectl', 'apply', '--prune', '-l', 'scope=%s' % self.scope, '-f', fname,
retries=5, sleep_seconds=10):
raise RuntimeError('Could not apply manifests')
self.applied_manifests = True
# Finally, install httpbin and the websocket-echo-server.
apply_kube_artifacts(namespace, httpbin_manifests)
apply_kube_artifacts(namespace, websocket_echo_server_manifests)
for n in self.nodes:
if n in selected and not n.xfail:
action = getattr(n, "post_manifest", None)
if action:
action()
self._wait(selected)
print("Waiting 5s after requirements, just because...")
time.sleep(5)
@staticmethod
def _req_str(kind, req) -> str:
printable = req
if kind == 'url':
printable = req.url
return printable
def _wait(self, selected):
requirements = []
for node in selected:
if node.xfail:
continue
node_name = node.format("{self.path.k8s}")
ambassador_id = getattr(node, 'ambassador_id', None)
# print(f"{node_name} {ambassador_id}")
if node.has_local_result():
# print(f"{node_name} has local result, skipping")
continue
if ambassador_id and ambassador_id in self.ids_to_strip:
# print(f"{node_name} has id {ambassador_id}, stripping")
continue
if node_name in self.names_to_ignore:
# print(f"{node_name} marked to ignore, stripping")
continue
# if RUN_MODE != "envoy":
# print(f"{node_name}: including in nonlocal tests")
for kind, req in node.requirements():
# print(f"{node_name} add req ({node_name}, {kind}, {self._req_str(kind, req)})")
requirements.append((node, kind, req))
homogenous = {}
for node, kind, name in requirements:
if kind not in homogenous:
homogenous[kind] = []
homogenous[kind].append((node, name))
kinds = [ "pod", "url" ]
delay = 5
start = time.time()
limit = int(os.environ.get("KAT_REQ_LIMIT", "600"))
print("Starting requirements check (limit %ds)... " % limit)
holdouts = {}
while time.time() - start < limit:
for kind in kinds:
if kind not in homogenous:
continue
reqs = homogenous[kind]
print("Checking %s %s requirements... " % (len(reqs), kind), end="")
# print("\n")
# for node, req in reqs:
# print(f"...{node.format('{self.path.k8s}')} - {self._req_str(kind, req)}")
sys.stdout.flush()
is_ready, _holdouts = self._ready(kind, reqs)
if not is_ready:
holdouts[kind] = _holdouts
delay = int(min(delay*2, 10))
print("sleeping %ss..." % delay)
sys.stdout.flush()
time.sleep(delay)
else:
print("satisfied.")
sys.stdout.flush()
kinds.remove(kind)
break
else:
return
print("requirements not satisfied in %s seconds:" % limit)
for kind in kinds:
_holdouts = holdouts.get(kind, [])
if _holdouts:
print(f' {kind}:')
for node, text in _holdouts:
print(f' {node.path.k8s} ({text})')
node.log_kube_artifacts()
assert False, "requirements not satisfied in %s seconds" % limit
@multi
def _ready(self, kind, _):
return kind
@_ready.when("pod") # type: ignore
def _ready(self, _, requirements):
pods = self._pods(self.scope)
not_ready = []
for node, name in requirements:
if not pods.get(name, False):
not_ready.append((node, name))
if not_ready:
print("%d not ready (%s), " % (len(not_ready), name), end="")
return (False, not_ready)
return (True, None)
@_ready.when("url") # type: ignore
def _ready(self, _, requirements):
queries = []
for node, q in requirements:
q.insecure = True
q.parent = node
queries.append(q)
# print("URL Reqs:")
# print("\n".join([ f'{q.parent.name}: {q.url}' for q in queries ]))
result = run_queries("reqcheck", queries)
not_ready = [r for r in result if r.status != r.query.expected]
if not_ready:
first = not_ready[0]
print("%d not ready (%s: %s) " % (len(not_ready), first.query.url, first.status or first.error), end="")
return (False, [ (x.query.parent, "%s -- %s" % (x.query.url, x.status or x.error)) for x in not_ready ])
else:
return (True, None)
def _pods(self, scope=None):
scope_for_path = scope if scope else 'global'
label_for_scope = f'-l scope={scope}' if scope else ''
fname = f'/tmp/pods-{scope_for_path}.json'
if not ShellCommand.run_with_retry('Getting pods',
f'kubectl get pod {label_for_scope} --all-namespaces -o json > {fname}',
shell=True, retries=5, sleep_seconds=10):
raise RuntimeError('Could not get pods')
with open(fname) as f:
raw_pods = json.load(f)
pods = {}
for p in raw_pods["items"]:
name = p["metadata"]["name"]
cstats = p["status"].get("containerStatuses", [])
all_ready = True
for status in cstats:
ready = status.get('ready', False)
if not ready:
all_ready = False
# print(f'pod {name} is not ready: {status.get("state", "unknown state")}')
pods[name] = all_ready
return pods
def _query(self, selected) -> None:
queries = []
for t in self.tests:
t_name = t.format('{self.path.k8s}')
if t in selected:
t.pending = []
t.queried = []
t.results = []
else:
continue
if t.has_local_result():
# print(f"{t_name}: SKIP QUERY due to local result")
continue
ambassador_id = getattr(t, 'ambassador_id', None)
if ambassador_id and ambassador_id in self.ids_to_strip:
# print(f"{t_name}: SKIP QUERY due to ambassador_id {ambassador_id}")
continue
# print(f"{t_name}: INCLUDE QUERY")
for q in t.queries():
q.parent = t
t.pending.append(q)
queries.append(q)
phases = sorted(set([q.phase for q in queries]))
first = True
for phase in phases:
if not first:
phase_delay = int(os.environ.get("KAT_PHASE_DELAY", 10))
print("Waiting for {} seconds before starting phase {}...".format(phase_delay, phase))
time.sleep(phase_delay)
first = False
phase_queries = [q for q in queries if q.phase == phase]
print("Querying %s urls in phase %s..." % (len(phase_queries), phase), end="")
sys.stdout.flush()
results = run_queries(f'phase{phase}', phase_queries)
print(" done.")
for r in results:
t = r.parent
t.queried.append(r.query)
if getattr(t, "debug", False) or getattr(r.query, "debug", False):
print("%s result: %s" % (t.name, json.dumps(r.as_dict(), sort_keys=True, indent=4)))
t.results.append(r)
t.pending.remove(r.query)
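# Hedged usage sketch (editorial addition, not part of this module): a kat test
# module typically instantiates Runner once at import time so that pytest
# discovers the generated, parametrized `test` function. AmbassadorTest is
# assumed to be the test base class defined alongside the kat node types.
#
#     main = Runner(AmbassadorTest)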
|
"""
Enable card counting.
Shuffle logic:
Continuous Shuffle Machine?
Shuffle every half deck?
    Shuffle before using the last 52 cards.
Rename Game.sink to Game.discard_tray.
"""
import random
import sys
import time
from ttblackjack.card import Card
from ttblackjack import INSTRUCTIONS
from ttblackjack import Action, CLUB, DIAMOND, HEART, SPADE
from ttblackjack.hand import Hand
from ttblackjack.player import Player
from ttblackjack.algorithm import ALGORITHMS
RSLEEP = True  # debug: really sleep instead of printing a sleep marker
class Game:
def __init__(self,
decks, sleep, rounds,
yes, viewer, debug, assist,
players, shoebox, sink) -> None:
self.decks: int = decks
self.sleep: float = sleep
self.rounds: int = rounds
self.yes: bool = yes
self.viewer: str = viewer
self.assist: bool = assist
self.debug: bool = debug
self.players: list = players
self.shoebox: Hand = shoebox
self.sink = sink
@classmethod
def from_config(cls, config):
decks = int(config['decks'])
sleep = float(config['sleep'])
rounds = int(config['rounds'])
yes = bool(config['yes'])
viewer = config['viewer']
assist = bool(config['assist'])
debug = bool(config['debug'])
players = Game.get_players(config)
shoebox = Game.get_shoebox(decks, shuffle=True, debug=debug)
sink = Hand()
return cls(
decks=decks, sleep=sleep,
rounds=rounds, yes=yes,
viewer=viewer, assist=assist, debug=debug,
players=players, shoebox=shoebox,
sink=sink)
@staticmethod
def get_players(config):
""" validate players and make them """
""" later: parse config file elsewhere """
if config['players'][-1]['algorithm'] != 'dealer':
print('Last player must be dealer')
sys.exit(1)
players = []
for p_conf in config['players']:
algo = p_conf['algorithm']
if algo not in ALGORITHMS.keys():
print(f'{algo} is not an acceptable algorithm, choose from {ALGORITHMS}')
sys.exit(1)
            is_user = algo == 'user'
            is_dealer = algo == 'dealer'
p = Player(p_conf['name'], algo, is_user, is_dealer)
players.append(p)
return players
@staticmethod
def get_shoebox(decks, shuffle=True, debug=False) -> Hand:
""" Hand object, fill it """
cards = []
for _d in range(decks):
for suit in (CLUB, DIAMOND, HEART, SPADE):
for v in Card.acceptable_vals:
cards.append(Card(suit, v, debug))
assert len(cards) == decks * 52, 'My Maths are bad'
if shuffle:
random.shuffle(cards)
return Hand(cards)
def welcome(self):
if self.debug:
print('!! WARNING: Running in debug/x-ray mode !!')
print()
print('-- Welcome --')
print(INSTRUCTIONS)
print('-------------')
if not self.yes:
try:
input('(Enter)')
except KeyboardInterrupt:
print('exit')
sys.exit(0)
def _round_start(self):
""" distribute cards and stuff """
for p in self.players:
assert len(p.hand) == 0, f'Player {p} hand is not empty: {p.hand}'
# distribute cards (face up)
for p in self.players:
card = self.shoebox.pop()
card.face_up()
p.hand.append(card)
# distribute cards (face down)
for p in self.players:
card = self.shoebox.pop()
            if p.is_dealer:
                card.face_down()
            else:
                card.face_up()
if self.debug:
card.face_up()
p.hand.append(card)
def _round_end(self):
""" used cards to sink and stuff """
# collect into sink
for p in self.players:
cards = p.hand.dump()
self.sink.extend(cards)
for p in self.players:
assert len(p.hand) == 0, f'Player {p} hand is not empty: {p.hand}'
def _view_round_1(self, curr_player=None):
""" viewer """
print('-' * 20)
for p in self.players:
hint = '* ' if p == curr_player else ' '
e_max = 'BUST' if p.hand.is_busted else p.hand.e_max
if self.debug or curr_player is None: # debug or round finished
print(f'{hint}{p.name:<20}{e_max:<6}{p.hand}')
elif p == self._get_dealer(): # hide dealer
print(f'{hint}{p.name:<20}{' '*6}{p.hand}')
elif self.assist: # in talk
e_max = 'BUST' if p.hand.is_busted else p.hand.e_max
print(f'{hint}{p.name:<20}{e_max:<6}{p.hand}')
else:
print(f'{hint}{p.name:<20}{p.hand}')
print('-' * 20)
if self.debug:
print('x-ray: ')
for _i in range(5):
print(str(self.shoebox.cards[_i]), end=' ')
print('\n', '-' * 20)
def _talk(self):
""" Conversation between dealer and players """
for p in self.players:
if p.is_dealer:
p.hand.cards[-1].face_up()
while True:
self._view_round_1(p)
# check player hand here
if p.hand.is_blackjack:
print(f'{p.name} BLACKJACK !!')
break
if p.hand.is_busted:
print(f'{p.name} BUST !!')
break
self._sleep()
act = ALGORITHMS[p.algo](hand=p.hand)
if act == Action.hit:
self._player_hit(p)
time.sleep(self.sleep)
# print('^^ SLEEP HERE ^^')
elif act == Action.stand:
print(f'{p.name} Stand...')
break
else:
raise Exception(f'Unknown action: {act}')
self._sleep()
print('\n= = =\n')
def _results(self):
print('Last View:')
self._view_round_1()
self._round_stats()
self._game_stats()
def _get_dealer(self):
try:
return [p for p in self.players if p.is_dealer][0]
except IndexError:
print('Cannot find dealer')
sys.exit(1)
def _round_stats(self):
round_stats = {'win': [], 'push': [], 'lose': []}
dealer = self._get_dealer()
# calc
for p in self.players:
            if p == dealer:  # don't count the dealer
continue
if (not p.hand.is_busted # player not busted
and dealer.hand.is_busted): # dealer busted
round_stats['win'].append(p)
p.score['win'] += 1
elif (p.hand.is_busted # busted
or p.hand.e_max < dealer.hand.e_max): # lower than dealer, e_max ?= None
round_stats['lose'].append(p)
p.score['lose'] += 1
elif p.hand.e_max == dealer.hand.e_max: # push
# put later because both might be busted, e_max ?= None
round_stats['push'].append(p)
p.score['push'] += 1
elif p.hand.e_max > dealer.hand.e_max: # wins
# put later because both might be busted, e_max ?= None
round_stats['win'].append(p)
p.score['win'] += 1
else:
breakpoint()
raise Exception('Unknown result', )
# round stats
print('Round Stats:')
for k, pl in round_stats.items():
print(f'{k:<6}: {', '.join([p.name for p in pl])}')
def _game_stats(self):
print('= ' * 10)
for p in self.players:
print(f'{p.name:<15} {p.score_text}')
print('= ' * 10)
def _player_hit(self, p: Player):
print(f'{p.name} Hit...')
card = self.shoebox.pop()
card.face_up()
p.hand.append(card)
def run(self):
try:
for round_ in range(self.rounds):
print(f'Round: {round_+1}/{self.rounds}')
self._round_start()
self._talk()
self._results()
self._round_end()
                if not self.yes and round_ + 1 != self.rounds:
                    input('Next Round? (Or Ctrl+C to end)')
print('\n' * 3)
print('Done')
except KeyboardInterrupt:
print('\nexit\n')
print('Thanks for playing')
self._game_stats()
return
def _sleep(self, t=None):
if RSLEEP:
if t is None:
time.sleep(self.sleep)
else:
time.sleep(float(t))
else:
print('^^ SLEEP HERE ^^')
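# Hedged usage sketch (editorial addition, not part of the game module): the
# shape of dict that Game.from_config() expects. Keys mirror the attributes set
# in __init__, and the last player entry must use the 'dealer' algorithm or
# get_players() exits. All values below are illustrative.
EXAMPLE_CONFIG = {
    'decks': 6, 'sleep': 0.5, 'rounds': 3, 'yes': False,
    'viewer': 'default', 'assist': False, 'debug': False,
    'players': [
        {'name': 'Alice', 'algorithm': 'user'},
        {'name': 'House', 'algorithm': 'dealer'},  # dealer must be last
    ],
}
# game = Game.from_config(EXAMPLE_CONFIG); game.welcome(); game.run()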
| """
Enable card counting.
Shuffle logic:
Continuous Shuffle Machine?
Shuffle every half deck?
    Shuffle before using the last 52 cards.
Rename Game.sink to Game.discard_tray.
"""
import random
import sys
import time
from ttblackjack.card import Card
from ttblackjack import INSTRUCTIONS
from ttblackjack import Action, CLUB, DIAMOND, HEART, SPADE
from ttblackjack.hand import Hand
from ttblackjack.player import Player
from ttblackjack.algorithm import ALGORITHMS
RSLEEP = True  # debug: really sleep instead of printing a sleep marker
class Game:
def __init__(self,
decks, sleep, rounds,
yes, viewer, debug, assist,
players, shoebox, sink) -> None:
self.decks: int = decks
self.sleep: float = sleep
self.rounds: int = rounds
self.yes: bool = yes
self.viewer: str = viewer
self.assist: bool = assist
self.debug: bool = debug
self.players: list = players
self.shoebox: Hand = shoebox
self.sink = sink
@classmethod
def from_config(cls, config):
decks = int(config['decks'])
sleep = float(config['sleep'])
rounds = int(config['rounds'])
yes = bool(config['yes'])
viewer = config['viewer']
assist = bool(config['assist'])
debug = bool(config['debug'])
players = Game.get_players(config)
shoebox = Game.get_shoebox(decks, shuffle=True, debug=debug)
sink = Hand()
return cls(
decks=decks, sleep=sleep,
rounds=rounds, yes=yes,
viewer=viewer, assist=assist, debug=debug,
players=players, shoebox=shoebox,
sink=sink)
@staticmethod
def get_players(config):
""" validate players and make them """
""" later: parse config file elsewhere """
if config['players'][-1]['algorithm'] != 'dealer':
print('Last player must be dealer')
sys.exit(1)
players = []
for p_conf in config['players']:
algo = p_conf['algorithm']
if algo not in ALGORITHMS.keys():
print(f'{algo} is not an acceptable algorithm, choose from {ALGORITHMS}')
sys.exit(1)
            is_user = algo == 'user'
            is_dealer = algo == 'dealer'
p = Player(p_conf['name'], algo, is_user, is_dealer)
players.append(p)
return players
@staticmethod
def get_shoebox(decks, shuffle=True, debug=False) -> Hand:
""" Hand object, fill it """
cards = []
for _d in range(decks):
for suit in (CLUB, DIAMOND, HEART, SPADE):
for v in Card.acceptable_vals:
cards.append(Card(suit, v, debug))
assert len(cards) == decks * 52, 'My Maths are bad'
if shuffle:
random.shuffle(cards)
return Hand(cards)
def welcome(self):
if self.debug:
print('!! WARNING: Running in debug/x-ray mode !!')
print()
print('-- Welcome --')
print(INSTRUCTIONS)
print('-------------')
if not self.yes:
try:
input('(Enter)')
except KeyboardInterrupt:
print('exit')
sys.exit(0)
def _round_start(self):
""" distribute cards and stuff """
for p in self.players:
assert len(p.hand) == 0, f'Player {p} hand is not empty: {p.hand}'
# distribute cards (face up)
for p in self.players:
card = self.shoebox.pop()
card.face_up()
p.hand.append(card)
# distribute cards (face down)
for p in self.players:
card = self.shoebox.pop()
            if p.is_dealer:
                card.face_down()
            else:
                card.face_up()
if self.debug:
card.face_up()
p.hand.append(card)
def _round_end(self):
""" used cards to sink and stuff """
# collect into sink
for p in self.players:
cards = p.hand.dump()
self.sink.extend(cards)
for p in self.players:
assert len(p.hand) == 0, f'Player {p} hand is not empty: {p.hand}'
def _view_round_1(self, curr_player=None):
""" viewer """
print('-' * 20)
for p in self.players:
hint = '* ' if p == curr_player else ' '
e_max = 'BUST' if p.hand.is_busted else p.hand.e_max
if self.debug or curr_player is None: # debug or round finished
print(f'{hint}{p.name:<20}{e_max:<6}{p.hand}')
elif p == self._get_dealer(): # hide dealer
print(f'{hint}{p.name:<20}{" "*6}{p.hand}')
elif self.assist: # in talk
e_max = 'BUST' if p.hand.is_busted else p.hand.e_max
print(f'{hint}{p.name:<20}{e_max:<6}{p.hand}')
else:
print(f'{hint}{p.name:<20}{p.hand}')
print('-' * 20)
if self.debug:
print('x-ray: ')
for _i in range(5):
print(str(self.shoebox.cards[_i]), end=' ')
print('\n', '-' * 20)
def _talk(self):
""" Conversation between dealer and players """
for p in self.players:
if p.is_dealer:
p.hand.cards[-1].face_up()
while True:
self._view_round_1(p)
# check player hand here
if p.hand.is_blackjack:
print(f'{p.name} BLACKJACK !!')
break
if p.hand.is_busted:
print(f'{p.name} BUST !!')
break
self._sleep()
act = ALGORITHMS[p.algo](hand=p.hand)
if act == Action.hit:
self._player_hit(p)
time.sleep(self.sleep)
# print('^^ SLEEP HERE ^^')
elif act == Action.stand:
print(f'{p.name} Stand...')
break
else:
raise Exception(f'Unknown action: {act}')
self._sleep()
print('\n= = =\n')
def _results(self):
print('Last View:')
self._view_round_1()
self._round_stats()
self._game_stats()
def _get_dealer(self):
try:
return [p for p in self.players if p.is_dealer][0]
except IndexError:
print('Cannot find dealer')
sys.exit(1)
def _round_stats(self):
round_stats = {'win': [], 'push': [], 'lose': []}
dealer = self._get_dealer()
# calc
for p in self.players:
            if p == dealer:  # don't count the dealer
continue
if (not p.hand.is_busted # player not busted
and dealer.hand.is_busted): # dealer busted
round_stats['win'].append(p)
p.score['win'] += 1
elif (p.hand.is_busted # busted
or p.hand.e_max < dealer.hand.e_max): # lower than dealer, e_max ?= None
round_stats['lose'].append(p)
p.score['lose'] += 1
elif p.hand.e_max == dealer.hand.e_max: # push
# put later because both might be busted, e_max ?= None
round_stats['push'].append(p)
p.score['push'] += 1
elif p.hand.e_max > dealer.hand.e_max: # wins
# put later because both might be busted, e_max ?= None
round_stats['win'].append(p)
p.score['win'] += 1
else:
breakpoint()
raise Exception('Unknown result', )
# round stats
print('Round Stats:')
for k, pl in round_stats.items():
print(f'{k:<6}: {", ".join([p.name for p in pl])}')
def _game_stats(self):
print('= ' * 10)
for p in self.players:
print(f'{p.name:<15} {p.score_text}')
print('= ' * 10)
def _player_hit(self, p: Player):
print(f'{p.name} Hit...')
card = self.shoebox.pop()
card.face_up()
p.hand.append(card)
def run(self):
try:
for round_ in range(self.rounds):
print(f'Round: {round_+1}/{self.rounds}')
self._round_start()
self._talk()
self._results()
self._round_end()
                if not self.yes and round_ + 1 != self.rounds:
                    input('Next Round? (Or Ctrl+C to end)')
print('\n' * 3)
print('Done')
except KeyboardInterrupt:
print('\nexit\n')
print('Thanks for playing')
self._game_stats()
return
def _sleep(self, t=None):
if RSLEEP:
if t is None:
time.sleep(self.sleep)
else:
time.sleep(float(t))
else:
print('^^ SLEEP HERE ^^')
|
#!/usr/bin/env python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
import logging
from json import loads
from itertools import chain
# 3rd party:
from azure.durable_functions import (
DurableOrchestrationContext, Orchestrator, RetryOptions
)
# Internal:
try:
from .dtypes import RetrieverPayload, GenericPayload, Manifest, ProcessMode
from .tasks import housekeeping_tasks
except ImportError:
from housekeeping_orchestrator.dtypes import (
RetrieverPayload, GenericPayload, Manifest, ProcessMode
)
from housekeeping_orchestrator.tasks import housekeeping_tasks
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'main'
]
@Orchestrator.create
def main(context: DurableOrchestrationContext):
retry_twice_opts = RetryOptions(
first_retry_interval_in_milliseconds=5_000,
max_number_of_attempts=2
)
timestamp = context.current_utc_datetime
trigger_payload = loads(context.get_input())
logging.info(f"triggered with payload: {trigger_payload}")
# ------------------------------------------------------------------------------------
# Retrieve blob paths
# ------------------------------------------------------------------------------------
context.set_custom_status("Retrieving artefacts")
logging.info("retrieving artefacts")
task_artefacts = list()
for task_manifest in housekeeping_tasks:
logging.info(f"submitting '{task_manifest["label"]}' to retriever")
artefacts = context.call_activity_with_retry(
"housekeeping_retriever",
input_=RetrieverPayload(
timestamp=timestamp.isoformat(),
environment=trigger_payload['environment'],
manifest=task_manifest
),
retry_options=retry_twice_opts
)
task_artefacts.append(artefacts)
logging.info("awaiting retriever tasks")
retrieved_artefacts = yield context.task_all(task_artefacts)
# ------------------------------------------------------------------------------------
# Submit for archiving
# ------------------------------------------------------------------------------------
context.set_custom_status("Submitting candidates to the archiver")
logging.info("submitting candidates to the archiver")
archive_modes = [ProcessMode.ARCHIVE_AND_DISPOSE, ProcessMode.ARCHIVE_ONLY]
activities = list()
for task in chain(*retrieved_artefacts):
logging.info(f"submitting '{task["manifest"]["label"]}' to archiver")
if task["manifest"]["mode"] not in archive_modes:
logging.info("-- not archived")
continue
activity = context.call_activity_with_retry(
"housekeeping_archiver",
input_=task,
retry_options=retry_twice_opts
)
activities.append(activity)
logging.info("awaiting archiver tasks")
archived_artefacts = yield context.task_all(activities)
# ------------------------------------------------------------------------------------
# Dispose of archived blobs
# ------------------------------------------------------------------------------------
context.set_custom_status("Removing archived data")
logging.info("removing archived data")
disposable_only = filter(
lambda t: t['manifest']['mode'] == ProcessMode.DISPOSE_ONLY,
chain(*retrieved_artefacts)
)
disposal_modes = [ProcessMode.ARCHIVE_AND_DISPOSE, ProcessMode.DISPOSE_ONLY]
activities = list()
for task in chain(archived_artefacts, disposable_only):
logging.info(f"submitting '{task["manifest"]["label"]}' to disposer")
if task["manifest"]["mode"] not in disposal_modes:
logging.info("-- not disposed")
continue
activity = context.call_activity_with_retry(
"housekeeping_disposer",
input_=task,
retry_options=retry_twice_opts
)
activities.append(activity)
logging.info("awaiting disposer tasks")
report = yield context.task_all(activities)
# ------------------------------------------------------------------------------------
context.set_custom_status(f"ALL DONE - processed {report["total_processed"]} artefacts")
return f"DONE - {timestamp.isoformat()}"
| #!/usr/bin/env python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
import logging
from json import loads
from itertools import chain
# 3rd party:
from azure.durable_functions import (
DurableOrchestrationContext, Orchestrator, RetryOptions
)
# Internal:
try:
from .dtypes import RetrieverPayload, GenericPayload, Manifest, ProcessMode
from .tasks import housekeeping_tasks
except ImportError:
from housekeeping_orchestrator.dtypes import (
RetrieverPayload, GenericPayload, Manifest, ProcessMode
)
from housekeeping_orchestrator.tasks import housekeeping_tasks
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'main'
]
@Orchestrator.create
def main(context: DurableOrchestrationContext):
retry_twice_opts = RetryOptions(
first_retry_interval_in_milliseconds=5_000,
max_number_of_attempts=2
)
timestamp = context.current_utc_datetime
trigger_payload = loads(context.get_input())
logging.info(f"triggered with payload: {trigger_payload}")
# ------------------------------------------------------------------------------------
# Retrieve blob paths
# ------------------------------------------------------------------------------------
context.set_custom_status("Retrieving artefacts")
logging.info("retrieving artefacts")
task_artefacts = list()
for task_manifest in housekeeping_tasks:
logging.info(f"submitting '{task_manifest['label']}' to retriever")
artefacts = context.call_activity_with_retry(
"housekeeping_retriever",
input_=RetrieverPayload(
timestamp=timestamp.isoformat(),
environment=trigger_payload['environment'],
manifest=task_manifest
),
retry_options=retry_twice_opts
)
task_artefacts.append(artefacts)
logging.info("awaiting retriever tasks")
retrieved_artefacts = yield context.task_all(task_artefacts)
# ------------------------------------------------------------------------------------
# Submit for archiving
# ------------------------------------------------------------------------------------
context.set_custom_status("Submitting candidates to the archiver")
logging.info("submitting candidates to the archiver")
archive_modes = [ProcessMode.ARCHIVE_AND_DISPOSE, ProcessMode.ARCHIVE_ONLY]
activities = list()
for task in chain(*retrieved_artefacts):
logging.info(f"submitting '{task['manifest']['label']}' to archiver")
if task["manifest"]["mode"] not in archive_modes:
logging.info("-- not archived")
continue
activity = context.call_activity_with_retry(
"housekeeping_archiver",
input_=task,
retry_options=retry_twice_opts
)
activities.append(activity)
logging.info("awaiting archiver tasks")
archived_artefacts = yield context.task_all(activities)
# ------------------------------------------------------------------------------------
# Dispose of archived blobs
# ------------------------------------------------------------------------------------
context.set_custom_status("Removing archived data")
logging.info("removing archived data")
disposable_only = filter(
lambda t: t['manifest']['mode'] == ProcessMode.DISPOSE_ONLY,
chain(*retrieved_artefacts)
)
disposal_modes = [ProcessMode.ARCHIVE_AND_DISPOSE, ProcessMode.DISPOSE_ONLY]
activities = list()
for task in chain(archived_artefacts, disposable_only):
logging.info(f"submitting '{task['manifest']['label']}' to disposer")
if task["manifest"]["mode"] not in disposal_modes:
logging.info("-- not disposed")
continue
activity = context.call_activity_with_retry(
"housekeeping_disposer",
input_=task,
retry_options=retry_twice_opts
)
activities.append(activity)
logging.info("awaiting disposer tasks")
report = yield context.task_all(activities)
# ------------------------------------------------------------------------------------
context.set_custom_status(f"ALL DONE - processed {report['total_processed']} artefacts")
return f"DONE - {timestamp.isoformat()}"
|
""" Helps with running Pylint tests on different modules """
import subprocess
AUTODETECT = 0
def assert_pylint_is_passing(pylintrc, package_dir, number_of_jobs: int = AUTODETECT):
"""Runs Pylint with given inputs. In case of error some helpful Pylint messages are displayed
This is used in different packages
"""
command = f"pylint --jobs={number_of_jobs} --rcfile {pylintrc} -v {package_dir}".split(
" "
)
pipes = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
std_out, _ = pipes.communicate()
if pipes.returncode != 0:
assert (
False
), f"Pylint failed with error\nExit code {pipes.returncode}\n{std_out.decode("utf-8")}"
| """ Helps with running Pylint tests on different modules """
import subprocess
AUTODETECT = 0
def assert_pylint_is_passing(pylintrc, package_dir, number_of_jobs: int = AUTODETECT):
"""Runs Pylint with given inputs. In case of error some helpful Pylint messages are displayed
This is used in different packages
"""
command = f"pylint --jobs={number_of_jobs} --rcfile {pylintrc} -v {package_dir}".split(
" "
)
pipes = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
std_out, _ = pipes.communicate()
if pipes.returncode != 0:
assert (
False
), f"Pylint failed with error\nExit code {pipes.returncode}\n{std_out.decode('utf-8')}"
|
# The MIT License (MIT)
# Copyright (c) 2021-present EQUENOS
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import asyncio
import datetime
import functools
from abc import ABC
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, TypeVar
from disnake.app_commands import ApplicationCommand, UnresolvedGuildApplicationCommandPermissions
from disnake.enums import ApplicationCommandType
from disnake.utils import async_all, maybe_coroutine, warn_deprecated
from .cooldowns import BucketType, CooldownMapping, MaxConcurrency
from .errors import *
if TYPE_CHECKING:
from typing_extensions import ParamSpec
from disnake.interactions import ApplicationCommandInteraction
from ._types import Check, Error, Hook
from .cog import Cog
__all__ = ("InvokableApplicationCommand", "guild_permissions")
T = TypeVar("T")
AppCommandT = TypeVar("AppCommandT", bound="InvokableApplicationCommand")
CogT = TypeVar("CogT", bound="Cog")
HookT = TypeVar("HookT", bound="Hook")
ErrorT = TypeVar("ErrorT", bound="Error")
if TYPE_CHECKING:
P = ParamSpec("P")
else:
P = TypeVar("P")
def _get_overridden_method(method):
return getattr(method.__func__, "__cog_special_method__", method)
def wrap_callback(coro):
@functools.wraps(coro)
async def wrapped(*args, **kwargs):
try:
ret = await coro(*args, **kwargs)
except CommandError:
raise
except asyncio.CancelledError:
return
except Exception as exc:
raise CommandInvokeError(exc) from exc
return ret
return wrapped
class InvokableApplicationCommand(ABC):
"""A base class that implements the protocol for a bot application command.
These are not created manually, instead they are created via the
decorator or functional interface.
"""
body: ApplicationCommand
    def __init__(self, func, *, name: Optional[str] = None, **kwargs):
self.__command_flag__ = None
self._callback: Callable[..., Any] = func
self.name: str = name or func.__name__
self.qualified_name: str = self.name
# only an internal feature for now
self.guild_only: bool = kwargs.get("guild_only", False)
if not isinstance(self.name, str):
raise TypeError("Name of a command must be a string.")
try:
perms = func.__app_command_permissions__
except AttributeError:
perms = {}
self.permissions: Dict[int, UnresolvedGuildApplicationCommandPermissions] = perms
try:
checks = func.__commands_checks__
checks.reverse()
except AttributeError:
checks = kwargs.get("checks", [])
self.checks: List[Check] = checks
try:
cooldown = func.__commands_cooldown__
except AttributeError:
cooldown = kwargs.get("cooldown")
# TODO: Figure out how cooldowns even work with interactions
if cooldown is None:
buckets = CooldownMapping(cooldown, BucketType.default)
elif isinstance(cooldown, CooldownMapping):
buckets = cooldown
else:
raise TypeError("Cooldown must be a an instance of CooldownMapping or None.")
self._buckets: CooldownMapping = buckets
try:
max_concurrency = func.__commands_max_concurrency__
except AttributeError:
max_concurrency = kwargs.get("max_concurrency")
self._max_concurrency: Optional[MaxConcurrency] = max_concurrency
self.cog: Optional[Cog] = None
self.guild_ids: Optional[List[int]] = None
self.auto_sync: bool = True
self._before_invoke: Optional[Hook] = None
self._after_invoke: Optional[Hook] = None
@property
def callback(self) -> Callable[..., Any]:
"""Callable[..., Any]: The callback associated with the interaction."""
return self._callback
def add_check(self, func: Check) -> None:
"""Adds a check to the application command.
This is the non-decorator interface to :func:`.check`.
Parameters
-----------
func
The function that will be used as a check.
"""
self.checks.append(func)
def remove_check(self, func: Check) -> None:
"""Removes a check from the application command.
This function is idempotent and will not raise an exception
if the function is not in the command's checks.
Parameters
-----------
func
The function to remove from the checks.
"""
try:
self.checks.remove(func)
except ValueError:
pass
async def __call__(self, interaction: ApplicationCommandInteraction, *args, **kwargs) -> Any:
"""|coro|
Calls the internal callback that the application command holds.
.. note::
This bypasses all mechanisms -- including checks, converters,
invoke hooks, cooldowns, etc. You must take care to pass
the proper arguments and types to this function.
"""
if self.cog is not None:
return await self.callback(self.cog, interaction, *args, **kwargs)
else:
return await self.callback(interaction, *args, **kwargs)
def _prepare_cooldowns(self, inter: ApplicationCommandInteraction) -> None:
if self._buckets.valid:
dt = inter.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
bucket = self._buckets.get_bucket(inter, current) # type: ignore
if bucket is not None:
retry_after = bucket.update_rate_limit(current)
if retry_after:
raise CommandOnCooldown(bucket, retry_after, self._buckets.type) # type: ignore
async def prepare(self, inter: ApplicationCommandInteraction) -> None:
inter.application_command = self
if not await self.can_run(inter):
raise CheckFailure(f"The check functions for command {self.qualified_name!r} failed.")
if self._max_concurrency is not None:
await self._max_concurrency.acquire(inter) # type: ignore
try:
self._prepare_cooldowns(inter)
await self.call_before_hooks(inter) # type: ignore
except:
if self._max_concurrency is not None:
await self._max_concurrency.release(inter) # type: ignore
raise
def is_on_cooldown(self, inter: ApplicationCommandInteraction) -> bool:
"""Checks whether the application command is currently on cooldown.
Parameters
-----------
inter: :class:`.ApplicationCommandInteraction`
The interaction with the application command currently being invoked.
Returns
--------
:class:`bool`
A boolean indicating if the application command is on cooldown.
"""
if not self._buckets.valid:
return False
bucket = self._buckets.get_bucket(inter) # type: ignore
dt = inter.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
return bucket.get_tokens(current) == 0
def reset_cooldown(self, inter: ApplicationCommandInteraction) -> None:
"""Resets the cooldown on this application command.
Parameters
-----------
inter: :class:`.ApplicationCommandInteraction`
The interaction with this application command
"""
if self._buckets.valid:
bucket = self._buckets.get_bucket(inter) # type: ignore
bucket.reset()
def get_cooldown_retry_after(self, inter: ApplicationCommandInteraction) -> float:
"""Retrieves the amount of seconds before this application command can be tried again.
Parameters
-----------
inter: :class:`.ApplicationCommandInteraction`
The interaction with this application command.
Returns
--------
:class:`float`
The amount of time left on this command's cooldown in seconds.
If this is ``0.0`` then the command isn't on cooldown.
"""
if self._buckets.valid:
bucket = self._buckets.get_bucket(inter) # type: ignore
dt = inter.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
return bucket.get_retry_after(current)
return 0.0
async def invoke(self, inter: ApplicationCommandInteraction, *args, **kwargs) -> None:
"""
This method isn't really usable in this class, but it's usable in subclasses.
"""
if self.guild_only and inter.guild_id is None:
await inter.response.send_message("This command cannot be used in DMs", ephemeral=True)
return
await self.prepare(inter)
try:
await self(inter, *args, **kwargs)
except CommandError:
inter.command_failed = True
raise
except asyncio.CancelledError:
inter.command_failed = True
return
except Exception as exc:
inter.command_failed = True
raise CommandInvokeError(exc) from exc
finally:
if self._max_concurrency is not None:
await self._max_concurrency.release(inter) # type: ignore
await self.call_after_hooks(inter)
def error(self, coro: ErrorT) -> ErrorT:
"""A decorator that registers a coroutine as a local error handler.
A local error handler is an error event limited to a single application command.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the local error handler.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The error handler must be a coroutine.")
self.on_error: Error = coro
return coro
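    # Hedged usage sketch (editorial addition, not from the disnake docs):
    # registering a local error handler on a command instance created
    # elsewhere; `ping` is a made-up slash command.
    #
    #     @ping.error
    #     async def ping_error(inter, error):
    #         await inter.response.send_message(f"ping failed: {error}", ephemeral=True)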
def has_error_handler(self) -> bool:
"""
Checks whether the application command has an error handler registered.
"""
return hasattr(self, "on_error")
async def _call_local_error_handler(
self, inter: ApplicationCommandInteraction, error: CommandError
) -> Any:
if not self.has_error_handler():
return
injected = wrap_callback(self.on_error)
if self.cog is not None:
return await injected(self.cog, inter, error)
else:
return await injected(inter, error)
async def _call_external_error_handlers(
self, inter: ApplicationCommandInteraction, error: CommandError
) -> None:
"""Overridden in subclasses"""
raise error
async def dispatch_error(
self, inter: ApplicationCommandInteraction, error: CommandError
) -> None:
if not await self._call_local_error_handler(inter, error):
await self._call_external_error_handlers(inter, error)
async def call_before_hooks(self, inter: ApplicationCommandInteraction) -> None:
# now that we're done preparing we can call the pre-command hooks
# first, call the command local hook:
cog = self.cog
if self._before_invoke is not None:
# should be cog if @commands.before_invoke is used
instance = getattr(self._before_invoke, "__self__", cog)
# __self__ only exists for methods, not functions
# however, if @command.before_invoke is used, it will be a function
if instance:
await self._before_invoke(instance, inter) # type: ignore
else:
await self._before_invoke(inter) # type: ignore
if inter.data.type is ApplicationCommandType.chat_input:
partial_attr_name = "slash_command"
elif inter.data.type is ApplicationCommandType.user:
partial_attr_name = "user_command"
elif inter.data.type is ApplicationCommandType.message:
partial_attr_name = "message_command"
else:
return
# call the cog local hook if applicable:
if cog is not None:
meth = getattr(cog, f"cog_before_{partial_attr_name}_invoke", None)
hook = _get_overridden_method(meth)
if hook is not None:
await hook(inter)
# call the bot global hook if necessary
hook = getattr(inter.bot, f"_before_{partial_attr_name}_invoke", None)
if hook is not None:
await hook(inter)
async def call_after_hooks(self, inter: ApplicationCommandInteraction) -> None:
cog = self.cog
if self._after_invoke is not None:
instance = getattr(self._after_invoke, "__self__", cog)
if instance:
await self._after_invoke(instance, inter) # type: ignore
else:
await self._after_invoke(inter) # type: ignore
if inter.data.type is ApplicationCommandType.chat_input:
partial_attr_name = "slash_command"
elif inter.data.type is ApplicationCommandType.user:
partial_attr_name = "user_command"
elif inter.data.type is ApplicationCommandType.message:
partial_attr_name = "message_command"
else:
return
# call the cog local hook if applicable:
if cog is not None:
meth = getattr(cog, f"cog_after_{partial_attr_name}_invoke", None)
hook = _get_overridden_method(meth)
if hook is not None:
await hook(inter)
# call the bot global hook if necessary
hook = getattr(inter.bot, f"_after_{partial_attr_name}_invoke", None)
if hook is not None:
await hook(inter)
def before_invoke(self, coro: HookT) -> HookT:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is called.
This pre-invoke hook takes a sole parameter, a :class:`.ApplicationCommandInteraction`.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._before_invoke = coro
return coro
def after_invoke(self, coro: HookT) -> HookT:
"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is called.
This post-invoke hook takes a sole parameter, a :class:`.ApplicationCommandInteraction`.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The post-invoke hook must be a coroutine.")
self._after_invoke = coro
return coro
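# NOTE (editor): a short sketch of the two hook decorators defined above; `my_command` is
# a hypothetical slash command object, and each hook receives only the interaction.
#
#     @my_command.before_invoke
#     async def log_start(inter):
#         print(f"starting {inter.application_command.qualified_name}")
#
#     @my_command.after_invoke
#     async def log_end(inter):
#         print(f"finished {inter.application_command.qualified_name}")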
@property
def cog_name(self) -> Optional[str]:
"""Optional[:class:`str`]: The name of the cog this application command belongs to, if any."""
return type(self.cog).__cog_name__ if self.cog is not None else None
async def can_run(self, inter: ApplicationCommandInteraction) -> bool:
"""|coro|
Checks if the command can be executed by checking all the predicates
inside the :attr:`~Command.checks` attribute.
Parameters
-----------
inter: :class:`.ApplicationCommandInteraction`
The interaction with the application command currently being invoked.
Raises
-------
:class:`CommandError`
Any application command error that was raised during a check call will be propagated
by this function.
Returns
--------
:class:`bool`
A boolean indicating if the application command can be invoked.
"""
original = inter.application_command
inter.application_command = self
if inter.data.type is ApplicationCommandType.chat_input:
partial_attr_name = "slash_command"
elif inter.data.type is ApplicationCommandType.user:
partial_attr_name = "user_command"
elif inter.data.type is ApplicationCommandType.message:
partial_attr_name = "message_command"
else:
return True
try:
if inter.bot and not await inter.bot.application_command_can_run(inter):
raise CheckFailure(
f"The global check functions for command {self.qualified_name} failed."
)
cog = self.cog
if cog is not None:
meth = getattr(cog, f"cog_{partial_attr_name}_check", None)
local_check = _get_overridden_method(meth)
if local_check is not None:
ret = await maybe_coroutine(local_check, inter)
if not ret:
return False
predicates = self.checks
if not predicates:
# since we have no checks, then we just return True.
return True
return await async_all(predicate(inter) for predicate in predicates) # type: ignore
finally:
inter.application_command = original

# kwargs are annotated as None to ensure the user gets a linter error when using them
def guild_permissions(
guild_id: int,
*,
roles: Optional[Mapping[int, bool]] = None,
users: Optional[Mapping[int, bool]] = None,
owner: bool = None,
**kwargs: None,
) -> Callable[[T], T]:
"""
A decorator that sets application command permissions in the specified guild.
This type of permissions "greys out" the command in the command picker.
If you want to change this type of permissions dynamically, this decorator is not useful.
Parameters
----------
guild_id: :class:`int`
the ID of the guild to apply the permissions to.
roles: Mapping[:class:`int`, :class:`bool`]
a mapping of role IDs to boolean values indicating the permission. ``True`` = allow, ``False`` = deny.
users: Mapping[:class:`int`, :class:`bool`]
a mapping of user IDs to boolean values indicating the permission. ``True`` = allow, ``False`` = deny.
owner: :class:`bool`
whether to allow/deny the bot owner(s) to use the command. Set to ``None`` to ignore.
"""
if kwargs:
warn_deprecated(
f"guild_permissions got unexpected deprecated keyword arguments: {", ".join(map(repr, kwargs))}",
stacklevel=2,
)
roles = roles or kwargs.get("role_ids")
users = users or kwargs.get("user_ids")
perms = UnresolvedGuildApplicationCommandPermissions(
role_ids=roles, user_ids=users, owner=owner
)
def decorator(func: T) -> T:
if isinstance(func, InvokableApplicationCommand):
func.permissions[guild_id] = perms
else:
if not hasattr(func, "__app_command_permissions__"):
func.__app_command_permissions__ = {} # type: ignore
func.__app_command_permissions__[guild_id] = perms # type: ignore
return func
return decorator
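# NOTE (editor): a hedged usage sketch for the guild_permissions decorator above; the
# guild, role and user IDs are placeholders, and the command name is invented. The
# permissions decorator sits below the command decorator so it runs first and attaches
# __app_command_permissions__ before the command object is built.
#
#     @bot.slash_command()
#     @guild_permissions(1234567890, roles={1111: True}, users={2222: False}, owner=True)
#     async def admin_tool(inter):
#         await inter.response.send_message("ok")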
| # The MIT License (MIT)
# Copyright (c) 2021-present EQUENOS
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import asyncio
import datetime
import functools
from abc import ABC
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, TypeVar
from disnake.app_commands import ApplicationCommand, UnresolvedGuildApplicationCommandPermissions
from disnake.enums import ApplicationCommandType
from disnake.utils import async_all, maybe_coroutine, warn_deprecated
from .cooldowns import BucketType, CooldownMapping, MaxConcurrency
from .errors import *
if TYPE_CHECKING:
from typing_extensions import ParamSpec
from disnake.interactions import ApplicationCommandInteraction
from ._types import Check, Error, Hook
from .cog import Cog
__all__ = ("InvokableApplicationCommand", "guild_permissions")
T = TypeVar("T")
AppCommandT = TypeVar("AppCommandT", bound="InvokableApplicationCommand")
CogT = TypeVar("CogT", bound="Cog")
HookT = TypeVar("HookT", bound="Hook")
ErrorT = TypeVar("ErrorT", bound="Error")
if TYPE_CHECKING:
P = ParamSpec("P")
else:
P = TypeVar("P")
def _get_overridden_method(method):
return getattr(method.__func__, "__cog_special_method__", method)
def wrap_callback(coro):
@functools.wraps(coro)
async def wrapped(*args, **kwargs):
try:
ret = await coro(*args, **kwargs)
except CommandError:
raise
except asyncio.CancelledError:
return
except Exception as exc:
raise CommandInvokeError(exc) from exc
return ret
return wrapped
class InvokableApplicationCommand(ABC):
"""A base class that implements the protocol for a bot application command.
These are not created manually, instead they are created via the
decorator or functional interface.
"""
body: ApplicationCommand
def __init__(self, func, *, name: str = None, **kwargs):
self.__command_flag__ = None
self._callback: Callable[..., Any] = func
self.name: str = name or func.__name__
self.qualified_name: str = self.name
# only an internal feature for now
self.guild_only: bool = kwargs.get("guild_only", False)
if not isinstance(self.name, str):
raise TypeError("Name of a command must be a string.")
try:
perms = func.__app_command_permissions__
except AttributeError:
perms = {}
self.permissions: Dict[int, UnresolvedGuildApplicationCommandPermissions] = perms
try:
checks = func.__commands_checks__
checks.reverse()
except AttributeError:
checks = kwargs.get("checks", [])
self.checks: List[Check] = checks
try:
cooldown = func.__commands_cooldown__
except AttributeError:
cooldown = kwargs.get("cooldown")
# TODO: Figure out how cooldowns even work with interactions
if cooldown is None:
buckets = CooldownMapping(cooldown, BucketType.default)
elif isinstance(cooldown, CooldownMapping):
buckets = cooldown
else:
raise TypeError("Cooldown must be a an instance of CooldownMapping or None.")
self._buckets: CooldownMapping = buckets
try:
max_concurrency = func.__commands_max_concurrency__
except AttributeError:
max_concurrency = kwargs.get("max_concurrency")
self._max_concurrency: Optional[MaxConcurrency] = max_concurrency
self.cog: Optional[Cog] = None
self.guild_ids: Optional[List[int]] = None
self.auto_sync: bool = True
self._before_invoke: Optional[Hook] = None
self._after_invoke: Optional[Hook] = None
@property
def callback(self) -> Callable[..., Any]:
"""Callable[..., Any]: The callback associated with the interaction."""
return self._callback
def add_check(self, func: Check) -> None:
"""Adds a check to the application command.
This is the non-decorator interface to :func:`.check`.
Parameters
-----------
func
The function that will be used as a check.
"""
self.checks.append(func)
def remove_check(self, func: Check) -> None:
"""Removes a check from the application command.
This function is idempotent and will not raise an exception
if the function is not in the command's checks.
Parameters
-----------
func
The function to remove from the checks.
"""
try:
self.checks.remove(func)
except ValueError:
pass
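# NOTE (editor): a minimal sketch of the non-decorator check interface above; the
# predicate name is hypothetical and the predicate may be sync or async.
#
#     def is_guild_owner(inter):
#         return inter.guild is not None and inter.guild.owner_id == inter.author.id
#
#     my_command.add_check(is_guild_owner)
#     # later, removal is safe even if the check was never added:
#     my_command.remove_check(is_guild_owner)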
async def __call__(self, interaction: ApplicationCommandInteraction, *args, **kwargs) -> Any:
"""|coro|
Calls the internal callback that the application command holds.
.. note::
This bypasses all mechanisms -- including checks, converters,
invoke hooks, cooldowns, etc. You must take care to pass
the proper arguments and types to this function.
"""
if self.cog is not None:
return await self.callback(self.cog, interaction, *args, **kwargs)
else:
return await self.callback(interaction, *args, **kwargs)
def _prepare_cooldowns(self, inter: ApplicationCommandInteraction) -> None:
if self._buckets.valid:
dt = inter.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
bucket = self._buckets.get_bucket(inter, current) # type: ignore
if bucket is not None:
retry_after = bucket.update_rate_limit(current)
if retry_after:
raise CommandOnCooldown(bucket, retry_after, self._buckets.type) # type: ignore
async def prepare(self, inter: ApplicationCommandInteraction) -> None:
inter.application_command = self
if not await self.can_run(inter):
raise CheckFailure(f"The check functions for command {self.qualified_name!r} failed.")
if self._max_concurrency is not None:
await self._max_concurrency.acquire(inter) # type: ignore
try:
self._prepare_cooldowns(inter)
await self.call_before_hooks(inter) # type: ignore
except:
if self._max_concurrency is not None:
await self._max_concurrency.release(inter) # type: ignore
raise
def is_on_cooldown(self, inter: ApplicationCommandInteraction) -> bool:
"""Checks whether the application command is currently on cooldown.
Parameters
-----------
inter: :class:`.ApplicationCommandInteraction`
The interaction with the application command currently being invoked.
Returns
--------
:class:`bool`
A boolean indicating if the application command is on cooldown.
"""
if not self._buckets.valid:
return False
bucket = self._buckets.get_bucket(inter) # type: ignore
dt = inter.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
return bucket.get_tokens(current) == 0
def reset_cooldown(self, inter: ApplicationCommandInteraction) -> None:
"""Resets the cooldown on this application command.
Parameters
-----------
inter: :class:`.ApplicationCommandInteraction`
The interaction with this application command
"""
if self._buckets.valid:
bucket = self._buckets.get_bucket(inter) # type: ignore
bucket.reset()
def get_cooldown_retry_after(self, inter: ApplicationCommandInteraction) -> float:
"""Retrieves the amount of seconds before this application command can be tried again.
Parameters
-----------
inter: :class:`.ApplicationCommandInteraction`
The interaction with this application command.
Returns
--------
:class:`float`
The amount of time left on this command's cooldown in seconds.
If this is ``0.0`` then the command isn't on cooldown.
"""
if self._buckets.valid:
bucket = self._buckets.get_bucket(inter) # type: ignore
dt = inter.created_at
current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()
return bucket.get_retry_after(current)
return 0.0
async def invoke(self, inter: ApplicationCommandInteraction, *args, **kwargs) -> None:
"""
This method isn't really usable in this class, but it's usable in subclasses.
"""
if self.guild_only and inter.guild_id is None:
await inter.response.send_message("This command cannot be used in DMs", ephemeral=True)
return
await self.prepare(inter)
try:
await self(inter, *args, **kwargs)
except CommandError:
inter.command_failed = True
raise
except asyncio.CancelledError:
inter.command_failed = True
return
except Exception as exc:
inter.command_failed = True
raise CommandInvokeError(exc) from exc
finally:
if self._max_concurrency is not None:
await self._max_concurrency.release(inter) # type: ignore
await self.call_after_hooks(inter)
def error(self, coro: ErrorT) -> ErrorT:
"""A decorator that registers a coroutine as a local error handler.
A local error handler is an error event limited to a single application command.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the local error handler.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The error handler must be a coroutine.")
self.on_error: Error = coro
return coro
def has_error_handler(self) -> bool:
"""
Checks whether the application command has an error handler registered.
"""
return hasattr(self, "on_error")
async def _call_local_error_handler(
self, inter: ApplicationCommandInteraction, error: CommandError
) -> Any:
if not self.has_error_handler():
return
injected = wrap_callback(self.on_error)
if self.cog is not None:
return await injected(self.cog, inter, error)
else:
return await injected(inter, error)
async def _call_external_error_handlers(
self, inter: ApplicationCommandInteraction, error: CommandError
) -> None:
"""Overridden in subclasses"""
raise error
async def dispatch_error(
self, inter: ApplicationCommandInteraction, error: CommandError
) -> None:
if not await self._call_local_error_handler(inter, error):
await self._call_external_error_handlers(inter, error)
async def call_before_hooks(self, inter: ApplicationCommandInteraction) -> None:
# now that we're done preparing we can call the pre-command hooks
# first, call the command local hook:
cog = self.cog
if self._before_invoke is not None:
# should be cog if @commands.before_invoke is used
instance = getattr(self._before_invoke, "__self__", cog)
# __self__ only exists for methods, not functions
# however, if @command.before_invoke is used, it will be a function
if instance:
await self._before_invoke(instance, inter) # type: ignore
else:
await self._before_invoke(inter) # type: ignore
if inter.data.type is ApplicationCommandType.chat_input:
partial_attr_name = "slash_command"
elif inter.data.type is ApplicationCommandType.user:
partial_attr_name = "user_command"
elif inter.data.type is ApplicationCommandType.message:
partial_attr_name = "message_command"
else:
return
# call the cog local hook if applicable:
if cog is not None:
meth = getattr(cog, f"cog_before_{partial_attr_name}_invoke", None)
hook = _get_overridden_method(meth)
if hook is not None:
await hook(inter)
# call the bot global hook if necessary
hook = getattr(inter.bot, f"_before_{partial_attr_name}_invoke", None)
if hook is not None:
await hook(inter)
async def call_after_hooks(self, inter: ApplicationCommandInteraction) -> None:
cog = self.cog
if self._after_invoke is not None:
instance = getattr(self._after_invoke, "__self__", cog)
if instance:
await self._after_invoke(instance, inter) # type: ignore
else:
await self._after_invoke(inter) # type: ignore
if inter.data.type is ApplicationCommandType.chat_input:
partial_attr_name = "slash_command"
elif inter.data.type is ApplicationCommandType.user:
partial_attr_name = "user_command"
elif inter.data.type is ApplicationCommandType.message:
partial_attr_name = "message_command"
else:
return
# call the cog local hook if applicable:
if cog is not None:
meth = getattr(cog, f"cog_after_{partial_attr_name}_invoke", None)
hook = _get_overridden_method(meth)
if hook is not None:
await hook(inter)
# call the bot global hook if necessary
hook = getattr(inter.bot, f"_after_{partial_attr_name}_invoke", None)
if hook is not None:
await hook(inter)
def before_invoke(self, coro: HookT) -> HookT:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is called.
This pre-invoke hook takes a sole parameter, a :class:`.ApplicationCommandInteraction`.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._before_invoke = coro
return coro
def after_invoke(self, coro: HookT) -> HookT:
"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is called.
This post-invoke hook takes a sole parameter, a :class:`.ApplicationCommandInteraction`.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The post-invoke hook must be a coroutine.")
self._after_invoke = coro
return coro
@property
def cog_name(self) -> Optional[str]:
"""Optional[:class:`str`]: The name of the cog this application command belongs to, if any."""
return type(self.cog).__cog_name__ if self.cog is not None else None
async def can_run(self, inter: ApplicationCommandInteraction) -> bool:
"""|coro|
Checks if the command can be executed by checking all the predicates
inside the :attr:`~Command.checks` attribute.
Parameters
-----------
inter: :class:`.ApplicationCommandInteraction`
The interaction with the application command currently being invoked.
Raises
-------
:class:`CommandError`
Any application command error that was raised during a check call will be propagated
by this function.
Returns
--------
:class:`bool`
A boolean indicating if the application command can be invoked.
"""
original = inter.application_command
inter.application_command = self
if inter.data.type is ApplicationCommandType.chat_input:
partial_attr_name = "slash_command"
elif inter.data.type is ApplicationCommandType.user:
partial_attr_name = "user_command"
elif inter.data.type is ApplicationCommandType.message:
partial_attr_name = "message_command"
else:
return True
try:
if inter.bot and not await inter.bot.application_command_can_run(inter):
raise CheckFailure(
f"The global check functions for command {self.qualified_name} failed."
)
cog = self.cog
if cog is not None:
meth = getattr(cog, f"cog_{partial_attr_name}_check", None)
local_check = _get_overridden_method(meth)
if local_check is not None:
ret = await maybe_coroutine(local_check, inter)
if not ret:
return False
predicates = self.checks
if not predicates:
# since we have no checks, then we just return True.
return True
return await async_all(predicate(inter) for predicate in predicates) # type: ignore
finally:
inter.application_command = original
# kwargs are annotated as None to ensure the user gets a linter error when using them
def guild_permissions(
guild_id: int,
*,
roles: Optional[Mapping[int, bool]] = None,
users: Optional[Mapping[int, bool]] = None,
owner: bool = None,
**kwargs: None,
) -> Callable[[T], T]:
"""
A decorator that sets application command permissions in the specified guild.
This type of permissions "greys out" the command in the command picker.
If you want to change this type of permissions dynamically, this decorator is not useful.
Parameters
----------
guild_id: :class:`int`
the ID of the guild to apply the permissions to.
roles: Mapping[:class:`int`, :class:`bool`]
a mapping of role IDs to boolean values indicating the permission. ``True`` = allow, ``False`` = deny.
users: Mapping[:class:`int`, :class:`bool`]
a mapping of user IDs to boolean values indicating the permission. ``True`` = allow, ``False`` = deny.
owner: :class:`bool`
whether to allow/deny the bot owner(s) to use the command. Set to ``None`` to ignore.
"""
if kwargs:
warn_deprecated(
f"guild_permissions got unexpected deprecated keyword arguments: {', '.join(map(repr, kwargs))}",
stacklevel=2,
)
roles = roles or kwargs.get("role_ids")
users = users or kwargs.get("user_ids")
perms = UnresolvedGuildApplicationCommandPermissions(
role_ids=roles, user_ids=users, owner=owner
)
def decorator(func: T) -> T:
if isinstance(func, InvokableApplicationCommand):
func.permissions[guild_id] = perms
else:
if not hasattr(func, "__app_command_permissions__"):
func.__app_command_permissions__ = {} # type: ignore
func.__app_command_permissions__[guild_id] = perms # type: ignore
return func
return decorator
|
# Licensed under the MIT License
# https://github.com/craigahobbs/chisel/blob/main/LICENSE
"""
Chisel action class
"""
from cgi import parse_header
from functools import partial
from http import HTTPStatus
from json import loads as json_loads
from schema_markdown import SchemaMarkdownParser, ValidationError, decode_query_string, get_referenced_types, validate_type
from .app import Context
from .request import Request
def action(action_callback=None, **kwargs):
"""
Decorator for creating an :class:`~chisel.Action` object that wraps an action callback function. For example:
>>> @chisel.action(spec='''
... # Sum a list of numbers
... action sum_numbers
... urls
... GET
... query
... # The list of numbers to sum
... int[len > 0] numbers
... output
... # The sum of the numbers
... int sum
... ''')
... def sum_numbers(ctx, req):
... return {'sum': sum(req['numbers'])}
...
>>> application = chisel.Application()
>>> application.add_request(sum_numbers)
>>> application.request('GET', '/sum_numbers', query_string='numbers.0=1&numbers.1=2&numbers.2=3')
('200 OK', [('Content-Type', 'application/json')], b'{"sum":6}')
Chisel actions schema-validate their input before calling the callback function. For example:
>>> status, _, response = application.request('GET', '/sum_numbers', query_string='numbers=1')
>>> status
'400 Bad Request'
>>> import json
>>> from pprint import pprint
>>> pprint(json.loads(response.decode('utf-8')))
{'error': 'InvalidInput',
'member': 'numbers',
'message': "Invalid value '1' (type 'str') for member 'numbers', expected "
"type 'array' (query string)"}
When :attr:`~chisel.Application.validate_output` is True, the response dictionary is also validated against the output schema.
:param ~collections.abc.Callable action_callback: The action callback function
"""
if action_callback is None:
return partial(action, **kwargs)
return Action(action_callback, **kwargs).decorate_module(action_callback)
class ActionError(Exception):
"""
An action error exception. Raise this exception within an action callback function to respond with an error.
>>> @chisel.action(spec='''
... action my_action
... urls
... GET
... errors
... AlwaysError
... ''')
... def my_action(ctx, req):
... raise chisel.ActionError('AlwaysError')
...
>>> application = chisel.Application()
>>> application.add_request(my_action)
>>> application.request('GET', '/my_action')
('400 Bad Request', [('Content-Type', 'application/json')], b'{"error":"AlwaysError"}')
:param str error: The error code
:param str message: Optional error message
:param status: The HTTP response status
:type status: ~http.HTTPStatus or str
"""
__slots__ = ('error', 'message', 'status')
def __init__(self, error, message=None, status=None):
super().__init__(error)
#: The error code
self.error = error
#: The error message or None
self.message = message
#: The HTTP response status
self.status = status
class _ActionErrorInternal(Exception):
__slots__ = ('status', 'error', 'message', 'member')
def __init__(self, status, error, message=None, member=None):
super().__init__(error)
self.status = status
self.error = error
self.message = message
self.member = member
class Action(Request):
"""
A schema-validated, JSON API request. An Action wraps a callback function that it calls when a request occurs. Here's
an example of an action callback function:
>>> def my_action(ctx, req):
... return {}
The first argument, "ctx", is the :class:`~chisel.Context` object. The second argument is the request object which
contains the schema-validated, combined path parameters, query string parameters, and JSON request content
parameters.
:param ~collections.abc.Callable action_callback: The action callback function
:param str name: The action request name
:param list(tuple) urls: The list of URL method/path tuples. The first value is the HTTP request method (e.g. 'GET')
or None to match any. The second value is the URL path or None to use the default path.
:param dict types: Optional dictionary of user type models
:param str spec: Optional action :ref:`schema-markdown:Schema Markdown` specification.
If a specification isn't provided, the action's type model must be present in the "types" argument.
:param bool wsgi_response: If True, the callback function's response is a WSGI application function
response. Default is False.
:param str jsonp: Optional JSONP key
"""
__slots__ = ('action_callback', 'types', 'wsgi_response', 'jsonp')
def __init__(self, action_callback, name=None, urls=(('POST', None),), types=None, spec=None, wsgi_response=False, jsonp=None):
# Use the action callback name if no name is provided
if name is None:
name = action_callback.__name__
# Spec provided?
if types is None:
types = {}
if spec is not None:
SchemaMarkdownParser(spec, types=types)
# Assert that the action model exists
model_type = types.get(name)
model = model_type.get('action') if model_type is not None else None
assert model is not None, f'Unknown action "{name}"'
# Get the model's URLs, if any
if 'urls' in model:
urls = [(url.get('method'), url.get('path')) for url in model['urls']]
# Initialize Request
super().__init__(name=name, urls=urls, doc=model.get('doc'), doc_group=model.get('docGroup'))
#: The action callback function
self.action_callback = action_callback
#: The user type model dictionary that contains the action model and all referenced user types
self.types = types
#: If True, the callback function's response is a WSGI application function response.
self.wsgi_response = wsgi_response
#: JSONP key or None
self.jsonp = jsonp
@property
def model(self):
"""Get the action model"""
return self.types[self.name]['action']
def _get_section_type(self, section):
model = self.model
if section in model:
section_type_name = model[section]
section_types = self.types
else:
# No section type - create an empty struct type
section_type_name = f'{model['name']}_{section}'
section_types = {
section_type_name: {
'struct': {
'name': section_type_name
}
}
}
return section_types, section_type_name
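# NOTE (editor): as a concrete illustration of the fallback above, an action named
# "my_action" with no "query" section would get a synthesized empty struct type
# roughly like:
#
#     {'my_action_query': {'struct': {'name': 'my_action_query'}}}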
def _get_error_type(self):
model = self.model
output_type_name = f'{model['name']}_output_error'
if 'errors' in model:
error_type_name = model['errors']
output_types = get_referenced_types(self.types, error_type_name)
else:
error_type_name = f'{model['name']}_errors'
output_types = {error_type_name: {'enum': {'name': error_type_name}}}
output_types[output_type_name] = {
'struct': {
'name': output_type_name,
'members': [
{'name': 'error', 'type': {'user': error_type_name}},
{'name': 'message', 'type': {'builtin': 'string'}, 'optional': True}
]
}
}
return output_types, output_type_name
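# NOTE (editor): for an action named "my_action" with no explicit "errors" enum, the
# method above produces output types roughly like:
#
#     {
#         'my_action_errors': {'enum': {'name': 'my_action_errors'}},
#         'my_action_output_error': {
#             'struct': {
#                 'name': 'my_action_output_error',
#                 'members': [
#                     {'name': 'error', 'type': {'user': 'my_action_errors'}},
#                     {'name': 'message', 'type': {'builtin': 'string'}, 'optional': True}
#                 ]
#             }
#         }
#     }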
def __call__(self, environ, unused_start_response):
ctx = environ[Context.ENVIRON_CTX]
# Handle the action
is_get = (environ['REQUEST_METHOD'] == 'GET')
jsonp = None
validate_output = True
try:
# Read the request content
try:
content = None if is_get else environ['wsgi.input'].read()
except:
raise _ActionErrorInternal(HTTPStatus.REQUEST_TIMEOUT, 'IOError', message='Error reading request content')
# De-serialize the JSON content
try:
if content:
content_type = environ.get('CONTENT_TYPE')
content_charset = ('utf-8' if content_type is None else parse_header(content_type)[1].get('charset', 'utf-8'))
content_json = content.decode(content_charset)
request = json_loads(content_json)
else:
request = {}
except Exception as exc:
ctx.log.warning("Error decoding JSON content for action '%s'", self.name)
raise _ActionErrorInternal(HTTPStatus.BAD_REQUEST, 'InvalidInput', message=f'Invalid request JSON: {exc}')
# Validate the content
input_types, input_type = self._get_section_type('input')
try:
request = validate_type(input_types, input_type, request)
except ValidationError as exc:
ctx.log.warning("Invalid content for action '%s': %s", self.name, f'{exc}')
raise _ActionErrorInternal(
HTTPStatus.BAD_REQUEST,
'InvalidInput',
message=f'{exc} (content)',
member=exc.member
)
# Decode the query string
query_string = environ.get('QUERY_STRING', '')
try:
request_query = decode_query_string(query_string)
except Exception as exc:
ctx.log.warning("Error decoding query string for action '%s': %.1000r", self.name, query_string)
raise _ActionErrorInternal(HTTPStatus.BAD_REQUEST, 'InvalidInput', message=f'{exc}')
# JSONP?
if is_get and self.jsonp and self.jsonp in request_query:
jsonp = f'{request_query[self.jsonp]}'
del request_query[self.jsonp]
# Validate the query string
query_types, query_type = self._get_section_type('query')
try:
request_query = validate_type(query_types, query_type, request_query)
except ValidationError as exc:
ctx.log.warning("Invalid query string for action '%s': %s", self.name, f'{exc}')
raise _ActionErrorInternal(
HTTPStatus.BAD_REQUEST,
'InvalidInput',
message=f'{exc} (query string)',
member=exc.member
)
# Validate the path args
path_types, path_type = self._get_section_type('path')
request_path = ctx.url_args if ctx.url_args is not None else {}
try:
request_path = validate_type(path_types, path_type, request_path)
except ValidationError as exc:
ctx.log.warning("Invalid path for action '%s': %s", self.name, f'{exc}')
raise _ActionErrorInternal(
HTTPStatus.BAD_REQUEST,
'InvalidInput',
message=f'{exc} (path)',
member=exc.member
)
# Copy top-level path keys and query string keys
for request_key, request_value in request_path.items():
request[request_key] = request_value
for request_key, request_value in request_query.items():
request[request_key] = request_value
# Call the action callback
try:
status = HTTPStatus.OK
response = self.action_callback(ctx, request)
if self.wsgi_response:
return response
if response is None:
response = {}
output_types, output_type = self._get_section_type('output')
except ActionError as exc:
status = exc.status or HTTPStatus.BAD_REQUEST
response = {'error': exc.error}
if exc.message is not None:
response['message'] = exc.message
if ctx.app.validate_output:
if exc.error in ('UnexpectedError',):
validate_output = False
else:
output_types, output_type = self._get_error_type()
except Exception as exc:
ctx.log.exception("Unexpected error in action '%s'", self.name)
raise _ActionErrorInternal(HTTPStatus.INTERNAL_SERVER_ERROR, 'UnexpectedError')
# Validate the response
if validate_output and ctx.app.validate_output:
try:
validate_type(output_types, output_type, response)
except ValidationError as exc:
ctx.log.error("Invalid output returned from action '%s': %s", self.name, f'{exc}')
raise _ActionErrorInternal(HTTPStatus.INTERNAL_SERVER_ERROR, 'InvalidOutput', message=f'{exc}', member=exc.member)
except _ActionErrorInternal as exc:
status = exc.status
response = {'error': exc.error}
if exc.message is not None:
response['message'] = exc.message
if exc.member is not None:
response['member'] = exc.member
# Serialize the response as JSON
return ctx.response_json(status, response, jsonp=jsonp)
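# NOTE (editor): a hedged end-to-end sketch combining query validation and ActionError,
# mirroring the doctest style used in the module docstrings above; the action name and
# fields are illustrative, not part of chisel itself.
#
#     @chisel.action(spec='''
#     action divide_numbers
#         urls
#             GET
#         query
#             int numerator
#             int denominator
#         output
#             float result
#         errors
#             DivideByZero
#     ''')
#     def divide_numbers(ctx, req):
#         if req['denominator'] == 0:
#             raise chisel.ActionError('DivideByZero')
#         return {'result': req['numerator'] / req['denominator']}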
| # Licensed under the MIT License
# https://github.com/craigahobbs/chisel/blob/main/LICENSE
"""
Chisel action class
"""
from cgi import parse_header
from functools import partial
from http import HTTPStatus
from json import loads as json_loads
from schema_markdown import SchemaMarkdownParser, ValidationError, decode_query_string, get_referenced_types, validate_type
from .app import Context
from .request import Request
def action(action_callback=None, **kwargs):
"""
Decorator for creating an :class:`~chisel.Action` object that wraps an action callback function. For example:
>>> @chisel.action(spec='''
... # Sum a list of numbers
... action sum_numbers
... urls
... GET
... query
... # The list of numbers to sum
... int[len > 0] numbers
... output
... # The sum of the numbers
... int sum
... ''')
... def sum_numbers(ctx, req):
... return {'sum': sum(req['numbers'])}
...
>>> application = chisel.Application()
>>> application.add_request(sum_numbers)
>>> application.request('GET', '/sum_numbers', query_string='numbers.0=1&numbers.1=2&numbers.2=3')
('200 OK', [('Content-Type', 'application/json')], b'{"sum":6}')
Chisel actions schema-validate their input before calling the callback function. For example:
>>> status, _, response = application.request('GET', '/sum_numbers', query_string='numbers=1')
>>> status
'400 Bad Request'
>>> import json
>>> from pprint import pprint
>>> pprint(json.loads(response.decode('utf-8')))
{'error': 'InvalidInput',
'member': 'numbers',
'message': "Invalid value '1' (type 'str') for member 'numbers', expected "
"type 'array' (query string)"}
When :attr:`~chisel.Application.validate_output` is True, the response dictionary is also validated against the output schema.
:param ~collections.abc.Callable action_callback: The action callback function
"""
if action_callback is None:
return partial(action, **kwargs)
return Action(action_callback, **kwargs).decorate_module(action_callback)
class ActionError(Exception):
"""
An action error exception. Raise this exception within an action callback function to respond with an error.
>>> @chisel.action(spec='''
... action my_action
... urls
... GET
... errors
... AlwaysError
... ''')
... def my_action(ctx, req):
... raise chisel.ActionError('AlwaysError')
...
>>> application = chisel.Application()
>>> application.add_request(my_action)
>>> application.request('GET', '/my_action')
('400 Bad Request', [('Content-Type', 'application/json')], b'{"error":"AlwaysError"}')
:param str error: The error code
:param str message: Optional error message
:param status: The HTTP response status
:type status: ~http.HTTPStatus or str
"""
__slots__ = ('error', 'message', 'status')
def __init__(self, error, message=None, status=None):
super().__init__(error)
#: The error code
self.error = error
#: The error message or None
self.message = message
#: The HTTP response status
self.status = status
class _ActionErrorInternal(Exception):
__slots__ = ('status', 'error', 'message', 'member')
def __init__(self, status, error, message=None, member=None):
super().__init__(error)
self.status = status
self.error = error
self.message = message
self.member = member
class Action(Request):
"""
A schema-validated, JSON API request. An Action wraps a callback function that it calls when a request occurs. Here's
an example of an action callback function:
>>> def my_action(ctx, req):
... return {}
The first argument, "ctx", is the :class:`~chisel.Context` object. The second argument is the request object which
contains the schema-validated, combined path parameters, query string parameters, and JSON request content
parameters.
:param ~collections.abc.Callable action_callback: The action callback function
:param str name: The action request name
:param list(tuple) urls: The list of URL method/path tuples. The first value is the HTTP request method (e.g. 'GET')
or None to match any. The second value is the URL path or None to use the default path.
:param dict types: Optional dictionary of user type models
:param str spec: Optional action :ref:`schema-markdown:Schema Markdown` specification.
If a specification isn't provided, the action's type model must be present in the "types" argument.
:param bool wsgi_response: If True, the callback function's response is a WSGI application function
response. Default is False.
:param str jsonp: Optional JSONP key
"""
__slots__ = ('action_callback', 'types', 'wsgi_response', 'jsonp')
def __init__(self, action_callback, name=None, urls=(('POST', None),), types=None, spec=None, wsgi_response=False, jsonp=None):
# Use the action callback name if no name is provided
if name is None:
name = action_callback.__name__
# Spec provided?
if types is None:
types = {}
if spec is not None:
SchemaMarkdownParser(spec, types=types)
# Assert that the action model exists
model_type = types.get(name)
model = model_type.get('action') if model_type is not None else None
assert model is not None, f'Unknown action "{name}"'
# Get the model's URLs, if any
if 'urls' in model:
urls = [(url.get('method'), url.get('path')) for url in model['urls']]
# Initialize Request
super().__init__(name=name, urls=urls, doc=model.get('doc'), doc_group=model.get('docGroup'))
#: The action callback function
self.action_callback = action_callback
#: The user type model dictionary that contains the action model and all referenced user types
self.types = types
#: If True, the callback function's response is a WSGI application function response.
self.wsgi_response = wsgi_response
#: JSONP key or None
self.jsonp = jsonp
@property
def model(self):
"""Get the action model"""
return self.types[self.name]['action']
def _get_section_type(self, section):
model = self.model
if section in model:
section_type_name = model[section]
section_types = self.types
else:
# No section type - create an empty struct type
section_type_name = f'{model["name"]}_{section}'
section_types = {
section_type_name: {
'struct': {
'name': section_type_name
}
}
}
return section_types, section_type_name
def _get_error_type(self):
model = self.model
output_type_name = f'{model["name"]}_output_error'
if 'errors' in model:
error_type_name = model['errors']
output_types = get_referenced_types(self.types, error_type_name)
else:
error_type_name = f'{model["name"]}_errors'
output_types = {error_type_name: {'enum': {'name': error_type_name}}}
output_types[output_type_name] = {
'struct': {
'name': output_type_name,
'members': [
{'name': 'error', 'type': {'user': error_type_name}},
{'name': 'message', 'type': {'builtin': 'string'}, 'optional': True}
]
}
}
return output_types, output_type_name
def __call__(self, environ, unused_start_response):
ctx = environ[Context.ENVIRON_CTX]
# Handle the action
is_get = (environ['REQUEST_METHOD'] == 'GET')
jsonp = None
validate_output = True
try:
# Read the request content
try:
content = None if is_get else environ['wsgi.input'].read()
except:
raise _ActionErrorInternal(HTTPStatus.REQUEST_TIMEOUT, 'IOError', message='Error reading request content')
# De-serialize the JSON content
try:
if content:
content_type = environ.get('CONTENT_TYPE')
content_charset = ('utf-8' if content_type is None else parse_header(content_type)[1].get('charset', 'utf-8'))
content_json = content.decode(content_charset)
request = json_loads(content_json)
else:
request = {}
except Exception as exc:
ctx.log.warning("Error decoding JSON content for action '%s'", self.name)
raise _ActionErrorInternal(HTTPStatus.BAD_REQUEST, 'InvalidInput', message=f'Invalid request JSON: {exc}')
# Validate the content
input_types, input_type = self._get_section_type('input')
try:
request = validate_type(input_types, input_type, request)
except ValidationError as exc:
ctx.log.warning("Invalid content for action '%s': %s", self.name, f'{exc}')
raise _ActionErrorInternal(
HTTPStatus.BAD_REQUEST,
'InvalidInput',
message=f'{exc} (content)',
member=exc.member
)
# Decode the query string
query_string = environ.get('QUERY_STRING', '')
try:
request_query = decode_query_string(query_string)
except Exception as exc:
ctx.log.warning("Error decoding query string for action '%s': %.1000r", self.name, query_string)
raise _ActionErrorInternal(HTTPStatus.BAD_REQUEST, 'InvalidInput', message=f'{exc}')
# JSONP?
if is_get and self.jsonp and self.jsonp in request_query:
jsonp = f'{request_query[self.jsonp]}'
del request_query[self.jsonp]
# Validate the query string
query_types, query_type = self._get_section_type('query')
try:
request_query = validate_type(query_types, query_type, request_query)
except ValidationError as exc:
ctx.log.warning("Invalid query string for action '%s': %s", self.name, f'{exc}')
raise _ActionErrorInternal(
HTTPStatus.BAD_REQUEST,
'InvalidInput',
message=f'{exc} (query string)',
member=exc.member
)
# Validate the path args
path_types, path_type = self._get_section_type('path')
request_path = ctx.url_args if ctx.url_args is not None else {}
try:
request_path = validate_type(path_types, path_type, request_path)
except ValidationError as exc:
ctx.log.warning("Invalid path for action '%s': %s", self.name, f'{exc}')
raise _ActionErrorInternal(
HTTPStatus.BAD_REQUEST,
'InvalidInput',
message=f'{exc} (path)',
member=exc.member
)
# Copy top-level path keys and query string keys
for request_key, request_value in request_path.items():
request[request_key] = request_value
for request_key, request_value in request_query.items():
request[request_key] = request_value
# Call the action callback
try:
status = HTTPStatus.OK
response = self.action_callback(ctx, request)
if self.wsgi_response:
return response
if response is None:
response = {}
output_types, output_type = self._get_section_type('output')
except ActionError as exc:
status = exc.status or HTTPStatus.BAD_REQUEST
response = {'error': exc.error}
if exc.message is not None:
response['message'] = exc.message
if ctx.app.validate_output:
if exc.error in ('UnexpectedError',):
validate_output = False
else:
output_types, output_type = self._get_error_type()
except Exception as exc:
ctx.log.exception("Unexpected error in action '%s'", self.name)
raise _ActionErrorInternal(HTTPStatus.INTERNAL_SERVER_ERROR, 'UnexpectedError')
# Validate the response
if validate_output and ctx.app.validate_output:
try:
validate_type(output_types, output_type, response)
except ValidationError as exc:
ctx.log.error("Invalid output returned from action '%s': %s", self.name, f'{exc}')
raise _ActionErrorInternal(HTTPStatus.INTERNAL_SERVER_ERROR, 'InvalidOutput', message=f'{exc}', member=exc.member)
except _ActionErrorInternal as exc:
status = exc.status
response = {'error': exc.error}
if exc.message is not None:
response['message'] = exc.message
if exc.member is not None:
response['member'] = exc.member
# Serialize the response as JSON
return ctx.response_json(status, response, jsonp=jsonp)
|
import asyncio
import subprocess
import concurrent
import click
import aiohttp
from pyartcd.cli import cli, click_coroutine, pass_runtime
from pyartcd.runtime import Runtime
BASE_URL = 'https://api.openshift.com/api/upgrades_info/v1/graph?arch=amd64&channel=fast'
ELLIOTT_BIN = 'elliott'
async def is_ga(version: str, session):
# 3.11 is an exception, no need to query Openshift API
if version == '3.11':
return True
url = f'{BASE_URL}-{version}'
# A release is considered GA'd if nodes are found
async with session.get(url, headers={'Accept': 'application/json'}) as response:
assert response.status == 200
response.raise_for_status()
response_body = await response.json()
nodes = response_body['nodes']
return len(nodes) > 0
def get_next_version(version: str) -> str:
major, minor = version.split('.')[:2]
return '.'.join([major, str(int(minor) + 1)])
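# NOTE (editor): for example, get_next_version('4.11') returns '4.12'; any third version
# segment is ignored, so '4.11.5' also yields '4.12'.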
class CheckBugsPipeline:
def __init__(self, runtime: Runtime, channel: str, versions: list, pre_releases: list) -> None:
self.runtime = runtime
self.versions = versions
self.pre_releases = pre_releases
self.logger = runtime.logger
self.applicable_versions = []
self.blockers = {}
self.regressions = {}
self.slack_client = self.initialize_slack_client(runtime, channel)
@staticmethod
def initialize_slack_client(runtime: Runtime, channel: str):
if not channel.startswith('#'):
raise ValueError('Invalid Slack channel name provided')
slack_client = runtime.new_slack_client()
slack_client.bind_channel(channel)
return slack_client
async def run(self):
# Check applicable OCP versions
await self._check_applicable_versions()
# Find blocker bugs
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for v in self.applicable_versions:
futures.append(executor.submit(self._find_blockers, v))
for f in futures:
try:
self.blockers.update(f.result())
except TypeError:
# In case no blockers have been found
pass
# Find regressions
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for v in self.applicable_versions:
futures.append(executor.submit(self._find_regressions, v))
for f in futures:
try:
self.regressions.update(f.result())
except TypeError:
# In case no regressions have been found
pass
# Notify Slack
await self._slack_report()
self.logger.info('All done!')
async def _check_applicable_versions(self):
ga_info = {}
async with aiohttp.ClientSession() as session:
tasks = []
for v in self.versions:
tasks.append(asyncio.ensure_future(is_ga(v, session)))
responses = await asyncio.gather(*tasks)
ga_info = dict(zip(self.versions, responses))
self.applicable_versions = [v for v in self.versions if ga_info.get(v, True)]
if self.applicable_versions:
self.logger.info(f'Found applicable versions: {' '.join(self.applicable_versions)}')
else:
self.logger.warning('No applicable versions found')
def _find_blockers(self, version: str):
self.logger.info(f'Checking blocker bugs for Openshift {version}')
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'find-bugs:blocker',
'--output=slack'
]
self.logger.info(f'Executing command: {' '.join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
errcode = process.returncode
if errcode:
self.logger.error(f'Command {cmd} failed with {errcode}: see output below')
self.logger.info(err)
return None
out = out.decode().strip().splitlines()
if not out:
self.logger.info('No blockers found for version %s', version)
return None
self.logger.info('Cmd returned: %s', out)
return {version: out}
def _find_regressions(self, version: str):
# Do nothing for 3.11
if version == '3.11':
return None
# Check pre-release
if self._next_is_prerelease(version):
self.logger.info(
'Version %s is in pre-release state: skipping regression checks for %s',
get_next_version(version), version
)
return None
self.logger.info(f'Checking possible regressions for Openshift {version}')
# Find bugs
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'find-bugs:sweep'
]
self.logger.info(f'Executing command: {' '.join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
errcode = process.returncode
if errcode:
self.logger.error(f'Command {cmd} failed with {errcode}: see output below')
self.logger.info(err)
return None
# First line in elliott stdout is something like "Searching for bugs..."
# Next line (if present) goes like this: "Found N bugs (M ignored):"
# Following is a list of bugs that we need to process
out = out.decode().strip().splitlines()
if len(out) < 2:
return None
bugs = out[-1].split(':')[1].split(', ')
# Verify bugs
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'verify-bugs',
'--output=slack'
]
cmd.extend(bugs)
self.logger.info(f'Executing command: {' '.join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, _ = process.communicate()
# If process returned 0, no regressions were found
if not process.returncode:
self.logger.info('No regressions found for version %s', version)
return None
out = out.decode().strip().splitlines()
res = {version: out} if out else None
return res
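# NOTE (editor): a hedged illustration of the elliott stdout shape the parsing in
# _find_regressions assumes; the bug IDs are invented.
#
#     Searching for bugs...
#     Found 2 bugs (0 ignored): OCPBUGS-1, OCPBUGS-2
#
# With that input, out[-1].split(':')[1].split(', ') yields [' OCPBUGS-1', 'OCPBUGS-2']
# (the first entry keeps a leading space).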
def _next_is_prerelease(self, version: str) -> bool:
return get_next_version(version) in self.pre_releases
async def _slack_report(self):
# If no issues have been found, do nothing
if not any((self.blockers, self.regressions)):
return
# Merge results
from collections import defaultdict
report = defaultdict(list)
for d in (self.blockers, self.regressions):
for k, v in d.items():
report[k].extend(v)
# Format output message
message = ':red-siren: *There are some issues to look into:*'
for k in report.keys():
message += f'\n:warning:*{k}*'
for i in report[k]:
message += f'\n{i}'
self.logger.info('Sending notification to Slack')
self.logger.debug(message)
await self.slack_client.say(message)
@cli.command('check-bugs')
@click.option('--slack_channel', required=False, default='#art-team',
help='Slack channel to be notified for failures')
@click.option('--version', required=True, multiple=True,
help='OCP version to check for blockers e.g. 4.7')
@click.option('--pre_release', required=False, multiple=True,
help='OCP versions still in pre-release state')
@pass_runtime
@click_coroutine
async def check_bugs(runtime: Runtime, slack_channel: str, version: list, pre_release: list):
pipeline = CheckBugsPipeline(runtime, channel=slack_channel, versions=version, pre_releases=pre_release)
await pipeline.run()
| import asyncio
import subprocess
import concurrent
import click
import aiohttp
from pyartcd.cli import cli, click_coroutine, pass_runtime
from pyartcd.runtime import Runtime
BASE_URL = 'https://api.openshift.com/api/upgrades_info/v1/graph?arch=amd64&channel=fast'
ELLIOTT_BIN = 'elliott'
async def is_ga(version: str, session):
# 3.11 is an exception, no need to query Openshift API
if version == '3.11':
return True
url = f'{BASE_URL}-{version}'
# A release is considered GA'd if nodes are found
async with session.get(url, headers={'Accept': 'application/json'}) as response:
assert response.status == 200
response.raise_for_status()
response_body = await response.json()
nodes = response_body['nodes']
return len(nodes) > 0
def get_next_version(version: str) -> str:
major, minor = version.split('.')[:2]
return '.'.join([major, str(int(minor) + 1)])
class CheckBugsPipeline:
def __init__(self, runtime: Runtime, channel: str, versions: list, pre_releases: list) -> None:
self.runtime = runtime
self.versions = versions
self.pre_releases = pre_releases
self.logger = runtime.logger
self.applicable_versions = []
self.blockers = {}
self.regressions = {}
self.slack_client = self.initialize_slack_client(runtime, channel)
@staticmethod
def initialize_slack_client(runtime: Runtime, channel: str):
if not channel.startswith('#'):
raise ValueError('Invalid Slack channel name provided')
slack_client = runtime.new_slack_client()
slack_client.bind_channel(channel)
return slack_client
async def run(self):
# Check applicable OCP versions
await self._check_applicable_versions()
# Find blocker bugs
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for v in self.applicable_versions:
futures.append(executor.submit(self._find_blockers, v))
for f in futures:
try:
self.blockers.update(f.result())
except TypeError:
# In case no blockers have been found
pass
# Find regressions
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for v in self.applicable_versions:
futures.append(executor.submit(self._find_regressions, v))
for f in futures:
try:
self.regressions.update(f.result())
except TypeError:
# In case no regressions have been found
pass
# Notify Slack
await self._slack_report()
self.logger.info('All done!')
async def _check_applicable_versions(self):
ga_info = {}
async with aiohttp.ClientSession() as session:
tasks = []
for v in self.versions:
tasks.append(asyncio.ensure_future(is_ga(v, session)))
responses = await asyncio.gather(*tasks)
ga_info = dict(zip(self.versions, responses))
self.applicable_versions = [v for v in self.versions if ga_info.get(v, True)]
if self.applicable_versions:
self.logger.info(f'Found applicable versions: {" ".join(self.applicable_versions)}')
else:
self.logger.warning('No applicable versions found')
def _find_blockers(self, version: str):
self.logger.info(f'Checking blocker bugs for Openshift {version}')
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'find-bugs:blocker',
'--output=slack'
]
self.logger.info(f'Executing command: {" ".join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
errcode = process.returncode
if errcode:
self.logger.error(f'Command {cmd} failed with {errcode}: see output below')
self.logger.info(err)
return None
out = out.decode().strip().splitlines()
if not out:
self.logger.info('No blockers found for version %s', version)
return None
self.logger.info('Cmd returned: %s', out)
return {version: out}
def _find_regressions(self, version: str):
# Do nothing for 3.11
if version == '3.11':
return None
# Check pre-release
if self._next_is_prerelease(version):
self.logger.info(
'Version %s is in pre-release state: skipping regression checks for %s',
get_next_version(version), version
)
return None
self.logger.info(f'Checking possible regressions for Openshift {version}')
# Find bugs
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'find-bugs:sweep'
]
self.logger.info(f'Executing command: {" ".join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
errcode = process.returncode
if errcode:
self.logger.error(f'Command {cmd} failed with {errcode}: see output below')
self.logger.info(err)
return None
# First line in elliott stdout is something like "Searching for bugs..."
# Next line (if present) goes like this: "Found N bugs (M ignored):"
# Following is a list of bugs that we need to process
out = out.decode().strip().splitlines()
if len(out) < 2:
return None
bugs = out[-1].split(':')[1].split(', ')
# Verify bugs
cmd = [
ELLIOTT_BIN,
f'--group=openshift-{version}',
f'--working-dir={version}-working',
'verify-bugs',
'--output=slack'
]
cmd.extend(bugs)
self.logger.info(f'Executing command: {" ".join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, _ = process.communicate()
# If process returned 0, no regressions were found
if not process.returncode:
self.logger.info('No regressions found for version %s', version)
return None
out = out.decode().strip().splitlines()
res = {version: out} if out else None
return res
def _next_is_prerelease(self, version: str) -> bool:
return get_next_version(version) in self.pre_releases
async def _slack_report(self):
# If no issues have been found, do nothing
if not any((self.blockers, self.regressions)):
return
# Merge results
from collections import defaultdict
report = defaultdict(list)
for d in (self.blockers, self.regressions):
for k, v in d.items():
report[k].extend(v)
# Format output message
message = ':red-siren: *There are some issues to look into:*'
for k in report.keys():
message += f'\n:warning:*{k}*'
for i in report[k]:
message += f'\n{i}'
self.logger.info('Sending notification to Slack')
self.logger.debug(message)
await self.slack_client.say(message)
@cli.command('check-bugs')
@click.option('--slack_channel', required=False, default='#art-team',
help='Slack channel to be notified for failures')
@click.option('--version', required=True, multiple=True,
help='OCP version to check for blockers e.g. 4.7')
@click.option('--pre_release', required=False, multiple=True,
help='OCP versions still in pre-release state')
@pass_runtime
@click_coroutine
async def check_bugs(runtime: Runtime, slack_channel: str, version: list, pre_release: list):
pipeline = CheckBugsPipeline(runtime, channel=slack_channel, versions=version, pre_releases=pre_release)
await pipeline.run()
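# Example invocation (hypothetical entry-point name; the options mirror the declarations above):
#   <cli> check-bugs --version 4.10 --version 4.11 --pre_release 4.12 --slack_channel '#art-team'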
|
# type: ignore
import logging
import warnings
from typing import Union, List, Any, Tuple, Dict
from urllib.parse import urlparse
import requests
from pymisp import ExpandedPyMISP, PyMISPError, MISPObject
from pymisp.tools import EMailObject, GenericObjectGenerator
from CommonServerPython import *
logging.getLogger("pymisp").setLevel(logging.CRITICAL)
def warn(*args):
"""
Do nothing with warnings
"""
pass
# Disable requests warnings
requests.packages.urllib3.disable_warnings()
# Disable python warnings
warnings.warn = warn
''' GLOBALS/PARAMS '''
PARAMS = demisto.params()
MISP_KEY = PARAMS.get('api_key')
MISP_URL = PARAMS.get('url')
USE_SSL = not PARAMS.get('insecure')
proxies = handle_proxy() # type: ignore
MISP_PATH = 'MISP.Event(obj.ID === val.ID)'
MISP_ATTRIBUTE_PATH = 'MISP.Attribute(obj.ID === val.ID)'
MISP = ExpandedPyMISP(url=MISP_URL, key=MISP_KEY, ssl=USE_SSL, proxies=proxies) # type: ExpandedPyMISP
DATA_KEYS_TO_SAVE = PARAMS.get('context_select', [])
try:
MAX_ATTRIBUTES = int(PARAMS.get('attributes_limit') or 1000)
except ValueError:
return_error("Maximum attributes in event must be a positive number")
else:
if MAX_ATTRIBUTES < 1:
return_error("Maximum attributes in event must be a positive number")
"""
dict format :
MISP key:DEMISTO key
"""
PREDEFINED_FEEDS = {
'CIRCL': {'name': 'CIRCL OSINT Feed',
'url': 'https://www.circl.lu/doc/misp/feed-osint',
'format': 'misp',
'input': 'network'},
'Botvrij.eu': {'name': 'The Botvrij.eu Data',
'url': 'http://www.botvrij.eu/data/feed-osint',
'format': 'misp',
'input': 'network'}
}
ENTITIESDICT = {
'deleted': 'Deleted',
'category': 'Category',
'comment': 'Comment',
'uuid': 'UUID',
'sharing_group_id': 'SharingGroupID',
'timestamp': 'Timestamp',
'to_ids': 'ToIDs',
'value': 'Value',
'event_id': 'EventID',
'ShadowAttribute': 'ShadowAttribute',
'disable_correlation': 'DisableCorrelation',
'distribution': 'Distribution',
'type': 'Type',
'id': 'ID',
'date': 'Date',
'info': 'Info',
'published': 'Published',
'attribute_count': 'AttributeCount',
'proposal_email_lock': 'ProposalEmailLock',
'locked': 'Locked',
'publish_timestamp': 'PublishTimestamp',
'event_creator_email': 'EventCreatorEmail',
'name': 'Name',
'analysis': 'Analysis',
'threat_level_id': 'ThreatLevelID',
'old_id': 'OldID',
'org_id': 'OrganisationID',
'Org': 'Organisation',
'Orgc': 'OwnerOrganisation',
'orgc_uuid': 'OwnerOrganisation.UUID',
'orgc_id': 'OwnerOrganisation.ID',
'orgc_name': 'OwnerOrganisation.Name',
'event_uuid': 'EventUUID',
'proposal_to_delete': 'ProposalToDelete',
'description': 'Description',
'version': 'Version',
'Object': 'Object',
'object_id': 'ObjectID',
'object_relation': 'ObjectRelation',
'template_version': 'TemplateVersion',
'template_uuid': 'TemplateUUID',
'meta-category': 'MetaCategory',
'decay_score': 'DecayScore'
}
THREAT_LEVELS_WORDS = {
'1': 'HIGH',
'2': 'MEDIUM',
'3': 'LOW',
'4': 'UNDEFINED'
}
THREAT_LEVELS_NUMBERS = {
'high': 1,
'medium': 2,
'low': 3,
'undefined': 4
}
ANALYSIS_WORDS = {
'0': 'Initial',
'1': 'Ongoing',
'2': 'Completed'
}
ANALYSIS_NUMBERS = {
'initial': 0,
'ongoing': 1,
'completed': 2
}
DISTRIBUTION_NUMBERS = {
'Your_organisation_only': 0,
'This_community_only': 1,
'Connected_communities': 2,
'All_communities': 3
}
''' HELPER FUNCTIONS '''
def extract_error(error: list) -> List[dict]:
"""Extracting errors
Args:
error: list of responses from error section
Returns:
List[Dict[str, any]]: filtered response
Examples:
extract_error([
(403,
{
'name': 'Could not add object',
'message': 'Could not add object',
'url': '/objects/add/156/',
'errors': 'Could not save object as at least one attribute has failed validation (ip). \
{"value":["IP address has an invalid format."]}'
}
)
])
Response:
[{
'code': 403,
'message': 'Could not add object',
'errors': 'Could not save object as at least one attribute has failed validation (ip). \
{"value":["IP address has an invalid format."]}'
}]
"""
return [{
'code': err[0],
'message': err[1].get('message'),
'errors': err[1].get('errors')
} for err in error]
def build_list_from_dict(args: dict) -> List[dict]:
"""
Args:
args: dictionary describes MISP object
Returns:
list: list containing dicts that GenericObjectGenerator can take.
Examples:
>>> {'ip': '8.8.8.8', 'domain': 'google.com'}
[{'ip': '8.8.8.8'}, {'domain': 'google.com'}]
"""
return [{k: v} for k, v in args.items()]
def build_generic_object(template_name: str, args: List[dict]) -> GenericObjectGenerator:
"""
Args:
template_name: template name as described in
args: arguments to create the generic object
Returns:
GenericObjectGenerator: object created in MISP
Example:
args should look like:
[{'analysis_submitted_at': '2018-06-15T06:40:27'},
{'threat_score': {value=95, to_ids=False}},
{'permalink': 'https://panacea.threatgrid.com/mask/samples/2e445ef5389d8b'},
{'heuristic_raw_score': 7.8385159793597}, {'heuristic_score': 96},
{'original_filename': 'juice.exe'}, {'id': '2e445ef5389d8b'}] # guardrails-disable-line
"""
misp_object = GenericObjectGenerator(template_name)
misp_object.generate_attributes(args)
return misp_object
def convert_timestamp(timestamp: Union[str, int]) -> str:
"""
Gets a timestamp from MISP response (1546713469) and converts it to human readable format
"""
return datetime.utcfromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
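# Illustrative usage, reusing the timestamp mentioned in the docstring above:
# >>> convert_timestamp(1546713469)
# '2019-01-05 18:37:49'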
def replace_keys(obj_to_build: Union[dict, list, str]) -> Union[dict, list, str]:
"""
Replacing keys from MISP's format to Demisto's (as appear in ENTITIESDICT)
Args:
obj_to_build (Union[dict, list, str]): object to replace keys in
Returns:
Union[dict, list, str]: same object type that got in
"""
if isinstance(obj_to_build, list):
return [replace_keys(item) for item in obj_to_build]
if isinstance(obj_to_build, dict):
return {
(ENTITIESDICT[key] if key in ENTITIESDICT else key): replace_keys(value)
for key, value in obj_to_build.items()
}
return obj_to_build
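# Illustrative example (hypothetical input) of the ENTITIESDICT mapping at work:
# >>> replace_keys({'event_id': '42', 'Tag': [{'name': 'tlp:white'}]})
# {'EventID': '42', 'Tag': [{'Name': 'tlp:white'}]}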
def remove_unselected_context_keys(context_data):
for attribute in context_data['Attribute']:
for key in list(attribute.keys()):
if key not in DATA_KEYS_TO_SAVE:
del attribute[key]
def limit_attributes_count(event: dict) -> dict:
"""
Gets a MISP event and limits the number of attributes to MAX_ATTRIBUTES
Args:
event (dict): MISP's event
Returns:
dict: context output
"""
if event and 'Attribute' in event and len(event['Attribute']) > MAX_ATTRIBUTES:
attributes = event['Attribute']
attributes_num = len(attributes)
event_id = event.get('id', '')
event_uuid = event.get('uuid')
demisto.info(f'Limiting amount of attributes in event to {MAX_ATTRIBUTES} '
f'to keep context from being overwhelmed. '
f'This limit can be changed in the integration configuration. '
f'Event ID: {event_id}, Event UUID: {event_uuid}, Attributes in event: {attributes_num}')
sorted_attributes = sorted(attributes, key=lambda at: int(at.get('timestamp', 0)))
event['Attribute'] = sorted_attributes[attributes_num - MAX_ATTRIBUTES:]
return event
return event
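# Illustrative sketch (assuming MAX_ATTRIBUTES == 2): an event whose attributes carry
# timestamps [100, 300, 200] is sorted ascending to [100, 200, 300] and trimmed to the
# two newest entries, i.e. the attributes with timestamps 200 and 300.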
def arrange_context_according_to_user_selection(context_data):
if not DATA_KEYS_TO_SAVE:
return
# each related event has its own attributes
for event in context_data:
# Remove filtered fields in event
remove_unselected_context_keys(event)
# Remove filtered fields in object
for obj in event['Object']:
remove_unselected_context_keys(obj)
def build_context(response: Union[dict, requests.Response]) -> dict: # type: ignore
"""
Gets a MISP response and builds the context output from it. Keys missing from the response are omitted.
Args:
response (requests.Response or dict):
Returns:
dict: context output
"""
event_args = [
'id',
'date',
'threat_level_id',
'info',
'published',
'uuid',
'analysis',
'timestamp',
'distribution',
'proposal_email_lock',
'locked',
'publish_timestamp',
'sharing_group_id',
'disable_correlation',
'event_creator_email',
'Org',
'Orgc',
'Attribute',
'ShadowAttribute',
'RelatedEvent',
'Galaxy',
'Tag',
'Object'
]
# Sometimes, PyMISP will return str instead of a dict. json.loads() wouldn't work unless we'll dumps it first
if isinstance(response, str):
response = json.loads(json.dumps(response))
# Remove 'Event' keyword
events = [event.get('Event') for event in response] # type: ignore
for i in range(0, len(events)):
events[i] = limit_attributes_count(events[i])
# Filter object from keys in event_args
events[i] = {
key: events[i].get(key)
for key in event_args if key in events[i]
}
# Remove 'Event' keyword from 'RelatedEvent'
if events[i].get('RelatedEvent'):
events[i]['RelatedEvent'] = [
r_event.get('Event') for r_event in events[i].get('RelatedEvent')
]
# Get only IDs from related event
events[i]['RelatedEvent'] = [
{
'id': r_event.get('id')
} for r_event in events[i].get('RelatedEvent')
]
# Build Galaxy
if events[i].get('Galaxy'):
events[i]['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in events[i]['Galaxy']
]
# Build tag
if events[i].get('Tag'):
events[i]['Tag'] = [
{'Name': tag.get('name')} for tag in events[i].get('Tag')
]
events = replace_keys(events) # type: ignore
arrange_context_according_to_user_selection(events) # type: ignore
return events # type: ignore
def build_attribute_context(response: Union[dict, requests.Response]) -> dict:
"""
Convert the response of attribute search returned from MISP to the context output format.
"""
attribute_fields = [
'id',
'event_id',
'object_id',
'object_relation',
'category',
'type',
'to_ids',
'uuid',
'timestamp',
'distribution',
'sharing_group_id',
'comment',
'deleted',
'disable_correlation',
'value',
'Event',
'Object',
'Galaxy', # field wasn't tested as we don't see it in our responses. Was added by customer's request.
'Tag',
'decay_score'
]
if isinstance(response, str):
response = json.loads(json.dumps(response))
attributes = response.get('Attribute')
for i in range(len(attributes)):
attributes[i] = {key: attributes[i].get(key) for key in attribute_fields if key in attributes[i]}
# Build Galaxy
if attributes[i].get('Galaxy'):
attributes[i]['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in attributes[i]['Galaxy']
]
# Build Tag
if attributes[i].get('Tag'):
attributes[i]['Tag'] = [
{'Name': tag.get('name')} for tag in attributes[i].get('Tag')
]
attributes = replace_keys(attributes)
return attributes
def get_misp_threat_level(threat_level_id: str) -> str: # type: ignore
"""Gets MISP's thread level and returning it in Demisto's format
Args:
threat_level_id: str of threat level in MISP
Returns:
str: Threat-level in Demisto
"""
if threat_level_id == '1':
return 'HIGH'
if threat_level_id == '2':
return 'MEDIUM'
if threat_level_id == '3':
return 'LOW'
if threat_level_id == '4':
return 'UNDEFINED'
return_error('Invalid MISP Threat Level with threat_level_id: ' + threat_level_id)
def get_dbot_level(threat_level_id: str) -> int:
"""
MISP to DBOT:
4 = 0 (UNDEFINED to UNKNOWN)
3 = 2 (LOW to SUSPICIOUS)
1 | 2 = 3 (MED/HIGH to MALICIOUS)
Args:
threat_level_id (str):
Returns:
int: DBOT score
"""
if threat_level_id in ('1', '2'):
return 3
if threat_level_id == '3':
return 2
if threat_level_id == '4':
return 0
return 0
def get_files_events():
files = argToList(demisto.args().get('file'), ',')
for file_hash in files:
check_file(file_hash)
def check_file(file_hash):
"""
Gets a file hash and returns the matching MISP events
file_hash (str): File's hash from demisto
Returns:
dict: MISP's output formatted to demisto:
"""
# hashFormat will be used only in output
hash_format = get_hash_type(file_hash).upper()
if hash_format == 'Unknown':
return_error('Invalid hash length, enter file hash of format MD5, SHA-1 or SHA-256')
# misp_response will remain the raw output of misp
misp_response = MISP.search(value=file_hash)
if misp_response:
dbot_list = list()
file_list = list()
md_list = list()
for i_event in misp_response:
event = i_event['Event']
i_event['Event']['RelatedEvent'] = [r_event.get('Event') for r_event in event.get('RelatedEvent')]
for i_event in misp_response:
event = i_event['Event']
misp_organisation = f"MISP.{event.get("orgc_name")}"
dbot_score = get_dbot_level(event.get('threat_level_id'))
# Build RelatedEvent
# if dbot_score is suspicious or malicious
dbot_obj = {
'Indicator': file_hash,
'Type': 'hash',
'Vendor': 'MISP V2',
'Score': dbot_score
}
file_obj = {
hash_format: file_hash
}
# if malicious, find file with given hash
if dbot_score == 3:
file_obj['Malicious'] = {
'Vendor': 'MISP V2',
'Description': f'file hash found in MISP event with ID: {event.get("id")}'
}
md_obj = {
'EventID': event.get('id'),
'Threat Level': THREAT_LEVELS_WORDS[event.get('threat_level_id')],
'Organisation': misp_organisation
}
file_list.append(file_obj)
dbot_list.append(dbot_obj)
md_list.append(md_obj)
# Building entry
outputs = {
outputPaths.get('file'): file_list,
outputPaths.get('dbotscore'): dbot_list
}
md = tableToMarkdown(f'Results found in MISP for hash: {file_hash}', md_list)
else:
md = f"No events found in MISP for hash {file_hash}"
outputs = {
outputPaths.get('dbotscore'): {
'Indicator': file_hash,
'Type': 'hash',
'Vendor': 'MISP V2',
'Score': Common.DBotScore.NONE,
},
}
return_results(CommandResults(
readable_output=md,
outputs=outputs,
raw_response=misp_response,
))
def get_ips_events():
ips = argToList(demisto.args().get('ip'), ',')
for ip in ips:
check_ip(ip)
def check_ip(ip):
"""
Gets an IP and returns its reputation (if it exists)
ip (str): IP to check
"""
if not is_ip_valid(ip):
return_error("IP isn't valid")
misp_response = MISP.search(value=ip)
if misp_response:
dbot_list = list()
ip_list = list()
md_list = list()
for event_in_response in misp_response:
event = event_in_response.get('Event')
dbot_score = get_dbot_level(event.get('threat_level_id'))
misp_organisation = f'MISP.{event.get("Orgc").get("name")}'
dbot_obj = {
'Indicator': ip,
'Type': 'ip',
'Vendor': 'MISP V2',
'Score': dbot_score
}
ip_obj = {'Address': ip}
# if malicious
if dbot_score == 3:
ip_obj['Malicious'] = {
'Vendor': 'MISP V2',
'Description': f'IP Found in MISP event: {event.get("id")}'
}
md_obj = {
'EventID': event.get('id'),
'Threat Level': THREAT_LEVELS_WORDS[event.get('threat_level_id')],
'Organisation': misp_organisation
}
ip_list.append(ip_obj)
dbot_list.append(dbot_obj)
md_list.append(md_obj)
outputs = {
outputPaths.get('ip'): ip_list,
outputPaths.get('dbotscore'): dbot_list,
MISP_PATH: build_context(misp_response)
}
md = tableToMarkdown(f'Results found in MISP for IP: {ip}', md_list)
else:
md = f'No events found in MISP for IP: {ip}'
outputs = {
outputPaths.get('dbotscore'): {
'Indicator': ip,
'Type': DBotScoreType.IP,
'Vendor': 'MISP V2',
'Score': Common.DBotScore.NONE,
},
}
return_results(CommandResults(
readable_output=md,
outputs=outputs,
raw_response=misp_response,
))
def upload_sample():
"""
MISP needs to get files in base64. In the old integration (JS) it was converted by a script.
"""
# Creating dict with Demisto's arguments
args = ['distribution', 'to_ids', 'category', 'info', 'analysis', 'comment', 'threat_level_id']
args = {key: demisto.args().get(key) for key in args if demisto.args().get(key)}
args['threat_level_id'] = THREAT_LEVELS_NUMBERS.get(demisto.args().get('threat_level_id')) if demisto.args().get(
'threat_level_id') in THREAT_LEVELS_NUMBERS else demisto.args().get('threat_level_id')
args['analysis'] = ANALYSIS_NUMBERS.get(demisto.args().get('analysis')) if demisto.args().get(
'analysis') in ANALYSIS_NUMBERS else demisto.args().get('analysis')
event_id = demisto.args().get('event_id')
file = demisto.getFilePath(demisto.args().get('fileEntryID'))
filename = file.get('name')
file = file.get('path')
if not file:
return_error(f'file {filename} is empty or missing')
if not event_id:
if not demisto.args().get('info'):
demisto.args()['info'] = filename
event_id = create_event(ret_only_event_id=True)
res = MISP.upload_sample(filename=filename, filepath_or_bytes=file, event_id=event_id, **args)
if res.get('name') == 'Failed':
ec = None
else:
ec = {"MISP.UploadedSample": {filename: event_id}}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': res,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable':
f"MISP upload sample \n* message: {res.get("message")}\n* event id: {event_id}\n* file name: {filename}",
'EntryContext': ec,
})
def get_time_now():
"""
Returns:
str: time in year--month--day format
"""
time_now = time.gmtime(time.time())
return f'{time_now.tm_year}--{time_now.tm_mon}--{time_now.tm_mday}'
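# Illustrative output: for March 7th 2021 this returns '2021--3--7' (double dashes, no zero padding).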
def create_event(ret_only_event_id: bool = False) -> Union[int, None]:
"""Creating event in MISP with the given attribute
Args:
ret_only_event_id (bool): returning event ID if set to True
Returns:
int: event_id
"""
d_args = demisto.args()
# new_event in the old integration gets some args that belong to the attribute, so after creating the basic event,
# we will add the attribute
event_dic = {
'distribution': d_args.get('distribution'),
'threat_level_id': THREAT_LEVELS_NUMBERS.get(d_args.get('threat_level_id')) if d_args.get(
'threat_level_id') in THREAT_LEVELS_NUMBERS else d_args.get('threat_level_id'),
'analysis': ANALYSIS_NUMBERS.get(demisto.args().get('analysis')) if demisto.args().get(
'analysis') in ANALYSIS_NUMBERS else demisto.args().get('analysis'),
'info': d_args.get('info') if d_args.get('info') else 'Event from Demisto',
'date': d_args.get('date') if d_args.get('date') else get_time_now(),
'published': True if d_args.get('published') == 'true' else False,
'orgc_id': d_args.get('orgc_id'),
'org_id': d_args.get('org_id'),
'sharing_group_id': d_args.get('sharing_group_id')
}
event = MISP.new_event(**event_dic)
event_id = event.get('id')
if isinstance(event_id, str) and event_id.isdigit():
event_id = int(event_id)
elif not isinstance(event_id, int):
return_error('EventID must be a number')
if ret_only_event_id:
return event_id
# add attribute
add_attribute(event_id=event_id, internal=True)
event = MISP.search(eventid=event_id)
md = f"## MISP create event\nNew event with ID: {event_id} has been successfully created.\n"
ec = {
MISP_PATH: build_context(event)
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': event,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
})
return None
def add_attribute(event_id: int = None, internal: bool = None):
"""Adding attribute to given event
Args:
event_id (int): Event ID to add attribute to
internal(bool): if set to True, will not post results to Demisto
"""
d_args = demisto.args()
args = {
'id': d_args.get('id'),
'type': d_args.get('type') if d_args.get('type') else 'other',
'category': d_args.get('category'),
'to_ids': True if d_args.get('to_ids') == 'true' else False,
'distribution': d_args.get('distribution'),
'comment': d_args.get('comment'),
'value': d_args.get('value')
}
if event_id:
args['id'] = event_id # type: ignore
if isinstance(args.get('id'), str) and args.get('id').isdigit(): # type: ignore
args['id'] = int(args['id'])
elif not isinstance(args.get('id'), int):
return_error('Invalid MISP event ID, must be a number')
if args.get('distribution') is not None:
if not isinstance(args.get('distribution'), int):
if isinstance(args.get('distribution'), str) and args.get('distribution').isdigit(): # type: ignore
args['distribution'] = int(args['distribution'])
elif isinstance(args.get('distribution'), str) and args['distribution'] in DISTRIBUTION_NUMBERS:
args['distribution'] = DISTRIBUTION_NUMBERS.get(args['distribution'])
else:
return_error(
"Distribution can be 'Your_organisation_only', "
"'This_community_only', 'Connected_communities' or 'All_communities'"
)
event = MISP.get_event(args.get('id'))
# add attributes
event.add_attribute(**args)
MISP.update_event(event=event)
if internal:
return
event = MISP.search(eventid=args.get('id'))
md = f"## MISP add attribute\nNew attribute: {args.get("value")} was added to event id {args.get("id")}.\n"
ec = {
MISP_PATH: build_context(event)
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': {},
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
})
def download_file():
"""
Will post results of given file's hash if present.
MISP's response should be in case of success:
(True, [EventID, filename, fileContent])
in case of failure:
(False, 'No hits with the given parameters.')
"""
file_hash = demisto.args().get('hash')
event_id = demisto.args().get('eventID')
unzip = True if demisto.args().get('unzip') == 'true' else False
all_samples = True if demisto.args().get('allSamples') in ('1', 'true') else False
response = MISP.download_samples(sample_hash=file_hash,
event_id=event_id,
all_samples=all_samples,
unzip=unzip
)
if not response[0]:
demisto.results(f"Couldn't find file with hash {file_hash}")
else:
if unzip:
files = list()
for f in response:
# Check if it's tuple. if so, f = (EventID, hash, fileContent)
if isinstance(f, tuple) and len(f) == 3:
filename = f[1]
files.append(fileResult(filename, f[2].getbuffer()))
demisto.results(files)
else:
file_buffer = response[1][0][2].getbuffer()
filename = response[1][0][1]
demisto.results(fileResult(filename, file_buffer)) # type: ignore
def get_urls_events():
urls = argToList(demisto.args().get('url'), ',')
demisto.results(urls)
for url in urls:
check_url(url)
def check_url(url):
response = MISP.search(value=url, type_attribute='url')
if response:
dbot_list = list()
md_list = list()
url_list = list()
for event_in_response in response:
event = event_in_response.get('Event')
dbot_score = get_dbot_level(event.get('threat_level_id'))
misp_organisation = f"MISP.{event.get("Orgc").get("name")}"
dbot_obj = {
'Indicator': url,
'Type': 'url',
'Vendor': 'MISP V2',
'Score': dbot_score
}
url_obj = {
'Data': url,
}
if dbot_score == 3:
url_obj['Malicious'] = {
'Vendor': 'MISP V2',
'Description': f'IP Found in MISP event: {event.get("id")}'
}
md_obj = {
'EventID': event.get('id'),
'Threat Level': THREAT_LEVELS_WORDS[event.get('threat_level_id')],
'Organisation': misp_organisation
}
dbot_list.append(dbot_obj)
md_list.append(md_obj)
url_list.append(url_obj)
outputs = {
outputPaths.get('url'): url_list,
outputPaths.get('dbotscore'): dbot_list,
MISP_PATH: build_context(response)
}
md = tableToMarkdown(f'MISP Reputation for URL: {url}', md_list)
else:
md = f'No events found in MISP for URL: {url}'
outputs = {
outputPaths.get('dbotscore'): {
'Indicator': url,
'Type': DBotScoreType.URL,
'Vendor': 'MISP V2',
'Score': Common.DBotScore.NONE,
},
}
return_results(CommandResults(
readable_output=md,
outputs=outputs,
raw_response=response,
))
def build_misp_complex_filter(demisto_query: str) -> str:
"""
Args:
demisto_query: complex query contains saved words: 'AND:', 'OR:' and 'NOT:'
using ',' as delimiter for parameters and ';' as delimiter for operators.
using the operators is optional.
if 'demisto_query' does not contain any of the complex operators, the original
input will be returned
Returns:
str: dictionary created for misp to perform complex query
or if no complex query found returns the original input
Example:
demisto_query should look like:
example 1: "AND:param1,param2;OR:param3;NOT:param4,param5"
example 2: "NOT:param3,param5"
example 3 (simple syntax): "param1,param2"
"""
regex_and = r'(AND:)([^\;]+)(;)?'
regex_or = r'(OR:)([^\;]+)(;)?'
regex_not = r'(NOT:)([^\;]+)(;)?'
misp_query_params = dict()
is_complex_search = False
match_and = re.search(regex_and, demisto_query, re.MULTILINE)
match_or = re.search(regex_or, demisto_query, re.MULTILINE)
match_not = re.search(regex_not, demisto_query, re.MULTILINE)
if match_and is not None:
misp_query_params['and_parameters'] = match_and.group(2).split(',')
is_complex_search = True
if match_or is not None:
misp_query_params['or_parameters'] = match_or.group(2).split(',')
is_complex_search = True
if match_not is not None:
misp_query_params['not_parameters'] = match_not.group(2).split(',')
is_complex_search = True
if is_complex_search:
misp_complex_query = MISP.build_complex_query(**misp_query_params)
return misp_complex_query
return demisto_query
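# Illustrative usage (hypothetical tag names):
# >>> build_misp_complex_filter('AND:tlp:white,type:osint;NOT:tlp:red')
# builds MISP.build_complex_query(and_parameters=['tlp:white', 'type:osint'], not_parameters=['tlp:red']),
# while a plain 'tlp:white,type:osint' (no operators) is returned unchanged.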
def search(post_to_warroom: bool = True) -> Tuple[dict, Any]:
"""
will search in MISP
Returns
dict: Object with results to demisto:
"""
d_args = demisto.args()
# List of all applicable search arguments
search_args = [
'event_id',
'value',
'type',
'category',
'org',
'tags',
'from',
'to',
'last',
'eventid',
'uuid',
'to_ids'
]
args = dict()
# Create dict to pass into the search
for arg in search_args:
if arg in d_args:
args[arg] = d_args[arg]
# Replacing keys and values from Demisto to Misp's keys
if 'type' in args:
args['type_attribute'] = d_args.pop('type')
# search function 'to_ids' parameter gets 0 or 1 instead of bool.
if 'to_ids' in args:
args['to_ids'] = 1 if d_args.get('to_ids') in ('true', '1', 1) else 0
# build MISP complex filter
if 'tags' in args:
args['tags'] = build_misp_complex_filter(args['tags'])
response = MISP.search(**args)
if response:
response_for_context = build_context(response)
# Prepare MD. getting all keys and values if exists
args_for_md = {key: value for key, value in args.items() if value}
if post_to_warroom:
md = tableToMarkdown('Results in MISP for search:', args_for_md)
md_event = response_for_context[0]
md += f'Total of {len(response_for_context)} events found\n'
event_highlights = {
'Info': md_event.get('Info'),
'Timestamp': convert_timestamp(md_event.get('Timestamp')),
'Analysis': ANALYSIS_WORDS[md_event.get('Analysis')],
'Threat Level ID': THREAT_LEVELS_WORDS[md_event.get('ThreatLevelID')],
'Event Creator Email': md_event.get('EventCreatorEmail'),
'Attributes': json.dumps(md_event.get('Attribute'), indent=4),
'Related Events': md_event.get('RelatedEvent')
}
md += tableToMarkdown(f'Event ID: {md_event.get("ID")}', event_highlights)
if md_event.get('Galaxy'):
md += tableToMarkdown('Galaxy:', md_event.get('Galaxy'))
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
MISP_PATH: response_for_context
}
})
return response_for_context, response
else:
demisto.results(f"No events found in MISP for {args}")
return {}, {}
def search_attributes() -> Tuple[dict, Any]:
"""
Execute a MISP search using the 'attributes' controller.
"""
d_args = demisto.args()
# List of all applicable search arguments
search_args = [
'value',
'type',
'category',
'uuid',
'to_ids',
'last',
'include_decay_score'
]
args = dict()
# Create dict to pass into the search
for arg in search_args:
if arg in d_args:
args[arg] = d_args[arg]
# Replacing keys and values from Demisto to Misp's keys
if 'type' in args:
args['type_attribute'] = d_args.pop('type')
# search function 'to_ids' parameter gets 0 or 1 instead of bool.
if 'to_ids' in args:
args['to_ids'] = 1 if d_args.get('to_ids') in ('true', '1', 1) else 0
# Set the controller to attributes to search for attributes and not events
args['controller'] = 'attributes'
response = MISP.search(**args)
if response:
response_for_context = build_attribute_context(response)
md = f'## MISP attributes-search returned {len(response_for_context)} attributes.\n'
# if attributes were returned, display one to the warroom to visualize the result:
if len(response_for_context) > 0:
md += tableToMarkdown(f'Attribute ID: {response_for_context[0].get("ID")}', response_for_context[0])
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
MISP_ATTRIBUTE_PATH: response_for_context
}
})
return response_for_context, response
else:
demisto.results(f"No attributes found in MISP for {args}")
return {}, {}
def delete_event():
"""
Gets an event id and deletes it.
"""
event_id = demisto.args().get('event_id')
event = MISP.delete_event(event_id)
if 'errors' in event:
return_error(f'Event ID: {event_id} was not found in MISP: \nError message: {event}')
else:
md = f'Event {event_id} has been deleted'
demisto.results({
'Type': entryTypes['note'],
'Contents': event,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md
})
def add_tag():
"""
Function will add tag to given UUID of event or attribute.
"""
uuid = demisto.args().get('uuid')
tag = demisto.args().get('tag')
MISP.tag(uuid, tag)
event = MISP.search(uuid=uuid)
ec = {
MISP_PATH: build_context(event)
}
md = f'Tag {tag} has been successfully added to event {uuid}'
demisto.results({
'Type': entryTypes['note'],
'Contents': event,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
})
def add_sighting():
"""Adds sighting to MISP attribute
"""
sighting = {
'sighting': 0,
'false_positive': 1,
'expiration': 2
}
kargs = {
'id': demisto.args().get('id'),
'uuid': demisto.args().get('uuid'),
'type': sighting.get(demisto.args().get('type'))
}
att_id = demisto.args().get('id', demisto.args().get('uuid'))
if att_id:
MISP.set_sightings(kargs)
demisto.results(f'Sighting \'{demisto.args().get("type")}\' has been successfully added to attribute {att_id}')
else:
return_error('ID or UUID not specified')
def test():
"""
Test module.
"""
if MISP.test_connection():
demisto.results('ok')
else:
return_error('Could not connect to MISP.')
def add_events_from_feed():
"""Gets an OSINT feed from url and publishing them to MISP
urls with feeds for example: `https://www.misp-project.org/feeds/`
feed format must be MISP.
"""
headers = {'Accept': 'application/json'}
url = demisto.getArg('feed') # type: str
url = url[:-1] if url.endswith('/') else url
if PREDEFINED_FEEDS.get(url):
url = PREDEFINED_FEEDS[url].get('url') # type: ignore
limit = demisto.getArg('limit') # type: str
limit_int = int(limit) if limit.isdigit() else 0
osint_url = f'{url}/manifest.json'
not_added_counter = 0
try:
uri_list = requests.get(osint_url, verify=USE_SSL, headers=headers, proxies=proxies).json()
events_numbers = list() # type: List[Dict[str, int]]
for num, uri in enumerate(uri_list, 1):
req = requests.get(f'{url}/{uri}.json', verify=USE_SSL, headers=headers, proxies=proxies).json()
event = MISP.add_event(req)
if 'id' in event:
events_numbers.append({'ID': event['id']})
else:
not_added_counter += 1
# If limit exists
if limit_int == num:
break
entry_context = {MISP_PATH: events_numbers}
human_readable = tableToMarkdown(
f'Total of {len(events_numbers)} events were added to MISP.',
events_numbers,
headers='Event IDs'
)
if not_added_counter:
human_readable = f'{human_readable}\n' \
f'{not_added_counter} events were not added. They might have already been added earlier.'
return_outputs(human_readable, outputs=entry_context)
except ValueError:
return_error(f'URL [{url}] is not a valid MISP feed')
def add_object(event_id: str, obj: MISPObject):
"""Sending object to MISP and returning outputs
Args:
obj: object to add to MISP
event_id: ID of event
"""
response = MISP.add_object(event_id, misp_object=obj)
if 'errors' in response:
errors = extract_error(response["errors"])
error_string = str()
for err in errors:
error_string += f'\n\tError code: {err["code"]} ' \
f'\n\tMessage: {err["message"]}' \
f'\n\tErrors: {err["errors"]}\n'
return_error(f'Error in `{command}` command: {error_string}')
for ref in obj.ObjectReference:
response = MISP.add_object_reference(ref)
formatted_response = replace_keys(response)
entry_context = {
MISP_PATH:
{
'ID': event_id
}
}
entry_context[MISP_PATH].update(formatted_response) # type: ignore
human_readable = f'Object has been added to MISP event ID {event_id}'
return_outputs(
human_readable,
entry_context,
response
) # type: ignore
def add_email_object():
entry_id = demisto.getArg('entry_id')
event_id = demisto.getArg('event_id')
email_path = demisto.getFilePath(entry_id).get('path')
obj = EMailObject(email_path)
add_object(event_id, obj)
def add_domain_object():
"""Adds a domain object to MISP
domain-ip description: https://www.misp-project.org/objects.html#_domain_ip
"""
template = 'domain-ip'
args = [
'text',
'creation_date',
'first_seen',
'last_seen'
]
event_id = demisto.getArg('event_id')
domain = demisto.getArg('name')
obj = MISPObject(template)
ips = argToList(demisto.getArg('dns'))
for ip in ips:
obj.add_attribute('ip', value=ip)
obj.add_attribute('domain', value=domain)
for arg in args:
value = demisto.getArg(arg)
if value:
obj.add_attribute(arg, value=value)
add_object(event_id, obj)
def add_url_object():
"""Building url object in MISP scheme
Scheme described https://www.misp-project.org/objects.html#_url
"""
template = 'url'
url_args = [
'text',
'last_seen',
'first_seen'
]
event_id = demisto.getArg('event_id')
url = demisto.getArg('url')
url_parse = urlparse(url)
url_obj = [
{'url': url}
]
if url_parse.scheme:
url_obj.append({'scheme': url_parse.scheme})
if url_parse.path:
url_obj.append({'resource_path': url_parse.path})
if url_parse.query:
url_obj.append({'query_string': url_parse.query})
if url_parse.netloc:
url_obj.append({'domain': url_parse.netloc})
if url_parse.fragment:
url_obj.append({'fragment': url_parse.fragment})
if url_parse.port:
url_obj.append({'port': url_parse.port})
if url_parse.username and url_parse.password:
url_obj.append({'credential': (url_parse.username, url_parse.password)})
for arg in url_args:
new_arg = demisto.getArg(arg)
if new_arg:
url_obj.append({arg.replace('_', '-'): new_arg})
g_object = build_generic_object(template, url_obj)
add_object(event_id, g_object)
def add_generic_object_command():
event_id = demisto.getArg('event_id')
template = demisto.getArg('template')
attributes = demisto.getArg('attributes') # type: str
attributes = attributes.replace("'", '"')
try:
args = json.loads(attributes)
if not isinstance(args, list):
args = build_list_from_dict(args)
obj = build_generic_object(template, args)
add_object(event_id, obj)
except ValueError as e:
return_error(f'`attribute` parameter could not be decoded, may not be valid JSON\nattribute: {attributes}',
str(e))
def add_ip_object():
template = 'ip-port'
event_id = demisto.getArg('event_id')
args = [
'dst_port',
'src_port',
'domain',
'hostname',
'ip_src',
'ip_dst'
]
attr = [{arg.replace('_', '-'): demisto.getArg(arg)} for arg in args if demisto.getArg(arg)]
ips = argToList(demisto.getArg('ip'))
for ip in ips:
attr.append({'ip': ip})
if attr:
non_req_args = [
'first_seen',
'last_seen',
]
attr.extend({arg.replace('_', '-'): demisto.getArg(arg)} for arg in non_req_args if demisto.getArg(arg))
if demisto.getArg('comment'):
attr.append({'text': demisto.getArg('comment')})
obj = build_generic_object(template, attr)
add_object(event_id, obj)
else:
return_error(f'None of the required arguments is present. Command {command} requires one of {args}')
''' COMMANDS MANAGER / SWITCH PANEL '''
command = demisto.command()
def main():
LOG(f'command is {command}')
demisto.info(f'command is {command}')
try:
if command == 'test-module':
# This is the call made when pressing the integration test button.
test()
elif command == 'misp-upload-sample':
upload_sample()
elif command == 'misp-download-sample':
download_file()
elif command in ('internal-misp-create-event', 'misp-create-event'):
create_event()
elif command in ('internal-misp-add-attribute', 'misp-add-attribute'):
add_attribute()
elif command == 'misp-search':
search()
elif command == 'misp-search-attributes':
search_attributes()
elif command == 'misp-delete-event':
delete_event()
elif command == 'misp-add-sighting':
add_sighting()
elif command == 'misp-add-tag':
add_tag()
elif command == 'misp-add-events-from-feed':
add_events_from_feed()
elif command == 'file':
get_files_events()
elif command == 'url':
get_urls_events()
elif command == 'ip':
get_ips_events()
# Object commands
elif command == 'misp-add-email-object':
add_email_object()
elif command == 'misp-add-domain-object':
add_domain_object()
elif command == 'misp-add-url-object':
add_url_object()
elif command == 'misp-add-ip-object':
add_ip_object()
elif command == 'misp-add-object':
add_generic_object_command()
except PyMISPError as e:
return_error(e.message)
except Exception as e:
return_error(str(e))
if __name__ in ('__builtin__', 'builtins'):
main()
# TODO: in 5.0
# * Add !file (need docker change).
| # type: ignore
import logging
import warnings
from typing import Union, List, Any, Tuple, Dict
from urllib.parse import urlparse
import requests
from pymisp import ExpandedPyMISP, PyMISPError, MISPObject
from pymisp.tools import EMailObject, GenericObjectGenerator
from CommonServerPython import *
logging.getLogger("pymisp").setLevel(logging.CRITICAL)
def warn(*args):
"""
Do nothing with warnings
"""
pass
# Disable requests warnings
requests.packages.urllib3.disable_warnings()
# Disable python warnings
warnings.warn = warn
''' GLOBALS/PARAMS '''
PARAMS = demisto.params()
MISP_KEY = PARAMS.get('api_key')
MISP_URL = PARAMS.get('url')
USE_SSL = not PARAMS.get('insecure')
proxies = handle_proxy() # type: ignore
MISP_PATH = 'MISP.Event(obj.ID === val.ID)'
MISP_ATTRIBUTE_PATH = 'MISP.Attribute(obj.ID === val.ID)'
MISP = ExpandedPyMISP(url=MISP_URL, key=MISP_KEY, ssl=USE_SSL, proxies=proxies) # type: ExpandedPyMISP
DATA_KEYS_TO_SAVE = PARAMS.get('context_select', [])
try:
MAX_ATTRIBUTES = int(PARAMS.get('attributes_limit') or 1000)
except ValueError:
return_error("Maximum attributes in event must be a positive number")
else:
if MAX_ATTRIBUTES < 1:
return_error("Maximum attributes in event must be a positive number")
"""
dict format :
MISP key:DEMISTO key
"""
PREDEFINED_FEEDS = {
'CIRCL': {'name': 'CIRCL OSINT Feed',
'url': 'https://www.circl.lu/doc/misp/feed-osint',
'format': 'misp',
'input': 'network'},
'Botvrij.eu': {'name': 'The Botvrij.eu Data',
'url': 'http://www.botvrij.eu/data/feed-osint',
'format': 'misp',
'input': 'network'}
}
ENTITIESDICT = {
'deleted': 'Deleted',
'category': 'Category',
'comment': 'Comment',
'uuid': 'UUID',
'sharing_group_id': 'SharingGroupID',
'timestamp': 'Timestamp',
'to_ids': 'ToIDs',
'value': 'Value',
'event_id': 'EventID',
'ShadowAttribute': 'ShadowAttribute',
'disable_correlation': 'DisableCorrelation',
'distribution': 'Distribution',
'type': 'Type',
'id': 'ID',
'date': 'Date',
'info': 'Info',
'published': 'Published',
'attribute_count': 'AttributeCount',
'proposal_email_lock': 'ProposalEmailLock',
'locked': 'Locked',
'publish_timestamp': 'PublishTimestamp',
'event_creator_email': 'EventCreatorEmail',
'name': 'Name',
'analysis': 'Analysis',
'threat_level_id': 'ThreatLevelID',
'old_id': 'OldID',
'org_id': 'OrganisationID',
'Org': 'Organisation',
'Orgc': 'OwnerOrganisation',
'orgc_uuid': 'OwnerOrganisation.UUID',
'orgc_id': 'OwnerOrganisation.ID',
'orgc_name': 'OwnerOrganisation.Name',
'event_uuid': 'EventUUID',
'proposal_to_delete': 'ProposalToDelete',
'description': 'Description',
'version': 'Version',
'Object': 'Object',
'object_id': 'ObjectID',
'object_relation': 'ObjectRelation',
'template_version': 'TemplateVersion',
'template_uuid': 'TemplateUUID',
'meta-category': 'MetaCategory',
'decay_score': 'DecayScore'
}
THREAT_LEVELS_WORDS = {
'1': 'HIGH',
'2': 'MEDIUM',
'3': 'LOW',
'4': 'UNDEFINED'
}
THREAT_LEVELS_NUMBERS = {
'high': 1,
'medium': 2,
'low': 3,
'undefined': 4
}
ANALYSIS_WORDS = {
'0': 'Initial',
'1': 'Ongoing',
'2': 'Completed'
}
ANALYSIS_NUMBERS = {
'initial': 0,
'ongoing': 1,
'completed': 2
}
DISTRIBUTION_NUMBERS = {
'Your_organisation_only': 0,
'This_community_only': 1,
'Connected_communities': 2,
'All_communities': 3
}
''' HELPER FUNCTIONS '''
def extract_error(error: list) -> List[dict]:
"""Extracting errors
Args:
error: list of responses from error section
Returns:
List[Dict[str, any]]: filtered response
Examples:
extract_error([
(403,
{
'name': 'Could not add object',
'message': 'Could not add object',
'url': '/objects/add/156/',
'errors': 'Could not save object as at least one attribute has failed validation (ip). \
{"value":["IP address has an invalid format."]}'
}
)
])
Response:
[{
'code': 403,
'message': 'Could not add object',
'errors': 'Could not save object as at least one attribute has failed validation (ip). \
{"value":["IP address has an invalid format."]}'
}]
"""
return [{
'code': err[0],
'message': err[1].get('message'),
'errors': err[1].get('errors')
} for err in error]
def build_list_from_dict(args: dict) -> List[dict]:
"""
Args:
args: dictionary describes MISP object
Returns:
list: list containing dicts that GenericObjectGenerator can take.
Examples:
>>> {'ip': '8.8.8.8', 'domain': 'google.com'}
[{'ip': '8.8.8.8'}, {'domain': 'google.com'}]
"""
return [{k: v} for k, v in args.items()]
def build_generic_object(template_name: str, args: List[dict]) -> GenericObjectGenerator:
"""
Args:
template_name: template name as described in
args: arguments to create the generic object
Returns:
GenericObjectGenerator: object created in MISP
Example:
args should look like:
[{'analysis_submitted_at': '2018-06-15T06:40:27'},
{'threat_score': {value=95, to_ids=False}},
{'permalink': 'https://panacea.threatgrid.com/mask/samples/2e445ef5389d8b'},
{'heuristic_raw_score': 7.8385159793597}, {'heuristic_score': 96},
{'original_filename': 'juice.exe'}, {'id': '2e445ef5389d8b'}] # guardrails-disable-line
"""
misp_object = GenericObjectGenerator(template_name)
misp_object.generate_attributes(args)
return misp_object
def convert_timestamp(timestamp: Union[str, int]) -> str:
"""
Gets a timestamp from MISP response (1546713469) and converts it to human readable format
"""
return datetime.utcfromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
def replace_keys(obj_to_build: Union[dict, list, str]) -> Union[dict, list, str]:
"""
Replacing keys from MISP's format to Demisto's (as appear in ENTITIESDICT)
Args:
obj_to_build (Union[dict, list, str]): object to replace keys in
Returns:
Union[dict, list, str]: same object type that got in
"""
if isinstance(obj_to_build, list):
return [replace_keys(item) for item in obj_to_build]
if isinstance(obj_to_build, dict):
return {
(ENTITIESDICT[key] if key in ENTITIESDICT else key): replace_keys(value)
for key, value in obj_to_build.items()
}
return obj_to_build
def remove_unselected_context_keys(context_data):
for attribute in context_data['Attribute']:
for key in list(attribute.keys()):
if key not in DATA_KEYS_TO_SAVE:
del attribute[key]
def limit_attributes_count(event: dict) -> dict:
"""
Gets a MISP event and limits the number of attributes to MAX_ATTRIBUTES
Args:
event (dict): MISP's event
Returns:
dict: context output
"""
if event and 'Attribute' in event and len(event['Attribute']) > MAX_ATTRIBUTES:
attributes = event['Attribute']
attributes_num = len(attributes)
event_id = event.get('id', '')
event_uuid = event.get('uuid')
demisto.info(f'Limiting amount of attributes in event to {MAX_ATTRIBUTES} '
f'to keep context from being overwhelmed. '
f'This limit can be changed in the integration configuration. '
f'Event ID: {event_id}, Event UUID: {event_uuid}, Attributes in event: {attributes_num}')
sorted_attributes = sorted(attributes, key=lambda at: int(at.get('timestamp', 0)))
event['Attribute'] = sorted_attributes[attributes_num - MAX_ATTRIBUTES:]
return event
return event
def arrange_context_according_to_user_selection(context_data):
if not DATA_KEYS_TO_SAVE:
return
# each related event has its own attributes
for event in context_data:
# Remove filtered fields in event
remove_unselected_context_keys(event)
# Remove filtered fields in object
for obj in event['Object']:
remove_unselected_context_keys(obj)
def build_context(response: Union[dict, requests.Response]) -> dict: # type: ignore
"""
Gets a MISP response and builds the context output from it. Keys missing from the response are omitted.
Args:
response (requests.Response or dict):
Returns:
dict: context output
"""
event_args = [
'id',
'date',
'threat_level_id',
'info',
'published',
'uuid',
'analysis',
'timestamp',
'distribution',
'proposal_email_lock',
'locked',
'publish_timestamp',
'sharing_group_id',
'disable_correlation',
'event_creator_email',
'Org',
'Orgc',
'Attribute',
'ShadowAttribute',
'RelatedEvent',
'Galaxy',
'Tag',
'Object'
]
# Sometimes, PyMISP will return str instead of a dict. json.loads() wouldn't work unless we'll dumps it first
if isinstance(response, str):
response = json.loads(json.dumps(response))
# Remove 'Event' keyword
events = [event.get('Event') for event in response] # type: ignore
for i in range(0, len(events)):
events[i] = limit_attributes_count(events[i])
# Filter object from keys in event_args
events[i] = {
key: events[i].get(key)
for key in event_args if key in events[i]
}
# Remove 'Event' keyword from 'RelatedEvent'
if events[i].get('RelatedEvent'):
events[i]['RelatedEvent'] = [
r_event.get('Event') for r_event in events[i].get('RelatedEvent')
]
# Get only IDs from related event
events[i]['RelatedEvent'] = [
{
'id': r_event.get('id')
} for r_event in events[i].get('RelatedEvent')
]
# Build Galaxy
if events[i].get('Galaxy'):
events[i]['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in events[i]['Galaxy']
]
# Build tag
if events[i].get('Tag'):
events[i]['Tag'] = [
{'Name': tag.get('name')} for tag in events[i].get('Tag')
]
events = replace_keys(events) # type: ignore
arrange_context_according_to_user_selection(events) # type: ignore
return events # type: ignore
def build_attribute_context(response: Union[dict, requests.Response]) -> dict:
"""
Convert the response of attribute search returned from MISP to the context output format.
"""
attribute_fields = [
'id',
'event_id',
'object_id',
'object_relation',
'category',
'type',
'to_ids',
'uuid',
'timestamp',
'distribution',
'sharing_group_id',
'comment',
'deleted',
'disable_correlation',
'value',
'Event',
'Object',
'Galaxy', # field wasn't tested as we don't see it in our responses. Was added by customer's request.
'Tag',
'decay_score'
]
if isinstance(response, str):
response = json.loads(json.dumps(response))
attributes = response.get('Attribute')
for i in range(len(attributes)):
attributes[i] = {key: attributes[i].get(key) for key in attribute_fields if key in attributes[i]}
# Build Galaxy
if attributes[i].get('Galaxy'):
attributes[i]['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in attributes[i]['Galaxy']
]
# Build Tag
if attributes[i].get('Tag'):
attributes[i]['Tag'] = [
{'Name': tag.get('name')} for tag in attributes[i].get('Tag')
]
attributes = replace_keys(attributes)
return attributes
def get_misp_threat_level(threat_level_id: str) -> str: # type: ignore
"""Gets MISP's thread level and returning it in Demisto's format
Args:
threat_level_id: str of threat level in MISP
Returns:
str: Threat-level in Demisto
"""
if threat_level_id == '1':
return 'HIGH'
if threat_level_id == '2':
return 'MEDIUM'
if threat_level_id == '3':
return 'LOW'
if threat_level_id == '4':
return 'UNDEFINED'
return_error('Invalid MISP Threat Level with threat_level_id: ' + threat_level_id)
def get_dbot_level(threat_level_id: str) -> int:
"""
MISP to DBOT:
4 = 0 (UNDEFINED to UNKNOWN)
3 = 2 (LOW to SUSPICIOUS)
1 | 2 = 3 (MED/HIGH to MALICIOUS)
Args:
threat_level_id (str):
Returns:
int: DBOT score
"""
if threat_level_id in ('1', '2'):
return 3
if threat_level_id == '3':
return 2
if threat_level_id == '4':
return 0
return 0
def get_files_events():
files = argToList(demisto.args().get('file'), ',')
for file_hash in files:
check_file(file_hash)
def check_file(file_hash):
"""
Gets a file hash and returns the matching MISP events
file_hash (str): File's hash from demisto
Returns:
dict: MISP's output formatted to demisto:
"""
# hashFormat will be used only in output
hash_format = get_hash_type(file_hash).upper()
if hash_format == 'Unknown':
return_error('Invalid hash length, enter file hash of format MD5, SHA-1 or SHA-256')
# misp_response will remain the raw output of misp
misp_response = MISP.search(value=file_hash)
if misp_response:
dbot_list = list()
file_list = list()
md_list = list()
for i_event in misp_response:
event = i_event['Event']
i_event['Event']['RelatedEvent'] = [r_event.get('Event') for r_event in event.get('RelatedEvent')]
for i_event in misp_response:
event = i_event['Event']
misp_organisation = f"MISP.{event.get('orgc_name')}"
dbot_score = get_dbot_level(event.get('threat_level_id'))
# Build RelatedEvent
# if dbot_score is suspicious or malicious
dbot_obj = {
'Indicator': file_hash,
'Type': 'hash',
'Vendor': 'MISP V2',
'Score': dbot_score
}
file_obj = {
hash_format: file_hash
}
# if malicious, find file with given hash
if dbot_score == 3:
file_obj['Malicious'] = {
'Vendor': 'MISP V2',
'Description': f'file hash found in MISP event with ID: {event.get("id")}'
}
md_obj = {
'EventID': event.get('id'),
'Threat Level': THREAT_LEVELS_WORDS[event.get('threat_level_id')],
'Organisation': misp_organisation
}
file_list.append(file_obj)
dbot_list.append(dbot_obj)
md_list.append(md_obj)
# Building entry
outputs = {
outputPaths.get('file'): file_list,
outputPaths.get('dbotscore'): dbot_list
}
md = tableToMarkdown(f'Results found in MISP for hash: {file_hash}', md_list)
else:
md = f"No events found in MISP for hash {file_hash}"
outputs = {
outputPaths.get('dbotscore'): {
'Indicator': file_hash,
'Type': 'hash',
'Vendor': 'MISP V2',
'Score': Common.DBotScore.NONE,
},
}
return_results(CommandResults(
readable_output=md,
outputs=outputs,
raw_response=misp_response,
))
def get_ips_events():
ips = argToList(demisto.args().get('ip'), ',')
for ip in ips:
check_ip(ip)
def check_ip(ip):
"""
Gets an IP and returns its reputation (if it exists)
ip (str): IP to check
"""
if not is_ip_valid(ip):
return_error("IP isn't valid")
misp_response = MISP.search(value=ip)
if misp_response:
dbot_list = list()
ip_list = list()
md_list = list()
for event_in_response in misp_response:
event = event_in_response.get('Event')
dbot_score = get_dbot_level(event.get('threat_level_id'))
misp_organisation = f'MISP.{event.get("Orgc").get("name")}'
dbot_obj = {
'Indicator': ip,
'Type': 'ip',
'Vendor': 'MISP V2',
'Score': dbot_score
}
ip_obj = {'Address': ip}
# if malicious
if dbot_score == 3:
ip_obj['Malicious'] = {
'Vendor': 'MISP V2',
'Description': f'IP Found in MISP event: {event.get("id")}'
}
md_obj = {
'EventID': event.get('id'),
'Threat Level': THREAT_LEVELS_WORDS[event.get('threat_level_id')],
'Organisation': misp_organisation
}
ip_list.append(ip_obj)
dbot_list.append(dbot_obj)
md_list.append(md_obj)
outputs = {
outputPaths.get('ip'): ip_list,
outputPaths.get('dbotscore'): dbot_list,
MISP_PATH: build_context(misp_response)
}
md = tableToMarkdown(f'Results found in MISP for IP: {ip}', md_list)
else:
md = f'No events found in MISP for IP: {ip}'
outputs = {
outputPaths.get('dbotscore'): {
'Indicator': ip,
'Type': DBotScoreType.IP,
'Vendor': 'MISP V2',
'Score': Common.DBotScore.NONE,
},
}
return_results(CommandResults(
readable_output=md,
outputs=outputs,
raw_response=misp_response,
))
def upload_sample():
"""
MISP needs to get files in base64. In the old integration (JS) it was converted by a script.
"""
# Creating dict with Demisto's arguments
args = ['distribution', 'to_ids', 'category', 'info', 'analysis', 'comment', 'threat_level_id']
args = {key: demisto.args().get(key) for key in args if demisto.args().get(key)}
args['threat_level_id'] = THREAT_LEVELS_NUMBERS.get(demisto.args().get('threat_level_id')) if demisto.args().get(
'threat_level_id') in THREAT_LEVELS_NUMBERS else demisto.args().get('threat_level_id')
args['analysis'] = ANALYSIS_NUMBERS.get(demisto.args().get('analysis')) if demisto.args().get(
'analysis') in ANALYSIS_NUMBERS else demisto.args().get('analysis')
event_id = demisto.args().get('event_id')
file = demisto.getFilePath(demisto.args().get('fileEntryID'))
filename = file.get('name')
file = file.get('path')
if not file:
return_error(f'file {filename} is empty or missing')
if not event_id:
if not demisto.args().get('info'):
demisto.args()['info'] = filename
event_id = create_event(ret_only_event_id=True)
res = MISP.upload_sample(filename=filename, filepath_or_bytes=file, event_id=event_id, **args)
if res.get('name') == 'Failed':
ec = None
else:
ec = {"MISP.UploadedSample": {filename: event_id}}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': res,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable':
f"MISP upload sample \n* message: {res.get('message')}\n* event id: {event_id}\n* file name: {filename}",
'EntryContext': ec,
})
def get_time_now():
"""
Returns:
str: time in year--month--day format
"""
time_now = time.gmtime(time.time())
return f'{time_now.tm_year}--{time_now.tm_mon}--{time_now.tm_mday}'
def create_event(ret_only_event_id: bool = False) -> Union[int, None]:
"""Creating event in MISP with the given attribute
Args:
ret_only_event_id (bool): returning event ID if set to True
Returns:
int: event_id
"""
d_args = demisto.args()
# new_event in the old integration gets some args that belong to the attribute, so after creating the basic event,
# we will add the attribute
event_dic = {
'distribution': d_args.get('distribution'),
'threat_level_id': THREAT_LEVELS_NUMBERS.get(d_args.get('threat_level_id')) if d_args.get(
'threat_level_id') in THREAT_LEVELS_NUMBERS else d_args.get('threat_level_id'),
'analysis': ANALYSIS_NUMBERS.get(demisto.args().get('analysis')) if demisto.args().get(
'analysis') in ANALYSIS_NUMBERS else demisto.args().get('analysis'),
'info': d_args.get('info') if d_args.get('info') else 'Event from Demisto',
'date': d_args.get('date') if d_args.get('date') else get_time_now(),
'published': True if d_args.get('published') == 'true' else False,
'orgc_id': d_args.get('orgc_id'),
'org_id': d_args.get('org_id'),
'sharing_group_id': d_args.get('sharing_group_id')
}
event = MISP.new_event(**event_dic)
event_id = event.get('id')
if isinstance(event_id, str) and event_id.isdigit():
event_id = int(event_id)
elif not isinstance(event_id, int):
return_error('EventID must be a number')
if ret_only_event_id:
return event_id
# add attribute
add_attribute(event_id=event_id, internal=True)
event = MISP.search(eventid=event_id)
md = f"## MISP create event\nNew event with ID: {event_id} has been successfully created.\n"
ec = {
MISP_PATH: build_context(event)
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': event,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
})
return None
def add_attribute(event_id: int = None, internal: bool = None):
"""Adding attribute to given event
Args:
event_id (int): Event ID to add attribute to
internal(bool): if set to True, will not post results to Demisto
"""
d_args = demisto.args()
args = {
'id': d_args.get('id'),
'type': d_args.get('type') if d_args.get('type') else 'other',
'category': d_args.get('category'),
'to_ids': True if d_args.get('to_ids') == 'true' else False,
'distribution': d_args.get('distribution'),
'comment': d_args.get('comment'),
'value': d_args.get('value')
}
if event_id:
args['id'] = event_id # type: ignore
if isinstance(args.get('id'), str) and args.get('id').isdigit(): # type: ignore
args['id'] = int(args['id'])
elif not isinstance(args.get('id'), int):
return_error('Invalid MISP event ID, must be a number')
if args.get('distribution') is not None:
if not isinstance(args.get('distribution'), int):
if isinstance(args.get('distribution'), str) and args.get('distribution').isdigit(): # type: ignore
args['distribution'] = int(args['distribution'])
elif isinstance(args.get('distribution'), str) and args['distribution'] in DISTRIBUTION_NUMBERS:
args['distribution'] = DISTRIBUTION_NUMBERS.get(args['distribution'])
else:
return_error(
"Distribution can be 'Your_organisation_only', "
"'This_community_only', 'Connected_communities' or 'All_communities'"
)
event = MISP.get_event(args.get('id'))
# add attributes
event.add_attribute(**args)
MISP.update_event(event=event)
if internal:
return
event = MISP.search(eventid=args.get('id'))
md = f"## MISP add attribute\nNew attribute: {args.get('value')} was added to event id {args.get('id')}.\n"
ec = {
MISP_PATH: build_context(event)
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': {},
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
})
def download_file():
"""
Posts the file matching the given hash to the War Room, if present.
On success, MISP's response is:
(True, [EventID, filename, fileContent])
On failure:
(False, 'No hits with the given parameters.')
"""
file_hash = demisto.args().get('hash')
event_id = demisto.args().get('eventID')
unzip = True if demisto.args().get('unzip') == 'true' else False
all_samples = True if demisto.args().get('allSamples') in ('1', 'true') else False
response = MISP.download_samples(sample_hash=file_hash,
event_id=event_id,
all_samples=all_samples,
unzip=unzip
)
if not response[0]:
demisto.results(f"Couldn't find file with hash {file_hash}")
else:
if unzip:
files = list()
for f in response:
# Check if it's a tuple; if so, f = (EventID, filename, fileContent)
if isinstance(f, tuple) and len(f) == 3:
filename = f[1]
files.append(fileResult(filename, f[2].getbuffer()))
demisto.results(files)
else:
file_buffer = response[1][0][2].getbuffer()
filename = response[1][0][1]
demisto.results(fileResult(filename, file_buffer)) # type: ignore
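# Illustrative note (added comment, not part of the original integration): with unzip=false,
# a successful MISP.download_samples call returns a tuple shaped roughly like
# (True, [(EventID, filename, BytesIO), ...]), which is why the code above reads
# response[1][0][1] for the file name and response[1][0][2] for the file content.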
def get_urls_events():
urls = argToList(demisto.args().get('url'), ',')
demisto.results(urls)
for url in urls:
check_url(url)
def check_url(url):
response = MISP.search(value=url, type_attribute='url')
if response:
dbot_list = list()
md_list = list()
url_list = list()
for event_in_response in response:
event = event_in_response.get('Event')
dbot_score = get_dbot_level(event.get('threat_level_id'))
misp_organisation = f"MISP.{event.get('Orgc').get('name')}"
dbot_obj = {
'Indicator': url,
'Type': 'url',
'Vendor': 'MISP V2',
'Score': dbot_score
}
url_obj = {
'Data': url,
}
if dbot_score == 3:
url_obj['Malicious'] = {
'Vendor': 'MISP V2',
'Description': f'URL found in MISP event: {event.get("id")}'
}
md_obj = {
'EventID': event.get('id'),
'Threat Level': THREAT_LEVELS_WORDS[event.get('threat_level_id')],
'Organisation': misp_organisation
}
dbot_list.append(dbot_obj)
md_list.append(md_obj)
url_list.append(url_obj)
outputs = {
outputPaths.get('url'): url_list,
outputPaths.get('dbotscore'): dbot_list,
MISP_PATH: build_context(response)
}
md = tableToMarkdown(f'MISP Reputation for URL: {url}', md_list)
else:
md = f'No events found in MISP for URL: {url}'
outputs = {
outputPaths.get('dbotscore'): {
'Indicator': url,
'Type': DBotScoreType.URL,
'Vendor': 'MISP V2',
'Score': Common.DBotScore.NONE,
},
}
return_results(CommandResults(
readable_output=md,
outputs=outputs,
raw_response=response,
))
def build_misp_complex_filter(demisto_query: str) -> str:
"""
Args:
demisto_query: complex query containing the reserved words 'AND:', 'OR:' and 'NOT:',
using ',' as the delimiter for parameters and ';' as the delimiter for operators.
Using the operators is optional.
If 'demisto_query' does not contain any of the complex operators, the original
input is returned.
Returns:
str: the complex query built for MISP,
or the original input if no complex operators were found
Example:
demisto_query should look like:
example 1: "AND:param1,param2;OR:param3;NOT:param4,param5"
example 2: "NOT:param3,param5"
example 3 (simple syntax): "param1,param2"
"""
regex_and = r'(AND:)([^\;]+)(;)?'
regex_or = r'(OR:)([^\;]+)(;)?'
regex_not = r'(NOT:)([^\;]+)(;)?'
misp_query_params = dict()
is_complex_search = False
match_and = re.search(regex_and, demisto_query, re.MULTILINE)
match_or = re.search(regex_or, demisto_query, re.MULTILINE)
match_not = re.search(regex_not, demisto_query, re.MULTILINE)
if match_and is not None:
misp_query_params['and_parameters'] = match_and.group(2).split(',')
is_complex_search = True
if match_or is not None:
misp_query_params['or_parameters'] = match_or.group(2).split(',')
is_complex_search = True
if match_not is not None:
misp_query_params['not_parameters'] = match_not.group(2).split(',')
is_complex_search = True
if is_complex_search:
misp_complex_query = MISP.build_complex_query(**misp_query_params)
return misp_complex_query
return demisto_query
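# Illustrative sketch (hypothetical parameter values): for a query string such as
# "AND:param1,param2;NOT:param4", the regexes above produce
#   misp_query_params = {'and_parameters': ['param1', 'param2'], 'not_parameters': ['param4']}
# which is handed to MISP.build_complex_query(**misp_query_params); a plain "param1,param2"
# contains no operators and is returned unchanged.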
def search(post_to_warroom: bool = True) -> Tuple[dict, Any]:
"""
Search in MISP.
Returns:
dict: Object with results for Demisto.
"""
d_args = demisto.args()
# List of all applicable search arguments
search_args = [
'event_id',
'value',
'type',
'category',
'org',
'tags',
'from',
'to',
'last',
'eventid',
'uuid',
'to_ids'
]
args = dict()
# Create dict to pass into the search
for arg in search_args:
if arg in d_args:
args[arg] = d_args[arg]
# Replace Demisto argument keys/values with the keys MISP expects
if 'type' in args:
args['type_attribute'] = d_args.pop('type')
# search function 'to_ids' parameter gets 0 or 1 instead of bool.
if 'to_ids' in args:
args['to_ids'] = 1 if d_args.get('to_ids') in ('true', '1', 1) else 0
# build MISP complex filter
if 'tags' in args:
args['tags'] = build_misp_complex_filter(args['tags'])
response = MISP.search(**args)
if response:
response_for_context = build_context(response)
# Prepare MD: include only the args that have values
args_for_md = {key: value for key, value in args.items() if value}
if post_to_warroom:
md = tableToMarkdown('Results in MISP for search:', args_for_md)
md_event = response_for_context[0]
md += f'Total of {len(response_for_context)} events found\n'
event_highlights = {
'Info': md_event.get('Info'),
'Timestamp': convert_timestamp(md_event.get('Timestamp')),
'Analysis': ANALYSIS_WORDS[md_event.get('Analysis')],
'Threat Level ID': THREAT_LEVELS_WORDS[md_event.get('ThreatLevelID')],
'Event Creator Email': md_event.get('EventCreatorEmail'),
'Attributes': json.dumps(md_event.get('Attribute'), indent=4),
'Related Events': md_event.get('RelatedEvent')
}
md += tableToMarkdown(f'Event ID: {md_event.get("ID")}', event_highlights)
if md_event.get('Galaxy'):
md += tableToMarkdown('Galaxy:', md_event.get('Galaxy'))
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
MISP_PATH: response_for_context
}
})
return response_for_context, response
else:
demisto.results(f"No events found in MISP for {args}")
return {}, {}
def search_attributes() -> Tuple[dict, Any]:
"""
Execute a MISP search using the 'attributes' controller.
"""
d_args = demisto.args()
# List of all applicable search arguments
search_args = [
'value',
'type',
'category',
'uuid',
'to_ids',
'last',
'include_decay_score'
]
args = dict()
# Create dict to pass into the search
for arg in search_args:
if arg in d_args:
args[arg] = d_args[arg]
# Replace Demisto argument keys/values with the keys MISP expects
if 'type' in args:
args['type_attribute'] = d_args.pop('type')
# search function 'to_ids' parameter gets 0 or 1 instead of bool.
if 'to_ids' in args:
args['to_ids'] = 1 if d_args.get('to_ids') in ('true', '1', 1) else 0
# Set the controller to attributes to search for attributes and not events
args['controller'] = 'attributes'
response = MISP.search(**args)
if response:
response_for_context = build_attribute_context(response)
md = f'## MISP attributes-search returned {len(response_for_context)} attributes.\n'
# if attributes were returned, display one to the warroom to visualize the result:
if len(response_for_context) > 0:
md += tableToMarkdown(f'Attribute ID: {response_for_context[0].get("ID")}', response_for_context[0])
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
MISP_ATTRIBUTE_PATH: response_for_context
}
})
return response_for_context, response
else:
demisto.results(f"No attributes found in MISP for {args}")
return {}, {}
def delete_event():
"""
Gets an event id and deletes it.
"""
event_id = demisto.args().get('event_id')
event = MISP.delete_event(event_id)
if 'errors' in event:
return_error(f'Event ID: {event_id} was not found in MISP: \nError message: {event}')
else:
md = f'Event {event_id} has been deleted'
demisto.results({
'Type': entryTypes['note'],
'Contents': event,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md
})
def add_tag():
"""
Function will add tag to given UUID of event or attribute.
"""
uuid = demisto.args().get('uuid')
tag = demisto.args().get('tag')
MISP.tag(uuid, tag)
event = MISP.search(uuid=uuid)
ec = {
MISP_PATH: build_context(event)
}
md = f'Tag {tag} has been successfully added to event {uuid}'
demisto.results({
'Type': entryTypes['note'],
'Contents': event,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
})
def add_sighting():
"""Adds sighting to MISP attribute
"""
sighting = {
'sighting': 0,
'false_positive': 1,
'expiration': 2
}
kargs = {
'id': demisto.args().get('id'),
'uuid': demisto.args().get('uuid'),
'type': sighting.get(demisto.args().get('type'))
}
att_id = demisto.args().get('id', demisto.args().get('uuid'))
if att_id:
MISP.set_sightings(kargs)
demisto.results(f'Sighting \'{demisto.args().get("type")}\' has been successfully added to attribute {att_id}')
else:
return_error('ID or UUID not specified')
def test():
"""
Test module.
"""
if MISP.test_connection():
demisto.results('ok')
else:
return_error('Could not connect to MISP.')
def add_events_from_feed():
"""Gets an OSINT feed from a URL and publishes its events to MISP.
urls with feeds for example: `https://www.misp-project.org/feeds/`
feed format must be MISP.
"""
headers = {'Accept': 'application/json'}
url = demisto.getArg('feed') # type: str
url = url[:-1] if url.endswith('/') else url
if PREDEFINED_FEEDS.get(url):
url = PREDEFINED_FEEDS[url].get('url') # type: ignore
limit = demisto.getArg('limit') # type: str
limit_int = int(limit) if limit.isdigit() else 0
osint_url = f'{url}/manifest.json'
not_added_counter = 0
try:
uri_list = requests.get(osint_url, verify=USE_SSL, headers=headers, proxies=proxies).json()
events_numbers = list() # type: List[Dict[str, int]]
for num, uri in enumerate(uri_list, 1):
req = requests.get(f'{url}/{uri}.json', verify=USE_SSL, headers=headers, proxies=proxies).json()
event = MISP.add_event(req)
if 'id' in event:
events_numbers.append({'ID': event['id']})
else:
not_added_counter += 1
# If limit exists
if limit_int == num:
break
entry_context = {MISP_PATH: events_numbers}
human_readable = tableToMarkdown(
f'Total of {len(events_numbers)} events were added to MISP.',
events_numbers,
headers='Event IDs'
)
if not_added_counter:
human_readable = f'{human_readable}\n' \
f'{not_added_counter} events were not added. They might have already been added earlier.'
return_outputs(human_readable, outputs=entry_context)
except ValueError:
return_error(f'URL [{url}] is not a valid MISP feed')
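# Illustrative note (added comment; assumption about the MISP feed layout): a feed's
# manifest.json maps event UUIDs to event metadata, so iterating over the parsed manifest
# above yields UUIDs, and each event is then fetched individually from '<feed-url>/<uuid>.json'.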
def add_object(event_id: str, obj: MISPObject):
"""Sending object to MISP and returning outputs
Args:
obj: object to add to MISP
event_id: ID of event
"""
response = MISP.add_object(event_id, misp_object=obj)
if 'errors' in response:
errors = extract_error(response["errors"])
error_string = str()
for err in errors:
error_string += f'\n\tError code: {err["code"]} ' \
f'\n\tMessage: {err["message"]}' \
f'\n\tErrors: {err["errors"]}\n'
return_error(f'Error in `{command}` command: {error_string}')
for ref in obj.ObjectReference:
response = MISP.add_object_reference(ref)
formatted_response = replace_keys(response)
entry_context = {
MISP_PATH:
{
'ID': event_id
}
}
entry_context[MISP_PATH].update(formatted_response) # type: ignore
human_readable = f'Object has been added to MISP event ID {event_id}'
return_outputs(
human_readable,
entry_context,
response
) # type: ignore
def add_email_object():
entry_id = demisto.getArg('entry_id')
event_id = demisto.getArg('event_id')
email_path = demisto.getFilePath(entry_id).get('path')
obj = EMailObject(email_path)
add_object(event_id, obj)
def add_domain_object():
"""Adds a domain object to MISP
domain-ip description: https://www.misp-project.org/objects.html#_domain_ip
"""
template = 'domain-ip'
args = [
'text',
'creation_date',
'first_seen',
'last_seen'
]
event_id = demisto.getArg('event_id')
domain = demisto.getArg('name')
obj = MISPObject(template)
ips = argToList(demisto.getArg('dns'))
for ip in ips:
obj.add_attribute('ip', value=ip)
obj.add_attribute('domain', value=domain)
for arg in args:
value = demisto.getArg(arg)
if value:
obj.add_attribute(arg, value=value)
add_object(event_id, obj)
def add_url_object():
"""Building url object in MISP scheme
Scheme described https://www.misp-project.org/objects.html#_url
"""
template = 'url'
url_args = [
'text',
'last_seen',
'first_seen'
]
event_id = demisto.getArg('event_id')
url = demisto.getArg('url')
url_parse = urlparse(url)
url_obj = [
{'url': url}
]
if url_parse.scheme:
url_obj.append({'scheme': url_parse.scheme})
if url_parse.path:
url_obj.append({'resource_path': url_parse.path})
if url_parse.query:
url_obj.append({'query_string': url_parse.query})
if url_parse.netloc:
url_obj.append({'domain': url_parse.netloc})
if url_parse.fragment:
url_obj.append({'fragment': url_parse.fragment})
if url_parse.port:
url_obj.append({'port': url_parse.port})
if url_parse.username and url_parse.password:
url_obj.append({'credential': (url_parse.username, url_parse.password)})
for arg in url_args:
new_arg = demisto.getArg(arg)
if new_arg:
url_obj.append({arg.replace('_', '-'): new_arg})
g_object = build_generic_object(template, url_obj)
add_object(event_id, g_object)
def add_generic_object_command():
event_id = demisto.getArg('event_id')
template = demisto.getArg('template')
attributes = demisto.getArg('attributes') # type: str
attributes = attributes.replace("'", '"')
try:
args = json.loads(attributes)
if not isinstance(args, list):
args = build_list_from_dict(args)
obj = build_generic_object(template, args)
add_object(event_id, obj)
except ValueError as e:
return_error(f'`attribute` parameter could not be decoded; it may not be valid JSON\nattribute: {attributes}',
str(e))
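# Illustrative example (hypothetical values): the `attributes` argument is JSON such as
# '{"ip": "8.8.8.8", "text": "some comment"}' or a list of single-key objects like
# '[{"ip": "8.8.8.8"}, {"text": "some comment"}]'; build_list_from_dict presumably converts
# the dict form into the list form before build_generic_object assembles the MISP object.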
def add_ip_object():
template = 'ip-port'
event_id = demisto.getArg('event_id')
args = [
'dst_port',
'src_port',
'domain',
'hostname',
'ip_src',
'ip_dst'
]
attr = [{arg.replace('_', '-'): demisto.getArg(arg)} for arg in args if demisto.getArg(arg)]
ips = argToList(demisto.getArg('ip'))
for ip in ips:
attr.append({'ip': ip})
if attr:
non_req_args = [
'first_seen',
'last_seen',
]
attr.extend({arg.replace('_', '-'): demisto.getArg(arg)} for arg in non_req_args if demisto.getArg(arg))
if demisto.getArg('comment'):
attr.append({'text': demisto.getArg('comment')})
obj = build_generic_object(template, attr)
add_object(event_id, obj)
else:
return_error(f'None of the required arguments was provided. Command {command} requires at least one of {args}')
''' COMMANDS MANAGER / SWITCH PANEL '''
command = demisto.command()
def main():
LOG(f'command is {command}')
demisto.info(f'command is {command}')
try:
if command == 'test-module':
# This is the call made when pressing the integration test button.
test()
elif command == 'misp-upload-sample':
upload_sample()
elif command == 'misp-download-sample':
download_file()
elif command in ('internal-misp-create-event', 'misp-create-event'):
create_event()
elif command in ('internal-misp-add-attribute', 'misp-add-attribute'):
add_attribute()
elif command == 'misp-search':
search()
elif command == 'misp-search-attributes':
search_attributes()
elif command == 'misp-delete-event':
delete_event()
elif command == 'misp-add-sighting':
add_sighting()
elif command == 'misp-add-tag':
add_tag()
elif command == 'misp-add-events-from-feed':
add_events_from_feed()
elif command == 'file':
get_files_events()
elif command == 'url':
get_urls_events()
elif command == 'ip':
get_ips_events()
# Object commands
elif command == 'misp-add-email-object':
add_email_object()
elif command == 'misp-add-domain-object':
add_domain_object()
elif command == 'misp-add-url-object':
add_url_object()
elif command == 'misp-add-ip-object':
add_ip_object()
elif command == 'misp-add-object':
add_generic_object_command()
except PyMISPError as e:
return_error(e.message)
except Exception as e:
return_error(str(e))
if __name__ in ('__builtin__', 'builtins'):
main()
# TODO: in 5.0
# * Add !file (need docker change).
|
import json, sys, os, re
from os.path import isfile, isdir, join, splitext, dirname, basename
from src.configuration import config, process_config, get_mini_config
from src.globals import config_ext, json_ext, jtree_ext
from src import gui
from src.jtree import build_jtree
resource_path = './resources'
jtree_path = './jtrees'
def load_json(filename):
"""
Loads json (with list), jsonlines or jtree
"""
json_list = []
with open(filename, 'r', encoding='utf-8') as file:
is_list = file.read(1) == '['
file.seek(0)
if is_list:
json_list = json.load(file)
else:
first_line = file.readline()
is_jsonlines = re.search('{.*}\n', first_line)
file.seek(0)
if is_jsonlines:
for line in file:
json_list.append(json.loads(line))
else:
json_list = json.load(file) # should be jtree
return json_list
def _process_run_args(config_filename, json_filename):
process_config(config_filename)
if not json_filename:
json_filename = join(dirname(config_filename), config['default_json_file'])
return json_filename
def run_explorer_save_tree(config_filename, json_filename):
"""
Saves in jtree format
"""
if not config_filename:
return
json_filename = _process_run_args(config_filename, json_filename)
json_list = load_json(json_filename)
data_tree, gui_texts, _, _ = build_jtree(json_list)
jtree = {'gui_texts': gui_texts, 'data_tree': data_tree}
os.makedirs(jtree_path, exist_ok=True)
with open(join(jtree_path, splitext(basename(config_filename))[0] + jtree_ext), 'w', encoding='utf-8') as file:
json.dump(jtree, file, indent=2)
def run_explorer_gui(config_filename=None, json_filename=None, header=None):
"""
1. Load json
2. Build jtree
3. Tree view gui
"""
json_filename = _process_run_args(config_filename, json_filename)
is_jtree = splitext(json_filename)[1] == jtree_ext
gui.open_waiting_window(header if header else config['header'], f"LOADING {"JTREE" if is_jtree else "JSON"}...")
json_list = load_json(json_filename)
if is_jtree:
data_tree = json_list['data_tree']
gui_texts = json_list['gui_texts']
else:
gui.update_waiting_window('BUILDING TREE...')
data_tree, gui_texts, all_ids, json_list = build_jtree(json_list)
gui.json_list = json_list
gui.all_ids = all_ids
gui_treedata = gui.build_gui_tree(data_tree)
gui.tree_view(gui_treedata, gui_texts)
def _get_choices_tuples(path, only_cfg=False):
"""
Finds cfg and/or jtree files in resources or jtrees dir
Then extracts their header and json filename
"""
valid_exts = (config_ext, jtree_ext) if not only_cfg else config_ext
choices = [f for f in os.listdir(path) if isfile(join(path, f)) and splitext(f)[1] in valid_exts]
for i, cfile in enumerate(choices):
if splitext(cfile)[1] == config_ext:
header, json_file = get_mini_config(path, cfile)
choices[i] = (header, cfile, json_file)
else: # jtree_ext
with open(join(path, cfile), 'r', encoding='unicode_escape', errors='strict') as file:
header_key = '"header": '
header = None
for _ in range(20): # The header should be among the first few lines
line = file.readline()
if line.find(header_key) != -1:
header = line.replace(header_key, '').split('"')[1]
break
choices[i] = (header, None, cfile)
return sorted(choices, key=lambda x: x[0])
def demo(path):
"""
Menu for demo
"""
choices = _get_choices_tuples(path)
ix = gui.radio_window([c[0] for c in choices], path)
if ix == -1:
return
config_filename = join(path, choices[ix][1]) if choices[ix][1] else None
run_explorer_gui(config_filename=config_filename, json_filename=join(path, choices[ix][2]), header=choices[ix][0])
def main(argv):
argc = len(argv)
if argc == 1:
if isdir(jtree_path):
demo(jtree_path)
else:
demo(resource_path)
elif argc == 2 and argv[1] == '-build':
demo(resource_path)
elif argc == 2 and splitext(argv[1])[1] == jtree_ext:
run_explorer_gui(config_filename=None, json_filename=argv[1])
elif argc == 2 and argv[1] == '-saveall':
choices = _get_choices_tuples(resource_path, only_cfg=True)
for c in choices:
print(c[0])
run_explorer_save_tree(join(resource_path, c[1]), join(resource_path, c[2]))
else:
config_filename, json_filename = None, None
save_jtree = False
for arg in argv[1:]:
if arg == '-save':
save_jtree = True
elif splitext(arg)[1] == config_ext:
config_filename = arg
elif splitext(arg)[1] == json_ext:
json_filename = arg
if save_jtree:
run_explorer_save_tree(config_filename=config_filename, json_filename=json_filename)
else:
run_explorer_gui(config_filename=config_filename, json_filename=json_filename)
if __name__ == "__main__":
# for item in get_choices_tuples(resource_path): print(item[0])
main(sys.argv)
| import json, sys, os, re
from os.path import isfile, isdir, join, splitext, dirname, basename
from src.configuration import config, process_config, get_mini_config
from src.globals import config_ext, json_ext, jtree_ext
from src import gui
from src.jtree import build_jtree
resource_path = './resources'
jtree_path = './jtrees'
def load_json(filename):
"""
Loads json (with list), jsonlines or jtree
"""
json_list = []
with open(filename, 'r', encoding='utf-8') as file:
is_list = file.read(1) == '['
file.seek(0)
if is_list:
json_list = json.load(file)
else:
first_line = file.readline()
is_jsonlines = re.search('{.*}\n', first_line)
file.seek(0)
if is_jsonlines:
for line in file:
json_list.append(json.loads(line))
else:
json_list = json.load(file) # should be jtree
return json_list
def _process_run_args(config_filename, json_filename):
process_config(config_filename)
if not json_filename:
json_filename = join(dirname(config_filename), config['default_json_file'])
return json_filename
def run_explorer_save_tree(config_filename, json_filename):
"""
Saves in jtree format
"""
if not config_filename:
return
json_filename = _process_run_args(config_filename, json_filename)
json_list = load_json(json_filename)
data_tree, gui_texts, _, _ = build_jtree(json_list)
jtree = {'gui_texts': gui_texts, 'data_tree': data_tree}
os.makedirs(jtree_path, exist_ok=True)
with open(join(jtree_path, splitext(basename(config_filename))[0] + jtree_ext), 'w', encoding='utf-8') as file:
json.dump(jtree, file, indent=2)
def run_explorer_gui(config_filename=None, json_filename=None, header=None):
"""
1. Load json
2. Build jtree
3. Tree view gui
"""
json_filename = _process_run_args(config_filename, json_filename)
is_jtree = splitext(json_filename)[1] == jtree_ext
gui.open_waiting_window(header if header else config['header'], f"LOADING {'JTREE' if is_jtree else 'JSON'}...")
json_list = load_json(json_filename)
if is_jtree:
data_tree = json_list['data_tree']
gui_texts = json_list['gui_texts']
else:
gui.update_waiting_window('BUILDING TREE...')
data_tree, gui_texts, all_ids, json_list = build_jtree(json_list)
gui.json_list = json_list
gui.all_ids = all_ids
gui_treedata = gui.build_gui_tree(data_tree)
gui.tree_view(gui_treedata, gui_texts)
def _get_choices_tuples(path, only_cfg=False):
"""
Finds cfg and/or jtree files in resources or jtrees dir
Then extracts their header and json filename
"""
valid_exts = (config_ext, jtree_ext) if not only_cfg else config_ext
choices = [f for f in os.listdir(path) if isfile(join(path, f)) and splitext(f)[1] in valid_exts]
for i, cfile in enumerate(choices):
if splitext(cfile)[1] == config_ext:
header, json_file = get_mini_config(path, cfile)
choices[i] = (header, cfile, json_file)
else: # jtree_ext
with open(join(path, cfile), 'r', encoding='unicode_escape', errors='strict') as file:
header_key = '"header": '
header = None
for _ in range(20): # The header should be among the first few lines
line = file.readline()
if line.find(header_key) != -1:
header = line.replace(header_key, '').split('"')[1]
break
choices[i] = (header, None, cfile)
return sorted(choices, key=lambda x: x[0])
def demo(path):
"""
Menu for demo
"""
choices = _get_choices_tuples(path)
ix = gui.radio_window([c[0] for c in choices], path)
if ix == -1:
return
config_filename = join(path, choices[ix][1]) if choices[ix][1] else None
run_explorer_gui(config_filename=config_filename, json_filename=join(path, choices[ix][2]), header=choices[ix][0])
def main(argv):
argc = len(argv)
if argc == 1:
if isdir(jtree_path):
demo(jtree_path)
else:
demo(resource_path)
elif argc == 2 and argv[1] == '-build':
demo(resource_path)
elif argc == 2 and splitext(argv[1])[1] == jtree_ext:
run_explorer_gui(config_filename=None, json_filename=argv[1])
elif argc == 2 and argv[1] == '-saveall':
choices = _get_choices_tuples(resource_path, only_cfg=True)
for c in choices:
print(c[0])
run_explorer_save_tree(join(resource_path, c[1]), join(resource_path, c[2]))
else:
config_filename, json_filename = None, None
save_jtree = False
for arg in argv[1:]:
if arg == '-save':
save_jtree = True
elif splitext(arg)[1] == config_ext:
config_filename = arg
elif splitext(arg)[1] == json_ext:
json_filename = arg
if save_jtree:
run_explorer_save_tree(config_filename=config_filename, json_filename=json_filename)
else:
run_explorer_gui(config_filename=config_filename, json_filename=json_filename)
if __name__ == "__main__":
# for item in get_choices_tuples(resource_path): print(item[0])
main(sys.argv)
|
"""Test classes."""
# pyright: basic, reportIncompatibleMethodOverride=none
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, MutableMapping, Optional, Tuple
import boto3
import yaml
from botocore.client import BaseClient
from botocore.stub import Stubber
from mock import MagicMock
from packaging.specifiers import SpecifierSet
from runway.config.components.runway import RunwayDeploymentDefinition
from runway.context import CfnginContext, RunwayContext
from runway.core.components import DeployEnvironment
from runway.utils import MutableMap
if TYPE_CHECKING:
from pathlib import Path
from runway.config import CfnginConfig
from runway.core.type_defs import RunwayActionTypeDef
class MockBoto3Session:
"""Mock class that acts like a boto3.session.
Must be preloaded with stubbers.
"""
def __init__(
self,
*,
clients: Optional[MutableMap] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
profile_name: Optional[str] = None,
region_name: Optional[str] = None,
):
"""Instantiate class.
Args:
clients: Clients that have already been stubbed.
aws_access_key_id: Same as boto3.Session.
aws_secret_access_key: Same as boto3.Session.
aws_session_token: Same as boto3.Session.
profile_name: Same as boto3.Session.
region_name: Same as boto3.Session.
"""
self._clients = clients or MutableMap()
self._client_calls: Dict[str, Any] = {}
self._session = MagicMock()
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
self.profile_name = profile_name
self.region_name = region_name
def assert_client_called_with(self, service_name: str, **kwargs: Any) -> None:
"""Assert a client was created with the provided kwargs."""
key = f"{service_name}.{kwargs.get("region_name", self.region_name)}"
assert self._client_calls[key] == kwargs
def client(self, service_name: str, **kwargs: Any) -> BaseClient:
"""Return a stubbed client.
Args:
service_name: The name of a service, e.g. 's3' or 'ec2'.
Returns:
Stubbed boto3 client.
Raises:
KeyError: Client was not stubbed from Context before trying to use.
"""
key = f"{service_name}.{kwargs.get("region_name", self.region_name)}"
self._client_calls[key] = kwargs
return self._clients[key]
def register_client(
self, service_name: str, region_name: Optional[str] = None
) -> Tuple[Any, Stubber]:
"""Register a client for the boto3 session.
Args:
service_name: The name of a service, e.g. 's3' or 'ec2'.
region_name: AWS region.
"""
key = f"{service_name}.{region_name or self.region_name}"
client = boto3.client( # type: ignore
service_name, # type: ignore
region_name=region_name or self.region_name,
)
stubber = Stubber(client) # type: ignore
self._clients[key] = client # type: ignore
return client, stubber # type: ignore
def service(self, service_name: str, region_name: Optional[str] = None) -> None:
"""Not implemented."""
raise NotImplementedError
class MockCFNginContext(CfnginContext):
"""Subclass CFNgin context object for tests."""
def __init__(
self,
*,
config_path: Optional[Path] = None,
config: Optional[CfnginConfig] = None,
deploy_environment: Optional[DeployEnvironment] = None,
parameters: Optional[MutableMapping[str, Any]] = None,
force_stacks: Optional[List[str]] = None,
region: Optional[str] = "us-east-1",
stack_names: Optional[List[str]] = None,
**_: Any,
) -> None:
"""Instantiate class."""
self._boto3_test_client = MutableMap()
self._boto3_test_stubber = MutableMap()
# used during init process
self.s3_stubber = self.add_stubber("s3", region=region)
super().__init__(
config_path=config_path,
config=config,
deploy_environment=deploy_environment,
force_stacks=force_stacks,
parameters=parameters,
stack_names=stack_names,
)
def add_stubber(self, service_name: str, region: Optional[str] = None) -> Stubber:
"""Add a stubber to context.
Args:
service_name: The name of a service, e.g. 's3' or 'ec2'.
region: AWS region.
"""
key = f"{service_name}.{region or self.env.aws_region}"
self._boto3_test_client[key] = boto3.client( # type: ignore
service_name, # type: ignore
region_name=region or self.env.aws_region,
)
self._boto3_test_stubber[key] = Stubber(self._boto3_test_client[key])
return self._boto3_test_stubber[key]
def get_session(
self,
*,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
profile: Optional[str] = None,
region: Optional[str] = None,
) -> MockBoto3Session:
"""Wrap get_session to enable stubbing."""
return MockBoto3Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
clients=self._boto3_test_client,
profile_name=profile,
region_name=region or self.env.aws_region,
)
class MockRunwayConfig(MutableMap):
"""Mock Runway config object."""
def __init__(self, **kwargs: Any) -> None:
"""Instantiate class."""
super().__init__()
self._kwargs = kwargs
self.deployments = []
self.future = MagicMock()
self.tests = []
self.ignore_git_branch = False
self.runway_version = SpecifierSet(">=1.10", prereleases=True)
self.variables = MutableMap()
# classmethods
self.find_config_file = MagicMock(
name="find_config_file", return_value="./runway.yml"
)
self.load_from_file = MagicMock(name="load_from_file", return_value=self)
def __call__(self, **kwargs: Any) -> MockRunwayConfig:
"""Mock call to return self."""
self._kwargs = kwargs
return self
class MockRunwayContext(RunwayContext):
"""Subclass Runway context object for tests."""
_use_concurrent: bool
def __init__(
self,
*,
command: Optional[RunwayActionTypeDef] = None,
deploy_environment: Any = None,
**_: Any,
) -> None:
"""Instantiate class."""
if not deploy_environment:
deploy_environment = DeployEnvironment(environ={}, explicit_name="test")
super().__init__(command=command, deploy_environment=deploy_environment)
self._boto3_test_client = MutableMap()
self._boto3_test_stubber = MutableMap()
self._use_concurrent = True
def add_stubber(self, service_name: str, region: Optional[str] = None) -> Stubber:
"""Add a stubber to context.
Args:
service_name: The name of a service, e.g. 's3' or 'ec2'.
region: AWS region name.
"""
key = f"{service_name}.{region or self.env.aws_region}"
self._boto3_test_client[key] = boto3.client( # type: ignore
service_name, # type: ignore
region_name=region or self.env.aws_region,
**self.boto3_credentials,
)
self._boto3_test_stubber[key] = Stubber(self._boto3_test_client[key])
return self._boto3_test_stubber[key]
def get_session(
self,
*,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
profile: Optional[str] = None,
region: Optional[str] = None,
) -> MockBoto3Session:
"""Wrap get_session to enable stubbing."""
return MockBoto3Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
clients=self._boto3_test_client,
profile_name=profile,
region_name=region or self.env.aws_region,
)
@property
def use_concurrent(self) -> bool: # pylint: disable=invalid-overridden-method
"""Override property of parent with something that can be set."""
return self._use_concurrent
@use_concurrent.setter # type: ignore
def use_concurrent( # pylint: disable=invalid-overridden-method
self, value: bool
) -> None:
"""Override property of parent with something that can be set.
Args:
value: New value for the attribute.
"""
self._use_concurrent = value
class YamlLoader:
"""Load YAML files from a directory."""
def __init__(
self, root: Path, load_class: Optional[type] = None, load_type: str = "default"
) -> None:
"""Instantiate class.
Args:
root: Root directory.
load_class: Class to use with load method.
load_type: Controls how content is passed to the load_class.
"""
self.load_class = load_class
self.load_type = load_type
root.absolute()
self.root = root
def get(self, file_name: str) -> Any:
"""Get raw YAML file contents.
Args:
file_name: Name of the file to load.
Returns:
Content of the file loaded by PyYAML.
"""
if not file_name.endswith((".yml", ".yaml")):
file_name += ".yml"
content = (self.root / file_name).read_text()
return yaml.safe_load(content)
def load(self, file_name: str) -> Any:
"""Load YAML file contents.
Args:
file_name (str): Name of the file to load.
Returns:
Any
"""
if not self.load_class:
raise ValueError("load_class must be set to use this method")
if self.load_type == "default":
return self.load_class(self.get(file_name))
if self.load_type == "kwargs":
return self.load_class(**self.get(file_name))
raise ValueError(f'invalid load_type; "{self.load_type}"')
class YamlLoaderDeployment(YamlLoader):
"""Load deployment YAML files from a directory."""
def __init__(self, root: Path) -> None:
"""Instantiate class.
Args:
root: Root directory.
"""
super().__init__(root, load_class=RunwayDeploymentDefinition)
def load(self, file_name: str) -> RunwayDeploymentDefinition:
"""Load YAML file contents.
Args:
file_name: Name of the file to load.
"""
return self.load_class.parse_obj(self.get(file_name)) # type: ignore
| """Test classes."""
# pyright: basic, reportIncompatibleMethodOverride=none
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, MutableMapping, Optional, Tuple
import boto3
import yaml
from botocore.client import BaseClient
from botocore.stub import Stubber
from mock import MagicMock
from packaging.specifiers import SpecifierSet
from runway.config.components.runway import RunwayDeploymentDefinition
from runway.context import CfnginContext, RunwayContext
from runway.core.components import DeployEnvironment
from runway.utils import MutableMap
if TYPE_CHECKING:
from pathlib import Path
from runway.config import CfnginConfig
from runway.core.type_defs import RunwayActionTypeDef
class MockBoto3Session:
"""Mock class that acts like a boto3.session.
Must be preloaded with stubbers.
"""
def __init__(
self,
*,
clients: Optional[MutableMap] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
profile_name: Optional[str] = None,
region_name: Optional[str] = None,
):
"""Instantiate class.
Args:
clients: Clients that have already been stubbed.
aws_access_key_id: Same as boto3.Session.
aws_secret_access_key: Same as boto3.Session.
aws_session_token: Same as boto3.Session.
profile_name: Same as boto3.Session.
region_name: Same as boto3.Session.
"""
self._clients = clients or MutableMap()
self._client_calls: Dict[str, Any] = {}
self._session = MagicMock()
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
self.profile_name = profile_name
self.region_name = region_name
def assert_client_called_with(self, service_name: str, **kwargs: Any) -> None:
"""Assert a client was created with the provided kwargs."""
key = f"{service_name}.{kwargs.get('region_name', self.region_name)}"
assert self._client_calls[key] == kwargs
def client(self, service_name: str, **kwargs: Any) -> BaseClient:
"""Return a stubbed client.
Args:
service_name: The name of a service, e.g. 's3' or 'ec2'.
Returns:
Stubbed boto3 client.
Raises:
KeyError: Client was not stubbed from Context before trying to use.
"""
key = f"{service_name}.{kwargs.get('region_name', self.region_name)}"
self._client_calls[key] = kwargs
return self._clients[key]
def register_client(
self, service_name: str, region_name: Optional[str] = None
) -> Tuple[Any, Stubber]:
"""Register a client for the boto3 session.
Args:
service_name: The name of a service, e.g. 's3' or 'ec2'.
region_name: AWS region.
"""
key = f"{service_name}.{region_name or self.region_name}"
client = boto3.client( # type: ignore
service_name, # type: ignore
region_name=region_name or self.region_name,
)
stubber = Stubber(client) # type: ignore
self._clients[key] = client # type: ignore
return client, stubber # type: ignore
def service(self, service_name: str, region_name: Optional[str] = None) -> None:
"""Not implemented."""
raise NotImplementedError
class MockCFNginContext(CfnginContext):
"""Subclass CFNgin context object for tests."""
def __init__(
self,
*,
config_path: Optional[Path] = None,
config: Optional[CfnginConfig] = None,
deploy_environment: Optional[DeployEnvironment] = None,
parameters: Optional[MutableMapping[str, Any]] = None,
force_stacks: Optional[List[str]] = None,
region: Optional[str] = "us-east-1",
stack_names: Optional[List[str]] = None,
**_: Any,
) -> None:
"""Instantiate class."""
self._boto3_test_client = MutableMap()
self._boto3_test_stubber = MutableMap()
# used during init process
self.s3_stubber = self.add_stubber("s3", region=region)
super().__init__(
config_path=config_path,
config=config,
deploy_environment=deploy_environment,
force_stacks=force_stacks,
parameters=parameters,
stack_names=stack_names,
)
def add_stubber(self, service_name: str, region: Optional[str] = None) -> Stubber:
"""Add a stubber to context.
Args:
service_name: The name of a service, e.g. 's3' or 'ec2'.
region: AWS region.
"""
key = f"{service_name}.{region or self.env.aws_region}"
self._boto3_test_client[key] = boto3.client( # type: ignore
service_name, # type: ignore
region_name=region or self.env.aws_region,
)
self._boto3_test_stubber[key] = Stubber(self._boto3_test_client[key])
return self._boto3_test_stubber[key]
def get_session(
self,
*,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
profile: Optional[str] = None,
region: Optional[str] = None,
) -> MockBoto3Session:
"""Wrap get_session to enable stubbing."""
return MockBoto3Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
clients=self._boto3_test_client,
profile_name=profile,
region_name=region or self.env.aws_region,
)
class MockRunwayConfig(MutableMap):
"""Mock Runway config object."""
def __init__(self, **kwargs: Any) -> None:
"""Instantiate class."""
super().__init__()
self._kwargs = kwargs
self.deployments = []
self.future = MagicMock()
self.tests = []
self.ignore_git_branch = False
self.runway_version = SpecifierSet(">=1.10", prereleases=True)
self.variables = MutableMap()
# classmethods
self.find_config_file = MagicMock(
name="find_config_file", return_value="./runway.yml"
)
self.load_from_file = MagicMock(name="load_from_file", return_value=self)
def __call__(self, **kwargs: Any) -> MockRunwayConfig:
"""Mock call to return self."""
self._kwargs = kwargs
return self
class MockRunwayContext(RunwayContext):
"""Subclass Runway context object for tests."""
_use_concurrent: bool
def __init__(
self,
*,
command: Optional[RunwayActionTypeDef] = None,
deploy_environment: Any = None,
**_: Any,
) -> None:
"""Instantiate class."""
if not deploy_environment:
deploy_environment = DeployEnvironment(environ={}, explicit_name="test")
super().__init__(command=command, deploy_environment=deploy_environment)
self._boto3_test_client = MutableMap()
self._boto3_test_stubber = MutableMap()
self._use_concurrent = True
def add_stubber(self, service_name: str, region: Optional[str] = None) -> Stubber:
"""Add a stubber to context.
Args:
service_name: The name of a service, e.g. 's3' or 'ec2'.
region: AWS region name.
"""
key = f"{service_name}.{region or self.env.aws_region}"
self._boto3_test_client[key] = boto3.client( # type: ignore
service_name, # type: ignore
region_name=region or self.env.aws_region,
**self.boto3_credentials,
)
self._boto3_test_stubber[key] = Stubber(self._boto3_test_client[key])
return self._boto3_test_stubber[key]
def get_session(
self,
*,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
profile: Optional[str] = None,
region: Optional[str] = None,
) -> MockBoto3Session:
"""Wrap get_session to enable stubbing."""
return MockBoto3Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
clients=self._boto3_test_client,
profile_name=profile,
region_name=region or self.env.aws_region,
)
@property
def use_concurrent(self) -> bool: # pylint: disable=invalid-overridden-method
"""Override property of parent with something that can be set."""
return self._use_concurrent
@use_concurrent.setter # type: ignore
def use_concurrent( # pylint: disable=invalid-overridden-method
self, value: bool
) -> None:
"""Override property of parent with something that can be set.
Args:
value: New value for the attribute.
"""
self._use_concurrent = value
class YamlLoader:
"""Load YAML files from a directory."""
def __init__(
self, root: Path, load_class: Optional[type] = None, load_type: str = "default"
) -> None:
"""Instantiate class.
Args:
root: Root directory.
load_class: Class to use with load method.
load_type: Controls how content is passed to the load_class.
"""
self.load_class = load_class
self.load_type = load_type
root.absolute()
self.root = root
def get(self, file_name: str) -> Any:
"""Get raw YAML file contents.
Args:
file_name: Name of the file to load.
Returns:
Content of the file loaded by PyYAML.
"""
if not file_name.endswith((".yml", ".yaml")):
file_name += ".yml"
content = (self.root / file_name).read_text()
return yaml.safe_load(content)
def load(self, file_name: str) -> Any:
"""Load YAML file contents.
Args:
file_name (str): Name of the file to load.
Returns:
Any
"""
if not self.load_class:
raise ValueError("load_class must be set to use this method")
if self.load_type == "default":
return self.load_class(self.get(file_name))
if self.load_type == "kwargs":
return self.load_class(**self.get(file_name))
raise ValueError(f'invalid load_type; "{self.load_type}"')
class YamlLoaderDeployment(YamlLoader):
"""Load deployment YAML files from a directory."""
def __init__(self, root: Path) -> None:
"""Instantiate class.
Args:
root: Root directory.
"""
super().__init__(root, load_class=RunwayDeploymentDefinition)
def load(self, file_name: str) -> RunwayDeploymentDefinition:
"""Load YAML file contents.
Args:
file_name: Name of the file to load.
"""
return self.load_class.parse_obj(self.get(file_name)) # type: ignore
|
import numpy as np
import xarray as xr
from IPython.display import display_html
from xarray.core.formatting_html import dataset_repr
from xarray.core.options import OPTIONS as XR_OPTIONS
from .alignment import return_inits_and_verif_dates
from .bias_removal import mean_bias_removal
from .bootstrap import (
bootstrap_hindcast,
bootstrap_perfect_model,
bootstrap_uninit_pm_ensemble_from_control_cftime,
)
from .checks import (
has_dataset,
has_dims,
has_valid_lead_units,
is_xarray,
match_calendars,
match_initialized_dims,
match_initialized_vars,
)
from .constants import CONCAT_KWARGS
from .exceptions import DimensionError, VariableError
from .graphics import plot_ensemble_perfect_model, plot_lead_timeseries_hindcast
from .prediction import (
_apply_metric_at_given_lead,
_get_metric_comparison_dim,
compute_perfect_model,
)
from .reference import compute_persistence
from .smoothing import (
_reset_temporal_axis,
smooth_goddard_2013,
spatial_smoothing_xesmf,
temporal_smoothing,
)
from .utils import convert_time_index
def _display_metadata(self):
"""
This is called in the following case:
```
dp = cp.HindcastEnsemble(dple)
print(dp)
```
"""
SPACE = " "
header = f"<climpred.{type(self).__name__}>"
summary = header + "\nInitialized Ensemble:\n"
summary += SPACE + str(self._datasets["initialized"].data_vars)[18:].strip() + "\n"
if isinstance(self, HindcastEnsemble):
# Prints out observations and associated variables if they exist.
# If not, just write "None".
summary += "Observations:\n"
if any(self._datasets["observations"]):
num_obs = len(self._datasets["observations"].data_vars)
for i in range(1, num_obs + 1):
summary += (
SPACE
+ str(self._datasets["observations"].data_vars)
.split("\n")[i]
.strip()
+ "\n"
)
else:
summary += SPACE + "None\n"
elif isinstance(self, PerfectModelEnsemble):
summary += "Control:\n"
# Prints out control variables if a control is appended. If not,
# just write "None".
if any(self._datasets["control"]):
num_ctrl = len(self._datasets["control"].data_vars)
for i in range(1, num_ctrl + 1):
summary += (
SPACE
+ str(self._datasets["control"].data_vars).split("\n")[i].strip()
+ "\n"
)
else:
summary += SPACE + "None\n"
if any(self._datasets["uninitialized"]):
summary += "Uninitialized:\n"
summary += SPACE + str(self._datasets["uninitialized"].data_vars)[18:].strip()
else:
summary += "Uninitialized:\n"
summary += SPACE + "None"
return summary
def _display_metadata_html(self):
header = f"<h4>climpred.{type(self).__name__}</h4>"
display_html(header, raw=True)
init_repr_str = dataset_repr(self._datasets["initialized"])
init_repr_str = init_repr_str.replace("xarray.Dataset", "Initialized Ensemble")
display_html(init_repr_str, raw=True)
if isinstance(self, HindcastEnsemble):
if any(self._datasets["observations"]):
obs_repr_str = dataset_repr(self._datasets["observations"])
obs_repr_str = obs_repr_str.replace("xarray.Dataset", "Observations")
display_html(obs_repr_str, raw=True)
elif isinstance(self, PerfectModelEnsemble):
if any(self._datasets["control"]):
control_repr_str = dataset_repr(self._datasets["control"])
control_repr_str = control_repr_str.replace(
"xarray.Dataset", "Control Simulation"
)
display_html(control_repr_str, raw=True)
if any(self._datasets["uninitialized"]):
uninit_repr_str = dataset_repr(self._datasets["uninitialized"])
uninit_repr_str = uninit_repr_str.replace("xarray.Dataset", "Uninitialized")
display_html(uninit_repr_str, raw=True)
# better would be to aggregate repr_strs and then all return but this fails
# TypeError: __repr__ returned non-string (type NoneType)
# workaround return empty string
return ""
class PredictionEnsemble:
"""
The main object. This is the superclass of both `PerfectModelEnsemble` and
`HindcastEnsemble`. It is not called directly by a user, but
should house functions that both ensemble types can use.
"""
@is_xarray(1)
def __init__(self, xobj):
if isinstance(xobj, xr.DataArray):
# makes applying prediction functions easier, etc.
xobj = xobj.to_dataset()
has_dims(xobj, ["init", "lead"], "PredictionEnsemble")
# Check that init is int, cftime, or datetime; convert ints or cftime to
# datetime.
xobj = convert_time_index(xobj, "init", "xobj[init]")
# Put this after `convert_time_index` since it assigns 'years' attribute if the
# `init` dimension is a `float` or `int`.
has_valid_lead_units(xobj)
# Add initialized dictionary and reserve sub-dictionary for an uninitialized
# run.
self._datasets = {"initialized": xobj, "uninitialized": {}}
self.kind = "prediction"
self._temporally_smoothed = None
self._is_annual_lead = None
# when you just print it interactively
# https://stackoverflow.com/questions/1535327/how-to-print-objects-of-class-using-print
def __repr__(self):
if XR_OPTIONS["display_style"] == "html":
return _display_metadata_html(self)
else:
return _display_metadata(self)
def plot(self, variable=None, ax=None, show_members=False, cmap=None):
"""Plot datasets from PredictionEnsemble.
Args:
variable (str or None): `variable` to show. Defaults to first in data_vars.
ax (plt.axes): Axis to use in plotting. By default, creates a new axis.
show_members (bool): whether to display all members individually.
Defaults to False.
cmap (str): Name of matplotlib-recognized colorbar. Defaults to `jet` for
`HindcastEnsemble` and `tab10` for `PerfectModelEnsemble`.
Returns:
ax: plt.axes
"""
if self.kind == "hindcast":
if cmap is None:
cmap = "jet"
return plot_lead_timeseries_hindcast(
self, variable=variable, ax=ax, show_members=show_members, cmap=cmap
)
elif self.kind == "perfect":
if cmap is None:
cmap = "tab10"
return plot_ensemble_perfect_model(
self, variable=variable, ax=ax, show_members=show_members, cmap=cmap
)
def _math(self, other, operator):
"""Helper function for __add__, __sub__, __mul__, __truediv__.
Allows math operations with type:
- int
- float
- np.ndarray
- xr.DataArray without new dimensions
- xr.Dataset without new dimensions or variables
"""
assert isinstance(operator, str)
def add(a, b):
return a + b
def sub(a, b):
return a - b
def mul(a, b):
return a * b
def div(a, b):
return a / b
ALLOWED_TYPES_FOR_MATH_OPERATORS = [
int,
float,
np.ndarray,
xr.DataArray,
xr.Dataset,
type(self),
]
OPERATOR_STR = {
"add": "+",
"sub": "-",
"mul": "*",
"div": "/",
}
error_str = f"Cannot use {type(self)} {OPERATOR_STR[operator]} {type(other)}"
# catch undefined types for other
if not isinstance(other, tuple(ALLOWED_TYPES_FOR_MATH_OPERATORS)):
raise TypeError(
f"{error_str} because type {type(other)} not supported. "
f"Please choose from {ALLOWED_TYPES_FOR_MATH_OPERATORS}."
)
# catch other dimensions in other
if isinstance(other, tuple([xr.Dataset, xr.DataArray])):
if not set(other.dims).issubset(self._datasets["initialized"].dims):
raise DimensionError(f"{error_str} containing new dimensions.")
# catch xr.Dataset with different data_vars
if isinstance(other, xr.Dataset):
if list(other.data_vars) != list(self._datasets["initialized"].data_vars):
raise VariableError(
f"{error_str} with new `data_vars`. Please use {type(self)} "
f"{operator} {type(other)} only with same `data_vars`. Found "
f"initialized.data_vars = "
f' {list(self._datasets['initialized'].data_vars)} vs. '
f"other.data_vars = { list(other.data_vars)}."
)
operator = eval(operator)
if isinstance(other, PredictionEnsemble):
# Create temporary copy to modify to avoid inplace operation.
datasets = self._datasets.copy()
for dataset in datasets:
other_dataset = other._datasets[dataset]
# Some pre-allocated entries might be empty, such as 'uninitialized'
if self._datasets[dataset]:
# Loop through observations if there are multiple
if dataset == "observations" and isinstance(
self._datasets[dataset], dict
):
obs_datasets = self._datasets["observations"].copy()
for obs_dataset in obs_datasets:
other_obs_dataset = other._datasets["observations"][
obs_dataset
]
obs_datasets.update(
{
obs_dataset: operator(
obs_datasets[obs_dataset], other_obs_dataset
)
}
)
datasets.update({"observations": obs_datasets})
else:
if datasets[dataset]:
datasets.update(
{dataset: operator(datasets[dataset], other_dataset)}
)
return self._construct_direct(datasets, kind=self.kind)
else:
return self._apply_func(operator, other)
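# Illustrative sketch (hypothetical variable names, added comment): the dunder methods below
# delegate to _math, so arithmetic is applied to every dataset held by the ensemble, e.g.
#   anomalies = hindcast_ensemble - some_climatology_data_array
#   doubled = perfect_model_ensemble * 2
# Operands that introduce new dimensions or new data variables raise DimensionError or
# VariableError, as checked above.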
def __add__(self, other):
return self._math(other, operator="add")
def __sub__(self, other):
return self._math(other, operator="sub")
def __mul__(self, other):
return self._math(other, operator="mul")
def __truediv__(self, other):
return self._math(other, operator="div")
def __getitem__(self, varlist):
"""Allows subsetting data variable from PredictionEnsemble as from xr.Dataset.
Args:
* varlist (list of str, str): list of names or name of data variable(s) to
subselect
"""
if isinstance(varlist, str):
varlist = [varlist]
if not isinstance(varlist, list):
raise ValueError(
"Please subset PredictionEnsemble as you would subset an xr.Dataset "
"with a list or single string of variable name(s), found "
f"{type(varlist)}."
)
def sel_vars(ds, varlist):
return ds[varlist]
return self._apply_func(sel_vars, varlist)
def __getattr__(self, name):
"""Allows for xarray methods to be applied to our prediction objects.
Args:
* name: Function, e.g., .isel() or .sum().
"""
def wrapper(*args, **kwargs):
"""Applies arbitrary function to all datasets in the PredictionEnsemble
object.
Got this from: https://stackoverflow.com/questions/41919499/
how-to-call-undefined-methods-sequentially-in-python-class
"""
def _apply_xr_func(v, name, *args, **kwargs):
"""Handles exceptions in our dictionary comprehension.
In other words, this will skip applying the arbitrary function
to a sub-dataset if a ValueError is thrown. This specifically
targets cases where certain datasets don't have the given
dim that's being called. E.g., ``.isel(lead=0)`` should only
be applied to the initialized dataset.
Reference:
* https://stackoverflow.com/questions/1528237/
how-to-handle-exceptions-in-a-list-comprehensions
"""
try:
return getattr(v, name)(*args, **kwargs)
# ValueError : Cases such as .sum(dim='time'). This doesn't apply
# it to the given dataset if the dimension doesn't exist.
# KeyError : Cases where a function calls the index of a Dataset. Such
# as ds[dim] and the dim doesn't exist as a key.
# DimensionError: This accounts for our custom error when applying
# some stats functions.
except (ValueError, KeyError, DimensionError):
return v
return self._apply_func(_apply_xr_func, name, *args, **kwargs)
return wrapper
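# Illustrative sketch (hypothetical variable name, added comment): __getattr__ forwards
# arbitrary xarray methods to every dataset in the ensemble, e.g.
#   first_lead = ensemble.isel(lead=0)
#   member_sum = ensemble.sum('member')
# Datasets that lack the requested dimension are returned unchanged via the exception
# handling in _apply_xr_func above.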
@classmethod
def _construct_direct(cls, datasets, kind):
"""Shortcut around __init__ for internal use to avoid inplace
operations.
Pulled from xarrray Dataset class.
https://github.com/pydata/xarray/blob/master/xarray/core/dataset.py
"""
obj = object.__new__(cls)
obj._datasets = datasets
obj.kind = kind
return obj
def _apply_func(self, func, *args, **kwargs):
"""Apply a function to all datasets in a `PredictionEnsemble`."""
# Create temporary copy to modify to avoid inplace operation.
datasets = self._datasets.copy()
# More explicit than nested dictionary comprehension.
for outer_k, outer_v in datasets.items():
# If initialized, control, uninitialized and just a singular
# dataset, apply the function directly to it.
if isinstance(outer_v, xr.Dataset):
datasets.update({outer_k: func(outer_v, *args, **kwargs)})
else:
# If a nested dictionary is encountered (i.e., a set of
# observations) apply to each individually.
#
# Similar to the ``add_observations`` method, this only seems to
# avoid inplace operations by copying the nested dictionary
# separately and then updating the main dictionary.
temporary_dataset = self._datasets[outer_k].copy()
for inner_k, inner_v in temporary_dataset.items():
temporary_dataset.update({inner_k: func(inner_v, *args, **kwargs)})
datasets.update({outer_k: temporary_dataset})
# Instantiates new object with the modified datasets.
return self._construct_direct(datasets, kind=self.kind)
def get_initialized(self):
"""Returns the xarray dataset for the initialized ensemble."""
return self._datasets["initialized"]
def get_uninitialized(self):
"""Returns the xarray dataset for the uninitialized ensemble."""
return self._datasets["uninitialized"]
def smooth(self, smooth_kws=None, how="mean", **xesmf_kwargs):
"""Smooth all entries of PredictionEnsemble in the same manner to be
able to still calculate prediction skill afterwards.
Args:
smooth_kws (dict or str): Dictionary to specify the dims to
smooth compatible with
:py:func:`~climpred.smoothing.spatial_smoothing_xesmf` or
:py:func:`~climpred.smoothing.temporal_smoothing`.
Shortcut for Goddard et al. 2013 recommendations:
'goddard2013'. Defaults to None.
how (str): how to smooth temporally. From ['mean','sum']. Defaults to
'mean'.
**xesmf_kwargs (args): kwargs passed to
:py:func:`~climpred.smoothing.spatial_smoothing_xesmf`
Examples:
            >>> PredictionEnsemble.smooth({'lead': 2, 'lat': 5, 'lon': 4})
>>> PredictionEnsemble.smooth('goddard2013')
>>> PredictionEnsemble.smooth({'lon':1, 'lat':1}, method='patch')
>>> PredictionEnsemble.smooth({'lead':2}, how='sum')
"""
if not smooth_kws:
return self
# get proper smoothing function based on smooth args
if isinstance(smooth_kws, str):
if "goddard" in smooth_kws:
if self._is_annual_lead:
smooth_fct = smooth_goddard_2013
tsmooth_kws = {"lead": 4} # default
d_lon_lat_kws = {"lon": 5, "lat": 5} # default
else:
raise ValueError(
"`goddard2013` smoothing only available for annual leads."
)
else:
raise ValueError(
                    'Please choose from the list of available smoothings: \
                    ["goddard2013"]'
)
        # TODO: this actively searches for lon and lat in dims. This part of the code
        # could be made more robust in how it finds these two spatial dimensions,
        # regardless of their names.
elif isinstance(smooth_kws, dict):
non_time_dims = [
dim for dim in smooth_kws.keys() if dim not in ["time", "lead"]
]
if len(non_time_dims) > 0:
non_time_dims = non_time_dims[0]
# goddard when time_dim and lon/lat given
if ("lon" in smooth_kws or "lat" in smooth_kws) and (
"lead" in smooth_kws or "time" in smooth_kws
):
smooth_fct = smooth_goddard_2013
# separate lon, lat keywords into d_lon_lat_kws
d_lon_lat_kws = dict()
tsmooth_kws = dict()
for c in ["lon", "lat"]:
if c in smooth_kws:
d_lon_lat_kws[c] = smooth_kws[c]
else:
tsmooth_kws[c] = smooth_kws[c]
# else only one smoothing operation
elif "lon" in smooth_kws or "lat" in smooth_kws:
smooth_fct = spatial_smoothing_xesmf
d_lon_lat_kws = smooth_kws
tsmooth_kws = None
elif "lead" in smooth_kws or "time" in smooth_kws:
smooth_fct = temporal_smoothing
d_lon_lat_kws = None
tsmooth_kws = smooth_kws
else:
raise ValueError(
                    'Please provide kwargs matching one of the smoothing functions: \
                    ["spatial_smoothing_xesmf", "temporal_smoothing"].'
)
else:
raise ValueError(
"Please provide kwargs as dict or str and not", type(smooth_kws)
)
self = self.map(
smooth_fct,
tsmooth_kws=tsmooth_kws,
d_lon_lat_kws=d_lon_lat_kws,
how=how,
**xesmf_kwargs,
)
if smooth_fct == smooth_goddard_2013 or smooth_fct == temporal_smoothing:
self._temporally_smoothed = tsmooth_kws
return self
class PerfectModelEnsemble(PredictionEnsemble):
"""An object for "perfect model" climate prediction ensembles.
`PerfectModelEnsemble` is a sub-class of `PredictionEnsemble`. It tracks
the control run used to initialize the ensemble for easy computations,
bootstrapping, etc.
This object is built on `xarray` and thus requires the input object to
be an `xarray` Dataset or DataArray.
"""
def __init__(self, xobj):
"""Create a `PerfectModelEnsemble` object by inputting output from the
control run in `xarray` format.
Args:
xobj (xarray object):
decadal prediction ensemble output.
Attributes:
control: Dictionary of control run associated with the initialized
ensemble.
uninitialized: Dictionary of uninitialized run that is
bootstrapped from the initialized run.
"""
super().__init__(xobj)
# Reserve sub-dictionary for the control simulation.
self._datasets.update({"control": {}})
self.kind = "perfect"
def _apply_climpred_function(self, func, input_dict=None, **kwargs):
"""Helper function to loop through observations and apply an arbitrary climpred
function.
Args:
func (function): climpred function to apply to object.
input_dict (dict): dictionary with the following things:
* ensemble: initialized or uninitialized ensemble.
                * control: control Dataset from the PerfectModelEnsemble.
* init (bool): True if the initialized ensemble, False if uninitialized.
"""
ensemble = input_dict["ensemble"]
control = input_dict["control"]
init = input_dict["init"]
init_vars, ctrl_vars = self._vars_to_drop(init=init)
ensemble = ensemble.drop_vars(init_vars)
if control:
control = control.drop_vars(ctrl_vars)
return func(ensemble, control, **kwargs)
def _vars_to_drop(self, init=True):
"""Returns list of variables to drop when comparing
initialized/uninitialized to a control.
This is useful if the two products being compared do not share the same
variables. I.e., if the control has ['SST'] and the initialized has
['SST', 'SALT'], this will return a list with ['SALT'] to be dropped
from the initialized.
Args:
init (bool, default True):
If `True`, check variables on the initialized.
If `False`, check variables on the uninitialized.
Returns:
Lists of variables to drop from the initialized/uninitialized
and control Datasets.
"""
init_str = "initialized" if init else "uninitialized"
init_vars = list(self._datasets[init_str])
# only drop if control present
if self._datasets["control"]:
ctrl_vars = list(self._datasets["control"])
# Make lists of variables to drop that aren't in common
# with one another.
init_vars_to_drop = list(set(init_vars) - set(ctrl_vars))
ctrl_vars_to_drop = list(set(ctrl_vars) - set(init_vars))
else:
init_vars_to_drop, ctrl_vars_to_drop = [], []
return init_vars_to_drop, ctrl_vars_to_drop
@is_xarray(1)
def add_control(self, xobj):
"""Add the control run that initialized the climate prediction
ensemble.
Args:
xobj (xarray object): Dataset/DataArray of the control run.
"""
# NOTE: These should all be decorators.
if isinstance(xobj, xr.DataArray):
xobj = xobj.to_dataset()
match_initialized_dims(self._datasets["initialized"], xobj)
match_initialized_vars(self._datasets["initialized"], xobj)
        # Check that the control's time is int, cftime, or datetime; convert ints or
        # cftime to datetime.
xobj = convert_time_index(xobj, "time", "xobj[init]")
# Check that converted/original cftime calendar is the same as the
# initialized calendar to avoid any alignment errors.
match_calendars(self._datasets["initialized"], xobj, kind2="control")
datasets = self._datasets.copy()
datasets.update({"control": xobj})
return self._construct_direct(datasets, kind="perfect")
def generate_uninitialized(self):
"""Generate an uninitialized ensemble by bootstrapping the
initialized prediction ensemble.
Returns:
Bootstrapped (uninitialized) ensemble as a Dataset.
"""
has_dataset(
self._datasets["control"], "control", "generate an uninitialized ensemble."
)
uninit = bootstrap_uninit_pm_ensemble_from_control_cftime(
self._datasets["initialized"], self._datasets["control"]
)
datasets = self._datasets.copy()
datasets.update({"uninitialized": uninit})
return self._construct_direct(datasets, kind="perfect")
def get_control(self):
"""Returns the control as an xarray dataset."""
return self._datasets["control"]
def verify(
self,
metric=None,
comparison=None,
dim=None,
reference=None,
**metric_kwargs,
):
"""Verify initialized predictions against a configuration of other ensemble members.
.. note::
The configuration of the other ensemble members is based off of the
``comparison`` keyword argument.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to apply in the
comparison. See `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): How to
compare the initialized prediction ensemble with itself, see
`comparisons </comparisons.html>`_.
dim (str, list of str): Dimension(s) over which to apply ``metric``.
``dim`` is passed on to xskillscore.{metric} and includes xskillscore's
``member_dim``. ``dim`` should contain ``member`` when ``comparison``
is probabilistic but should not contain ``member`` when
``comparison=e2c``. Defaults to ``None`` meaning that all dimensions
other than ``lead`` are reduced.
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
**metric_kwargs (optional): Arguments passed to ``metric``.
Returns:
Dataset of comparison results with ``skill`` dimension for verification
results for the initialized ensemble (``init``) and any reference forecasts
verified.
"""
input_dict = {
"ensemble": self._datasets["initialized"],
"control": self._datasets["control"]
if isinstance(self._datasets["control"], xr.Dataset)
else None,
"init": True,
}
result = self._apply_climpred_function(
compute_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
if self._temporally_smoothed:
result = _reset_temporal_axis(result, self._temporally_smoothed, dim="lead")
# compute reference skills
if isinstance(reference, str):
reference = [reference]
if reference:
for r in reference:
ref_compute_kwargs = metric_kwargs.copy()
ref_compute_kwargs["metric"] = metric
if r != "persistence":
ref_compute_kwargs["comparison"] = comparison
ref = getattr(self, f"_compute_{r}")(**ref_compute_kwargs)
result = xr.concat([result, ref], dim="skill", **CONCAT_KWARGS)
result = result.assign_coords(skill=["initialized"] + reference)
return result.squeeze()
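    # Illustrative usage sketch (hypothetical ``pm`` with a control attached; the
    # metric/comparison names are assumed from climpred's documentation and may
    # differ by version):
    #
    # >>> skill = pm.verify(
    # ...     metric="rmse",
    # ...     comparison="m2e",
    # ...     dim=["init", "member"],
    # ...     reference="persistence",
    # ... )
    # >>> skill.sel(skill="initialized")  # initialized skill per lead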
def _compute_uninitialized(
self, metric=None, comparison=None, dim=None, **metric_kwargs
):
"""Verify the bootstrapped uninitialized run against itself.
.. note::
The configuration of the other ensemble members is based off of the
``comparison`` keyword argument.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to apply in the
comparison. See `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): How to
compare the uninitialized against itself, see
`comparisons </comparisons.html>`_.
dim (str, list of str): Dimension(s) over which to apply metric.
``dim`` is passed on to xskillscore.{metric} and includes xskillscore's
``member_dim``. ``dim`` should contain ``member`` when ``comparison``
is probabilistic but should not contain ``member`` when
``comparison=e2c``. Defaults to ``None``, meaning that all dimensions
other than ``lead`` are reduced.
**metric_kwargs (optional): Arguments passed to ``metric``.
Returns:
Dataset with dimension skill containing initialized and reference skill(s).
"""
has_dataset(
self._datasets["uninitialized"],
"uninitialized",
"compute an uninitialized metric",
)
input_dict = {
"ensemble": self._datasets["uninitialized"],
"control": self._datasets["control"]
if isinstance(self._datasets["control"], xr.Dataset)
else None,
"init": False,
}
res = self._apply_climpred_function(
compute_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
if self._temporally_smoothed:
res = _reset_temporal_axis(res, self._temporally_smoothed, dim="lead")
return res
def _compute_persistence(self, metric=None, dim=None, **metric_kwargs):
"""Verify a simple persistence forecast of the control run against itself.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to use when
verifying skill of the persistence forecast. See `metrics </metrics.html>`_.
dim (str, list of str): Dimension(s) over which to apply metric.
``dim`` is passed on to xskillscore.{metric} and includes xskillscore's
``member_dim``. ``dim`` should contain ``member`` when ``comparison``
is probabilistic but should not contain ``member`` when
``comparison=e2c``. Defaults to ``None``, meaning that all dimensions
other than ``lead`` are reduced.
**metric_kwargs (optional): Arguments passed to ``metric``.
Returns:
Dataset of persistence forecast results.
Reference:
* Chapter 8 (Short-Term Climate Prediction) in
Van den Dool, Huug. Empirical methods in short-term climate
prediction. Oxford University Press, 2007.
"""
has_dataset(
self._datasets["control"], "control", "compute a persistence forecast"
)
input_dict = {
"ensemble": self._datasets["initialized"],
"control": self._datasets["control"],
"init": True,
}
if dim is None:
dim = list(self._datasets["initialized"].dims)
for d in ["member", "lead"]:
if d in dim:
dim.remove(d)
res = self._apply_climpred_function(
compute_persistence,
input_dict=input_dict,
metric=metric,
alignment="same_inits",
dim=dim,
**metric_kwargs,
)
if self._temporally_smoothed:
res = _reset_temporal_axis(res, self._temporally_smoothed, dim="lead")
return res
def bootstrap(
self,
metric=None,
comparison=None,
dim=None,
reference=None,
iterations=None,
sig=95,
pers_sig=None,
**metric_kwargs,
):
"""Bootstrap with replacement according to Goddard et al. 2013.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to verify
bootstrapped skill, see `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): Comparison
passed to verify, see `comparisons </comparisons.html>`_.
dim (str, list of str): Dimension(s) over which to apply metric.
``dim`` is passed on to xskillscore.{metric} and includes xskillscore's
``member_dim``. ``dim`` should contain ``member`` when ``comparison``
is probabilistic but should not contain ``member`` when
``comparison=e2c``. Defaults to ``None`` meaning that all dimensions
other than ``lead`` are reduced.
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
If None or empty, returns no p value.
iterations (int): Number of resampling iterations for bootstrapping with
replacement. Recommended >= 500.
sig (int, default 95): Significance level in percent for deciding whether
uninitialized and persistence beat initialized skill.
pers_sig (int): If not ``None``, the separate significance level for
persistence. Defaults to ``None``, or the same significance as ``sig``.
**metric_kwargs (optional): arguments passed to ``metric``.
Returns:
xr.Datasets: with dimensions ``result`` (holding ``verify skill``, ``p``,
``low_ci`` and ``high_ci``) and ``skill`` (holding ``initialized``,
``persistence`` and/or ``uninitialized``):
* result='verify skill', skill='initialized':
mean initialized skill
* result='high_ci', skill='initialized':
high confidence interval boundary for initialized skill
* result='p', skill='uninitialized':
p value of the hypothesis that the
difference of skill between the initialized and
uninitialized simulations is smaller or equal to zero
based on bootstrapping with replacement.
* result='p', skill='persistence':
p value of the hypothesis that the
                    difference of skill between the initialized and persistence
simulations is smaller or equal to zero based on
bootstrapping with replacement.
Reference:
* Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P.
Gonzalez, V. Kharin, et al. “A Verification Framework for
Interannual-to-Decadal Predictions Experiments.” Climate
Dynamics 40, no. 1–2 (January 1, 2013): 245–72.
https://doi.org/10/f4jjvf.
"""
if iterations is None:
raise ValueError("Designate number of bootstrapping `iterations`.")
has_dataset(self._datasets["control"], "control", "iteration")
input_dict = {
"ensemble": self._datasets["initialized"],
"control": self._datasets["control"],
"init": True,
}
return self._apply_climpred_function(
bootstrap_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
dim=dim,
reference=reference,
sig=sig,
iterations=iterations,
pers_sig=pers_sig,
**metric_kwargs,
)
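    # Illustrative usage sketch (hypothetical ``pm``; argument values are assumed
    # examples): bootstrapped skill with confidence intervals and p values.
    # ``iterations`` is required and >= 500 is recommended above.
    #
    # >>> bs = pm.bootstrap(
    # ...     metric="rmse",
    # ...     comparison="m2e",
    # ...     dim=["init", "member"],
    # ...     reference="uninitialized",
    # ...     iterations=500,
    # ... )
    # >>> bs.sel(result="p", skill="uninitialized")  # p value vs. uninitialized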
class HindcastEnsemble(PredictionEnsemble):
"""An object for climate prediction ensembles initialized by a data-like
product.
`HindcastEnsemble` is a sub-class of `PredictionEnsemble`. It tracks a single
verification dataset (i.e., observations) associated with the hindcast ensemble
for easy computation across multiple variables.
This object is built on `xarray` and thus requires the input object to
be an `xarray` Dataset or DataArray.
"""
def __init__(self, xobj):
"""Create a `HindcastEnsemble` object by inputting output from a
prediction ensemble in `xarray` format.
Args:
xobj (xarray object):
decadal prediction ensemble output.
Attributes:
observations: Dictionary of verification data to associate with the decadal
prediction ensemble.
uninitialized: Dictionary of companion (or bootstrapped)
uninitialized ensemble run.
"""
super().__init__(xobj)
self._datasets.update({"observations": {}})
self.kind = "hindcast"
def _apply_climpred_function(self, func, init, **kwargs):
"""Helper function to loop through verification data and apply an arbitrary
climpred function.
Args:
func (function): climpred function to apply to object.
init (bool): Whether or not it's the initialized ensemble.
"""
hind = self._datasets["initialized"]
verif = self._datasets["observations"]
drop_init, drop_obs = self._vars_to_drop(init=init)
return func(hind.drop_vars(drop_init), verif.drop_vars(drop_obs), **kwargs)
def _vars_to_drop(self, init=True):
"""Returns list of variables to drop when comparing
initialized/uninitialized to observations.
This is useful if the two products being compared do not share the same
variables. I.e., if the observations have ['SST'] and the initialized has
['SST', 'SALT'], this will return a list with ['SALT'] to be dropped
from the initialized.
Args:
init (bool, default True):
If ``True``, check variables on the initialized.
If ``False``, check variables on the uninitialized.
Returns:
Lists of variables to drop from the initialized/uninitialized
and observational Datasets.
"""
if init:
init_vars = [var for var in self._datasets["initialized"].data_vars]
else:
init_vars = [var for var in self._datasets["uninitialized"].data_vars]
obs_vars = [var for var in self._datasets["observations"].data_vars]
# Make lists of variables to drop that aren't in common
# with one another.
init_vars_to_drop = list(set(init_vars) - set(obs_vars))
obs_vars_to_drop = list(set(obs_vars) - set(init_vars))
return init_vars_to_drop, obs_vars_to_drop
@is_xarray(1)
def add_observations(self, xobj):
"""Add verification data against which to verify the initialized ensemble.
Args:
xobj (xarray object): Dataset/DataArray to append to the
``HindcastEnsemble`` object.
"""
if isinstance(xobj, xr.DataArray):
xobj = xobj.to_dataset()
match_initialized_dims(self._datasets["initialized"], xobj)
match_initialized_vars(self._datasets["initialized"], xobj)
# Check that time is int, cftime, or datetime; convert ints or cftime to
# datetime.
xobj = convert_time_index(xobj, "time", "xobj[init]")
# Check that converted/original cftime calendar is the same as the
# initialized calendar to avoid any alignment errors.
match_calendars(self._datasets["initialized"], xobj)
datasets = self._datasets.copy()
datasets.update({"observations": xobj})
return self._construct_direct(datasets, kind="hindcast")
@is_xarray(1)
def add_uninitialized(self, xobj):
"""Add a companion uninitialized ensemble for comparison to verification data.
Args:
            xobj (xarray object): Dataset/DataArray of the uninitialized
ensemble.
"""
if isinstance(xobj, xr.DataArray):
xobj = xobj.to_dataset()
match_initialized_dims(self._datasets["initialized"], xobj, uninitialized=True)
match_initialized_vars(self._datasets["initialized"], xobj)
        # Check that the uninitialized run's time is int, cftime, or datetime;
        # convert ints or cftime to datetime.
xobj = convert_time_index(xobj, "time", "xobj[init]")
# Check that converted/original cftime calendar is the same as the
# initialized calendar to avoid any alignment errors.
match_calendars(self._datasets["initialized"], xobj, kind2="uninitialized")
datasets = self._datasets.copy()
datasets.update({"uninitialized": xobj})
return self._construct_direct(datasets, kind="hindcast")
def get_observations(self):
"""Returns xarray Datasets of the observations/verification data.
Returns:
``xarray`` Dataset of observations.
"""
return self._datasets["observations"]
def verify(
self,
reference=None,
metric=None,
comparison=None,
dim=None,
alignment=None,
**metric_kwargs,
):
"""Verifies the initialized ensemble against observations.
.. note::
This will automatically verify against all shared variables
between the initialized ensemble and observations/verification data.
Args:
reference (str): Type of reference forecasts to also verify against the
observations. Choose one or more of ['uninitialized', 'persistence'].
Defaults to None.
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to apply for
verification. see `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): How to
compare to the observations/verification data. See
`comparisons </comparisons.html>`_.
            dim (str, list of str): Dimension(s) to apply metric over. ``dim`` is passed
                on to xskillscore.{metric} and includes xskillscore's ``member_dim``.
                ``dim`` should contain ``member`` when ``comparison`` is probabilistic
                but should not contain ``member`` when ``comparison=e2o``. Must be
                specified explicitly; choose one or more dimensions of the initialized
                ensemble other than ``lead``.
alignment (str): which inits or verification times should be aligned?
- 'maximize': maximize the degrees of freedom by slicing ``hind`` and
``verif`` to a common time frame at each lead.
- 'same_inits': slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- 'same_verif': slice to a common/consistent verification time frame
prior to computing metric. This philosophy follows the thought that
each lead should be based on the same set of verification dates.
**metric_kwargs (optional): arguments passed to ``metric``.
Returns:
Dataset with dimension skill containing initialized and reference skill(s).
"""
# Have to do checks here since this doesn't call `compute_hindcast` directly.
        # Will be refactored when `climpred` migrates to an inheritance-based design.
if dim is None:
viable_dims = dict(self._datasets["initialized"].dims)
viable_dims = list(viable_dims.keys())
if "lead" in viable_dims:
viable_dims.remove("lead")
raise ValueError(
"Designate a dimension to reduce over when applying the "
f"metric. Got {dim}. Choose one or more of {viable_dims}"
)
if ("member" in dim) and comparison not in ["m2o", "m2r"]:
raise ValueError(
"Comparison must equal 'm2o' with dim='member'. "
f"Got comparison {comparison}."
)
if isinstance(reference, str):
reference = [reference]
elif reference is None:
reference = []
def _verify(
hind,
verif,
hist,
reference,
metric,
comparison,
alignment,
dim,
**metric_kwargs,
):
"""Interior verify func to be passed to apply func."""
metric, comparison, dim = _get_metric_comparison_dim(
hind, metric, comparison, dim, kind=self.kind
)
forecast, verif = comparison.function(hind, verif, metric=metric)
forecast = forecast.rename({"init": "time"})
inits, verif_dates = return_inits_and_verif_dates(
forecast,
verif,
alignment,
reference=reference,
hist=hist,
)
metric_over_leads = [
_apply_metric_at_given_lead(
verif,
verif_dates,
lead,
hind=forecast,
hist=hist,
inits=inits,
# Ensure apply metric function returns skill and not reference
# results.
reference=None,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
for lead in forecast["lead"].data
]
result = xr.concat(metric_over_leads, dim="lead", **CONCAT_KWARGS)
result["lead"] = forecast["lead"]
if reference is not None:
if "member" in verif.dims: # if broadcasted before
verif = verif.isel(member=0)
for r in reference:
metric_over_leads = [
_apply_metric_at_given_lead(
verif,
verif_dates,
lead,
hind=forecast,
hist=hist,
inits=inits,
reference=r,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
for lead in forecast["lead"].data
]
ref = xr.concat(metric_over_leads, dim="lead", **CONCAT_KWARGS)
ref["lead"] = forecast["lead"]
# fix to get no member dim for uninitialized e2o skill #477
if (
r == "uninitialized"
and comparison.name == "e2o"
and "member" in ref.dims
):
ref = ref.mean("member")
result = xr.concat([result, ref], dim="skill", **CONCAT_KWARGS)
# rename back to 'init'
if "time" in result.dims:
result = result.rename({"time": "init"})
# Add dimension/coordinate for different references.
result = result.assign_coords(skill=["initialized"] + reference)
return result.squeeze()
has_dataset(
self._datasets["observations"], "observational", "verify a forecast"
)
if "uninitialized" in reference:
has_dataset(
self._datasets["uninitialized"],
"uninitialized",
"compute an uninitialized reference forecast",
)
hist = self._datasets["uninitialized"]
else:
hist = None
res = self._apply_climpred_function(
_verify,
init=True,
metric=metric,
comparison=comparison,
alignment=alignment,
dim=dim,
hist=hist,
reference=reference,
**metric_kwargs,
)
if self._temporally_smoothed:
# TODO: cleanup
if isinstance(res, dict) and not isinstance(res, xr.Dataset):
for res_key, res_item in res.items():
res[res_key] = _reset_temporal_axis(
res_item, self._temporally_smoothed, dim="lead"
)
else:
res = _reset_temporal_axis(res, self._temporally_smoothed, dim="lead")
return res
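    # Illustrative usage sketch (hypothetical ``hc`` with observations attached;
    # the metric name is an assumed example): verify against the observations with
    # one of the alignments documented above.
    #
    # >>> skill = hc.verify(
    # ...     metric="rmse",
    # ...     comparison="e2o",
    # ...     dim="init",
    # ...     alignment="same_verif",
    # ... )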
def bootstrap(
self,
metric=None,
comparison=None,
dim=None,
alignment=None,
reference=None,
iterations=None,
sig=95,
resample_dim="member",
pers_sig=None,
**metric_kwargs,
):
"""Bootstrap with replacement according to Goddard et al. 2013.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to apply for
verification, see `metrics <metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): How to
compare to the observations/verification data, see
`comparisons </comparisons.html>`_.
dim (str, list of str): dimension(s) to apply metric over. ``dim`` is passed
on to xskillscore.{metric} and includes xskillscore's ``member_dim``.
``dim`` should contain ``member`` when ``comparison`` is probabilistic
but should not contain ``member`` when ``comparison='e2o'``. Defaults to
``None`` meaning that all dimensions other than ``lead`` are reduced.
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
If None or empty, returns no p value.
alignment (str): which inits or verification times should be aligned?
- 'maximize': maximize the degrees of freedom by slicing ``init`` and
``verif`` to a common time frame at each lead.
- 'same_inits': slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- 'same_verif': slice to a common/consistent verification time frame
prior to computing metric. This philosophy follows the thought that
each lead should be based on the same set of verification dates.
iterations (int): Number of resampling iterations for bootstrapping with
replacement. Recommended >= 500.
sig (int, default 95): Significance level in percent for deciding whether
uninitialized and persistence beat initialized skill.
resample_dim (str or list): dimension to resample from. default: 'member'.
- 'member': select a different set of members from hind
- 'init': select a different set of initializations from hind
pers_sig (int, default None):
If not None, the separate significance level for persistence.
**metric_kwargs (optional): arguments passed to ``metric``.
Returns:
xr.Datasets: with dimensions ``result`` (holding ``skill``, ``p``,
``low_ci`` and ``high_ci``) and ``skill`` (holding ``initialized``,
``persistence`` and/or ``uninitialized``):
* result='verify skill', skill='initialized':
mean initialized skill
* result='high_ci', skill='initialized':
high confidence interval boundary for initialized skill
* result='p', skill='uninitialized':
p value of the hypothesis that the
difference of skill between the initialized and
uninitialized simulations is smaller or equal to zero
based on bootstrapping with replacement.
* result='p', skill='persistence':
p value of the hypothesis that the
difference of skill between the initialized and persistence
simulations is smaller or equal to zero based on
bootstrapping with replacement.
"""
if iterations is None:
raise ValueError("Designate number of bootstrapping `iterations`.")
# TODO: replace with more computationally efficient classes implementation
return bootstrap_hindcast(
self.get_initialized(),
self.get_uninitialized(),
self.get_observations(),
metric=metric,
comparison=comparison,
dim=dim,
alignment=alignment,
reference=reference,
resample_dim=resample_dim,
sig=sig,
iterations=iterations,
pers_sig=pers_sig,
)
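    # Illustrative usage sketch (hypothetical ``hc``; argument values are assumed
    # examples): bootstrapped hindcast skill, resampling over ``member`` by default.
    #
    # >>> bs = hc.bootstrap(
    # ...     metric="rmse",
    # ...     comparison="e2o",
    # ...     dim="init",
    # ...     alignment="same_verif",
    # ...     reference="persistence",
    # ...     iterations=500,
    # ... )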
def remove_bias(self, alignment, how="mean", cross_validate=True, **metric_kwargs):
"""Calculate and remove bias from
:py:class:`~climpred.classes.HindcastEnsemble`.
Args:
alignment (str): which inits or verification times should be aligned?
- 'maximize': maximize the degrees of freedom by slicing ``hind`` and
``verif`` to a common time frame at each lead.
- 'same_inits': slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- 'same_verif': slice to a common/consistent verification time frame
prior to computing metric. This philosophy follows the thought that
each lead should be based on the same set of verification dates.
how (str or list of str): what kind of bias removal to perform. Select
from ['mean']. Defaults to 'mean'.
            cross_validate (bool): If True, use a leave-one-out mean bias removal that
                excludes the given initialization from the bias calculation. If False,
                include the given initialization in the calculation, which is much
                faster but yields similar skill when the number of initializations is
                large. Defaults to True.
            metric_kwargs (dict): kwargs passed to the bias-removal function.
Returns:
HindcastEnsemble: bias removed HindcastEnsemble.
"""
if isinstance(how, str):
how = [how]
for h in how:
if h == "mean":
func = mean_bias_removal
else:
raise NotImplementedError(f"{h}_bias_removal is not implemented.")
self = func(
self,
alignment=alignment,
cross_validate=cross_validate,
**metric_kwargs,
)
return self
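    # Illustrative usage sketch (hypothetical ``hc``): remove the lead-dependent
    # mean bias before verification; 'mean' is currently the only implemented
    # option, per the check above.
    #
    # >>> hc_unbiased = hc.remove_bias(alignment="same_verif", how="mean")
    # >>> hc_unbiased.verify(
    # ...     metric="rmse", comparison="e2o", dim="init", alignment="same_verif"
    # ... )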
| import numpy as np
import xarray as xr
from IPython.display import display_html
from xarray.core.formatting_html import dataset_repr
from xarray.core.options import OPTIONS as XR_OPTIONS
from .alignment import return_inits_and_verif_dates
from .bias_removal import mean_bias_removal
from .bootstrap import (
bootstrap_hindcast,
bootstrap_perfect_model,
bootstrap_uninit_pm_ensemble_from_control_cftime,
)
from .checks import (
has_dataset,
has_dims,
has_valid_lead_units,
is_xarray,
match_calendars,
match_initialized_dims,
match_initialized_vars,
)
from .constants import CONCAT_KWARGS
from .exceptions import DimensionError, VariableError
from .graphics import plot_ensemble_perfect_model, plot_lead_timeseries_hindcast
from .prediction import (
_apply_metric_at_given_lead,
_get_metric_comparison_dim,
compute_perfect_model,
)
from .reference import compute_persistence
from .smoothing import (
_reset_temporal_axis,
smooth_goddard_2013,
spatial_smoothing_xesmf,
temporal_smoothing,
)
from .utils import convert_time_index
def _display_metadata(self):
"""
This is called in the following case:
```
dp = cp.HindcastEnsemble(dple)
print(dp)
```
"""
SPACE = " "
header = f"<climpred.{type(self).__name__}>"
summary = header + "\nInitialized Ensemble:\n"
summary += SPACE + str(self._datasets["initialized"].data_vars)[18:].strip() + "\n"
if isinstance(self, HindcastEnsemble):
# Prints out observations and associated variables if they exist.
# If not, just write "None".
summary += "Observations:\n"
if any(self._datasets["observations"]):
num_obs = len(self._datasets["observations"].data_vars)
for i in range(1, num_obs + 1):
summary += (
SPACE
+ str(self._datasets["observations"].data_vars)
.split("\n")[i]
.strip()
+ "\n"
)
else:
summary += SPACE + "None\n"
elif isinstance(self, PerfectModelEnsemble):
summary += "Control:\n"
# Prints out control variables if a control is appended. If not,
# just write "None".
if any(self._datasets["control"]):
num_ctrl = len(self._datasets["control"].data_vars)
for i in range(1, num_ctrl + 1):
summary += (
SPACE
+ str(self._datasets["control"].data_vars).split("\n")[i].strip()
+ "\n"
)
else:
summary += SPACE + "None\n"
if any(self._datasets["uninitialized"]):
summary += "Uninitialized:\n"
summary += SPACE + str(self._datasets["uninitialized"].data_vars)[18:].strip()
else:
summary += "Uninitialized:\n"
summary += SPACE + "None"
return summary
def _display_metadata_html(self):
header = f"<h4>climpred.{type(self).__name__}</h4>"
display_html(header, raw=True)
init_repr_str = dataset_repr(self._datasets["initialized"])
init_repr_str = init_repr_str.replace("xarray.Dataset", "Initialized Ensemble")
display_html(init_repr_str, raw=True)
if isinstance(self, HindcastEnsemble):
if any(self._datasets["observations"]):
obs_repr_str = dataset_repr(self._datasets["observations"])
obs_repr_str = obs_repr_str.replace("xarray.Dataset", "Observations")
display_html(obs_repr_str, raw=True)
elif isinstance(self, PerfectModelEnsemble):
if any(self._datasets["control"]):
control_repr_str = dataset_repr(self._datasets["control"])
control_repr_str = control_repr_str.replace(
"xarray.Dataset", "Control Simulation"
)
display_html(control_repr_str, raw=True)
if any(self._datasets["uninitialized"]):
uninit_repr_str = dataset_repr(self._datasets["uninitialized"])
uninit_repr_str = uninit_repr_str.replace("xarray.Dataset", "Uninitialized")
display_html(uninit_repr_str, raw=True)
    # It would be better to aggregate the repr strings and return them all at once,
    # but that raises `TypeError: __repr__ returned non-string (type NoneType)`.
    # As a workaround, return an empty string.
return ""
class PredictionEnsemble:
"""
    The main object. This is the superclass of both `PerfectModelEnsemble` and
    `HindcastEnsemble`. It is not meant to be instantiated directly by a user;
    it houses functionality that both ensemble types can use.
"""
@is_xarray(1)
def __init__(self, xobj):
if isinstance(xobj, xr.DataArray):
# makes applying prediction functions easier, etc.
xobj = xobj.to_dataset()
has_dims(xobj, ["init", "lead"], "PredictionEnsemble")
# Check that init is int, cftime, or datetime; convert ints or cftime to
# datetime.
xobj = convert_time_index(xobj, "init", "xobj[init]")
# Put this after `convert_time_index` since it assigns 'years' attribute if the
# `init` dimension is a `float` or `int`.
has_valid_lead_units(xobj)
# Add initialized dictionary and reserve sub-dictionary for an uninitialized
# run.
self._datasets = {"initialized": xobj, "uninitialized": {}}
self.kind = "prediction"
self._temporally_smoothed = None
self._is_annual_lead = None
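    # Illustrative construction sketch (hypothetical data): the input must carry
    # ``init`` and ``lead`` dimensions with valid lead units, per the checks above.
    # PredictionEnsemble itself is only instantiated through its subclasses.
    #
    # >>> import xarray as xr
    # >>> ds = xr.Dataset(
    # ...     {"SST": (("init", "lead", "member"), data)},   # ``data``: placeholder array
    # ...     coords={"init": inits, "lead": [1, 2, 3], "member": [1, 2, 3]},
    # ... )
    # >>> ds["lead"].attrs["units"] = "years"   # assumed valid lead unit
    # >>> hc = HindcastEnsemble(ds)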
    # Called when the object is printed or displayed interactively.
# https://stackoverflow.com/questions/1535327/how-to-print-objects-of-class-using-print
def __repr__(self):
if XR_OPTIONS["display_style"] == "html":
return _display_metadata_html(self)
else:
return _display_metadata(self)
def plot(self, variable=None, ax=None, show_members=False, cmap=None):
"""Plot datasets from PredictionEnsemble.
Args:
variable (str or None): `variable` to show. Defaults to first in data_vars.
ax (plt.axes): Axis to use in plotting. By default, creates a new axis.
show_members (bool): whether to display all members individually.
Defaults to False.
cmap (str): Name of matplotlib-recognized colorbar. Defaults to `jet` for
`HindcastEnsemble` and `tab10` for `PerfectModelEnsemble`.
Returns:
ax: plt.axes
"""
if self.kind == "hindcast":
if cmap is None:
cmap = "jet"
return plot_lead_timeseries_hindcast(
self, variable=variable, ax=ax, show_members=show_members, cmap=cmap
)
elif self.kind == "perfect":
if cmap is None:
cmap = "tab10"
return plot_ensemble_perfect_model(
self, variable=variable, ax=ax, show_members=show_members, cmap=cmap
)
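    # Illustrative usage sketch (hypothetical ``hc``): quick-look plot of the first
    # data variable, showing individual members.
    #
    # >>> ax = hc.plot(show_members=True)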
def _math(self, other, operator):
"""Helper function for __add__, __sub__, __mul__, __truediv__.
Allows math operations with type:
- int
- float
- np.ndarray
- xr.DataArray without new dimensions
- xr.Dataset without new dimensions or variables
"""
assert isinstance(operator, str)
def add(a, b):
return a + b
def sub(a, b):
return a - b
def mul(a, b):
return a * b
def div(a, b):
return a / b
ALLOWED_TYPES_FOR_MATH_OPERATORS = [
int,
float,
np.ndarray,
xr.DataArray,
xr.Dataset,
type(self),
]
OPERATOR_STR = {
"add": "+",
"sub": "-",
"mul": "*",
"div": "/",
}
error_str = f"Cannot use {type(self)} {OPERATOR_STR[operator]} {type(other)}"
# catch undefined types for other
if not isinstance(other, tuple(ALLOWED_TYPES_FOR_MATH_OPERATORS)):
raise TypeError(
f"{error_str} because type {type(other)} not supported. "
f"Please choose from {ALLOWED_TYPES_FOR_MATH_OPERATORS}."
)
# catch other dimensions in other
if isinstance(other, tuple([xr.Dataset, xr.DataArray])):
if not set(other.dims).issubset(self._datasets["initialized"].dims):
raise DimensionError(f"{error_str} containing new dimensions.")
# catch xr.Dataset with different data_vars
if isinstance(other, xr.Dataset):
if list(other.data_vars) != list(self._datasets["initialized"].data_vars):
raise VariableError(
f"{error_str} with new `data_vars`. Please use {type(self)} "
f"{operator} {type(other)} only with same `data_vars`. Found "
f"initialized.data_vars = "
f' {list(self._datasets["initialized"].data_vars)} vs. '
f"other.data_vars = { list(other.data_vars)}."
)
operator = eval(operator)
if isinstance(other, PredictionEnsemble):
# Create temporary copy to modify to avoid inplace operation.
datasets = self._datasets.copy()
for dataset in datasets:
other_dataset = other._datasets[dataset]
# Some pre-allocated entries might be empty, such as 'uninitialized'
if self._datasets[dataset]:
# Loop through observations if there are multiple
if dataset == "observations" and isinstance(
self._datasets[dataset], dict
):
obs_datasets = self._datasets["observations"].copy()
for obs_dataset in obs_datasets:
other_obs_dataset = other._datasets["observations"][
obs_dataset
]
obs_datasets.update(
{
obs_dataset: operator(
obs_datasets[obs_dataset], other_obs_dataset
)
}
)
datasets.update({"observations": obs_datasets})
else:
if datasets[dataset]:
datasets.update(
{dataset: operator(datasets[dataset], other_dataset)}
)
return self._construct_direct(datasets, kind=self.kind)
else:
return self._apply_func(operator, other)
def __add__(self, other):
return self._math(other, operator="add")
def __sub__(self, other):
return self._math(other, operator="sub")
def __mul__(self, other):
return self._math(other, operator="mul")
def __truediv__(self, other):
return self._math(other, operator="div")
def __getitem__(self, varlist):
"""Allows subsetting data variable from PredictionEnsemble as from xr.Dataset.
Args:
* varlist (list of str, str): list of names or name of data variable(s) to
subselect
"""
if isinstance(varlist, str):
varlist = [varlist]
if not isinstance(varlist, list):
raise ValueError(
"Please subset PredictionEnsemble as you would subset an xr.Dataset "
"with a list or single string of variable name(s), found "
f"{type(varlist)}."
)
def sel_vars(ds, varlist):
return ds[varlist]
return self._apply_func(sel_vars, varlist)
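    # Illustrative usage sketch (hypothetical variable names): subsetting variables
    # works like on an xr.Dataset and is applied to every dataset in the object.
    #
    # >>> hc_sst = hc["SST"]
    # >>> hc_subset = hc[["SST", "SALT"]]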
def __getattr__(self, name):
"""Allows for xarray methods to be applied to our prediction objects.
Args:
* name: Function, e.g., .isel() or .sum().
"""
def wrapper(*args, **kwargs):
"""Applies arbitrary function to all datasets in the PredictionEnsemble
object.
Got this from: https://stackoverflow.com/questions/41919499/
how-to-call-undefined-methods-sequentially-in-python-class
"""
def _apply_xr_func(v, name, *args, **kwargs):
"""Handles exceptions in our dictionary comprehension.
In other words, this will skip applying the arbitrary function
to a sub-dataset if a ValueError is thrown. This specifically
targets cases where certain datasets don't have the given
dim that's being called. E.g., ``.isel(lead=0)`` should only
be applied to the initialized dataset.
Reference:
* https://stackoverflow.com/questions/1528237/
how-to-handle-exceptions-in-a-list-comprehensions
"""
try:
return getattr(v, name)(*args, **kwargs)
# ValueError : Cases such as .sum(dim='time'). This doesn't apply
# it to the given dataset if the dimension doesn't exist.
# KeyError : Cases where a function calls the index of a Dataset. Such
# as ds[dim] and the dim doesn't exist as a key.
# DimensionError: This accounts for our custom error when applying
# some stats functions.
except (ValueError, KeyError, DimensionError):
return v
return self._apply_func(_apply_xr_func, name, *args, **kwargs)
return wrapper
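    # Illustrative usage sketch (hypothetical ``hc``): unknown attributes are
    # forwarded to xarray, so standard Dataset methods apply to every dataset held
    # by the object; datasets missing the requested dimension are skipped by the
    # exception handling above.
    #
    # >>> hc_first_lead = hc.isel(lead=0)
    # >>> hc_recent = hc.sel(init=slice("1960", "2000"))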
@classmethod
def _construct_direct(cls, datasets, kind):
"""Shortcut around __init__ for internal use to avoid inplace
operations.
        Pulled from the xarray Dataset class.
https://github.com/pydata/xarray/blob/master/xarray/core/dataset.py
"""
obj = object.__new__(cls)
obj._datasets = datasets
obj.kind = kind
return obj
def _apply_func(self, func, *args, **kwargs):
"""Apply a function to all datasets in a `PredictionEnsemble`."""
# Create temporary copy to modify to avoid inplace operation.
datasets = self._datasets.copy()
# More explicit than nested dictionary comprehension.
for outer_k, outer_v in datasets.items():
            # If the entry (initialized, control, or uninitialized) is a single
            # xr.Dataset, apply the function to it directly.
if isinstance(outer_v, xr.Dataset):
datasets.update({outer_k: func(outer_v, *args, **kwargs)})
else:
# If a nested dictionary is encountered (i.e., a set of
# observations) apply to each individually.
#
# Similar to the ``add_observations`` method, this only seems to
# avoid inplace operations by copying the nested dictionary
# separately and then updating the main dictionary.
temporary_dataset = self._datasets[outer_k].copy()
for inner_k, inner_v in temporary_dataset.items():
temporary_dataset.update({inner_k: func(inner_v, *args, **kwargs)})
datasets.update({outer_k: temporary_dataset})
# Instantiates new object with the modified datasets.
return self._construct_direct(datasets, kind=self.kind)
def get_initialized(self):
"""Returns the xarray dataset for the initialized ensemble."""
return self._datasets["initialized"]
def get_uninitialized(self):
"""Returns the xarray dataset for the uninitialized ensemble."""
return self._datasets["uninitialized"]
def smooth(self, smooth_kws=None, how="mean", **xesmf_kwargs):
"""Smooth all entries of PredictionEnsemble in the same manner to be
able to still calculate prediction skill afterwards.
Args:
smooth_kws (dict or str): Dictionary to specify the dims to
smooth compatible with
:py:func:`~climpred.smoothing.spatial_smoothing_xesmf` or
:py:func:`~climpred.smoothing.temporal_smoothing`.
Shortcut for Goddard et al. 2013 recommendations:
'goddard2013'. Defaults to None.
how (str): how to smooth temporally. From ['mean','sum']. Defaults to
'mean'.
**xesmf_kwargs (args): kwargs passed to
:py:func:`~climpred.smoothing.spatial_smoothing_xesmf`
Examples:
            >>> PredictionEnsemble.smooth({'lead': 2, 'lat': 5, 'lon': 4})
>>> PredictionEnsemble.smooth('goddard2013')
>>> PredictionEnsemble.smooth({'lon':1, 'lat':1}, method='patch')
>>> PredictionEnsemble.smooth({'lead':2}, how='sum')
"""
if not smooth_kws:
return self
# get proper smoothing function based on smooth args
if isinstance(smooth_kws, str):
if "goddard" in smooth_kws:
if self._is_annual_lead:
smooth_fct = smooth_goddard_2013
tsmooth_kws = {"lead": 4} # default
d_lon_lat_kws = {"lon": 5, "lat": 5} # default
else:
raise ValueError(
"`goddard2013` smoothing only available for annual leads."
)
else:
raise ValueError(
                    'Please choose from the list of available smoothings: \
                    ["goddard2013"]'
)
        # TODO: this actively searches for lon and lat in dims. This part of the code
        # could be made more robust in how it finds these two spatial dimensions,
        # regardless of their names.
elif isinstance(smooth_kws, dict):
non_time_dims = [
dim for dim in smooth_kws.keys() if dim not in ["time", "lead"]
]
if len(non_time_dims) > 0:
non_time_dims = non_time_dims[0]
# goddard when time_dim and lon/lat given
if ("lon" in smooth_kws or "lat" in smooth_kws) and (
"lead" in smooth_kws or "time" in smooth_kws
):
smooth_fct = smooth_goddard_2013
# separate lon, lat keywords into d_lon_lat_kws
d_lon_lat_kws = dict()
tsmooth_kws = dict()
for c in ["lon", "lat"]:
if c in smooth_kws:
d_lon_lat_kws[c] = smooth_kws[c]
else:
tsmooth_kws[c] = smooth_kws[c]
# else only one smoothing operation
elif "lon" in smooth_kws or "lat" in smooth_kws:
smooth_fct = spatial_smoothing_xesmf
d_lon_lat_kws = smooth_kws
tsmooth_kws = None
elif "lead" in smooth_kws or "time" in smooth_kws:
smooth_fct = temporal_smoothing
d_lon_lat_kws = None
tsmooth_kws = smooth_kws
else:
raise ValueError(
                    'Please provide kwargs matching one of the smoothing functions: \
                    ["spatial_smoothing_xesmf", "temporal_smoothing"].'
)
else:
raise ValueError(
"Please provide kwargs as dict or str and not", type(smooth_kws)
)
self = self.map(
smooth_fct,
tsmooth_kws=tsmooth_kws,
d_lon_lat_kws=d_lon_lat_kws,
how=how,
**xesmf_kwargs,
)
if smooth_fct == smooth_goddard_2013 or smooth_fct == temporal_smoothing:
self._temporally_smoothed = tsmooth_kws
return self
class PerfectModelEnsemble(PredictionEnsemble):
"""An object for "perfect model" climate prediction ensembles.
`PerfectModelEnsemble` is a sub-class of `PredictionEnsemble`. It tracks
the control run used to initialize the ensemble for easy computations,
bootstrapping, etc.
This object is built on `xarray` and thus requires the input object to
be an `xarray` Dataset or DataArray.
"""
def __init__(self, xobj):
"""Create a `PerfectModelEnsemble` object by inputting output from the
control run in `xarray` format.
Args:
xobj (xarray object):
decadal prediction ensemble output.
Attributes:
control: Dictionary of control run associated with the initialized
ensemble.
uninitialized: Dictionary of uninitialized run that is
bootstrapped from the initialized run.
"""
super().__init__(xobj)
# Reserve sub-dictionary for the control simulation.
self._datasets.update({"control": {}})
self.kind = "perfect"
def _apply_climpred_function(self, func, input_dict=None, **kwargs):
"""Helper function to loop through observations and apply an arbitrary climpred
function.
Args:
func (function): climpred function to apply to object.
input_dict (dict): dictionary with the following things:
* ensemble: initialized or uninitialized ensemble.
                * control: control Dataset from the PerfectModelEnsemble.
* init (bool): True if the initialized ensemble, False if uninitialized.
"""
ensemble = input_dict["ensemble"]
control = input_dict["control"]
init = input_dict["init"]
init_vars, ctrl_vars = self._vars_to_drop(init=init)
ensemble = ensemble.drop_vars(init_vars)
if control:
control = control.drop_vars(ctrl_vars)
return func(ensemble, control, **kwargs)
def _vars_to_drop(self, init=True):
"""Returns list of variables to drop when comparing
initialized/uninitialized to a control.
This is useful if the two products being compared do not share the same
variables. I.e., if the control has ['SST'] and the initialized has
['SST', 'SALT'], this will return a list with ['SALT'] to be dropped
from the initialized.
Args:
init (bool, default True):
If `True`, check variables on the initialized.
If `False`, check variables on the uninitialized.
Returns:
Lists of variables to drop from the initialized/uninitialized
and control Datasets.
"""
init_str = "initialized" if init else "uninitialized"
init_vars = list(self._datasets[init_str])
# only drop if control present
if self._datasets["control"]:
ctrl_vars = list(self._datasets["control"])
# Make lists of variables to drop that aren't in common
# with one another.
init_vars_to_drop = list(set(init_vars) - set(ctrl_vars))
ctrl_vars_to_drop = list(set(ctrl_vars) - set(init_vars))
else:
init_vars_to_drop, ctrl_vars_to_drop = [], []
return init_vars_to_drop, ctrl_vars_to_drop
@is_xarray(1)
def add_control(self, xobj):
"""Add the control run that initialized the climate prediction
ensemble.
Args:
xobj (xarray object): Dataset/DataArray of the control run.
"""
# NOTE: These should all be decorators.
if isinstance(xobj, xr.DataArray):
xobj = xobj.to_dataset()
match_initialized_dims(self._datasets["initialized"], xobj)
match_initialized_vars(self._datasets["initialized"], xobj)
        # Check that the control's time is int, cftime, or datetime; convert ints or
        # cftime to datetime.
xobj = convert_time_index(xobj, "time", "xobj[init]")
# Check that converted/original cftime calendar is the same as the
# initialized calendar to avoid any alignment errors.
match_calendars(self._datasets["initialized"], xobj, kind2="control")
datasets = self._datasets.copy()
datasets.update({"control": xobj})
return self._construct_direct(datasets, kind="perfect")
def generate_uninitialized(self):
"""Generate an uninitialized ensemble by bootstrapping the
initialized prediction ensemble.
Returns:
Bootstrapped (uninitialized) ensemble as a Dataset.
"""
has_dataset(
self._datasets["control"], "control", "generate an uninitialized ensemble."
)
uninit = bootstrap_uninit_pm_ensemble_from_control_cftime(
self._datasets["initialized"], self._datasets["control"]
)
datasets = self._datasets.copy()
datasets.update({"uninitialized": uninit})
return self._construct_direct(datasets, kind="perfect")
def get_control(self):
"""Returns the control as an xarray dataset."""
return self._datasets["control"]
def verify(
self,
metric=None,
comparison=None,
dim=None,
reference=None,
**metric_kwargs,
):
"""Verify initialized predictions against a configuration of other ensemble members.
.. note::
The configuration of the other ensemble members is based off of the
``comparison`` keyword argument.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to apply in the
comparison. See `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): How to
compare the initialized prediction ensemble with itself, see
`comparisons </comparisons.html>`_.
dim (str, list of str): Dimension(s) over which to apply ``metric``.
``dim`` is passed on to xskillscore.{metric} and includes xskillscore's
``member_dim``. ``dim`` should contain ``member`` when ``comparison``
is probabilistic but should not contain ``member`` when
``comparison=e2c``. Defaults to ``None`` meaning that all dimensions
other than ``lead`` are reduced.
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
**metric_kwargs (optional): Arguments passed to ``metric``.
Returns:
Dataset of comparison results with ``skill`` dimension for verification
results for the initialized ensemble (``init``) and any reference forecasts
verified.
"""
input_dict = {
"ensemble": self._datasets["initialized"],
"control": self._datasets["control"]
if isinstance(self._datasets["control"], xr.Dataset)
else None,
"init": True,
}
result = self._apply_climpred_function(
compute_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
if self._temporally_smoothed:
result = _reset_temporal_axis(result, self._temporally_smoothed, dim="lead")
# compute reference skills
if isinstance(reference, str):
reference = [reference]
if reference:
for r in reference:
ref_compute_kwargs = metric_kwargs.copy()
ref_compute_kwargs["metric"] = metric
if r != "persistence":
ref_compute_kwargs["comparison"] = comparison
ref = getattr(self, f"_compute_{r}")(**ref_compute_kwargs)
result = xr.concat([result, ref], dim="skill", **CONCAT_KWARGS)
result = result.assign_coords(skill=["initialized"] + reference)
return result.squeeze()
def _compute_uninitialized(
self, metric=None, comparison=None, dim=None, **metric_kwargs
):
"""Verify the bootstrapped uninitialized run against itself.
.. note::
The configuration of the other ensemble members is based off of the
``comparison`` keyword argument.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to apply in the
comparison. See `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): How to
compare the uninitialized against itself, see
`comparisons </comparisons.html>`_.
dim (str, list of str): Dimension(s) over which to apply metric.
``dim`` is passed on to xskillscore.{metric} and includes xskillscore's
``member_dim``. ``dim`` should contain ``member`` when ``comparison``
is probabilistic but should not contain ``member`` when
``comparison=e2c``. Defaults to ``None``, meaning that all dimensions
other than ``lead`` are reduced.
**metric_kwargs (optional): Arguments passed to ``metric``.
Returns:
Dataset with dimension skill containing initialized and reference skill(s).
"""
has_dataset(
self._datasets["uninitialized"],
"uninitialized",
"compute an uninitialized metric",
)
input_dict = {
"ensemble": self._datasets["uninitialized"],
"control": self._datasets["control"]
if isinstance(self._datasets["control"], xr.Dataset)
else None,
"init": False,
}
res = self._apply_climpred_function(
compute_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
if self._temporally_smoothed:
res = _reset_temporal_axis(res, self._temporally_smoothed, dim="lead")
return res
def _compute_persistence(self, metric=None, dim=None, **metric_kwargs):
"""Verify a simple persistence forecast of the control run against itself.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to use when
verifying skill of the persistence forecast. See `metrics </metrics.html>`_.
dim (str, list of str): Dimension(s) over which to apply metric.
``dim`` is passed on to xskillscore.{metric} and includes xskillscore's
``member_dim``. ``dim`` should contain ``member`` when ``comparison``
is probabilistic but should not contain ``member`` when
``comparison=e2c``. Defaults to ``None``, meaning that all dimensions
other than ``lead`` are reduced.
**metric_kwargs (optional): Arguments passed to ``metric``.
Returns:
Dataset of persistence forecast results.
Reference:
* Chapter 8 (Short-Term Climate Prediction) in
Van den Dool, Huug. Empirical methods in short-term climate
prediction. Oxford University Press, 2007.
"""
has_dataset(
self._datasets["control"], "control", "compute a persistence forecast"
)
input_dict = {
"ensemble": self._datasets["initialized"],
"control": self._datasets["control"],
"init": True,
}
if dim is None:
dim = list(self._datasets["initialized"].dims)
for d in ["member", "lead"]:
if d in dim:
dim.remove(d)
res = self._apply_climpred_function(
compute_persistence,
input_dict=input_dict,
metric=metric,
alignment="same_inits",
dim=dim,
**metric_kwargs,
)
if self._temporally_smoothed:
res = _reset_temporal_axis(res, self._temporally_smoothed, dim="lead")
return res
def bootstrap(
self,
metric=None,
comparison=None,
dim=None,
reference=None,
iterations=None,
sig=95,
pers_sig=None,
**metric_kwargs,
):
"""Bootstrap with replacement according to Goddard et al. 2013.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to verify
bootstrapped skill, see `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): Comparison
passed to verify, see `comparisons </comparisons.html>`_.
dim (str, list of str): Dimension(s) over which to apply metric.
``dim`` is passed on to xskillscore.{metric} and includes xskillscore's
``member_dim``. ``dim`` should contain ``member`` when ``comparison``
is probabilistic but should not contain ``member`` when
``comparison=e2c``. Defaults to ``None`` meaning that all dimensions
other than ``lead`` are reduced.
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
If None or empty, returns no p value.
iterations (int): Number of resampling iterations for bootstrapping with
replacement. Recommended >= 500.
sig (int, default 95): Significance level in percent for deciding whether
uninitialized and persistence beat initialized skill.
pers_sig (int): If not ``None``, the separate significance level for
persistence. Defaults to ``None``, or the same significance as ``sig``.
**metric_kwargs (optional): arguments passed to ``metric``.
Returns:
xr.Datasets: with dimensions ``result`` (holding ``verify skill``, ``p``,
``low_ci`` and ``high_ci``) and ``skill`` (holding ``initialized``,
``persistence`` and/or ``uninitialized``):
* result='verify skill', skill='initialized':
mean initialized skill
* result='high_ci', skill='initialized':
high confidence interval boundary for initialized skill
* result='p', skill='uninitialized':
p value of the hypothesis that the
difference of skill between the initialized and
uninitialized simulations is smaller or equal to zero
based on bootstrapping with replacement.
* result='p', skill='persistence':
p value of the hypothesis that the
                    difference of skill between the initialized and persistence
simulations is smaller or equal to zero based on
bootstrapping with replacement.
Reference:
* Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P.
Gonzalez, V. Kharin, et al. “A Verification Framework for
Interannual-to-Decadal Predictions Experiments.” Climate
Dynamics 40, no. 1–2 (January 1, 2013): 245–72.
https://doi.org/10/f4jjvf.
"""
if iterations is None:
raise ValueError("Designate number of bootstrapping `iterations`.")
has_dataset(self._datasets["control"], "control", "iteration")
input_dict = {
"ensemble": self._datasets["initialized"],
"control": self._datasets["control"],
"init": True,
}
return self._apply_climpred_function(
bootstrap_perfect_model,
input_dict=input_dict,
metric=metric,
comparison=comparison,
dim=dim,
reference=reference,
sig=sig,
iterations=iterations,
pers_sig=pers_sig,
**metric_kwargs,
)
class HindcastEnsemble(PredictionEnsemble):
"""An object for climate prediction ensembles initialized by a data-like
product.
`HindcastEnsemble` is a sub-class of `PredictionEnsemble`. It tracks a single
verification dataset (i.e., observations) associated with the hindcast ensemble
for easy computation across multiple variables.
This object is built on `xarray` and thus requires the input object to
be an `xarray` Dataset or DataArray.
"""
def __init__(self, xobj):
"""Create a `HindcastEnsemble` object by inputting output from a
prediction ensemble in `xarray` format.
Args:
xobj (xarray object):
decadal prediction ensemble output.
Attributes:
observations: Dictionary of verification data to associate with the decadal
prediction ensemble.
uninitialized: Dictionary of companion (or bootstrapped)
uninitialized ensemble run.
"""
super().__init__(xobj)
self._datasets.update({"observations": {}})
self.kind = "hindcast"
def _apply_climpred_function(self, func, init, **kwargs):
"""Helper function to loop through verification data and apply an arbitrary
climpred function.
Args:
func (function): climpred function to apply to object.
init (bool): Whether or not it's the initialized ensemble.
"""
hind = self._datasets["initialized"]
verif = self._datasets["observations"]
drop_init, drop_obs = self._vars_to_drop(init=init)
return func(hind.drop_vars(drop_init), verif.drop_vars(drop_obs), **kwargs)
def _vars_to_drop(self, init=True):
"""Returns list of variables to drop when comparing
initialized/uninitialized to observations.
This is useful if the two products being compared do not share the same
        variables. E.g., if the observations have ['SST'] and the initialized has
['SST', 'SALT'], this will return a list with ['SALT'] to be dropped
from the initialized.
Args:
init (bool, default True):
If ``True``, check variables on the initialized.
If ``False``, check variables on the uninitialized.
Returns:
Lists of variables to drop from the initialized/uninitialized
and observational Datasets.
"""
if init:
init_vars = [var for var in self._datasets["initialized"].data_vars]
else:
init_vars = [var for var in self._datasets["uninitialized"].data_vars]
obs_vars = [var for var in self._datasets["observations"].data_vars]
# Make lists of variables to drop that aren't in common
# with one another.
init_vars_to_drop = list(set(init_vars) - set(obs_vars))
obs_vars_to_drop = list(set(obs_vars) - set(init_vars))
return init_vars_to_drop, obs_vars_to_drop
@is_xarray(1)
def add_observations(self, xobj):
"""Add verification data against which to verify the initialized ensemble.
Args:
xobj (xarray object): Dataset/DataArray to append to the
``HindcastEnsemble`` object.
"""
if isinstance(xobj, xr.DataArray):
xobj = xobj.to_dataset()
match_initialized_dims(self._datasets["initialized"], xobj)
match_initialized_vars(self._datasets["initialized"], xobj)
# Check that time is int, cftime, or datetime; convert ints or cftime to
# datetime.
xobj = convert_time_index(xobj, "time", "xobj[init]")
# Check that converted/original cftime calendar is the same as the
# initialized calendar to avoid any alignment errors.
match_calendars(self._datasets["initialized"], xobj)
datasets = self._datasets.copy()
datasets.update({"observations": xobj})
return self._construct_direct(datasets, kind="hindcast")
@is_xarray(1)
def add_uninitialized(self, xobj):
"""Add a companion uninitialized ensemble for comparison to verification data.
Args:
            xobj (xarray object): Dataset/DataArray of the uninitialized
ensemble.
"""
if isinstance(xobj, xr.DataArray):
xobj = xobj.to_dataset()
match_initialized_dims(self._datasets["initialized"], xobj, uninitialized=True)
match_initialized_vars(self._datasets["initialized"], xobj)
        # Check that time is int, cftime, or datetime; convert ints or cftime to
# datetime.
xobj = convert_time_index(xobj, "time", "xobj[init]")
# Check that converted/original cftime calendar is the same as the
# initialized calendar to avoid any alignment errors.
match_calendars(self._datasets["initialized"], xobj, kind2="uninitialized")
datasets = self._datasets.copy()
datasets.update({"uninitialized": xobj})
return self._construct_direct(datasets, kind="hindcast")
def get_observations(self):
"""Returns xarray Datasets of the observations/verification data.
Returns:
``xarray`` Dataset of observations.
"""
return self._datasets["observations"]
def verify(
self,
reference=None,
metric=None,
comparison=None,
dim=None,
alignment=None,
**metric_kwargs,
):
"""Verifies the initialized ensemble against observations.
.. note::
This will automatically verify against all shared variables
between the initialized ensemble and observations/verification data.
Args:
reference (str): Type of reference forecasts to also verify against the
observations. Choose one or more of ['uninitialized', 'persistence'].
Defaults to None.
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to apply for
verification. see `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): How to
compare to the observations/verification data. See
`comparisons </comparisons.html>`_.
dim (str, list of str): Dimension(s) to apply metric over. ``dim`` is passed
on to xskillscore.{metric} and includes xskillscore's ``member_dim``.
``dim`` should contain ``member`` when ``comparison`` is probabilistic
but should not contain ``member`` when ``comparison=e2o``. Defaults to
``None`` meaning that all dimensions other than ``lead`` are reduced.
alignment (str): which inits or verification times should be aligned?
- 'maximize': maximize the degrees of freedom by slicing ``hind`` and
``verif`` to a common time frame at each lead.
- 'same_inits': slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- 'same_verif': slice to a common/consistent verification time frame
prior to computing metric. This philosophy follows the thought that
each lead should be based on the same set of verification dates.
**metric_kwargs (optional): arguments passed to ``metric``.
Returns:
Dataset with dimension skill containing initialized and reference skill(s).
"""
# Have to do checks here since this doesn't call `compute_hindcast` directly.
# Will be refactored when `climpred` migrates to inheritance-based.
if dim is None:
viable_dims = dict(self._datasets["initialized"].dims)
viable_dims = list(viable_dims.keys())
if "lead" in viable_dims:
viable_dims.remove("lead")
raise ValueError(
"Designate a dimension to reduce over when applying the "
f"metric. Got {dim}. Choose one or more of {viable_dims}"
)
if ("member" in dim) and comparison not in ["m2o", "m2r"]:
raise ValueError(
"Comparison must equal 'm2o' with dim='member'. "
f"Got comparison {comparison}."
)
if isinstance(reference, str):
reference = [reference]
elif reference is None:
reference = []
def _verify(
hind,
verif,
hist,
reference,
metric,
comparison,
alignment,
dim,
**metric_kwargs,
):
"""Interior verify func to be passed to apply func."""
metric, comparison, dim = _get_metric_comparison_dim(
hind, metric, comparison, dim, kind=self.kind
)
forecast, verif = comparison.function(hind, verif, metric=metric)
forecast = forecast.rename({"init": "time"})
inits, verif_dates = return_inits_and_verif_dates(
forecast,
verif,
alignment,
reference=reference,
hist=hist,
)
metric_over_leads = [
_apply_metric_at_given_lead(
verif,
verif_dates,
lead,
hind=forecast,
hist=hist,
inits=inits,
# Ensure apply metric function returns skill and not reference
# results.
reference=None,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
for lead in forecast["lead"].data
]
result = xr.concat(metric_over_leads, dim="lead", **CONCAT_KWARGS)
result["lead"] = forecast["lead"]
if reference is not None:
if "member" in verif.dims: # if broadcasted before
verif = verif.isel(member=0)
for r in reference:
metric_over_leads = [
_apply_metric_at_given_lead(
verif,
verif_dates,
lead,
hind=forecast,
hist=hist,
inits=inits,
reference=r,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
for lead in forecast["lead"].data
]
ref = xr.concat(metric_over_leads, dim="lead", **CONCAT_KWARGS)
ref["lead"] = forecast["lead"]
# fix to get no member dim for uninitialized e2o skill #477
if (
r == "uninitialized"
and comparison.name == "e2o"
and "member" in ref.dims
):
ref = ref.mean("member")
result = xr.concat([result, ref], dim="skill", **CONCAT_KWARGS)
# rename back to 'init'
if "time" in result.dims:
result = result.rename({"time": "init"})
# Add dimension/coordinate for different references.
result = result.assign_coords(skill=["initialized"] + reference)
return result.squeeze()
has_dataset(
self._datasets["observations"], "observational", "verify a forecast"
)
if "uninitialized" in reference:
has_dataset(
self._datasets["uninitialized"],
"uninitialized",
"compute an uninitialized reference forecast",
)
hist = self._datasets["uninitialized"]
else:
hist = None
res = self._apply_climpred_function(
_verify,
init=True,
metric=metric,
comparison=comparison,
alignment=alignment,
dim=dim,
hist=hist,
reference=reference,
**metric_kwargs,
)
if self._temporally_smoothed:
# TODO: cleanup
if isinstance(res, dict) and not isinstance(res, xr.Dataset):
for res_key, res_item in res.items():
res[res_key] = _reset_temporal_axis(
res_item, self._temporally_smoothed, dim="lead"
)
else:
res = _reset_temporal_axis(res, self._temporally_smoothed, dim="lead")
return res
def bootstrap(
self,
metric=None,
comparison=None,
dim=None,
alignment=None,
reference=None,
iterations=None,
sig=95,
resample_dim="member",
pers_sig=None,
**metric_kwargs,
):
"""Bootstrap with replacement according to Goddard et al. 2013.
Args:
metric (str, :py:class:`~climpred.metrics.Metric`): Metric to apply for
                verification, see `metrics </metrics.html>`_.
comparison (str, :py:class:`~climpred.comparisons.Comparison`): How to
compare to the observations/verification data, see
`comparisons </comparisons.html>`_.
dim (str, list of str): dimension(s) to apply metric over. ``dim`` is passed
on to xskillscore.{metric} and includes xskillscore's ``member_dim``.
``dim`` should contain ``member`` when ``comparison`` is probabilistic
but should not contain ``member`` when ``comparison='e2o'``. Defaults to
``None`` meaning that all dimensions other than ``lead`` are reduced.
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
If None or empty, returns no p value.
alignment (str): which inits or verification times should be aligned?
- 'maximize': maximize the degrees of freedom by slicing ``init`` and
``verif`` to a common time frame at each lead.
- 'same_inits': slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- 'same_verif': slice to a common/consistent verification time frame
prior to computing metric. This philosophy follows the thought that
each lead should be based on the same set of verification dates.
iterations (int): Number of resampling iterations for bootstrapping with
replacement. Recommended >= 500.
sig (int, default 95): Significance level in percent for deciding whether
uninitialized and persistence beat initialized skill.
resample_dim (str or list): dimension to resample from. default: 'member'.
- 'member': select a different set of members from hind
- 'init': select a different set of initializations from hind
pers_sig (int, default None):
If not None, the separate significance level for persistence.
**metric_kwargs (optional): arguments passed to ``metric``.
Returns:
            xr.Dataset: with dimensions ``result`` (holding ``verify skill``, ``p``,
``low_ci`` and ``high_ci``) and ``skill`` (holding ``initialized``,
``persistence`` and/or ``uninitialized``):
* result='verify skill', skill='initialized':
mean initialized skill
* result='high_ci', skill='initialized':
high confidence interval boundary for initialized skill
* result='p', skill='uninitialized':
p value of the hypothesis that the
difference of skill between the initialized and
uninitialized simulations is smaller or equal to zero
based on bootstrapping with replacement.
* result='p', skill='persistence':
p value of the hypothesis that the
difference of skill between the initialized and persistence
simulations is smaller or equal to zero based on
bootstrapping with replacement.
"""
if iterations is None:
raise ValueError("Designate number of bootstrapping `iterations`.")
# TODO: replace with more computationally efficient classes implementation
return bootstrap_hindcast(
self.get_initialized(),
self.get_uninitialized(),
self.get_observations(),
metric=metric,
comparison=comparison,
dim=dim,
alignment=alignment,
reference=reference,
resample_dim=resample_dim,
sig=sig,
iterations=iterations,
pers_sig=pers_sig,
)
def remove_bias(self, alignment, how="mean", cross_validate=True, **metric_kwargs):
"""Calculate and remove bias from
:py:class:`~climpred.classes.HindcastEnsemble`.
Args:
alignment (str): which inits or verification times should be aligned?
- 'maximize': maximize the degrees of freedom by slicing ``hind`` and
``verif`` to a common time frame at each lead.
- 'same_inits': slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- 'same_verif': slice to a common/consistent verification time frame
prior to computing metric. This philosophy follows the thought that
each lead should be based on the same set of verification dates.
how (str or list of str): what kind of bias removal to perform. Select
from ['mean']. Defaults to 'mean'.
            cross_validate (bool): If True, use the properly defined mean bias removal,
                which excludes the given initialization from the bias calculation.
                If False, include the given initialization in the calculation, which
                is much faster but yields similar skill for a large number of
                initializations. Defaults to True.
metric_kwargs (dict): kwargs to be passed to bias.
Returns:
HindcastEnsemble: bias removed HindcastEnsemble.
"""
if isinstance(how, str):
how = [how]
for h in how:
if h == "mean":
func = mean_bias_removal
else:
raise NotImplementedError(f"{h}_bias_removal is not implemented.")
self = func(
self,
alignment=alignment,
cross_validate=cross_validate,
**metric_kwargs,
)
return self
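# Illustrative sketch only (not shipped with climpred): the minimal HindcastEnsemble
# workflow suggested by the docstrings above. `hind_ds` and `obs_ds` are hypothetical
# xarray Datasets shaped the way climpred expects (the hindcast with 'init', 'lead'
# and optionally 'member'; the observations indexed by 'time').
def _example_hindcast_workflow(hind_ds, obs_ds):
    """Build a HindcastEnsemble, attach observations and verify deterministically."""
    hindcast = HindcastEnsemble(hind_ds)
    hindcast = hindcast.add_observations(obs_ds)
    return hindcast.verify(
        metric="rmse",  # any metric name accepted by verify()
        comparison="e2o",  # ensemble mean vs. observations, per the docstring
        dim="init",  # must not contain 'member' for comparison='e2o'
        alignment="same_verif",  # one of the three alignments documented above
    )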
|
import pandas as pd
import numpy as np
import os.path
from unittest import TestCase
from tsgen.gen import TimeSerieGenerator
class TimeSerieGeneratorTestCase(TestCase):
def test_generate_df_freq(self):
ts_gen = TimeSerieGenerator(
date_start="1990-01-01",
date_end="1990-01-02",
freq="D",
ts_name="ts.test",
)
df = ts_gen.generate_df()
self.assertEqual(len(df), 2)
def test_generate_df_name(self):
ts_gen = TimeSerieGenerator(
date_start="1990-01-01",
ts_name="ts.test_generate_df_name",
)
df = ts_gen.generate_df()
self.assertEqual(df["ts_name"][0], ts_gen.ts_name)
def test_export_df(self):
ts_gen = TimeSerieGenerator(
date_start="1990-01-01",
date_end="1990-01-02",
freq="D",
tz="UTC",
low=0,
high=100,
ts_name="ts.test_export_df",
)
df = ts_gen.generate_df()
ts_gen.export_df(df)
self.assertTrue(
os.path.exists(
f"{ts_gen.ts_name}_{pd.datetime.now().strftime("%y-%m-%d")}.csv"
)
)
def test_export(self):
ts_gen = TimeSerieGenerator(
date_start="1990-01-01",
date_end="1990-01-02",
freq="D",
tz="UTC",
low=0,
high=100,
ts_name="ts.test_export",
)
ts_gen.generate()
self.assertTrue(
os.path.exists(
f"{ts_gen.ts_name}_{pd.datetime.now().strftime("%y-%m-%d")}.csv"
)
)
| import pandas as pd
import numpy as np
import os.path
from unittest import TestCase
from tsgen.gen import TimeSerieGenerator
class TimeSerieGeneratorTestCase(TestCase):
def test_generate_df_freq(self):
ts_gen = TimeSerieGenerator(
date_start="1990-01-01",
date_end="1990-01-02",
freq="D",
ts_name="ts.test",
)
df = ts_gen.generate_df()
self.assertEqual(len(df), 2)
def test_generate_df_name(self):
ts_gen = TimeSerieGenerator(
date_start="1990-01-01",
ts_name="ts.test_generate_df_name",
)
df = ts_gen.generate_df()
self.assertEqual(df["ts_name"][0], ts_gen.ts_name)
def test_export_df(self):
ts_gen = TimeSerieGenerator(
date_start="1990-01-01",
date_end="1990-01-02",
freq="D",
tz="UTC",
low=0,
high=100,
ts_name="ts.test_export_df",
)
df = ts_gen.generate_df()
ts_gen.export_df(df)
self.assertTrue(
os.path.exists(
f"{ts_gen.ts_name}_{pd.datetime.now().strftime('%y-%m-%d')}.csv"
)
)
def test_export(self):
ts_gen = TimeSerieGenerator(
date_start="1990-01-01",
date_end="1990-01-02",
freq="D",
tz="UTC",
low=0,
high=100,
ts_name="ts.test_export",
)
ts_gen.generate()
self.assertTrue(
os.path.exists(
f"{ts_gen.ts_name}_{pd.datetime.now().strftime('%y-%m-%d')}.csv"
)
)
|
"""Tests for the link user flow."""
from http import HTTPStatus
from unittest.mock import patch
from . import async_setup_auth
from tests.common import CLIENT_ID, CLIENT_REDIRECT_URI
async def async_get_code(hass, aiohttp_client):
"""Return authorization code for link user tests."""
config = [
{
"name": "Example",
"type": "insecure_example",
"users": [
{"username": "test-user", "password": "test-pass", "name": "Test Name"}
],
},
{
"name": "Example",
"id": "2nd auth",
"type": "insecure_example",
"users": [
{"username": "2nd-user", "password": "2nd-pass", "name": "2nd Name"}
],
},
]
client = await async_setup_auth(hass, aiohttp_client, config)
user = await hass.auth.async_create_user(name="Hello")
refresh_token = await hass.auth.async_create_refresh_token(user, CLIENT_ID)
access_token = hass.auth.async_create_access_token(refresh_token)
# Now authenticate with the 2nd flow
resp = await client.post(
"/auth/login_flow",
json={
"client_id": CLIENT_ID,
"handler": ["insecure_example", "2nd auth"],
"redirect_uri": CLIENT_REDIRECT_URI,
"type": "link_user",
},
)
assert resp.status == HTTPStatus.OK
step = await resp.json()
resp = await client.post(
f"/auth/login_flow/{step["flow_id"]}",
json={"client_id": CLIENT_ID, "username": "2nd-user", "password": "2nd-pass"},
)
assert resp.status == HTTPStatus.OK
step = await resp.json()
return {
"user": user,
"code": step["result"],
"client": client,
"access_token": access_token,
}
async def test_link_user(hass, aiohttp_client):
"""Test linking a user to new credentials."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
# Link user
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": code},
headers={"authorization": f"Bearer {info["access_token"]}"},
)
assert resp.status == HTTPStatus.OK
assert len(info["user"].credentials) == 1
async def test_link_user_invalid_client_id(hass, aiohttp_client):
"""Test linking a user to new credentials."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
# Link user
resp = await client.post(
"/auth/link_user",
json={"client_id": "invalid", "code": code},
headers={"authorization": f"Bearer {info["access_token"]}"},
)
assert resp.status == HTTPStatus.BAD_REQUEST
assert len(info["user"].credentials) == 0
async def test_link_user_invalid_code(hass, aiohttp_client):
"""Test linking a user to new credentials."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
# Link user
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": "invalid"},
headers={"authorization": f"Bearer {info["access_token"]}"},
)
assert resp.status == HTTPStatus.BAD_REQUEST
assert len(info["user"].credentials) == 0
async def test_link_user_invalid_auth(hass, aiohttp_client):
"""Test linking a user to new credentials."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
# Link user
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": code},
headers={"authorization": "Bearer invalid"},
)
assert resp.status == HTTPStatus.UNAUTHORIZED
assert len(info["user"].credentials) == 0
async def test_link_user_already_linked_same_user(hass, aiohttp_client):
"""Test linking a user to a credential it's already linked to."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
# Link user
with patch.object(
hass.auth, "async_get_user_by_credentials", return_value=info["user"]
):
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": code},
headers={"authorization": f"Bearer {info["access_token"]}"},
)
assert resp.status == HTTPStatus.OK
# The credential was not added because it saw that it was already linked
assert len(info["user"].credentials) == 0
async def test_link_user_already_linked_other_user(hass, aiohttp_client):
"""Test linking a user to a credential already linked to other user."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
another_user = await hass.auth.async_create_user(name="Another")
# Link user
with patch.object(
hass.auth, "async_get_user_by_credentials", return_value=another_user
):
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": code},
headers={"authorization": f"Bearer {info["access_token"]}"},
)
assert resp.status == HTTPStatus.BAD_REQUEST
# The credential was not added because it saw that it was already linked
assert len(info["user"].credentials) == 0
assert len(another_user.credentials) == 0
| """Tests for the link user flow."""
from http import HTTPStatus
from unittest.mock import patch
from . import async_setup_auth
from tests.common import CLIENT_ID, CLIENT_REDIRECT_URI
async def async_get_code(hass, aiohttp_client):
"""Return authorization code for link user tests."""
config = [
{
"name": "Example",
"type": "insecure_example",
"users": [
{"username": "test-user", "password": "test-pass", "name": "Test Name"}
],
},
{
"name": "Example",
"id": "2nd auth",
"type": "insecure_example",
"users": [
{"username": "2nd-user", "password": "2nd-pass", "name": "2nd Name"}
],
},
]
client = await async_setup_auth(hass, aiohttp_client, config)
user = await hass.auth.async_create_user(name="Hello")
refresh_token = await hass.auth.async_create_refresh_token(user, CLIENT_ID)
access_token = hass.auth.async_create_access_token(refresh_token)
# Now authenticate with the 2nd flow
resp = await client.post(
"/auth/login_flow",
json={
"client_id": CLIENT_ID,
"handler": ["insecure_example", "2nd auth"],
"redirect_uri": CLIENT_REDIRECT_URI,
"type": "link_user",
},
)
assert resp.status == HTTPStatus.OK
step = await resp.json()
resp = await client.post(
f"/auth/login_flow/{step['flow_id']}",
json={"client_id": CLIENT_ID, "username": "2nd-user", "password": "2nd-pass"},
)
assert resp.status == HTTPStatus.OK
step = await resp.json()
return {
"user": user,
"code": step["result"],
"client": client,
"access_token": access_token,
}
async def test_link_user(hass, aiohttp_client):
"""Test linking a user to new credentials."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
# Link user
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": code},
headers={"authorization": f"Bearer {info['access_token']}"},
)
assert resp.status == HTTPStatus.OK
assert len(info["user"].credentials) == 1
async def test_link_user_invalid_client_id(hass, aiohttp_client):
"""Test linking a user to new credentials."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
# Link user
resp = await client.post(
"/auth/link_user",
json={"client_id": "invalid", "code": code},
headers={"authorization": f"Bearer {info['access_token']}"},
)
assert resp.status == HTTPStatus.BAD_REQUEST
assert len(info["user"].credentials) == 0
async def test_link_user_invalid_code(hass, aiohttp_client):
"""Test linking a user to new credentials."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
# Link user
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": "invalid"},
headers={"authorization": f"Bearer {info['access_token']}"},
)
assert resp.status == HTTPStatus.BAD_REQUEST
assert len(info["user"].credentials) == 0
async def test_link_user_invalid_auth(hass, aiohttp_client):
"""Test linking a user to new credentials."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
# Link user
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": code},
headers={"authorization": "Bearer invalid"},
)
assert resp.status == HTTPStatus.UNAUTHORIZED
assert len(info["user"].credentials) == 0
async def test_link_user_already_linked_same_user(hass, aiohttp_client):
"""Test linking a user to a credential it's already linked to."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
# Link user
with patch.object(
hass.auth, "async_get_user_by_credentials", return_value=info["user"]
):
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": code},
headers={"authorization": f"Bearer {info['access_token']}"},
)
assert resp.status == HTTPStatus.OK
# The credential was not added because it saw that it was already linked
assert len(info["user"].credentials) == 0
async def test_link_user_already_linked_other_user(hass, aiohttp_client):
"""Test linking a user to a credential already linked to other user."""
info = await async_get_code(hass, aiohttp_client)
client = info["client"]
code = info["code"]
another_user = await hass.auth.async_create_user(name="Another")
# Link user
with patch.object(
hass.auth, "async_get_user_by_credentials", return_value=another_user
):
resp = await client.post(
"/auth/link_user",
json={"client_id": CLIENT_ID, "code": code},
headers={"authorization": f"Bearer {info['access_token']}"},
)
assert resp.status == HTTPStatus.BAD_REQUEST
# The credential was not added because it saw that it was already linked
assert len(info["user"].credentials) == 0
assert len(another_user.credentials) == 0
|
"""Programmatic control over the TWS/gateway client software."""
import asyncio
import configparser
import logging
import os
from contextlib import suppress
from dataclasses import dataclass
from typing import ClassVar, Union
from eventkit import Event
import ib_insync.util as util
from ib_insync.contract import Forex
from ib_insync.ib import IB
__all__ = ['IBC', 'IBController', 'Watchdog']
@dataclass
class IBC:
r"""
Programmatic control over starting and stopping TWS/Gateway
using IBC (https://github.com/IbcAlpha/IBC).
Args:
twsVersion (int): (required) The major version number for
TWS or gateway.
gateway (bool):
* True = gateway
* False = TWS
tradingMode (str): 'live' or 'paper'.
userid (str): IB account username. It is recommended to set the real
username/password in a secured IBC config file.
password (str): IB account password.
twsPath (str): Path to the TWS installation folder.
Defaults:
* Linux: ~/Jts
* OS X: ~/Applications
* Windows: C:\\Jts
twsSettingsPath (str): Path to the TWS settings folder.
Defaults:
* Linux: ~/Jts
* OS X: ~/Jts
* Windows: Not available
ibcPath (str): Path to the IBC installation folder.
Defaults:
* Linux: /opt/ibc
* OS X: /opt/ibc
* Windows: C:\\IBC
ibcIni (str): Path to the IBC configuration file.
Defaults:
* Linux: ~/ibc/config.ini
* OS X: ~/ibc/config.ini
* Windows: %%HOMEPATH%%\\Documents\IBC\\config.ini
javaPath (str): Path to Java executable.
Default is to use the Java VM included with TWS/gateway.
fixuserid (str): FIX account user id (gateway only).
fixpassword (str): FIX account password (gateway only).
This is not intended to be run in a notebook.
To use IBC on Windows, the proactor (or quamash) event loop
must have been set:
.. code-block:: python
import asyncio
asyncio.set_event_loop(asyncio.ProactorEventLoop())
Example usage:
.. code-block:: python
ibc = IBC(976, gateway=True, tradingMode='live',
userid='edemo', password='demouser')
ibc.start()
IB.run()
"""
IbcLogLevel: ClassVar = logging.DEBUG
twsVersion: int = 0
gateway: bool = False
tradingMode: str = ''
twsPath: str = ''
twsSettingsPath: str = ''
ibcPath: str = ''
ibcIni: str = ''
javaPath: str = ''
userid: str = ''
password: str = ''
fixuserid: str = ''
fixpassword: str = ''
def __post_init__(self):
self._isWindows = os.sys.platform == 'win32'
if not self.ibcPath:
self.ibcPath = '/opt/ibc' if not self._isWindows else 'C:\\IBC'
self._proc = None
self._monitor = None
self._logger = logging.getLogger('ib_insync.IBC')
def __enter__(self):
self.start()
return self
def __exit__(self, *_exc):
self.terminate()
def start(self):
"""Launch TWS/IBG."""
util.run(self.startAsync())
def terminate(self):
"""Terminate TWS/IBG."""
util.run(self.terminateAsync())
async def startAsync(self):
if self._proc:
return
self._logger.info('Starting')
# map from field names to cmd arguments; key=(UnixArg, WindowsArg)
args = dict(
twsVersion=('', ''),
gateway=('--gateway', '/Gateway'),
tradingMode=('--mode=', '/Mode:'),
twsPath=('--tws-path=', '/TwsPath:'),
twsSettingsPath=('--tws-settings-path=', ''),
ibcPath=('--ibc-path=', '/IbcPath:'),
ibcIni=('--ibc-ini=', '/Config:'),
javaPath=('--java-path=', '/JavaPath:'),
userid=('--user=', '/User:'),
password=('--pw=', '/PW:'),
fixuserid=('--fix-user=', '/FIXUser:'),
fixpassword=('--fix-pw=', '/FIXPW:'))
# create shell command
cmd = [
f'{self.ibcPath}\\scripts\\StartIBC.bat' if self._isWindows else
f'{self.ibcPath}/scripts/ibcstart.sh']
for k, v in util.dataclassAsDict(self).items():
arg = args[k][self._isWindows]
if v:
if arg.endswith('=') or arg.endswith(':'):
cmd.append(f'{arg}{v}')
elif arg:
cmd.append(arg)
else:
cmd.append(str(v))
# run shell command
self._proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE)
self._monitor = asyncio.ensure_future(self.monitorAsync())
async def terminateAsync(self):
if not self._proc:
return
self._logger.info('Terminating')
if self._monitor:
self._monitor.cancel()
self._monitor = None
if self._isWindows:
import subprocess
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(self._proc.pid)])
else:
with suppress(ProcessLookupError):
self._proc.terminate()
await self._proc.wait()
self._proc = None
async def monitorAsync(self):
while self._proc:
line = await self._proc.stdout.readline()
if not line:
break
self._logger.log(IBC.IbcLogLevel, line.strip().decode())
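# Illustrative sketch only (not part of ib_insync): because IBC defines __enter__ and
# __exit__ above, it can also be used as a context manager so the gateway is terminated
# even when connecting fails. The version number, trading mode and port are assumptions
# copied from the class docstring and the example at the bottom of this file.
def _example_ibc_context_manager():
    """Start the gateway via IBC, connect an IB client, then shut both down cleanly."""
    with IBC(976, gateway=True, tradingMode='paper'):
        ib = IB()
        ib.connect('127.0.0.1', 4002, clientId=1)
        try:
            print(ib.isConnected())
        finally:
            ib.disconnect()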
@dataclass
class IBController:
"""
For new installations it is recommended to use IBC instead.
Programmatic control over starting and stopping TWS/Gateway
using IBController (https://github.com/ib-controller/ib-controller).
    On Windows the proactor (or quamash) event loop must have been set:
.. code-block:: python
import asyncio
asyncio.set_event_loop(asyncio.ProactorEventLoop())
This is not intended to be run in a notebook.
"""
APP: str = 'TWS' # 'TWS' or 'GATEWAY'
TWS_MAJOR_VRSN: str = '969'
TRADING_MODE: str = 'live' # 'live' or 'paper'
IBC_INI: str = '~/IBController/IBController.ini'
IBC_PATH: str = '~/IBController'
TWS_PATH: str = '~/Jts'
LOG_PATH: str = '~/IBController/Logs'
TWSUSERID: str = ''
TWSPASSWORD: str = ''
JAVA_PATH: str = ''
TWS_CONFIG_PATH: str = ''
def __post_init__(self):
self._proc = None
self._monitor = None
self._logger = logging.getLogger('ib_insync.IBController')
def __enter__(self):
self.start()
return self
def __exit__(self, *_exc):
self.terminate()
def start(self):
"""Launch TWS/IBG."""
util.run(self.startAsync())
def stop(self):
"""Cleanly shutdown TWS/IBG."""
util.run(self.stopAsync())
def terminate(self):
"""Terminate TWS/IBG."""
util.run(self.terminateAsync())
async def startAsync(self):
if self._proc:
return
self._logger.info('Starting')
# expand paths
d = util.dataclassAsDict(self)
for k, v in d.items():
if k.endswith('_PATH') or k.endswith('_INI'):
d[k] = os.path.expanduser(v)
if not d['TWS_CONFIG_PATH']:
d['TWS_CONFIG_PATH'] = d['TWS_PATH']
self.__dict__.update(**d)
# run shell command
ext = 'bat' if os.sys.platform == 'win32' else 'sh'
cmd = f'{d['IBC_PATH']}/Scripts/DisplayBannerAndLaunch.{ext}'
env = {**os.environ, **d}
self._proc = await asyncio.create_subprocess_exec(
cmd, env=env, stdout=asyncio.subprocess.PIPE)
self._monitor = asyncio.ensure_future(self.monitorAsync())
async def stopAsync(self):
if not self._proc:
return
self._logger.info('Stopping')
# read ibcontroller ini file to get controller port
txt = '[section]' + open(self.IBC_INI).read()
config = configparser.ConfigParser()
config.read_string(txt)
contrPort = config.getint('section', 'IbControllerPort')
_reader, writer = await asyncio.open_connection('127.0.0.1', contrPort)
writer.write(b'STOP')
await writer.drain()
writer.close()
await self._proc.wait()
self._proc = None
self._monitor.cancel()
self._monitor = None
async def terminateAsync(self):
if not self._proc:
return
self._logger.info('Terminating')
self._monitor.cancel()
self._monitor = None
with suppress(ProcessLookupError):
self._proc.terminate()
await self._proc.wait()
self._proc = None
async def monitorAsync(self):
while self._proc:
line = await self._proc.stdout.readline()
if not line:
break
self._logger.info(line.strip().decode())
@dataclass
class Watchdog:
"""
Start, connect and watch over the TWS or gateway app and try to keep it
up and running. It is intended to be used in an event-driven
application that properly initializes itself upon (re-)connect.
It is not intended to be used in a notebook or in imperative-style code.
Do not expect Watchdog to magically shield you from reality. Do not use
Watchdog unless you understand what it does and doesn't do.
Args:
controller (Union[IBC, IBController]): (required) IBC or IBController
instance.
        ib (IB): (required) IB instance to be used. Do not connect this
            instance; Watchdog takes care of that.
host (str): Used for connecting IB instance.
port (int): Used for connecting IB instance.
clientId (int): Used for connecting IB instance.
connectTimeout (float): Used for connecting IB instance.
appStartupTime (float): Time (in seconds) that the app is given
to start up. Make sure that it is given ample time.
appTimeout (float): Timeout (in seconds) for network traffic idle time.
retryDelay (float): Time (in seconds) to restart app after a
previous failure.
The idea is to wait until there is no traffic coming from the app for
a certain amount of time (the ``appTimeout`` parameter). This triggers
a historical request to be placed just to see if the app is still alive
    and well. If so, continue; if not, restart the whole app
and reconnect. Restarting will also occur directly on errors 1100 and 100.
Example usage:
.. code-block:: python
def onConnected():
print(ib.accountValues())
ibc = IBC(974, gateway=True, tradingMode='paper')
ib = IB()
ib.connectedEvent += onConnected
watchdog = Watchdog(ibc, ib, port=4002)
watchdog.start()
ib.run()
Events:
* ``startingEvent`` (watchdog: :class:`.Watchdog`)
* ``startedEvent`` (watchdog: :class:`.Watchdog`)
* ``stoppingEvent`` (watchdog: :class:`.Watchdog`)
* ``stoppedEvent`` (watchdog: :class:`.Watchdog`)
* ``softTimeoutEvent`` (watchdog: :class:`.Watchdog`)
* ``hardTimeoutEvent`` (watchdog: :class:`.Watchdog`)
"""
events = [
'startingEvent', 'startedEvent', 'stoppingEvent', 'stoppedEvent',
'softTimeoutEvent', 'hardTimeoutEvent']
controller: Union[IBC, IBController]
ib: IB
host: str = '127.0.0.1'
port: int = 7497
clientId: int = 1
connectTimeout: float = 2
appStartupTime: float = 30
appTimeout: float = 20
retryDelay: float = 2
def __post_init__(self):
self.startingEvent = Event('startingEvent')
self.startedEvent = Event('startedEvent')
self.stoppingEvent = Event('stoppingEvent')
self.stoppedEvent = Event('stoppedEvent')
self.softTimeoutEvent = Event('softTimeoutEvent')
self.hardTimeoutEvent = Event('hardTimeoutEvent')
if not self.controller:
raise ValueError('No controller supplied')
if not self.ib:
raise ValueError('No IB instance supplied')
if self.ib.isConnected():
raise ValueError('IB instance must not be connected')
assert 0 < self.appTimeout < 60
assert self.retryDelay > 0
self._runner = None
self._logger = logging.getLogger('ib_insync.Watchdog')
def start(self):
self._logger.info('Starting')
self.startingEvent.emit(self)
self._runner = asyncio.ensure_future(self.runAsync())
def stop(self):
self._logger.info('Stopping')
self.stoppingEvent.emit(self)
self.ib.disconnect()
self._runner = None
async def runAsync(self):
def onTimeout(idlePeriod):
if not waiter.done():
waiter.set_result(None)
def onError(reqId, errorCode, errorString, contract):
if errorCode in [1100, 100] and not waiter.done():
waiter.set_exception(Warning(f'Error {errorCode}'))
def onDisconnected():
if not waiter.done():
waiter.set_exception(Warning('Disconnected'))
while self._runner:
try:
await self.controller.startAsync()
await asyncio.sleep(self.appStartupTime)
await self.ib.connectAsync(
self.host, self.port, self.clientId, self.connectTimeout)
self.startedEvent.emit(self)
self.ib.setTimeout(self.appTimeout)
self.ib.timeoutEvent += onTimeout
self.ib.errorEvent += onError
self.ib.disconnectedEvent += onDisconnected
while self._runner:
waiter = asyncio.Future()
await waiter
# soft timeout, probe the app with a historical request
self._logger.debug('Soft timeout')
self.softTimeoutEvent.emit(self)
probe = self.ib.reqHistoricalDataAsync(
Forex('EURUSD'), '', '30 S', '5 secs',
'MIDPOINT', False)
bars = None
with suppress(asyncio.TimeoutError):
bars = await asyncio.wait_for(probe, 4)
if not bars:
self.hardTimeoutEvent.emit(self)
raise Warning('Hard timeout')
self.ib.setTimeout(self.appTimeout)
except ConnectionRefusedError:
pass
except Warning as w:
self._logger.warning(w)
except Exception as e:
self._logger.exception(e)
finally:
self.ib.timeoutEvent -= onTimeout
self.ib.errorEvent -= onError
self.ib.disconnectedEvent -= onDisconnected
await self.controller.terminateAsync()
self.stoppedEvent.emit(self)
if self._runner:
await asyncio.sleep(self.retryDelay)
if __name__ == '__main__':
asyncio.get_event_loop().set_debug(True)
util.logToConsole(logging.DEBUG)
ibc = IBC(976, gateway=True, tradingMode='paper')
# userid='edemo', password='demouser')
ib = IB()
app = Watchdog(ibc, ib, port=4002, appStartupTime=15, appTimeout=10)
app.start()
IB.run()
| """Programmatic control over the TWS/gateway client software."""
import asyncio
import configparser
import logging
import os
from contextlib import suppress
from dataclasses import dataclass
from typing import ClassVar, Union
from eventkit import Event
import ib_insync.util as util
from ib_insync.contract import Forex
from ib_insync.ib import IB
__all__ = ['IBC', 'IBController', 'Watchdog']
@dataclass
class IBC:
r"""
Programmatic control over starting and stopping TWS/Gateway
using IBC (https://github.com/IbcAlpha/IBC).
Args:
twsVersion (int): (required) The major version number for
TWS or gateway.
gateway (bool):
* True = gateway
* False = TWS
tradingMode (str): 'live' or 'paper'.
userid (str): IB account username. It is recommended to set the real
username/password in a secured IBC config file.
password (str): IB account password.
twsPath (str): Path to the TWS installation folder.
Defaults:
* Linux: ~/Jts
* OS X: ~/Applications
* Windows: C:\\Jts
twsSettingsPath (str): Path to the TWS settings folder.
Defaults:
* Linux: ~/Jts
* OS X: ~/Jts
* Windows: Not available
ibcPath (str): Path to the IBC installation folder.
Defaults:
* Linux: /opt/ibc
* OS X: /opt/ibc
* Windows: C:\\IBC
ibcIni (str): Path to the IBC configuration file.
Defaults:
* Linux: ~/ibc/config.ini
* OS X: ~/ibc/config.ini
* Windows: %%HOMEPATH%%\\Documents\IBC\\config.ini
javaPath (str): Path to Java executable.
Default is to use the Java VM included with TWS/gateway.
fixuserid (str): FIX account user id (gateway only).
fixpassword (str): FIX account password (gateway only).
This is not intended to be run in a notebook.
To use IBC on Windows, the proactor (or quamash) event loop
must have been set:
.. code-block:: python
import asyncio
asyncio.set_event_loop(asyncio.ProactorEventLoop())
Example usage:
.. code-block:: python
ibc = IBC(976, gateway=True, tradingMode='live',
userid='edemo', password='demouser')
ibc.start()
IB.run()
"""
IbcLogLevel: ClassVar = logging.DEBUG
twsVersion: int = 0
gateway: bool = False
tradingMode: str = ''
twsPath: str = ''
twsSettingsPath: str = ''
ibcPath: str = ''
ibcIni: str = ''
javaPath: str = ''
userid: str = ''
password: str = ''
fixuserid: str = ''
fixpassword: str = ''
def __post_init__(self):
self._isWindows = os.sys.platform == 'win32'
if not self.ibcPath:
self.ibcPath = '/opt/ibc' if not self._isWindows else 'C:\\IBC'
self._proc = None
self._monitor = None
self._logger = logging.getLogger('ib_insync.IBC')
def __enter__(self):
self.start()
return self
def __exit__(self, *_exc):
self.terminate()
def start(self):
"""Launch TWS/IBG."""
util.run(self.startAsync())
def terminate(self):
"""Terminate TWS/IBG."""
util.run(self.terminateAsync())
async def startAsync(self):
if self._proc:
return
self._logger.info('Starting')
# map from field names to cmd arguments; key=(UnixArg, WindowsArg)
args = dict(
twsVersion=('', ''),
gateway=('--gateway', '/Gateway'),
tradingMode=('--mode=', '/Mode:'),
twsPath=('--tws-path=', '/TwsPath:'),
twsSettingsPath=('--tws-settings-path=', ''),
ibcPath=('--ibc-path=', '/IbcPath:'),
ibcIni=('--ibc-ini=', '/Config:'),
javaPath=('--java-path=', '/JavaPath:'),
userid=('--user=', '/User:'),
password=('--pw=', '/PW:'),
fixuserid=('--fix-user=', '/FIXUser:'),
fixpassword=('--fix-pw=', '/FIXPW:'))
# create shell command
cmd = [
f'{self.ibcPath}\\scripts\\StartIBC.bat' if self._isWindows else
f'{self.ibcPath}/scripts/ibcstart.sh']
for k, v in util.dataclassAsDict(self).items():
arg = args[k][self._isWindows]
if v:
if arg.endswith('=') or arg.endswith(':'):
cmd.append(f'{arg}{v}')
elif arg:
cmd.append(arg)
else:
cmd.append(str(v))
# run shell command
self._proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE)
self._monitor = asyncio.ensure_future(self.monitorAsync())
async def terminateAsync(self):
if not self._proc:
return
self._logger.info('Terminating')
if self._monitor:
self._monitor.cancel()
self._monitor = None
if self._isWindows:
import subprocess
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(self._proc.pid)])
else:
with suppress(ProcessLookupError):
self._proc.terminate()
await self._proc.wait()
self._proc = None
async def monitorAsync(self):
while self._proc:
line = await self._proc.stdout.readline()
if not line:
break
self._logger.log(IBC.IbcLogLevel, line.strip().decode())
@dataclass
class IBController:
"""
For new installations it is recommended to use IBC instead.
Programmatic control over starting and stopping TWS/Gateway
using IBController (https://github.com/ib-controller/ib-controller).
On Windows the the proactor (or quamash) event loop must have been set:
.. code-block:: python
import asyncio
asyncio.set_event_loop(asyncio.ProactorEventLoop())
This is not intended to be run in a notebook.
"""
APP: str = 'TWS' # 'TWS' or 'GATEWAY'
TWS_MAJOR_VRSN: str = '969'
TRADING_MODE: str = 'live' # 'live' or 'paper'
IBC_INI: str = '~/IBController/IBController.ini'
IBC_PATH: str = '~/IBController'
TWS_PATH: str = '~/Jts'
LOG_PATH: str = '~/IBController/Logs'
TWSUSERID: str = ''
TWSPASSWORD: str = ''
JAVA_PATH: str = ''
TWS_CONFIG_PATH: str = ''
def __post_init__(self):
self._proc = None
self._monitor = None
self._logger = logging.getLogger('ib_insync.IBController')
def __enter__(self):
self.start()
return self
def __exit__(self, *_exc):
self.terminate()
def start(self):
"""Launch TWS/IBG."""
util.run(self.startAsync())
def stop(self):
"""Cleanly shutdown TWS/IBG."""
util.run(self.stopAsync())
def terminate(self):
"""Terminate TWS/IBG."""
util.run(self.terminateAsync())
async def startAsync(self):
if self._proc:
return
self._logger.info('Starting')
# expand paths
d = util.dataclassAsDict(self)
for k, v in d.items():
if k.endswith('_PATH') or k.endswith('_INI'):
d[k] = os.path.expanduser(v)
if not d['TWS_CONFIG_PATH']:
d['TWS_CONFIG_PATH'] = d['TWS_PATH']
self.__dict__.update(**d)
# run shell command
ext = 'bat' if os.sys.platform == 'win32' else 'sh'
cmd = f'{d["IBC_PATH"]}/Scripts/DisplayBannerAndLaunch.{ext}'
env = {**os.environ, **d}
self._proc = await asyncio.create_subprocess_exec(
cmd, env=env, stdout=asyncio.subprocess.PIPE)
self._monitor = asyncio.ensure_future(self.monitorAsync())
async def stopAsync(self):
if not self._proc:
return
self._logger.info('Stopping')
# read ibcontroller ini file to get controller port
txt = '[section]' + open(self.IBC_INI).read()
config = configparser.ConfigParser()
config.read_string(txt)
contrPort = config.getint('section', 'IbControllerPort')
_reader, writer = await asyncio.open_connection('127.0.0.1', contrPort)
writer.write(b'STOP')
await writer.drain()
writer.close()
await self._proc.wait()
self._proc = None
self._monitor.cancel()
self._monitor = None
async def terminateAsync(self):
if not self._proc:
return
self._logger.info('Terminating')
self._monitor.cancel()
self._monitor = None
with suppress(ProcessLookupError):
self._proc.terminate()
await self._proc.wait()
self._proc = None
async def monitorAsync(self):
while self._proc:
line = await self._proc.stdout.readline()
if not line:
break
self._logger.info(line.strip().decode())
@dataclass
class Watchdog:
"""
Start, connect and watch over the TWS or gateway app and try to keep it
up and running. It is intended to be used in an event-driven
application that properly initializes itself upon (re-)connect.
It is not intended to be used in a notebook or in imperative-style code.
Do not expect Watchdog to magically shield you from reality. Do not use
Watchdog unless you understand what it does and doesn't do.
Args:
controller (Union[IBC, IBController]): (required) IBC or IBController
instance.
ib (IB): (required) IB instance to be used. Do no connect this
instance as Watchdog takes care of that.
host (str): Used for connecting IB instance.
port (int): Used for connecting IB instance.
clientId (int): Used for connecting IB instance.
connectTimeout (float): Used for connecting IB instance.
appStartupTime (float): Time (in seconds) that the app is given
to start up. Make sure that it is given ample time.
appTimeout (float): Timeout (in seconds) for network traffic idle time.
retryDelay (float): Time (in seconds) to restart app after a
previous failure.
The idea is to wait until there is no traffic coming from the app for
a certain amount of time (the ``appTimeout`` parameter). This triggers
a historical request to be placed just to see if the app is still alive
and well. If yes, then continue, if no then restart the whole app
and reconnect. Restarting will also occur directly on errors 1100 and 100.
Example usage:
.. code-block:: python
def onConnected():
print(ib.accountValues())
ibc = IBC(974, gateway=True, tradingMode='paper')
ib = IB()
ib.connectedEvent += onConnected
watchdog = Watchdog(ibc, ib, port=4002)
watchdog.start()
ib.run()
Events:
* ``startingEvent`` (watchdog: :class:`.Watchdog`)
* ``startedEvent`` (watchdog: :class:`.Watchdog`)
* ``stoppingEvent`` (watchdog: :class:`.Watchdog`)
* ``stoppedEvent`` (watchdog: :class:`.Watchdog`)
* ``softTimeoutEvent`` (watchdog: :class:`.Watchdog`)
* ``hardTimeoutEvent`` (watchdog: :class:`.Watchdog`)
"""
events = [
'startingEvent', 'startedEvent', 'stoppingEvent', 'stoppedEvent',
'softTimeoutEvent', 'hardTimeoutEvent']
controller: Union[IBC, IBController]
ib: IB
host: str = '127.0.0.1'
port: int = 7497
clientId: int = 1
connectTimeout: float = 2
appStartupTime: float = 30
appTimeout: float = 20
retryDelay: float = 2
def __post_init__(self):
self.startingEvent = Event('startingEvent')
self.startedEvent = Event('startedEvent')
self.stoppingEvent = Event('stoppingEvent')
self.stoppedEvent = Event('stoppedEvent')
self.softTimeoutEvent = Event('softTimeoutEvent')
self.hardTimeoutEvent = Event('hardTimeoutEvent')
if not self.controller:
raise ValueError('No controller supplied')
if not self.ib:
raise ValueError('No IB instance supplied')
if self.ib.isConnected():
raise ValueError('IB instance must not be connected')
assert 0 < self.appTimeout < 60
assert self.retryDelay > 0
self._runner = None
self._logger = logging.getLogger('ib_insync.Watchdog')
def start(self):
self._logger.info('Starting')
self.startingEvent.emit(self)
self._runner = asyncio.ensure_future(self.runAsync())
def stop(self):
self._logger.info('Stopping')
self.stoppingEvent.emit(self)
self.ib.disconnect()
self._runner = None
async def runAsync(self):
def onTimeout(idlePeriod):
if not waiter.done():
waiter.set_result(None)
def onError(reqId, errorCode, errorString, contract):
if errorCode in [1100, 100] and not waiter.done():
waiter.set_exception(Warning(f'Error {errorCode}'))
def onDisconnected():
if not waiter.done():
waiter.set_exception(Warning('Disconnected'))
while self._runner:
try:
await self.controller.startAsync()
await asyncio.sleep(self.appStartupTime)
await self.ib.connectAsync(
self.host, self.port, self.clientId, self.connectTimeout)
self.startedEvent.emit(self)
self.ib.setTimeout(self.appTimeout)
self.ib.timeoutEvent += onTimeout
self.ib.errorEvent += onError
self.ib.disconnectedEvent += onDisconnected
while self._runner:
waiter = asyncio.Future()
await waiter
# soft timeout, probe the app with a historical request
self._logger.debug('Soft timeout')
self.softTimeoutEvent.emit(self)
probe = self.ib.reqHistoricalDataAsync(
Forex('EURUSD'), '', '30 S', '5 secs',
'MIDPOINT', False)
bars = None
with suppress(asyncio.TimeoutError):
bars = await asyncio.wait_for(probe, 4)
if not bars:
self.hardTimeoutEvent.emit(self)
raise Warning('Hard timeout')
self.ib.setTimeout(self.appTimeout)
except ConnectionRefusedError:
pass
except Warning as w:
self._logger.warning(w)
except Exception as e:
self._logger.exception(e)
finally:
self.ib.timeoutEvent -= onTimeout
self.ib.errorEvent -= onError
self.ib.disconnectedEvent -= onDisconnected
await self.controller.terminateAsync()
self.stoppedEvent.emit(self)
if self._runner:
await asyncio.sleep(self.retryDelay)
if __name__ == '__main__':
asyncio.get_event_loop().set_debug(True)
util.logToConsole(logging.DEBUG)
ibc = IBC(976, gateway=True, tradingMode='paper')
# userid='edemo', password='demouser')
ib = IB()
app = Watchdog(ibc, ib, port=4002, appStartupTime=15, appTimeout=10)
app.start()
IB.run()
|
"""API functionality for websites"""
import logging
import os
from typing import Dict, List, Optional
from uuid import UUID
from django.conf import settings
from django.core.files.uploadedfile import UploadedFile
from django.db.models import Q, QuerySet
from magic import Magic
from mitol.common.utils import max_or_none
from mitol.mail.api import get_message_sender
from websites.constants import CONTENT_FILENAME_MAX_LEN, RESOURCE_TYPE_VIDEO
from websites.messages import (
PreviewOrPublishFailureMessage,
PreviewOrPublishSuccessMessage,
)
from websites.models import Website, WebsiteContent, WebsiteStarter
from websites.utils import get_dict_field, set_dict_field
log = logging.getLogger(__name__)
def get_valid_new_filename(
website_pk: str,
dirpath: Optional[str],
filename_base: str,
exclude_text_id: Optional[str] = None,
) -> str:
"""
Given a filename to act as a base/prefix, returns a filename that will satisfy unique constraints,
adding/incrementing a numerical suffix as necessary.
Examples:
In database: WebsiteContent(filename="my-filename")...
get_valid_new_filename("my-filename") == "my-filename2"
In database: WebsiteContent(filename="my-filename99")...
get_valid_new_filename("my-filename99") == "my-filename100"
"""
website_content_qset = WebsiteContent.objects.all_with_deleted().filter(
website_id=website_pk, dirpath=dirpath
)
if exclude_text_id is not None:
website_content_qset = website_content_qset.exclude(text_id=exclude_text_id)
filename_exists = website_content_qset.filter(filename=filename_base).exists()
if not filename_exists:
return filename_base
return find_available_name(
website_content_qset,
filename_base,
"filename",
max_length=CONTENT_FILENAME_MAX_LEN,
)
def get_valid_new_slug(slug_base: str, path: str) -> str:
"""
Given a slug to act as a base/prefix, returns a slug that will satisfy unique constraints,
adding/incrementing a numerical suffix as necessary.
"""
starter_qset = WebsiteStarter.objects.exclude(path=path)
slug_exists = starter_qset.filter(slug=slug_base).exists()
if not slug_exists:
return slug_base
return find_available_name(starter_qset, slug_base, "slug", max_length=30)
def find_available_name(
website_content_qset: QuerySet,
initial_filename_base: str,
fieldname: str,
max_length: Optional[int] = CONTENT_FILENAME_MAX_LEN,
extension: Optional[str] = None,
) -> str:
"""
Returns a filename with the lowest possible suffix given some base filename. If the applied suffix
makes the filename longer than the filename max length, characters are removed from the
right of the filename to make room.
EXAMPLES:
initial_filename_base = "myfile"
Existing filenames = "myfile"
Return value = "myfile1"
initial_filename_base = "myfile"
Existing filenames = "myfile", "myfile1" through "myfile5"
Return value = "myfile6"
initial_filename_base = "abcdefghijklmnopqrstuvwxyz" (26 characters, assuming 26 character max)
Existing filenames = "abcdefghijklmnopqrstuvwxyz"
Return value = "abcdefghijklmnopqrstuvwxy1" # pragma: allowlist secret
initial_filename_base = "abcdefghijklmnopqrstuvwxy" (25 characters long, assuming 26 character max)
Existing filenames = "abc...y", "abc...y1" through "abc...y9"
Return value = "abcdefghijklmnopqrstuvwx10" # pragma: allowlist secret
"""
# Keeps track of the number of characters that must be cut from the filename to be less than
# the filename max length when the suffix is applied.
chars_to_truncate = 0 if len(initial_filename_base) < max_length else 1
# Any query for suffixed filenames could come up empty. The minimum suffix will be added to
# the filename in that case.
current_min_suffix = 2
if extension is None:
extension = ""
while chars_to_truncate < len(initial_filename_base):
name_base = initial_filename_base[
0 : len(initial_filename_base) - chars_to_truncate
]
kwargs = {
f"{fieldname}__regex": r"{name_base}[0-9]+{extension}".format(
name_base=name_base, extension=extension
)
}
# Find names that match the namebase and have a numerical suffix, then find the max suffix
existing_names = website_content_qset.filter(**kwargs).values_list(
fieldname, flat=True
)
if extension:
existing_names = [os.path.splitext(name)[0] for name in existing_names]
max_suffix = max_or_none(
int(filename[len(name_base) :]) for filename in existing_names
)
if max_suffix is None:
return f"{"".join([name_base, str(current_min_suffix)])}{extension}"
else:
next_suffix = max_suffix + 1
candidate_name = "".join([name_base, str(next_suffix), extension])
# If the next suffix adds a digit and causes the filename to exceed the character limit,
# keep searching.
if len(candidate_name) <= max_length:
return candidate_name
# At this point, we know there are no suffixes left to add to this filename base that was tried,
# so we will need to remove characters from the end of that filename base to make room for a longer
# suffix.
chars_to_truncate = chars_to_truncate + 1
available_suffix_digits = max_length - (
len(initial_filename_base) - chars_to_truncate
)
# If there is space for 4 digits for the suffix, the minimum value it could be is 1000, or 10^3
current_min_suffix = 10 ** (available_suffix_digits - 1)
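# Illustrative sketch only (not part of this module): the suffix arithmetic used by
# find_available_name, shown without a queryset. Given existing names that already
# carry a numeric suffix for the same base, the next candidate is
# max(existing suffixes) + 1, or the minimum suffix of 2 when nothing matched.
def _example_next_suffix(name_base, existing_names):
    """E.g. _example_next_suffix("myfile", ["myfile3", "myfile7"]) -> "myfile8"."""
    max_suffix = max_or_none(int(name[len(name_base) :]) for name in existing_names)
    next_suffix = 2 if max_suffix is None else max_suffix + 1
    return f"{name_base}{next_suffix}"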
def fetch_website(filter_value: str) -> Website:
"""
Attempts to fetch a Website based on several properties
"""
if len(filter_value) in {32, 36}:
try:
parsed_uuid = UUID(filter_value, version=4)
website = Website.objects.filter(uuid=parsed_uuid).first()
if website is not None:
return website
except ValueError:
pass
website_results = Website.objects.filter(
Q(name__iexact=filter_value) | Q(title__iexact=filter_value)
).all()
if len(website_results) == 0:
raise Website.DoesNotExist(
f"Could not find a Website with a matching uuid, name, or title ('{filter_value}')"
)
if len(website_results) == 1:
return website_results[0]
sorted_results = sorted(
website_results, key=lambda _website: 1 if _website.name == filter_value else 2
)
return next(sorted_results)
def is_ocw_site(website: Website) -> bool:
"""Return true if the site is an OCW site"""
return website.starter.slug == settings.OCW_IMPORT_STARTER_SLUG
def update_youtube_thumbnail(website_id: str, metadata: Dict, overwrite=False):
""" Assign a youtube thumbnail url if appropriate to a website's metadata"""
website = Website.objects.get(uuid=website_id)
if is_ocw_site(website):
youtube_id = get_dict_field(metadata, settings.YT_FIELD_ID)
if youtube_id and (
not get_dict_field(metadata, settings.YT_FIELD_THUMBNAIL) or overwrite
):
set_dict_field(
metadata,
settings.YT_FIELD_THUMBNAIL,
f"https://img.youtube.com/vi/{youtube_id}/0.jpg",
)
def unassigned_youtube_ids(website: Website) -> List[WebsiteContent]:
"""Return a list of WebsiteContent objects for videos with unassigned youtube ids"""
if not is_ocw_site(website):
return []
query_id_field = f"metadata__{"__".join(settings.YT_FIELD_ID.split("."))}"
return WebsiteContent.objects.filter(
Q(website=website)
& Q(metadata__resourcetype=RESOURCE_TYPE_VIDEO)
& (Q(**{query_id_field: None}) | Q(**{query_id_field: ""}))
)
def mail_website_admins_on_publish(website: Website, version: str, success: bool):
    """Send a success or failure message to site admins when a site version publish completes"""
site_admins = list(website.admin_group.user_set.all()) + [website.owner]
if not success:
log.error("%s version build failed for site %s", version, website.name)
message = (
PreviewOrPublishSuccessMessage if success else PreviewOrPublishFailureMessage
)
with get_message_sender(message) as sender:
for user in site_admins:
sender.build_and_send_message(
user,
{
"site": {
"title": website.title,
"url": website.get_url(version),
},
"version": version,
},
)
def detect_mime_type(uploaded_file: UploadedFile) -> str:
"""Detect mime type of an uploaded file"""
magic = Magic(mime=True)
chunk = next(uploaded_file.chunks(chunk_size=2048))
return magic.from_buffer(chunk)
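# Usage sketch (hypothetical variable name `upload`, a Django UploadedFile):
#     detect_mime_type(upload)  # -> e.g. "image/png"
# Only the first 2 KB chunk is read, which is typically enough for libmagic to identify the type.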
| """API functionality for websites"""
import logging
import os
from typing import Dict, List, Optional
from uuid import UUID
from django.conf import settings
from django.core.files.uploadedfile import UploadedFile
from django.db.models import Q, QuerySet
from magic import Magic
from mitol.common.utils import max_or_none
from mitol.mail.api import get_message_sender
from websites.constants import CONTENT_FILENAME_MAX_LEN, RESOURCE_TYPE_VIDEO
from websites.messages import (
PreviewOrPublishFailureMessage,
PreviewOrPublishSuccessMessage,
)
from websites.models import Website, WebsiteContent, WebsiteStarter
from websites.utils import get_dict_field, set_dict_field
log = logging.getLogger(__name__)
def get_valid_new_filename(
website_pk: str,
dirpath: Optional[str],
filename_base: str,
exclude_text_id: Optional[str] = None,
) -> str:
"""
Given a filename to act as a base/prefix, returns a filename that will satisfy unique constraints,
adding/incrementing a numerical suffix as necessary.
Examples:
In database: WebsiteContent(filename="my-filename")...
get_valid_new_filename("my-filename") == "my-filename2"
In database: WebsiteContent(filename="my-filename99")...
get_valid_new_filename("my-filename99") == "my-filename100"
"""
website_content_qset = WebsiteContent.objects.all_with_deleted().filter(
website_id=website_pk, dirpath=dirpath
)
if exclude_text_id is not None:
website_content_qset = website_content_qset.exclude(text_id=exclude_text_id)
filename_exists = website_content_qset.filter(filename=filename_base).exists()
if not filename_exists:
return filename_base
return find_available_name(
website_content_qset,
filename_base,
"filename",
max_length=CONTENT_FILENAME_MAX_LEN,
)
def get_valid_new_slug(slug_base: str, path: str) -> str:
"""
Given a slug to act as a base/prefix, returns a slug that will satisfy unique constraints,
adding/incrementing a numerical suffix as necessary.
"""
starter_qset = WebsiteStarter.objects.exclude(path=path)
slug_exists = starter_qset.filter(slug=slug_base).exists()
if not slug_exists:
return slug_base
return find_available_name(starter_qset, slug_base, "slug", max_length=30)
def find_available_name(
website_content_qset: QuerySet,
initial_filename_base: str,
fieldname: str,
max_length: Optional[int] = CONTENT_FILENAME_MAX_LEN,
extension: Optional[str] = None,
) -> str:
"""
Returns a filename with the lowest possible suffix given some base filename. If the applied suffix
makes the filename longer than the filename max length, characters are removed from the
right of the filename to make room.
EXAMPLES:
initial_filename_base = "myfile"
Existing filenames = "myfile"
Return value = "myfile1"
initial_filename_base = "myfile"
Existing filenames = "myfile", "myfile1" through "myfile5"
Return value = "myfile6"
initial_filename_base = "abcdefghijklmnopqrstuvwxyz" (26 characters, assuming 26 character max)
Existing filenames = "abcdefghijklmnopqrstuvwxyz"
Return value = "abcdefghijklmnopqrstuvwxy1" # pragma: allowlist secret
initial_filename_base = "abcdefghijklmnopqrstuvwxy" (25 characters long, assuming 26 character max)
Existing filenames = "abc...y", "abc...y1" through "abc...y9"
Return value = "abcdefghijklmnopqrstuvwx10" # pragma: allowlist secret
"""
# Keeps track of the number of characters that must be cut from the filename to be less than
# the filename max length when the suffix is applied.
chars_to_truncate = 0 if len(initial_filename_base) < max_length else 1
# Any query for suffixed filenames could come up empty. The minimum suffix will be added to
# the filename in that case.
current_min_suffix = 2
if extension is None:
extension = ""
while chars_to_truncate < len(initial_filename_base):
name_base = initial_filename_base[
0 : len(initial_filename_base) - chars_to_truncate
]
kwargs = {
f"{fieldname}__regex": r"{name_base}[0-9]+{extension}".format(
name_base=name_base, extension=extension
)
}
# Find names that match the namebase and have a numerical suffix, then find the max suffix
existing_names = website_content_qset.filter(**kwargs).values_list(
fieldname, flat=True
)
if extension:
existing_names = [os.path.splitext(name)[0] for name in existing_names]
max_suffix = max_or_none(
int(filename[len(name_base) :]) for filename in existing_names
)
if max_suffix is None:
return f"{''.join([name_base, str(current_min_suffix)])}{extension}"
else:
next_suffix = max_suffix + 1
candidate_name = "".join([name_base, str(next_suffix), extension])
# If the next suffix adds a digit and causes the filename to exceed the character limit,
# keep searching.
if len(candidate_name) <= max_length:
return candidate_name
# At this point, we know there are no suffixes left to add to this filename base that was tried,
# so we will need to remove characters from the end of that filename base to make room for a longer
# suffix.
chars_to_truncate = chars_to_truncate + 1
available_suffix_digits = max_length - (
len(initial_filename_base) - chars_to_truncate
)
# If there is space for 4 digits for the suffix, the minimum value it could be is 1000, or 10^3
current_min_suffix = 10 ** (available_suffix_digits - 1)
def fetch_website(filter_value: str) -> Website:
"""
Attempts to fetch a Website based on several properties
"""
if len(filter_value) in {32, 36}:
try:
parsed_uuid = UUID(filter_value, version=4)
website = Website.objects.filter(uuid=parsed_uuid).first()
if website is not None:
return website
except ValueError:
pass
website_results = Website.objects.filter(
Q(name__iexact=filter_value) | Q(title__iexact=filter_value)
).all()
if len(website_results) == 0:
raise Website.DoesNotExist(
f"Could not find a Website with a matching uuid, name, or title ('{filter_value}')"
)
if len(website_results) == 1:
return website_results[0]
sorted_results = sorted(
website_results, key=lambda _website: 1 if _website.name == filter_value else 2
)
    return sorted_results[0]
def is_ocw_site(website: Website) -> bool:
"""Return true if the site is an OCW site"""
return website.starter.slug == settings.OCW_IMPORT_STARTER_SLUG
def update_youtube_thumbnail(website_id: str, metadata: Dict, overwrite=False):
""" Assign a youtube thumbnail url if appropriate to a website's metadata"""
website = Website.objects.get(uuid=website_id)
if is_ocw_site(website):
youtube_id = get_dict_field(metadata, settings.YT_FIELD_ID)
if youtube_id and (
not get_dict_field(metadata, settings.YT_FIELD_THUMBNAIL) or overwrite
):
set_dict_field(
metadata,
settings.YT_FIELD_THUMBNAIL,
f"https://img.youtube.com/vi/{youtube_id}/0.jpg",
)
def unassigned_youtube_ids(website: Website) -> List[WebsiteContent]:
"""Return a list of WebsiteContent objects for videos with unassigned youtube ids"""
if not is_ocw_site(website):
return []
query_id_field = f"metadata__{'__'.join(settings.YT_FIELD_ID.split('.'))}"
return WebsiteContent.objects.filter(
Q(website=website)
& Q(metadata__resourcetype=RESOURCE_TYPE_VIDEO)
& (Q(**{query_id_field: None}) | Q(**{query_id_field: ""}))
)
def mail_website_admins_on_publish(website: Website, version: str, success: bool):
    """Send a success or failure message to site admins when a site version publish completes"""
site_admins = list(website.admin_group.user_set.all()) + [website.owner]
if not success:
log.error("%s version build failed for site %s", version, website.name)
message = (
PreviewOrPublishSuccessMessage if success else PreviewOrPublishFailureMessage
)
with get_message_sender(message) as sender:
for user in site_admins:
sender.build_and_send_message(
user,
{
"site": {
"title": website.title,
"url": website.get_url(version),
},
"version": version,
},
)
def detect_mime_type(uploaded_file: UploadedFile) -> str:
"""Detect mime type of an uploaded file"""
magic = Magic(mime=True)
chunk = next(uploaded_file.chunks(chunk_size=2048))
return magic.from_buffer(chunk)
|
"""
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
_util - Internal utilities library
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
import base64
import functools
# pylint: disable=E0401 # Pylint might not have these in our path
import requests
import urllib3
from urllib3.exceptions import InsecureRequestWarning
from ._version import _TITLE, _VERSION
from ._result import Result
urllib3.disable_warnings(InsecureRequestWarning)
# Restrict requests to only allowed HTTP methods
_ALLOWED_METHODS = ['GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'UPDATE']
# Default user-agent string
_USER_AGENT = f"{_TITLE}/{str(_VERSION)}"
def validate_payload(validator: dict, params: dict, required: list = None) -> bool:
"""
Validates parameters and body payloads sent to the API.
"""
# Repurposed with permission from https://github.com/yaleman/crowdstrike_api
# __
# ( (\
# \ =\
# __\_ `--\
# (____))( \----
# (____)) _ Thanks
# (____)) James!
# (____))____/----
if required:
for key in required:
if key not in params:
raise ValueError(f"Argument {key} must be specified.")
for key in params:
if key not in validator:
raise ValueError(f"{key} is not a valid argument.")
if not isinstance(params[key], validator[key]):
raise TypeError(f"{key} is not the valid type. Should be: {validator[key]}, was {type(params[key])}")
return True
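# Example calls (hypothetical validator and payload):
#     validate_payload({"limit": int, "filter": str}, {"limit": 10}, required=["limit"])  # -> True
#     validate_payload({"limit": int}, {"limit": "10"})  # -> raises TypeError (str is not int)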
def parse_id_list(id_list) -> str:
"""
Converts a list of IDs to a comma-delimited string.
"""
if isinstance(id_list, list):
returned = ""
for string in id_list:
if len(returned) > 1:
returned += ","
returned += str(string)
else:
returned = id_list
return returned
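# Example: parse_id_list(["id1", "id2", "id3"]) -> "id1,id2,id3";
# a string that is already comma-delimited is returned unchanged.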
def generate_b64cred(client_id: str, client_secret: str) -> str:
"""
base64 encodes passed client_id and client_secret for authorization headers.
"""
cred = "{}:{}".format(client_id, client_secret)
b64_byt = base64.b64encode(cred.encode("ascii"))
encoded = b64_byt.decode("ascii")
return encoded
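# Example: generate_b64cred("abc", "123") -> "YWJjOjEyMw==" (base64 of "abc:123").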
def force_default(defaults: list, default_types: list = None):
"""
This function forces default values and is designed to decorate other functions.
defaults = list of values to default
default_types = list of types to default the values to
    Example: @force_default(defaults=["parameters"], default_types=["dict"])
"""
if not default_types:
default_types = []
def wrapper(func):
"""Inner wrapper."""
@functools.wraps(func)
def factory(*args, **kwargs):
"""
This method is a factory and runs through arguments passed to the called function,
setting defaults on values within the **kwargs dictionary when necessary
as specified in our "defaults" list that is passed to the parent wrapper.
"""
element_count = 0 # Tracker so we can retrieve matching data types
# Loop through every element specified in our defaults list
for element in defaults:
if element in kwargs:
# It exists but it's a NoneType
if kwargs.get(element) is None:
kwargs[element] = get_default(default_types, element_count)
else:
# Not present whatsoever
kwargs[element] = get_default(default_types, element_count)
# Increment our tracker for our sibling default_types list
element_count += 1
return func(*args, **kwargs)
return factory
return wrapper
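# Usage sketch (hypothetical decorated function, illustrating the decorator only):
#     @force_default(defaults=["parameters"], default_types=["dict"])
#     def list_devices(*args, parameters: dict = None, **kwargs):
#         ...  # `parameters` arrives as {} when omitted or explicitly passed as None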
def service_request(caller: object = None, **kwargs) -> object: # May return dict or object datatypes
"""
Checks for token expiration, refreshing if possible and then performs the request.
"""
if caller:
try:
if caller.auth_object:
if caller.auth_object.token_expired():
auth_response = caller.auth_object.token()
if auth_response["status_code"] == 201:
caller.headers['Authorization'] = 'Bearer {}'.format(auth_response['body']['access_token'])
else:
caller.headers['Authorization'] = 'Bearer '
except AttributeError:
pass
try:
proxy = caller.proxy
except AttributeError:
proxy = None
try:
timeout = caller.timeout
except AttributeError:
timeout = None
returned = perform_request(proxy=proxy, timeout=timeout, **kwargs)
return returned
@force_default(defaults=["headers"], default_types=["dict"])
def perform_request(endpoint: str = "", headers: dict = None, **kwargs) -> object: # May return dict or object datatypes
"""
Leverages the requests library to perform the requested CrowdStrike OAuth2 API operation.
method: str - HTTP method to use when communicating with the API
- Example: GET, POST, PATCH, DELETE or UPDATE
endpoint: str - API endpoint, do not include the URL base
- Example: /oauth2/revoke
headers: dict - HTTP headers to send to the API
- Example: {"AdditionalHeader": "AdditionalValue"}
params: dict - HTTP query string parameters to send to the API
- Example: {"limit": 1, "sort": "state.asc"}
body: dict - HTTP body payload to send to the API
- Example: {"ids": "123456789abcdefg,987654321zyxwvutsr"}
verify: bool - Enable / Disable SSL certificate checks
- Example: True
data - Encoded data to send to the API
- Example: PAYLOAD = open(FILENAME, 'rb').read()
files: list - List of files to upload
- Example: [('file',('testfile2.jpg',open('testfile2.jpg','rb'),'image/jpeg'))]
body_validator: dict - Dictionary containing payload to be validated for the requested operation (key / datatype)
- Example: { "limit": int, "offset": int, "filter": str}
body_required: list - List of payload parameters required by the requested operation
- Example: ["ids"]
proxy: dict - Dictionary containing a list of proxies to use for requests
- Example: {"https": "https://myproxy.com:4000", "http": "http://myhttpproxy:80"}
timeout: float or tuple
Float representing the global timeout for requests or a tuple containing the connect / read timeouts.
- Example: 30
- Example: (5.05, 25)
"""
method = kwargs.get("method", "GET")
body = kwargs.get("body", None)
body_validator = kwargs.get("body_validator", None)
perform = True
if method.upper() in _ALLOWED_METHODS:
# Validate parameters
# 05.21.21/JSH - Param validation is now handled by the updated args_to_params method
# Validate body payload
if body_validator:
try:
validate_payload(body_validator, body, kwargs.get("body_required", None))
except ValueError as err:
returned = generate_error_result(message=f"{str(err)}")
perform = False
except TypeError as err:
returned = generate_error_result(message=f"{str(err)}")
perform = False
# Perform the request
if perform:
headers["User-Agent"] = _USER_AGENT # Force all requests to pass the User-Agent identifier
try:
response = requests.request(method.upper(), endpoint, params=kwargs.get("params", None),
headers=headers, json=kwargs.get("body", None), data=kwargs.get("data", None),
files=kwargs.get("files", []), verify=kwargs.get("verify", True),
proxies=kwargs.get("proxy", None), timeout=kwargs.get("timeout", None)
)
if response.headers.get('content-type') == "application/json":
returned = Result()(response.status_code, response.headers, response.json())
else:
returned = response.content
except Exception as err: # pylint: disable=W0703 # General catch-all for anything coming out of requests
returned = generate_error_result(message=f"{str(err)}")
else:
returned = generate_error_result(message="Invalid API operation specified.", code=405)
return returned
def generate_error_result(message: str = "An error has occurred. Check your payloads and try again.", code: int = 500) -> dict:
"""
Normalized error messaging handler.
"""
return Result()(status_code=code, headers={}, body={"errors": [{"message": f"{message}"}], "resources": []})
def generate_ok_result(message: str = "Request returned with success", code: int = 200) -> dict:
"""
Normalized OK messaging handler.
"""
return Result()(status_code=code, headers={}, body={"message": message, "resources": []})
def get_default(types: list, position: int):
"""
I determine the requested default data type and return it.
"""
default_value_names = ["list", "str", "int", "dict", "bool"]
default_value_types = [[], "", 0, {}, False]
value_count = 0
retval = {} # Default to dictionary data type as that is our most often used
for type_ in default_value_names:
try:
if type_ in types[position]:
retval = default_value_types[value_count]
except IndexError:
# Data type not specified, fall back to dictionary
pass
value_count += 1
return retval
def calc_url_from_args(target_url: str, passed_args: dict) -> str:
"""
This function reviews arguments passed to the Uber class command method and updates the target URL accordingly.
"""
if "ids" in passed_args:
id_list = str(parse_id_list(passed_args['ids'])).replace(",", "&ids=")
target_url = target_url.format(id_list)
if "action_name" in passed_args:
delim = "&" if "?" in target_url else "?"
# Additional action_name restrictions?
        target_url = f"{target_url}{delim}action_name={str(passed_args['action_name'])}"
if "partition" in passed_args:
target_url = target_url.format(str(passed_args['partition']))
if "file_name" in passed_args:
delim = "&" if "?" in target_url else "?"
        target_url = f"{target_url}{delim}file_name={str(passed_args['file_name'])}"
return target_url
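# Example (hypothetical Uber-class URL and arguments):
#     calc_url_from_args("/devices/entities/devices/v1?ids={}", {"ids": ["a", "b"], "action_name": "contain"})
#     -> "/devices/entities/devices/v1?ids=a&ids=b&action_name=contain"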
def args_to_params(payload: dict, passed_arguments: dict, endpoints: list, epname: str) -> dict:
"""
This function reviews arguments passed to the function against arguments accepted by the endpoint.
If a valid argument is passed, it is added and returned as part of the payload dictionary.
This function will convert passed comma-delimited strings to list data types when necessary.
"""
for arg in passed_arguments:
eps = [ep[5] for ep in endpoints if epname in ep[0]][0]
try:
argument = [param for param in eps if param["name"] == arg][0]
if argument:
arg_name = argument["name"]
if argument["type"] == "array":
if isinstance(passed_arguments[arg_name], (str)):
passed_arguments[arg_name] = passed_arguments[arg_name].split(",")
# More data type validation can go here
payload[arg_name] = passed_arguments[arg_name]
except IndexError:
# Unrecognized argument
pass
return payload
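# Sketch (hypothetical endpoint metadata): if operation "GetDevices" defines a parameter named
# "ids" of type "array", then args_to_params({}, {"ids": "a,b"}, endpoints, "GetDevices")
# returns {"ids": ["a", "b"]}; keyword arguments the endpoint does not define are ignored.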
def process_service_request(calling_object: object,
endpoints: list,
operation_id: str,
**kwargs
):
"""
Performs a request originating from a service class module.
Calculates the target_url based upon the provided operation ID and endpoint list.
PARAMETERS:
endpoints: list - List of service class endpoints, defined as Endpoints in a service class. [required]
operation_id: The name of the operation ID. Normally this is also the function name from the service class. [required]
method: HTTP method to execute. GET, POST, PATCH, DELETE, PUT accepted. Defaults to GET.
keywords: Dictionary of kwargs that were passed to the function within the service class.
params: Dictionary of parameters passed to the service class function.
headers: Dictionary of headers passed to and calculated by the service class function.
body: Dictionary representing the body payload passed to the service class function.
data: Dictionary representing the data payload passed to the service class function.
files: List of files to be uploaded.
"""
# ID replacement happening at the end of this statement planned for removal in v0.5.6+
# (after all classes have been updated to no longer need it and it has been removed from the _endpoints module)
target_url = f"{calling_object.base_url}{[ep[2] for ep in endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
# Retrieve our keyword arguments
passed_keywords = kwargs.get("keywords", None)
passed_params = kwargs.get("params", None)
parameter_payload = None
if passed_keywords or passed_params:
parameter_payload = args_to_params(passed_params, passed_keywords, endpoints, operation_id)
passed_headers = kwargs.get("headers", None) if kwargs.get("headers", None) else calling_object.headers
new_keywords = {
"caller": calling_object,
"method": kwargs.get("method", "GET"), # Default to GET.
"endpoint": target_url,
"verify": calling_object.ssl_verify,
"headers": passed_headers,
"params": parameter_payload,
"body": kwargs.get("body", None),
"data": kwargs.get("data", None),
"files": kwargs.get("files", None)
}
return service_request(**new_keywords)
| """
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
_util - Internal utilities library
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
import base64
import functools
# pylint: disable=E0401 # Pylint might not have these in our path
import requests
import urllib3
from urllib3.exceptions import InsecureRequestWarning
from ._version import _TITLE, _VERSION
from ._result import Result
urllib3.disable_warnings(InsecureRequestWarning)
# Restrict requests to only allowed HTTP methods
_ALLOWED_METHODS = ['GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'UPDATE']
# Default user-agent string
_USER_AGENT = f"{_TITLE}/{str(_VERSION)}"
def validate_payload(validator: dict, params: dict, required: list = None) -> bool:
"""
Validates parameters and body payloads sent to the API.
"""
# Repurposed with permission from https://github.com/yaleman/crowdstrike_api
# __
# ( (\
# \ =\
# __\_ `--\
# (____))( \----
# (____)) _ Thanks
# (____)) James!
# (____))____/----
if required:
for key in required:
if key not in params:
raise ValueError(f"Argument {key} must be specified.")
for key in params:
if key not in validator:
raise ValueError(f"{key} is not a valid argument.")
if not isinstance(params[key], validator[key]):
raise TypeError(f"{key} is not the valid type. Should be: {validator[key]}, was {type(params[key])}")
return True
def parse_id_list(id_list) -> str:
"""
Converts a list of IDs to a comma-delimited string.
"""
if isinstance(id_list, list):
returned = ""
for string in id_list:
if len(returned) > 1:
returned += ","
returned += str(string)
else:
returned = id_list
return returned
def generate_b64cred(client_id: str, client_secret: str) -> str:
"""
base64 encodes passed client_id and client_secret for authorization headers.
"""
cred = "{}:{}".format(client_id, client_secret)
b64_byt = base64.b64encode(cred.encode("ascii"))
encoded = b64_byt.decode("ascii")
return encoded
def force_default(defaults: list, default_types: list = None):
"""
This function forces default values and is designed to decorate other functions.
defaults = list of values to default
default_types = list of types to default the values to
    Example: @force_default(defaults=["parameters"], default_types=["dict"])
"""
if not default_types:
default_types = []
def wrapper(func):
"""Inner wrapper."""
@functools.wraps(func)
def factory(*args, **kwargs):
"""
This method is a factory and runs through arguments passed to the called function,
setting defaults on values within the **kwargs dictionary when necessary
as specified in our "defaults" list that is passed to the parent wrapper.
"""
element_count = 0 # Tracker so we can retrieve matching data types
# Loop through every element specified in our defaults list
for element in defaults:
if element in kwargs:
# It exists but it's a NoneType
if kwargs.get(element) is None:
kwargs[element] = get_default(default_types, element_count)
else:
# Not present whatsoever
kwargs[element] = get_default(default_types, element_count)
# Increment our tracker for our sibling default_types list
element_count += 1
return func(*args, **kwargs)
return factory
return wrapper
def service_request(caller: object = None, **kwargs) -> object: # May return dict or object datatypes
"""
Checks for token expiration, refreshing if possible and then performs the request.
"""
if caller:
try:
if caller.auth_object:
if caller.auth_object.token_expired():
auth_response = caller.auth_object.token()
if auth_response["status_code"] == 201:
caller.headers['Authorization'] = 'Bearer {}'.format(auth_response['body']['access_token'])
else:
caller.headers['Authorization'] = 'Bearer '
except AttributeError:
pass
try:
proxy = caller.proxy
except AttributeError:
proxy = None
try:
timeout = caller.timeout
except AttributeError:
timeout = None
returned = perform_request(proxy=proxy, timeout=timeout, **kwargs)
return returned
@force_default(defaults=["headers"], default_types=["dict"])
def perform_request(endpoint: str = "", headers: dict = None, **kwargs) -> object: # May return dict or object datatypes
"""
Leverages the requests library to perform the requested CrowdStrike OAuth2 API operation.
method: str - HTTP method to use when communicating with the API
- Example: GET, POST, PATCH, DELETE or UPDATE
endpoint: str - API endpoint, do not include the URL base
- Example: /oauth2/revoke
headers: dict - HTTP headers to send to the API
- Example: {"AdditionalHeader": "AdditionalValue"}
params: dict - HTTP query string parameters to send to the API
- Example: {"limit": 1, "sort": "state.asc"}
body: dict - HTTP body payload to send to the API
- Example: {"ids": "123456789abcdefg,987654321zyxwvutsr"}
verify: bool - Enable / Disable SSL certificate checks
- Example: True
data - Encoded data to send to the API
- Example: PAYLOAD = open(FILENAME, 'rb').read()
files: list - List of files to upload
- Example: [('file',('testfile2.jpg',open('testfile2.jpg','rb'),'image/jpeg'))]
body_validator: dict - Dictionary containing payload to be validated for the requested operation (key / datatype)
- Example: { "limit": int, "offset": int, "filter": str}
body_required: list - List of payload parameters required by the requested operation
- Example: ["ids"]
proxy: dict - Dictionary containing a list of proxies to use for requests
- Example: {"https": "https://myproxy.com:4000", "http": "http://myhttpproxy:80"}
timeout: float or tuple
Float representing the global timeout for requests or a tuple containing the connect / read timeouts.
- Example: 30
- Example: (5.05, 25)
"""
method = kwargs.get("method", "GET")
body = kwargs.get("body", None)
body_validator = kwargs.get("body_validator", None)
perform = True
if method.upper() in _ALLOWED_METHODS:
# Validate parameters
# 05.21.21/JSH - Param validation is now handled by the updated args_to_params method
# Validate body payload
if body_validator:
try:
validate_payload(body_validator, body, kwargs.get("body_required", None))
except ValueError as err:
returned = generate_error_result(message=f"{str(err)}")
perform = False
except TypeError as err:
returned = generate_error_result(message=f"{str(err)}")
perform = False
# Perform the request
if perform:
headers["User-Agent"] = _USER_AGENT # Force all requests to pass the User-Agent identifier
try:
response = requests.request(method.upper(), endpoint, params=kwargs.get("params", None),
headers=headers, json=kwargs.get("body", None), data=kwargs.get("data", None),
files=kwargs.get("files", []), verify=kwargs.get("verify", True),
proxies=kwargs.get("proxy", None), timeout=kwargs.get("timeout", None)
)
if response.headers.get('content-type') == "application/json":
returned = Result()(response.status_code, response.headers, response.json())
else:
returned = response.content
except Exception as err: # pylint: disable=W0703 # General catch-all for anything coming out of requests
returned = generate_error_result(message=f"{str(err)}")
else:
returned = generate_error_result(message="Invalid API operation specified.", code=405)
return returned
def generate_error_result(message: str = "An error has occurred. Check your payloads and try again.", code: int = 500) -> dict:
"""
Normalized error messaging handler.
"""
return Result()(status_code=code, headers={}, body={"errors": [{"message": f"{message}"}], "resources": []})
def generate_ok_result(message: str = "Request returned with success", code: int = 200) -> dict:
"""
Normalized OK messaging handler.
"""
return Result()(status_code=code, headers={}, body={"message": message, "resources": []})
def get_default(types: list, position: int):
"""
I determine the requested default data type and return it.
"""
default_value_names = ["list", "str", "int", "dict", "bool"]
default_value_types = [[], "", 0, {}, False]
value_count = 0
retval = {} # Default to dictionary data type as that is our most often used
for type_ in default_value_names:
try:
if type_ in types[position]:
retval = default_value_types[value_count]
except IndexError:
# Data type not specified, fall back to dictionary
pass
value_count += 1
return retval
def calc_url_from_args(target_url: str, passed_args: dict) -> str:
"""
This function reviews arguments passed to the Uber class command method and updates the target URL accordingly.
"""
if "ids" in passed_args:
id_list = str(parse_id_list(passed_args['ids'])).replace(",", "&ids=")
target_url = target_url.format(id_list)
if "action_name" in passed_args:
delim = "&" if "?" in target_url else "?"
# Additional action_name restrictions?
target_url = f"{target_url}{delim}action_name={str(passed_args['action_name'])}"
if "partition" in passed_args:
target_url = target_url.format(str(passed_args['partition']))
if "file_name" in passed_args:
delim = "&" if "?" in target_url else "?"
target_url = f"{target_url}{delim}file_name={str(passed_args['file_name'])}"
return target_url
def args_to_params(payload: dict, passed_arguments: dict, endpoints: list, epname: str) -> dict:
"""
This function reviews arguments passed to the function against arguments accepted by the endpoint.
If a valid argument is passed, it is added and returned as part of the payload dictionary.
This function will convert passed comma-delimited strings to list data types when necessary.
"""
for arg in passed_arguments:
eps = [ep[5] for ep in endpoints if epname in ep[0]][0]
try:
argument = [param for param in eps if param["name"] == arg][0]
if argument:
arg_name = argument["name"]
if argument["type"] == "array":
if isinstance(passed_arguments[arg_name], (str)):
passed_arguments[arg_name] = passed_arguments[arg_name].split(",")
# More data type validation can go here
payload[arg_name] = passed_arguments[arg_name]
except IndexError:
# Unrecognized argument
pass
return payload
def process_service_request(calling_object: object,
endpoints: list,
operation_id: str,
**kwargs
):
"""
Performs a request originating from a service class module.
Calculates the target_url based upon the provided operation ID and endpoint list.
PARAMETERS:
endpoints: list - List of service class endpoints, defined as Endpoints in a service class. [required]
operation_id: The name of the operation ID. Normally this is also the function name from the service class. [required]
method: HTTP method to execute. GET, POST, PATCH, DELETE, PUT accepted. Defaults to GET.
keywords: Dictionary of kwargs that were passed to the function within the service class.
params: Dictionary of parameters passed to the service class function.
headers: Dictionary of headers passed to and calculated by the service class function.
body: Dictionary representing the body payload passed to the service class function.
data: Dictionary representing the data payload passed to the service class function.
files: List of files to be uploaded.
"""
# ID replacement happening at the end of this statement planned for removal in v0.5.6+
# (after all classes have been updated to no longer need it and it has been removed from the _endpoints module)
target_url = f"{calling_object.base_url}{[ep[2] for ep in endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
# Retrieve our keyword arguments
passed_keywords = kwargs.get("keywords", None)
passed_params = kwargs.get("params", None)
parameter_payload = None
if passed_keywords or passed_params:
parameter_payload = args_to_params(passed_params, passed_keywords, endpoints, operation_id)
passed_headers = kwargs.get("headers", None) if kwargs.get("headers", None) else calling_object.headers
new_keywords = {
"caller": calling_object,
"method": kwargs.get("method", "GET"), # Default to GET.
"endpoint": target_url,
"verify": calling_object.ssl_verify,
"headers": passed_headers,
"params": parameter_payload,
"body": kwargs.get("body", None),
"data": kwargs.get("data", None),
"files": kwargs.get("files", None)
}
return service_request(**new_keywords)
|
"""
Base settings to build other settings files upon.
"""
import environ
from django.contrib import admin
ROOT_DIR = environ.Path(__file__) - 3 # (bootcamp/config/settings/base.py - 3 = bootcamp/)
APPS_DIR = ROOT_DIR.path('bootcamp')
env = environ.Env()
env.read_env(str(ROOT_DIR.path('.env')))
# READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
# if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
# env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# DATABASES = {
# 'default': env.db('DATABASE_URL'),
# }
# DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.admin',
'django.forms',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'sorl.thumbnail',
'allauth',
'allauth.account',
'allauth.socialaccount',
# 'allauth.socialaccount.providers.amazon',
# 'allauth.socialaccount.providers.github',
# 'allauth.socialaccount.providers.google',
# 'allauth.socialaccount.providers.linkedin',
# 'allauth.socialaccount.providers.slack',
'channels',
'django_comments',
'graphene_django',
'markdownx',
'taggit',
]
LOCAL_APPS = [
'bootcamp.users.apps.UsersConfig',
# Your stuff: custom apps go here
'bootcamp.articles.apps.ArticlesConfig',
'bootcamp.messager.apps.MessagerConfig',
'bootcamp.news.apps.NewsConfig',
'bootcamp.notifications.apps.NotificationsConfig',
'bootcamp.qa.apps.QaConfig',
'bootcamp.search.apps.SearchConfig'
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
FORM_RENDERER = 'django.forms.renderers.TemplatesSetting'
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'bootcamp.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'news:list'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
# ADMIN_URL = r'^admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
# ADMINS = [
# ("""Vitor Freitas""", 'vitor-freitas@example.com'),
# ]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
# MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'bootcamp.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'bootcamp.users.adapters.SocialAccountAdapter'
# Your stuff...
# ------------------------------------------------------------------------------
# REDIS setup
REDIS_URL = f'{env("REDIS_URL", default="redis://127.0.0.1:6379")}/{0}'
# django-channels setup
ASGI_APPLICATION = 'config.routing.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [REDIS_URL, ],
},
}
}
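# Usage sketch (not a setting; a consumer or task elsewhere in the project would use the layer
# roughly like this, assuming the standard django-channels API, with a hypothetical group and
# message type that a consumer would have to handle):
#     from asgiref.sync import async_to_sync
#     from channels.layers import get_channel_layer
#     async_to_sync(get_channel_layer().group_send)("notifications", {"type": "notify", "payload": {}})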
# GraphQL settings
GRAPHENE = {
'SCHEMA': 'bootcamp.schema.schema'
}
| """
Base settings to build other settings files upon.
"""
import environ
from django.contrib import admin
ROOT_DIR = environ.Path(__file__) - 3 # (bootcamp/config/settings/base.py - 3 = bootcamp/)
APPS_DIR = ROOT_DIR.path('bootcamp')
env = environ.Env()
env.read_env(str(ROOT_DIR.path('.env')))
# READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
# if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
# env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# DATABASES = {
# 'default': env.db('DATABASE_URL'),
# }
# DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.admin',
'django.forms',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'sorl.thumbnail',
'allauth',
'allauth.account',
'allauth.socialaccount',
# 'allauth.socialaccount.providers.amazon',
# 'allauth.socialaccount.providers.github',
# 'allauth.socialaccount.providers.google',
# 'allauth.socialaccount.providers.linkedin',
# 'allauth.socialaccount.providers.slack',
'channels',
'django_comments',
'graphene_django',
'markdownx',
'taggit',
]
LOCAL_APPS = [
'bootcamp.users.apps.UsersConfig',
# Your stuff: custom apps go here
'bootcamp.articles.apps.ArticlesConfig',
'bootcamp.messager.apps.MessagerConfig',
'bootcamp.news.apps.NewsConfig',
'bootcamp.notifications.apps.NotificationsConfig',
'bootcamp.qa.apps.QaConfig',
'bootcamp.search.apps.SearchConfig'
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
FORM_RENDERER = 'django.forms.renderers.TemplatesSetting'
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'bootcamp.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'news:list'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
# ADMIN_URL = r'^admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
# ADMINS = [
# ("""Vitor Freitas""", 'vitor-freitas@example.com'),
# ]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
# MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'bootcamp.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'bootcamp.users.adapters.SocialAccountAdapter'
# Your stuff...
# ------------------------------------------------------------------------------
# REDIS setup
REDIS_URL = f'{env("REDIS_URL", default="redis://127.0.0.1:6379")}/{0}'
# django-channels setup
ASGI_APPLICATION = 'config.routing.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [REDIS_URL, ],
},
}
}
# GraphQL settings
GRAPHENE = {
'SCHEMA': 'bootcamp.schema.schema'
}
|
import os
import glob
import numpy as np
import nibabel as nb
import scipy.io as sio
from scipy.stats import pearsonr
PH_SERVER_ROOT = os.environ.get('PH_SERVER_ROOT')
def zscore(data, axis):
data -= data.mean(axis=axis, keepdims=True)
data /= data.std(axis=axis, keepdims=True)
return np.nan_to_num(data, copy=False)
def correlation(matrix1, matrix2):
d1 = matrix1.shape[-1]
d2 = matrix2.shape[-1]
assert d1 == d2
assert matrix1.ndim <= 2
assert matrix2.ndim <= 2
matrix1 = zscore(matrix1.astype(float), matrix1.ndim - 1) / np.sqrt(d1)
matrix2 = zscore(matrix2.astype(float), matrix2.ndim - 1) / np.sqrt(d2)
if matrix1.ndim >= matrix2.ndim:
return np.dot(matrix1, matrix2.T)
else:
return np.dot(matrix2, matrix1.T)
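# Sanity check (informal): for two 1-D arrays x and y of equal length, correlation(x, y) matches
# scipy.stats.pearsonr(x, y)[0] up to floating point error, since each vector is z-scored and
# scaled by 1/sqrt(d) before the dot product.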
def get_motion_params(file, pipeline = 'cpac'):
data = np.genfromtxt(file).T
if pipeline == 'abcd':
data = np.vstack((data[3:,:],data[:3,:]))
data = np.vstack((data[2,:]*180/np.pi,
data[0,:]*180/np.pi,
-data[1,:]*180/np.pi,
data[5,:],
data[3,:],
-data[4,:]))
else:
data = np.vstack((data[2,:]*180/np.pi,
data[0,:]*180/np.pi,
-data[1,:]*180/np.pi,
data[5,:],
data[3,:],
-data[4,:]))
return data
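# Note (interpretation of the code above, not taken from either pipeline's documentation): both
# branches reorder the six .par columns and convert rotations from radians to degrees so that
# ABCD (HCP-style) and C-PAC motion estimates can be compared column by column; the 'abcd'
# branch additionally swaps the first three and last three parameter rows before reordering.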
path1 = f"{os.environ.get('PH_SERVER_WORKING_ROOT')}/CPAC_XCP/ABCD/preprocessed/data"
path2 = f"{os.environ.get('DATA_INPUT_DIR')}/cpac_abcd"
sub_list = list(range(25427,25457))
sub_list.remove(25430)
sub_list.remove(25448)
var_list = ['anat mask', 'CSF', 'GM', 'WM', 'func mask', 'motion',
'anat-mni abcd', 'anat-mni cpac', 'func-mni abcd', 'func-mni cpac',
'func-t1 abcd', 'func-t1 cpac', 'anat-mni', 'func-mni', 'func-t1']
if 'motion' in var_list:
motion_index = var_list.index('motion')
corrs = np.zeros((len(sub_list), len(var_list)+5))
for num_sub, sub in enumerate(sub_list):
sub = '00'+str(sub)
path_list1 = [path1+'/sub-'+sub+'/ses-1/files/T1w/brainmask_fs.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_0.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_1.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_2.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/task-rest01/brainmask_fs.2.0.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/task-rest01/MotionCorrection/task-rest01_mc.par',
# path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/DCANBOLDProc_v4.0.0/FD.mat',
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/T1w_restore_brain.nii.gz', # ABCD anat to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-template_desc-brain_T1w.nii.gz', # C-PAC anat to standard
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/task-rest01_mean.nii.gz', # ABCD func to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/func/sub-'+sub+'a_ses-1_task-rest_run-1_space-template_desc-mean_bold.nii.gz', # C-PAC func to standard
path1+'/sub-'+sub+'/ses-1/files/task-rest01/Scout2T1w_masked.nii.gz', # ABCD func in T1 space
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/func_to_anat_FLIRT_*/_*/linear_func_to_anat/*flirt.nii.gz')[0], # C-PAC func in T1 space
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/T1w_restore_brain.nii.gz', # ABCD anat to standard
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/task-rest01_mean.nii.gz', # ABCD func to standard
path1+'/sub-'+sub+'/ses-1/files/task-rest01/Scout2T1w_masked.nii.gz'] # ABCD func in T1 space
path_list2 = [path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-T1w_desc-brain_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-CSF_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-GM_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-WM_mask.nii.gz',
path2+'/working/cpac_sub-'+sub+'a_ses-1/resample_anat_brain_mask_in_standard_125/wmparc_maths_fill_holes_maths_warp_warp_warp.nii.gz',
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/_*/*mcflirt_122/*par')[0],
# glob.glob(path2+'/sub-'+sub+'/output/*/sub-'+sub+ses+'_ses-1/frame_wise_displacement_power/*/FD.1D')[0], # TODO find FD, only max/rel disp
# Note: this template is from DCAN-HCP GitHub: https://github.com/DCAN-Labs/DCAN-HCP/tree/master/global/templates/MNI152_T1_1mm_brain.nii.gz
f'{PH_SERVER_ROOT}/freesurfer/DCAN-HCP/global/templates/MNI152_T1_1mm_brain.nii.gz', # ABCD anat template
f'{PH_SERVER_ROOT}/freesurfer/DCAN-HCP/global/templates/MNI152_T1_1mm_brain.nii.gz', # C-PAC anat template
# Note: this template is from FSL standard template distribution
'/usr/local/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz', # ABCD func template on Lisa
'/usr/local/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz', # C-PAC func template on Lisa
# '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz', # ABCD func template on Ned
# '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz', # C-PAC func template on Ned
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_acpc_dc_restore_brain.nii.gz', # ABCD T1
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/brain_extraction_*/*.nii.gz')[0], # C-PAC T1
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-template_desc-brain_T1w.nii.gz', # C-PAC anat to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/func/sub-'+sub+'a_ses-1_task-rest_run-1_space-template_desc-mean_bold.nii.gz', # C-PAC func to standard
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/func_to_anat_FLIRT_*/_*/linear_func_to_anat/*flirt.nii.gz')[0]] # C-PAC func in T1 space
for num_var, var in enumerate(var_list):
file1 = path_list1[num_var]
file2 = path_list2[num_var]
if '.nii.gz' in file1:
img1 = nb.load(file1)
data1 = img1.get_fdata()
# data1 = img1.get_data()
img2 = nb.load(file2)
data2 = img2.get_fdata()
# data2 = img2.get_data()
elif '.par' in file1:
data1 = get_motion_params(file1, 'abcd')
data2 = get_motion_params(file2)
elif '.mat' in file1:
data1 = sio.loadmat(file1)['FD']
data2 = np.expand_dims(np.loadtxt(file2)[1:], axis=1)
if var == 'motion':
motion_params = correlation(data1, data2)
corr = motion_params.diagonal()
elif isinstance(data1, np.ndarray) and data1.shape == data2.shape:
corr, _ = pearsonr(data1.flatten(), data2.flatten())
print(sub + ' ' + str(num_var) + ' ' + var)
print(corr)
if num_var < motion_index:
corrs[num_sub][num_var] = round(corr, 3)
elif num_var == motion_index:
corrs[num_sub][num_var:num_var+6] = corr
elif num_var > motion_index:
corrs[num_sub][num_var+5] = round(corr, 3)
print(corrs)
np.save(f'{os.environ.get("SCRIPT_DIR")}/abcd_corrs.npy', corrs) | import os
import glob
import numpy as np
import nibabel as nb
import os
import scipy.io as sio
from scipy.stats import pearsonr
PH_SERVER_ROOT = os.environ.get('PH_SERVER_ROOT')
def zscore(data, axis):
data -= data.mean(axis=axis, keepdims=True)
data /= data.std(axis=axis, keepdims=True)
return np.nan_to_num(data, copy=False)
def correlation(matrix1, matrix2):
d1 = matrix1.shape[-1]
d2 = matrix2.shape[-1]
assert d1 == d2
assert matrix1.ndim <= 2
assert matrix2.ndim <= 2
matrix1 = zscore(matrix1.astype(float), matrix1.ndim - 1) / np.sqrt(d1)
matrix2 = zscore(matrix2.astype(float), matrix2.ndim - 1) / np.sqrt(d2)
if matrix1.ndim >= matrix2.ndim:
return np.dot(matrix1, matrix2.T)
else:
return np.dot(matrix2, matrix1.T)
def get_motion_params(file, pipeline = 'cpac'):
data = np.genfromtxt(file).T
if pipeline == 'abcd':
data = np.vstack((data[3:,:],data[:3,:]))
data = np.vstack((data[2,:]*180/np.pi,
data[0,:]*180/np.pi,
-data[1,:]*180/np.pi,
data[5,:],
data[3,:],
-data[4,:]))
else:
data = np.vstack((data[2,:]*180/np.pi,
data[0,:]*180/np.pi,
-data[1,:]*180/np.pi,
data[5,:],
data[3,:],
-data[4,:]))
return data
path1 = f'{os.environ.get("PH_SERVER_WORKING_ROOT")}/CPAC_XCP/ABCD/preprocessed/data'
path2 = f'{os.environ.get("DATA_INPUT_DIR")}/cpac_abcd'
sub_list = list(range(25427,25457))
sub_list.remove(25430)
sub_list.remove(25448)
var_list = ['anat mask', 'CSF', 'GM', 'WM', 'func mask', 'motion',
'anat-mni abcd', 'anat-mni cpac', 'func-mni abcd', 'func-mni cpac',
'func-t1 abcd', 'func-t1 cpac', 'anat-mni', 'func-mni', 'func-t1']
if 'motion' in var_list:
motion_index = var_list.index('motion')
corrs = np.zeros((len(sub_list), len(var_list)+5))
for num_sub, sub in enumerate(sub_list):
sub = '00'+str(sub)
path_list1 = [path1+'/sub-'+sub+'/ses-1/files/T1w/brainmask_fs.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_0.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_1.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_2.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/task-rest01/brainmask_fs.2.0.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/task-rest01/MotionCorrection/task-rest01_mc.par',
# path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/DCANBOLDProc_v4.0.0/FD.mat',
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/T1w_restore_brain.nii.gz', # ABCD anat to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-template_desc-brain_T1w.nii.gz', # C-PAC anat to standard
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/task-rest01_mean.nii.gz', # ABCD func to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/func/sub-'+sub+'a_ses-1_task-rest_run-1_space-template_desc-mean_bold.nii.gz', # C-PAC func to standard
path1+'/sub-'+sub+'/ses-1/files/task-rest01/Scout2T1w_masked.nii.gz', # ABCD func in T1 space
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/func_to_anat_FLIRT_*/_*/linear_func_to_anat/*flirt.nii.gz')[0], # C-PAC func in T1 space
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/T1w_restore_brain.nii.gz', # ABCD anat to standard
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/task-rest01_mean.nii.gz', # ABCD func to standard
path1+'/sub-'+sub+'/ses-1/files/task-rest01/Scout2T1w_masked.nii.gz'] # ABCD func in T1 space
path_list2 = [path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-T1w_desc-brain_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-CSF_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-GM_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-WM_mask.nii.gz',
path2+'/working/cpac_sub-'+sub+'a_ses-1/resample_anat_brain_mask_in_standard_125/wmparc_maths_fill_holes_maths_warp_warp_warp.nii.gz',
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/_*/*mcflirt_122/*par')[0],
# glob.glob(path2+'/sub-'+sub+'/output/*/sub-'+sub+ses+'_ses-1/frame_wise_displacement_power/*/FD.1D')[0], # TODO find FD, only max/rel disp
# Note: this template is from DCAN-HCP GitHub: https://github.com/DCAN-Labs/DCAN-HCP/tree/master/global/templates/MNI152_T1_1mm_brain.nii.gz
f'{PH_SERVER_ROOT}/freesurfer/DCAN-HCP/global/templates/MNI152_T1_1mm_brain.nii.gz', # ABCD anat template
f'{PH_SERVER_ROOT}/freesurfer/DCAN-HCP/global/templates/MNI152_T1_1mm_brain.nii.gz', # C-PAC anat template
# Note: this template is from FSL standard template distribution
'/usr/local/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz', # ABCD func template on Lisa
'/usr/local/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz', # C-PAC func template on Lisa
# '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz', # ABCD func template on Ned
# '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz', # C-PAC func template on Ned
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_acpc_dc_restore_brain.nii.gz', # ABCD T1
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/brain_extraction_*/*.nii.gz')[0], # C-PAC T1
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-template_desc-brain_T1w.nii.gz', # C-PAC anat to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/func/sub-'+sub+'a_ses-1_task-rest_run-1_space-template_desc-mean_bold.nii.gz', # C-PAC func to standard
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/func_to_anat_FLIRT_*/_*/linear_func_to_anat/*flirt.nii.gz')[0]] # C-PAC func in T1 space
for num_var, var in enumerate(var_list):
file1 = path_list1[num_var]
file2 = path_list2[num_var]
if '.nii.gz' in file1:
img1 = nb.load(file1)
data1 = img1.get_fdata()
# data1 = img1.get_data()
img2 = nb.load(file2)
data2 = img2.get_fdata()
# data2 = img2.get_data()
elif '.par' in file1:
data1 = get_motion_params(file1, 'abcd')
data2 = get_motion_params(file2)
elif '.mat' in file1:
data1 = sio.loadmat(file1)['FD']
data2 = np.expand_dims(np.loadtxt(file2)[1:], axis=1)
if var == 'motion':
motion_params = correlation(data1, data2)
corr = motion_params.diagonal()
elif isinstance(data1, np.ndarray) and data1.shape == data2.shape:
corr, _ = pearsonr(data1.flatten(), data2.flatten())
print(sub + ' ' + str(num_var) + ' ' + var)
print(corr)
if num_var < motion_index:
corrs[num_sub][num_var] = round(corr, 3)
elif num_var == motion_index:
corrs[num_sub][num_var:num_var+6] = corr
elif num_var > motion_index:
corrs[num_sub][num_var+5] = round(corr, 3)
print(corrs)
np.save(f'{os.environ.get("SCRIPT_DIR")}/abcd_corrs.npy', corrs) |
"""Indy SDK verifier implementation."""
import json
import logging
import indy.anoncreds
from indy.error import IndyError
from ...ledger.indy import IndySdkLedger
from ..verifier import IndyVerifier
LOGGER = logging.getLogger(__name__)
class IndySdkVerifier(IndyVerifier):
"""Indy-SDK verifier implementation."""
def __init__(self, ledger: IndySdkLedger):
"""
Initialize an IndyVerifier instance.
Args:
ledger: ledger instance
"""
self.ledger = ledger
async def verify_presentation(
self,
pres_req,
pres,
schemas,
credential_definitions,
rev_reg_defs,
rev_reg_entries,
) -> bool:
"""
Verify a presentation.
Args:
pres_req: Presentation request data
pres: Presentation data
schemas: Schema data
credential_definitions: credential definition data
rev_reg_defs: revocation registry definitions
rev_reg_entries: revocation registry entries
"""
try:
self.non_revoc_intervals(pres_req, pres)
await self.check_timestamps(self.ledger, pres_req, pres, rev_reg_defs)
await self.pre_verify(pres_req, pres)
except ValueError as err:
LOGGER.error(
                f"Presentation on nonce={pres_req['nonce']} "
f"cannot be validated: {str(err)}"
)
return False
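        # Hand the JSON-serialized artifacts to indy-sdk for the cryptographic check;
        # an IndyError is logged and treated as a failed verification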
try:
verified = await indy.anoncreds.verifier_verify_proof(
json.dumps(pres_req),
json.dumps(pres),
json.dumps(schemas),
json.dumps(credential_definitions),
json.dumps(rev_reg_defs),
json.dumps(rev_reg_entries),
)
except IndyError:
LOGGER.exception(
                f"Validation of presentation on nonce={pres_req['nonce']} "
"failed with error"
)
verified = False
return verified
| """Indy SDK verifier implementation."""
import json
import logging
import indy.anoncreds
from indy.error import IndyError
from ...ledger.indy import IndySdkLedger
from ..verifier import IndyVerifier
LOGGER = logging.getLogger(__name__)
class IndySdkVerifier(IndyVerifier):
"""Indy-SDK verifier implementation."""
def __init__(self, ledger: IndySdkLedger):
"""
Initialize an IndyVerifier instance.
Args:
ledger: ledger instance
"""
self.ledger = ledger
async def verify_presentation(
self,
pres_req,
pres,
schemas,
credential_definitions,
rev_reg_defs,
rev_reg_entries,
) -> bool:
"""
Verify a presentation.
Args:
pres_req: Presentation request data
pres: Presentation data
schemas: Schema data
credential_definitions: credential definition data
rev_reg_defs: revocation registry definitions
rev_reg_entries: revocation registry entries
"""
try:
self.non_revoc_intervals(pres_req, pres)
await self.check_timestamps(self.ledger, pres_req, pres, rev_reg_defs)
await self.pre_verify(pres_req, pres)
except ValueError as err:
LOGGER.error(
f"Presentation on nonce={pres_req['nonce']} "
f"cannot be validated: {str(err)}"
)
return False
try:
verified = await indy.anoncreds.verifier_verify_proof(
json.dumps(pres_req),
json.dumps(pres),
json.dumps(schemas),
json.dumps(credential_definitions),
json.dumps(rev_reg_defs),
json.dumps(rev_reg_entries),
)
except IndyError:
LOGGER.exception(
f"Validation of presentation on nonce={pres_req['nonce']} "
"failed with error"
)
verified = False
return verified
|
###
### Copyright (C) 2021 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
import os
import slash
from ...lib.common import get_media, timefn, call, exe2os, filepath2os
from ...lib.ffmpeg.util import have_ffmpeg, BaseFormatMapper
from ...lib.parameters import format_value
from ...lib.util import skip_test_if_missing_features
from ...lib.metrics import md5, calculate_psnr
@slash.requires(have_ffmpeg)
class BaseEncoderTest(slash.Test, BaseFormatMapper):
def before(self):
super().before()
self.refctx = []
self.renderDevice = get_media().render_device
self.post_validate = lambda: None
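  # Codec-specific hooks: concrete encoder tests are expected to override these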
def map_profile(self):
raise NotImplementedError
def gen_qp_opts(self):
raise NotImplementedError
def gen_quality_opts(self):
raise NotImplementedError
def get_file_ext(self):
raise NotImplementedError
def gen_input_opts(self):
opts = "-f rawvideo -pix_fmt {mformat} -s:v {width}x{height}"
if vars(self).get("fps", None) is not None:
opts += " -r:v {fps}"
opts += f" -i {filepath2os(self.source)}"
return opts
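  # Build the ffmpeg output-option template; placeholders are resolved later via
  # .format(**vars(self)), and options are only appended for attributes the test actually sets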
def gen_output_opts(self):
opts = "-vf 'format={hwformat},hwupload"
if vars(self).get("hwframes", None) is not None:
opts += "=extra_hw_frames={hwframes}"
opts += "' -an -c:v {ffencoder}"
if vars(self).get("profile", None) is not None:
opts += " -profile:v {mprofile}"
if vars(self).get("rcmodeu", None) is not None:
opts += " -rc_mode {rcmodeu}"
if vars(self).get("qp", None) is not None:
opts += self.gen_qp_opts()
if vars(self).get("quality", None) is not None:
opts += self.gen_quality_opts()
if vars(self).get("gop", None) is not None:
opts += " -g {gop}"
if vars(self).get("extbrc", None) is not None:
opts += " -extbrc {extbrc}"
if vars(self).get("slices", None) is not None:
opts += " -slices {slices}"
if vars(self).get("tilecols", None) is not None:
opts += " -tile_cols {tilecols}"
if vars(self).get("tilerows", None) is not None:
opts += " -tile_rows {tilerows}"
if vars(self).get("bframes", None) is not None:
opts += " -bf {bframes}"
if vars(self).get("minrate", None) is not None:
opts += " -b:v {minrate}k"
if vars(self).get("maxrate", None) is not None:
opts += " -maxrate {maxrate}k"
if vars(self).get("refs", None) is not None:
opts += " -refs {refs}"
if vars(self).get("lowpower", None) is not None:
opts += " -low_power {lowpower}"
if vars(self).get("loopshp", None) is not None:
opts += " -loop_filter_sharpness {loopshp}"
if vars(self).get("looplvl", None) is not None:
opts += " -loop_filter_level {looplvl}"
if vars(self).get("level", None) is not None:
self.level /= 10.0
opts += " -level {level}"
if vars(self).get("ladepth", None) is not None:
opts += " -look_ahead 1"
opts += " -look_ahead_depth {ladepth}"
if vars(self).get("vforced_idr", None) is not None:
opts += " -force_key_frames expr:1 -forced_idr 1"
# WA: LDB is not enabled by default for HEVCe on gen11+, yet.
if get_media()._get_gpu_gen() >= 11 and self.codec.startswith("hevc"):
opts += " -b_strategy 1"
opts += " -vframes {frames} -y {osencoded}"
return opts
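  # Mirror the encode options in the artifact name so each parameter combination maps to a
  # distinct output file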
def gen_name(self):
name = "{case}-{rcmode}"
if vars(self).get("profile", None) is not None:
name += "-{profile}"
if vars(self).get("fps", None) is not None:
name += "-{fps}"
if vars(self).get("gop", None) is not None:
name += "-{gop}"
if vars(self).get("extbrc", None) is not None:
name += "-extbrc{extbrc}"
if vars(self).get("qp", None) is not None:
name += "-{qp}"
if vars(self).get("slices", None) is not None:
name += "-{slices}"
if vars(self).get("quality", None) is not None:
name += "-{quality}"
if vars(self).get("bframes", None) is not None:
name += "-{bframes}"
if vars(self).get("minrate", None) is not None:
name += "-{minrate}k"
if vars(self).get("maxrate", None) is not None:
name += "-{maxrate}k"
if vars(self).get("refs", None) is not None:
name += "-{refs}"
if vars(self).get("lowpower", None) is not None:
name += "-{lowpower}"
if vars(self).get("loopshp", None) is not None:
name += "-{loopshp}"
if vars(self).get("looplvl", None) is not None:
name += "-{looplvl}"
if vars(self).get("ladepth", None) is not None:
name += "-{ladepth}"
if vars(self).get("vforced_idr", None) is not None:
name += "-{vforced_idr}"
if vars(self).get("level", None) is not None:
name += "-{level}"
if vars(self).get("r2r", None) is not None:
name += "-r2r"
return name
def validate_caps(self):
ifmts = self.caps["fmts"]
    ## BUG: It appears there's an ffmpeg bug with yuv420p hwupload when using
## i965 driver. Need to report upstream ffmpeg!
if "i965" == get_media()._get_driver_name():
ifmts = list(set(ifmts) - set(["I420"]))
self.hwformat = self.map_best_hw_format(self.format, ifmts)
self.mformat = self.map_format(self.format)
if None in [self.hwformat, self.mformat]:
slash.skip_test("{format} not supported".format(**vars(self)))
skip_test_if_missing_features(self)
maxw, maxh = self.caps["maxres"]
if self.width > maxw or self.height > maxh:
slash.skip_test(
format_value(
"{platform}.{driver}.{width}x{height} not supported", **vars(self)))
if vars(self).get("slices", 1) > 1 and not self.caps.get("multislice", True):
slash.skip_test(
format_value(
"{platform}.{driver}.slice > 1 unsupported in this mode", **vars(self)))
if not self.caps.get(self.rcmode, True):
slash.skip_test(
format_value(
"{platform}.{driver}.{rcmode} unsupported in this mode", **vars(self)))
if vars(self).get("profile", None) is not None:
self.mprofile = self.map_profile()
if self.mprofile is None:
slash.skip_test("{profile} profile is not supported".format(**vars(self)))
self.post_validate()
@timefn("ffmpeg")
def call_ffmpeg(self, iopts, oopts):
return call(
(
        f"{exe2os('ffmpeg')}"
" -hwaccel {hwaccel} -init_hw_device {hwaccel}=hw:{renderDevice}"
" -hwaccel_output_format {hwaccel}"
).format(**vars(self)) + (
" -v verbose {iopts} {oopts}"
).format(iopts = iopts, oopts = oopts)
)
def encode(self):
self.validate_caps()
get_media().test_call_timeout = vars(self).get("call_timeout", 0)
name = self.gen_name().format(**vars(self))
ext = self.get_file_ext()
self.encoded = get_media()._test_artifact("{}.{}".format(name, ext))
self.osencoded = filepath2os(self.encoded)
iopts = self.gen_input_opts()
oopts = self.gen_output_opts()
self.output = self.call_ffmpeg(iopts.format(**vars(self)), oopts.format(**vars(self)))
if vars(self).get("r2r", None) is not None:
assert type(self.r2r) is int and self.r2r > 1, "invalid r2r value"
md5ref = md5(self.encoded)
get_media()._set_test_details(md5_ref = md5ref)
for i in range(1, self.r2r):
self.encoded = get_media()._test_artifact("{}_{}.{}".format(name, i, ext))
self.osencoded = filepath2os(self.encoded)
self.call_ffmpeg(iopts.format(**vars(self)), oopts.format(**vars(self)))
result = md5(self.encoded)
get_media()._set_test_details(**{"md5_{:03}".format(i) : result})
assert result == md5ref, "r2r md5 mismatch"
# delete encoded file after each iteration
get_media()._purge_test_artifact(self.encoded)
else:
self.check_output()
self.check_bitrate()
self.check_metrics()
self.check_level()
self.check_forced_idr()
def check_output(self):
pass
def check_bitrate(self):
encsize = os.path.getsize(self.encoded)
bitrate_actual = encsize * 8 * vars(self).get("fps", 25) / 1024.0 / self.frames
get_media()._set_test_details(
size_encoded = encsize,
bitrate_actual = "{:-.2f}".format(bitrate_actual))
if "cbr" == self.rcmode:
bitrate_gap = abs(bitrate_actual - self.bitrate) / self.bitrate
get_media()._set_test_details(bitrate_gap = "{:.2%}".format(bitrate_gap))
# acceptable bitrate within 10% of bitrate
assert(bitrate_gap <= 0.10)
elif "vbr" == self.rcmode:
# acceptable bitrate within 25% of minrate and 10% of maxrate
assert(self.minrate * 0.75 <= bitrate_actual <= self.maxrate * 1.10)
def check_metrics(self):
iopts = ""
if vars(self).get("ffdecoder", None) is not None:
iopts += "-c:v {ffdecoder} "
iopts += "-i {osencoded}"
name = (self.gen_name() + "-{width}x{height}-{format}").format(**vars(self))
self.decoded = get_media()._test_artifact("{}.yuv".format(name))
oopts = (
"-vf 'hwdownload,format={hwformat}' -pix_fmt {mformat} -f rawvideo"
" -vsync passthrough -vframes {frames}"
f" -y {filepath2os(self.decoded)}")
self.call_ffmpeg(iopts.format(**vars(self)), oopts.format(**vars(self)))
get_media().baseline.check_psnr(
psnr = calculate_psnr(
self.source, self.decoded,
self.width, self.height,
self.frames, self.format),
context = self.refctx,
)
def check_level(self):
if vars(self).get("level", None) is None:
return
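    # ffprobe reports the stream's level_idc; for HEVC this appears to be the target level
    # multiplied by 30 (e.g. 153 for level 5.1), hence the /30 in the assertion below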
output = call(
      f"{exe2os('ffprobe')}"
" -i {osencoded} -v quiet -show_entries stream=level"
" -of default=nk=1:nw=1".format(**vars(self)))
assert float(output)/30 == self.level, "fail to set target level"
def check_forced_idr(self):
if vars(self).get("vforced_idr", None) is None:
return
judge = {"hevc-8" : 19, "avc" : 5}.get(self.codec, None)
assert judge is not None, f"{self.codec} codec not supported for forced_idr"
output = call(
      f"{exe2os('ffmpeg')}"
" -v verbose -i {osencoded} -c:v copy"
" -vframes {frames} -bsf:v trace_headers"
" -f null - 2>&1 | grep 'nal_unit_type.*{judge}' | wc -l".format(**vars(self)))
assert str(self.frames) == output.strip(), "It appears that the forced_idr did not work"
| ###
### Copyright (C) 2021 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
import os
import slash
from ...lib.common import get_media, timefn, call, exe2os, filepath2os
from ...lib.ffmpeg.util import have_ffmpeg, BaseFormatMapper
from ...lib.parameters import format_value
from ...lib.util import skip_test_if_missing_features
from ...lib.metrics import md5, calculate_psnr
@slash.requires(have_ffmpeg)
class BaseEncoderTest(slash.Test, BaseFormatMapper):
def before(self):
super().before()
self.refctx = []
self.renderDevice = get_media().render_device
self.post_validate = lambda: None
def map_profile(self):
raise NotImplementedError
def gen_qp_opts(self):
raise NotImplementedError
def gen_quality_opts(self):
raise NotImplementedError
def get_file_ext(self):
raise NotImplementedError
def gen_input_opts(self):
opts = "-f rawvideo -pix_fmt {mformat} -s:v {width}x{height}"
if vars(self).get("fps", None) is not None:
opts += " -r:v {fps}"
opts += f" -i {filepath2os(self.source)}"
return opts
def gen_output_opts(self):
opts = "-vf 'format={hwformat},hwupload"
if vars(self).get("hwframes", None) is not None:
opts += "=extra_hw_frames={hwframes}"
opts += "' -an -c:v {ffencoder}"
if vars(self).get("profile", None) is not None:
opts += " -profile:v {mprofile}"
if vars(self).get("rcmodeu", None) is not None:
opts += " -rc_mode {rcmodeu}"
if vars(self).get("qp", None) is not None:
opts += self.gen_qp_opts()
if vars(self).get("quality", None) is not None:
opts += self.gen_quality_opts()
if vars(self).get("gop", None) is not None:
opts += " -g {gop}"
if vars(self).get("extbrc", None) is not None:
opts += " -extbrc {extbrc}"
if vars(self).get("slices", None) is not None:
opts += " -slices {slices}"
if vars(self).get("tilecols", None) is not None:
opts += " -tile_cols {tilecols}"
if vars(self).get("tilerows", None) is not None:
opts += " -tile_rows {tilerows}"
if vars(self).get("bframes", None) is not None:
opts += " -bf {bframes}"
if vars(self).get("minrate", None) is not None:
opts += " -b:v {minrate}k"
if vars(self).get("maxrate", None) is not None:
opts += " -maxrate {maxrate}k"
if vars(self).get("refs", None) is not None:
opts += " -refs {refs}"
if vars(self).get("lowpower", None) is not None:
opts += " -low_power {lowpower}"
if vars(self).get("loopshp", None) is not None:
opts += " -loop_filter_sharpness {loopshp}"
if vars(self).get("looplvl", None) is not None:
opts += " -loop_filter_level {looplvl}"
if vars(self).get("level", None) is not None:
self.level /= 10.0
opts += " -level {level}"
if vars(self).get("ladepth", None) is not None:
opts += " -look_ahead 1"
opts += " -look_ahead_depth {ladepth}"
if vars(self).get("vforced_idr", None) is not None:
opts += " -force_key_frames expr:1 -forced_idr 1"
# WA: LDB is not enabled by default for HEVCe on gen11+, yet.
if get_media()._get_gpu_gen() >= 11 and self.codec.startswith("hevc"):
opts += " -b_strategy 1"
opts += " -vframes {frames} -y {osencoded}"
return opts
def gen_name(self):
name = "{case}-{rcmode}"
if vars(self).get("profile", None) is not None:
name += "-{profile}"
if vars(self).get("fps", None) is not None:
name += "-{fps}"
if vars(self).get("gop", None) is not None:
name += "-{gop}"
if vars(self).get("extbrc", None) is not None:
name += "-extbrc{extbrc}"
if vars(self).get("qp", None) is not None:
name += "-{qp}"
if vars(self).get("slices", None) is not None:
name += "-{slices}"
if vars(self).get("quality", None) is not None:
name += "-{quality}"
if vars(self).get("bframes", None) is not None:
name += "-{bframes}"
if vars(self).get("minrate", None) is not None:
name += "-{minrate}k"
if vars(self).get("maxrate", None) is not None:
name += "-{maxrate}k"
if vars(self).get("refs", None) is not None:
name += "-{refs}"
if vars(self).get("lowpower", None) is not None:
name += "-{lowpower}"
if vars(self).get("loopshp", None) is not None:
name += "-{loopshp}"
if vars(self).get("looplvl", None) is not None:
name += "-{looplvl}"
if vars(self).get("ladepth", None) is not None:
name += "-{ladepth}"
if vars(self).get("vforced_idr", None) is not None:
name += "-{vforced_idr}"
if vars(self).get("level", None) is not None:
name += "-{level}"
if vars(self).get("r2r", None) is not None:
name += "-r2r"
return name
def validate_caps(self):
ifmts = self.caps["fmts"]
## BUG: It appears there's a ffmpeg bug with yuv420p hwupload when using
## i965 driver. Need to report upstream ffmpeg!
if "i965" == get_media()._get_driver_name():
ifmts = list(set(ifmts) - set(["I420"]))
self.hwformat = self.map_best_hw_format(self.format, ifmts)
self.mformat = self.map_format(self.format)
if None in [self.hwformat, self.mformat]:
slash.skip_test("{format} not supported".format(**vars(self)))
skip_test_if_missing_features(self)
maxw, maxh = self.caps["maxres"]
if self.width > maxw or self.height > maxh:
slash.skip_test(
format_value(
"{platform}.{driver}.{width}x{height} not supported", **vars(self)))
if vars(self).get("slices", 1) > 1 and not self.caps.get("multislice", True):
slash.skip_test(
format_value(
"{platform}.{driver}.slice > 1 unsupported in this mode", **vars(self)))
if not self.caps.get(self.rcmode, True):
slash.skip_test(
format_value(
"{platform}.{driver}.{rcmode} unsupported in this mode", **vars(self)))
if vars(self).get("profile", None) is not None:
self.mprofile = self.map_profile()
if self.mprofile is None:
slash.skip_test("{profile} profile is not supported".format(**vars(self)))
self.post_validate()
@timefn("ffmpeg")
def call_ffmpeg(self, iopts, oopts):
return call(
(
f"{exe2os('ffmpeg')}"
" -hwaccel {hwaccel} -init_hw_device {hwaccel}=hw:{renderDevice}"
" -hwaccel_output_format {hwaccel}"
).format(**vars(self)) + (
" -v verbose {iopts} {oopts}"
).format(iopts = iopts, oopts = oopts)
)
def encode(self):
self.validate_caps()
get_media().test_call_timeout = vars(self).get("call_timeout", 0)
name = self.gen_name().format(**vars(self))
ext = self.get_file_ext()
self.encoded = get_media()._test_artifact("{}.{}".format(name, ext))
self.osencoded = filepath2os(self.encoded)
iopts = self.gen_input_opts()
oopts = self.gen_output_opts()
self.output = self.call_ffmpeg(iopts.format(**vars(self)), oopts.format(**vars(self)))
if vars(self).get("r2r", None) is not None:
assert type(self.r2r) is int and self.r2r > 1, "invalid r2r value"
md5ref = md5(self.encoded)
get_media()._set_test_details(md5_ref = md5ref)
for i in range(1, self.r2r):
self.encoded = get_media()._test_artifact("{}_{}.{}".format(name, i, ext))
self.osencoded = filepath2os(self.encoded)
self.call_ffmpeg(iopts.format(**vars(self)), oopts.format(**vars(self)))
result = md5(self.encoded)
get_media()._set_test_details(**{"md5_{:03}".format(i) : result})
assert result == md5ref, "r2r md5 mismatch"
# delete encoded file after each iteration
get_media()._purge_test_artifact(self.encoded)
else:
self.check_output()
self.check_bitrate()
self.check_metrics()
self.check_level()
self.check_forced_idr()
def check_output(self):
pass
def check_bitrate(self):
encsize = os.path.getsize(self.encoded)
bitrate_actual = encsize * 8 * vars(self).get("fps", 25) / 1024.0 / self.frames
get_media()._set_test_details(
size_encoded = encsize,
bitrate_actual = "{:-.2f}".format(bitrate_actual))
if "cbr" == self.rcmode:
bitrate_gap = abs(bitrate_actual - self.bitrate) / self.bitrate
get_media()._set_test_details(bitrate_gap = "{:.2%}".format(bitrate_gap))
# acceptable bitrate within 10% of bitrate
assert(bitrate_gap <= 0.10)
elif "vbr" == self.rcmode:
# acceptable bitrate within 25% of minrate and 10% of maxrate
assert(self.minrate * 0.75 <= bitrate_actual <= self.maxrate * 1.10)
def check_metrics(self):
iopts = ""
if vars(self).get("ffdecoder", None) is not None:
iopts += "-c:v {ffdecoder} "
iopts += "-i {osencoded}"
name = (self.gen_name() + "-{width}x{height}-{format}").format(**vars(self))
self.decoded = get_media()._test_artifact("{}.yuv".format(name))
oopts = (
"-vf 'hwdownload,format={hwformat}' -pix_fmt {mformat} -f rawvideo"
" -vsync passthrough -vframes {frames}"
f" -y {filepath2os(self.decoded)}")
self.call_ffmpeg(iopts.format(**vars(self)), oopts.format(**vars(self)))
get_media().baseline.check_psnr(
psnr = calculate_psnr(
self.source, self.decoded,
self.width, self.height,
self.frames, self.format),
context = self.refctx,
)
def check_level(self):
if vars(self).get("level", None) is None:
return
output = call(
f"{exe2os('ffprobe')}"
" -i {osencoded} -v quiet -show_entries stream=level"
" -of default=nk=1:nw=1".format(**vars(self)))
assert float(output)/30 == self.level, "fail to set target level"
def check_forced_idr(self):
if vars(self).get("vforced_idr", None) is None:
return
judge = {"hevc-8" : 19, "avc" : 5}.get(self.codec, None)
assert judge is not None, f"{self.codec} codec not supported for forced_idr"
output = call(
f"{exe2os('ffmpeg')}"
" -v verbose -i {osencoded} -c:v copy"
" -vframes {frames} -bsf:v trace_headers"
" -f null - 2>&1 | grep 'nal_unit_type.*{judge}' | wc -l".format(**vars(self)))
assert str(self.frames) == output.strip(), "It appears that the forced_idr did not work"
|
from dotenv import load_dotenv
import os
import requests
from bs4 import BeautifulSoup
import json
import re
load_dotenv()
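# Conflict detection: for every section, getConflict scans all other sections and records the
# CRNs whose meeting days, date ranges and time ranges overlap with the given timeslots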
def addConflicts(data):
for department in data:
for course in department["courses"]:
for section in course["sections"]:
section["conflicts"] = getConflict(
data, section["timeslots"], section["subj"] + str(section["crse"])
)
def getConflict(data, check_timeslots, course_code):
conflicts = {}
for department in data:
for course in department["courses"]:
for section in course["sections"]:
for timeslot in section["timeslots"]:
for day in timeslot["days"]:
                        # Don't conflict with other sections of the same course (or with self)
if course_code == section["subj"] + str(section["crse"]):
continue
# If this course does not have a timeslot just skip it
if timeslot["timeStart"] == -1 or timeslot["timeEnd"] == -1:
continue
for check_timeslot in check_timeslots:
# If this course does not have a timeslot just skip it
if (
check_timeslot["timeStart"] == -1
or check_timeslot["timeEnd"] == -1
):
continue
# If not happening on the same day skip it
if day not in check_timeslot["days"]:
continue
                            # If the dates don't overlap skip it
if not max(
check_timeslot["dateStart"], timeslot["dateStart"]
) < min(check_timeslot["dateEnd"], timeslot["dateEnd"]):
continue
# There is a conflict
if max(
check_timeslot["timeStart"], timeslot["timeStart"]
) < min(check_timeslot["timeEnd"], timeslot["timeEnd"]):
                                # JSON has no set type, so the value is always set to true;
                                # a CRN's presence as a key in the conflicts table is what
                                # actually marks the conflict
conflicts[section["crn"]] = True
return conflicts
# We decided not to use this but I left it just in case
# def reformatJson(data):
# departments_copy = data
# reformat = {}
# for department in departments_copy:
# reformat[department['code']] = department
# course_copy = department['courses']
# reformat[department['code']]['courses'] = {}
# for course in course_copy:
# reformat[department['code']]['courses'][f"{course["subj"]}-{course["crse"]}"] = course
# sections_copy = course['sections']
# reformat[department['code']]['courses'][f"{course["subj"]}-{course["crse"]}"]['sections'] = {}
# for section in sections_copy:
# reformat[department['code']]['courses'][f"{course["subj"]}-{course["crse"]}"]['sections'][section['crn']] = section
#
#
# return reformat
#
def getContent(element):
return " ".join(
element.encode_contents().decode().strip().replace("&", "&").split()
)
def getContentFromChild(element, childType):
if len(element.findAll(childType)) > 0:
element = element.findAll(childType)[0]
return getContent(element)
def cleanOutAbbr(text):
    text = re.sub(r"<abbr.*?>", "", text)
    text = re.sub(r"</abbr>", "", text)
    text = re.sub(
        r"\s?\([pP]\)", "", text
    ) # Remove primary instructor indicator (maybe we can use this data somewhere later but for now it is removed)
    text = re.sub(r"\w+\.\s+", "", text)
return text
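# Convert an SIS time range such as "02:00pm-03:50pm" into military-time integers:
# timeToMilitary(t, True) -> 1400 (start), timeToMilitary(t, False) -> 1550 (end); "TBA" -> -1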
def timeToMilitary(time, useStartTime):
if "TBA" in time:
return -1
if useStartTime:
time = time.split("-")[0]
else:
time = time.split("-")[1]
offset = 0
if "pm" in time and "12:" not in time:
offset = 1200
return int("".join(time.strip().split(":"))[:4]) + offset
def toTitle(text):
text = text.title()
regex = r"\b[iI]+\b"
matches = re.finditer(regex, text)
for matchNum, match in enumerate(matches, start=1):
text = (
text[: match.start()]
+ text[match.start() : match.end()].upper()
+ text[match.end() :]
)
text = text.replace("'S", "'s")
return text
payload = f'sid={os.getenv("RIN")}&PIN={os.getenv("PASSWORD")}'
headers = {"Content-Type": "application/x-www-form-urlencoded"}
with requests.Session() as s:
s.get(url="https://sis.rpi.edu/rss/twbkwbis.P_WWWLogin")
response = s.request(
"POST",
"https://sis.rpi.edu/rss/twbkwbis.P_ValLogin",
headers=headers,
data=payload,
)
if b"Welcome" not in response.text.encode("utf8"):
print("Failed to log into sis")
exit(1)
url = "https://sis.rpi.edu/rss/bwskfcls.P_GetCrse_Advanced"
    payload = f'rsts=dummy&crn=dummy&term_in={os.getenv("CURRENT_TERM")}&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy&sel_subj=ADMN&sel_subj=USAF&sel_subj=ARCH&sel_subj=ARTS&sel_subj=ASTR&sel_subj=BCBP&sel_subj=BIOL&sel_subj=BMED&sel_subj=CHME&sel_subj=CHEM&sel_subj=CIVL&sel_subj=COGS&sel_subj=COMM&sel_subj=CSCI&sel_subj=ENGR&sel_subj=ERTH&sel_subj=ECON&sel_subj=ECSE&sel_subj=ESCI&sel_subj=ENVE&sel_subj=GSAS&sel_subj=ISYE&sel_subj=ITWS&sel_subj=IENV&sel_subj=IHSS&sel_subj=ISCI&sel_subj=LANG&sel_subj=LGHT&sel_subj=LITR&sel_subj=MGMT&sel_subj=MTLE&sel_subj=MATP&sel_subj=MATH&sel_subj=MANE&sel_subj=USAR&sel_subj=USNA&sel_subj=PHIL&sel_subj=PHYS&sel_subj=PSYC&sel_subj=STSH&sel_subj=STSS&sel_subj=WRIT&sel_crse=&sel_title=&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_ptrm=%25&begin_hh=0&begin_mi=0&begin_ap=a&end_hh=0&end_mi=0&end_ap=a&SUB_BTN=Section+Search&path=1'
# This payload is for testing. It will only return CSCI classes and will therefore be a bit faster
# payload = f'rsts=dummy&crn=dummy&term_in={os.getenv('CURRENT_TERM')}&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy&sel_subj=CSCI&sel_subj=LGHT&sel_crse=&sel_title=&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_ptrm=%25&begin_hh=0&begin_mi=0&begin_ap=a&end_hh=0&end_mi=0&end_ap=a&SUB_BTN=Section+Search&path=1'
headers = {}
response = s.request("POST", url, headers=headers, data=payload)
data = []
# print(response.text.encode('utf8'))
soup = BeautifulSoup(response.text.encode("utf8"), "html.parser")
table = soup.findAll("table", {"class": "datadisplaytable"})[0]
rows = table.findAll("tr")
current_department = None
current_code = None
current_courses = None
last_subject = None
last_course_code = None
for row in rows:
th = row.findAll("th")
if len(th) != 0:
if "ddtitle" in th[0].attrs["class"]:
# if(current_department):
data.append(
{"name": toTitle(getContent(th[0])), "code": "", "courses": []}
)
else:
td = row.findAll("td")
if "TBA" not in getContent(td[8]):
timeslot_data = {
"days": list(getContent(td[8])),
"timeStart": timeToMilitary(
getContentFromChild(td[9], "abbr"), True
),
"timeEnd": timeToMilitary(
getContentFromChild(td[9], "abbr"), False
),
"instructor": ", ".join(
[x.strip() for x in cleanOutAbbr(getContent(td[19])).split(",")]
),
"dateStart": getContentFromChild(td[20], "abbr").split("-")[0],
"dateEnd": getContentFromChild(td[20], "abbr").split("-")[1],
"location": getContentFromChild(td[21], "abbr"),
}
else:
timeslot_data = {
"dateEnd": "",
"dateStart": "",
"days": [],
"instructor": "",
"location": "",
"timeEnd": -1,
"timeStart": -1,
}
if len(getContent(td[1])) == 0:
data[-1]["courses"][-1]["sections"][-1]["timeslots"].append(
timeslot_data
)
continue
credit_min = float(getContent(td[6]).split("-")[0])
credit_max = credit_min
if len(getContent(td[6]).split("-")) > 1:
credit_max = float(getContent(td[6]).split("-")[1])
section_data = {
# "select":getContentFromChild(td[0], 'abbr'),
"crn": int(getContentFromChild(td[1], "a")),
"subj": getContent(td[2]),
"crse": int(getContent(td[3])),
"sec": getContent(td[4]),
# "cmp":getContent(td[5]),
"credMin": credit_min,
"credMax": credit_max,
"title": toTitle(getContent(td[7])),
# "cap": int(getContent(td[10])),
# "act":int(getContent(td[11])),
# "rem": int(getContent(td[12])),
# "wlCap":int(getContent(td[13])),
# "wlAct":int(getContent(td[14])),
# "wlRem":int(getContent(td[15])),
# "xlCap":getContent(td[16]),
# "xlAct":getContent(td[17]),
# "xlRem":getContent(td[18]),
"attribute": getContent(td[22]) if 22 < len(td) else "",
"timeslots": [timeslot_data],
}
if (
section_data["subj"] == last_subject
and section_data["crse"] == last_course_code
):
data[-1]["courses"][-1]["sections"].append(section_data)
continue
last_subject = getContent(td[2])
last_course_code = int(getContent(td[3]))
data[-1]["courses"].append(
{
"title": toTitle(getContent(td[7])),
"subj": getContent(td[2]),
"crse": int(getContent(td[3])),
"id": getContent(td[2]) + "-" + getContent(td[3]),
"sections": [section_data],
}
)
if len(getContent(td[2])) > 0:
data[-1]["code"] = getContent(td[2])
# This is for the old conflict method that has a list for each class that it conflicts with
# addConflicts(data)
# data = reformatJson(data)
# print(json.dumps(data,sort_keys=False,indent=2))
with open(f"courses.json", "w") as outfile: # -{os.getenv("CURRENT_TERM")}
json.dump(data, outfile, sort_keys=False, indent=2)
# Generate binary conflict output: for every section a 9*64-bit occupancy mask covering
# six days (M,T,W,R,F,S) of 10-minute slots from 07:00 to 22:50, keyed by CRN
day_offsets = {
"M": 0 * 16 * 6,
"T": 1 * 16 * 6,
"W": 2 * 16 * 6,
"R": 3 * 16 * 6,
"F": 4 * 16 * 6,
"S": 5 * 16 * 6,
}
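# Example: a section meeting Monday 10:00-10:50 sets slots 18-22 (Monday offset 0,
# hour index 10-7=3, minutes 00-40), all of which land in the first u64 of its row in mod.rs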
conflicts = {}
crn_to_courses = {}
for dept in data:
for course in dept["courses"]:
for section in course["sections"]:
crn_to_courses[section["crn"]] = course["id"]
conflict = [0] * (64 * 9)
for time in section["timeslots"]:
for day in time["days"]:
for hour in range(700, 2300, 100):
for minute in range(0, 60, 10):
if (
time["timeStart"] <= hour + minute
and time["timeEnd"] > hour + minute
):
minute_idx = int(minute / 10)
hour_idx = int(hour / 100) - 7 # we start at 7am
conflict[
day_offsets[day] + hour_idx * 6 + minute_idx
] = 1
conflicts[section["crn"]] = "".join(str(e) for e in conflict)
with open("mod.rs", "w") as f: # -{os.getenv("CURRENT_TERM")}
f.write(
"""\
//This file was automatically generated. Please do not modify it directly
use ::phf::{phf_map, Map};
pub static CRN_TIMES: Map<u32, [u64; 9]> = phf_map! {
"""
)
for crn, conflict in conflicts.items():
rust_array = f"\t{crn}u32 => ["
for i in range(0, 9 * 64, 64):
if i != 0:
rust_array += ", "
rust_array += str(int(conflict[i : i + 64], 2))
rust_array += "],\n"
f.write(rust_array)
f.write(
"""
};
pub static CRN_COURSES: Map<u32, &'static str> = phf_map! {
"""
)
for crn, course in crn_to_courses.items():
f.write(f'\t{crn}u32 => "{course}",\n')
f.write("};")
| from dotenv import load_dotenv
import os
import requests
from bs4 import BeautifulSoup
import json
import re
load_dotenv()
def addConflicts(data):
for department in data:
for course in department["courses"]:
for section in course["sections"]:
section["conflicts"] = getConflict(
data, section["timeslots"], section["subj"] + str(section["crse"])
)
def getConflict(data, check_timeslots, course_code):
conflicts = {}
for department in data:
for course in department["courses"]:
for section in course["sections"]:
for timeslot in section["timeslots"]:
for day in timeslot["days"]:
# Dont conflict with other sections of the same course (or with self)
if course_code == section["subj"] + str(section["crse"]):
continue
# If this course does not have a timeslot just skip it
if timeslot["timeStart"] == -1 or timeslot["timeEnd"] == -1:
continue
for check_timeslot in check_timeslots:
# If this course does not have a timeslot just skip it
if (
check_timeslot["timeStart"] == -1
or check_timeslot["timeEnd"] == -1
):
continue
# If not happening on the same day skip it
if day not in check_timeslot["days"]:
continue
# If the dates dont overlap skip it
if not max(
check_timeslot["dateStart"], timeslot["dateStart"]
) < min(check_timeslot["dateEnd"], timeslot["dateEnd"]):
continue
# There is a conflict
if max(
check_timeslot["timeStart"], timeslot["timeStart"]
) < min(check_timeslot["timeEnd"], timeslot["timeEnd"]):
# JSON does not support hashtables without a value so the value
# is always set to true even though just by being in the conflicts
# hash table is enough to know it conflicts
conflicts[section["crn"]] = True
return conflicts
# We decided not to use this but I left it just in case
# def reformatJson(data):
# departments_copy = data
# reformat = {}
# for department in departments_copy:
# reformat[department['code']] = department
# course_copy = department['courses']
# reformat[department['code']]['courses'] = {}
# for course in course_copy:
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"] = course
# sections_copy = course['sections']
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"]['sections'] = {}
# for section in sections_copy:
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"]['sections'][section['crn']] = section
#
#
# return reformat
#
def getContent(element):
return " ".join(
element.encode_contents().decode().strip().replace("&", "&").split()
)
def getContentFromChild(element, childType):
if len(element.findAll(childType)) > 0:
element = element.findAll(childType)[0]
return getContent(element)
def cleanOutAbbr(text):
text = re.sub("<abbr.*?>", "", text)
text = re.sub("<\/abbr>", "", text)
text = re.sub(
"\s?\([pP]\)", "", text
) # Remove primary instructor indicator (maybe we can use this data somewhere later but for now it is removed)
text = re.sub("\w+\.\s+", "", text)
return text
def timeToMilitary(time, useStartTime):
if "TBA" in time:
return -1
if useStartTime:
time = time.split("-")[0]
else:
time = time.split("-")[1]
offset = 0
if "pm" in time and "12:" not in time:
offset = 1200
return int("".join(time.strip().split(":"))[:4]) + offset
def toTitle(text):
text = text.title()
regex = r"\b[iI]+\b"
matches = re.finditer(regex, text)
for matchNum, match in enumerate(matches, start=1):
text = (
text[: match.start()]
+ text[match.start() : match.end()].upper()
+ text[match.end() :]
)
text = text.replace("'S", "'s")
return text
payload = f'sid={os.getenv("RIN")}&PIN={os.getenv("PASSWORD")}'
headers = {"Content-Type": "application/x-www-form-urlencoded"}
with requests.Session() as s:
s.get(url="https://sis.rpi.edu/rss/twbkwbis.P_WWWLogin")
response = s.request(
"POST",
"https://sis.rpi.edu/rss/twbkwbis.P_ValLogin",
headers=headers,
data=payload,
)
if b"Welcome" not in response.text.encode("utf8"):
print("Failed to log into sis")
exit(1)
url = "https://sis.rpi.edu/rss/bwskfcls.P_GetCrse_Advanced"
payload = f'rsts=dummy&crn=dummy&term_in={os.getenv("CURRENT_TERM")}&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy&sel_subj=ADMN&sel_subj=USAF&sel_subj=ARCH&sel_subj=ARTS&sel_subj=ASTR&sel_subj=BCBP&sel_subj=BIOL&sel_subj=BMED&sel_subj=CHME&sel_subj=CHEM&sel_subj=CIVL&sel_subj=COGS&sel_subj=COMM&sel_subj=CSCI&sel_subj=ENGR&sel_subj=ERTH&sel_subj=ECON&sel_subj=ECSE&sel_subj=ESCI&sel_subj=ENVE&sel_subj=GSAS&sel_subj=ISYE&sel_subj=ITWS&sel_subj=IENV&sel_subj=IHSS&sel_subj=ISCI&sel_subj=LANG&sel_subj=LGHT&sel_subj=LITR&sel_subj=MGMT&sel_subj=MTLE&sel_subj=MATP&sel_subj=MATH&sel_subj=MANE&sel_subj=USAR&sel_subj=USNA&sel_subj=PHIL&sel_subj=PHYS&sel_subj=PSYC&sel_subj=STSH&sel_subj=STSS&sel_subj=WRIT&sel_crse=&sel_title=&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_ptrm=%25&begin_hh=0&begin_mi=0&begin_ap=a&end_hh=0&end_mi=0&end_ap=a&SUB_BTN=Section+Search&path=1'
# This payload is for testing. It will only return CSCI classes and will therefore be a bit faster
# payload = f'rsts=dummy&crn=dummy&term_in={os.getenv("CURRENT_TERM")}&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy&sel_subj=CSCI&sel_subj=LGHT&sel_crse=&sel_title=&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_ptrm=%25&begin_hh=0&begin_mi=0&begin_ap=a&end_hh=0&end_mi=0&end_ap=a&SUB_BTN=Section+Search&path=1'
headers = {}
response = s.request("POST", url, headers=headers, data=payload)
data = []
# print(response.text.encode('utf8'))
soup = BeautifulSoup(response.text.encode("utf8"), "html.parser")
table = soup.findAll("table", {"class": "datadisplaytable"})[0]
rows = table.findAll("tr")
current_department = None
current_code = None
current_courses = None
last_subject = None
last_course_code = None
for row in rows:
th = row.findAll("th")
if len(th) != 0:
if "ddtitle" in th[0].attrs["class"]:
# if(current_department):
data.append(
{"name": toTitle(getContent(th[0])), "code": "", "courses": []}
)
else:
td = row.findAll("td")
if "TBA" not in getContent(td[8]):
timeslot_data = {
"days": list(getContent(td[8])),
"timeStart": timeToMilitary(
getContentFromChild(td[9], "abbr"), True
),
"timeEnd": timeToMilitary(
getContentFromChild(td[9], "abbr"), False
),
"instructor": ", ".join(
[x.strip() for x in cleanOutAbbr(getContent(td[19])).split(",")]
),
"dateStart": getContentFromChild(td[20], "abbr").split("-")[0],
"dateEnd": getContentFromChild(td[20], "abbr").split("-")[1],
"location": getContentFromChild(td[21], "abbr"),
}
else:
timeslot_data = {
"dateEnd": "",
"dateStart": "",
"days": [],
"instructor": "",
"location": "",
"timeEnd": -1,
"timeStart": -1,
}
if len(getContent(td[1])) == 0:
data[-1]["courses"][-1]["sections"][-1]["timeslots"].append(
timeslot_data
)
continue
credit_min = float(getContent(td[6]).split("-")[0])
credit_max = credit_min
if len(getContent(td[6]).split("-")) > 1:
credit_max = float(getContent(td[6]).split("-")[1])
section_data = {
# "select":getContentFromChild(td[0], 'abbr'),
"crn": int(getContentFromChild(td[1], "a")),
"subj": getContent(td[2]),
"crse": int(getContent(td[3])),
"sec": getContent(td[4]),
# "cmp":getContent(td[5]),
"credMin": credit_min,
"credMax": credit_max,
"title": toTitle(getContent(td[7])),
# "cap": int(getContent(td[10])),
# "act":int(getContent(td[11])),
# "rem": int(getContent(td[12])),
# "wlCap":int(getContent(td[13])),
# "wlAct":int(getContent(td[14])),
# "wlRem":int(getContent(td[15])),
# "xlCap":getContent(td[16]),
# "xlAct":getContent(td[17]),
# "xlRem":getContent(td[18]),
"attribute": getContent(td[22]) if 22 < len(td) else "",
"timeslots": [timeslot_data],
}
if (
section_data["subj"] == last_subject
and section_data["crse"] == last_course_code
):
data[-1]["courses"][-1]["sections"].append(section_data)
continue
last_subject = getContent(td[2])
last_course_code = int(getContent(td[3]))
data[-1]["courses"].append(
{
"title": toTitle(getContent(td[7])),
"subj": getContent(td[2]),
"crse": int(getContent(td[3])),
"id": getContent(td[2]) + "-" + getContent(td[3]),
"sections": [section_data],
}
)
if len(getContent(td[2])) > 0:
data[-1]["code"] = getContent(td[2])
# This is for the old conflict method that has a list for each class that it conflicts with
# addConflicts(data)
# data = reformatJson(data)
# print(json.dumps(data,sort_keys=False,indent=2))
with open(f"courses.json", "w") as outfile: # -{os.getenv("CURRENT_TERM")}
json.dump(data, outfile, sort_keys=False, indent=2)
# Generate binary conflict output
# (32bit crn + 3*64bit conflicts 5am-midnight(by 30min))for every course
day_offsets = {
"M": 0 * 16 * 6,
"T": 1 * 16 * 6,
"W": 2 * 16 * 6,
"R": 3 * 16 * 6,
"F": 4 * 16 * 6,
"S": 5 * 16 * 6,
}
conflicts = {}
crn_to_courses = {}
for dept in data:
for course in dept["courses"]:
for section in course["sections"]:
crn_to_courses[section["crn"]] = course["id"]
conflict = [0] * (64 * 9)
for time in section["timeslots"]:
for day in time["days"]:
for hour in range(700, 2300, 100):
for minute in range(0, 60, 10):
if (
time["timeStart"] <= hour + minute
and time["timeEnd"] > hour + minute
):
minute_idx = int(minute / 10)
hour_idx = int(hour / 100) - 7 # we start at 7am
conflict[
day_offsets[day] + hour_idx * 6 + minute_idx
] = 1
conflicts[section["crn"]] = "".join(str(e) for e in conflict)
with open("mod.rs", "w") as f: # -{os.getenv("CURRENT_TERM")}
f.write(
"""\
//This file was automatically generated. Please do not modify it directly
use ::phf::{phf_map, Map};
pub static CRN_TIMES: Map<u32, [u64; 9]> = phf_map! {
"""
)
for crn, conflict in conflicts.items():
rust_array = f"\t{crn}u32 => ["
for i in range(0, 9 * 64, 64):
if i != 0:
rust_array += ", "
rust_array += str(int(conflict[i : i + 64], 2))
rust_array += "],\n"
f.write(rust_array)
f.write(
"""
};
pub static CRN_COURSES: Map<u32, &'static str> = phf_map! {
"""
)
for crn, course in crn_to_courses.items():
f.write(f'\t{crn}u32 => "{course}",\n')
f.write("};")
|
from simple_websocket_server import WebSocketServer, WebSocket
from json import loads,dumps
from time import sleep
from dateutil import parser
from threading import Thread
from random import randint
from datetime import datetime,timedelta
from uuid import uuid4
import traceback
iso=lambda x:datetime.fromtimestamp(x).isoformat()
tsp=lambda x:parser.parse(x).timestamp()
tiper={int:'string',str:'string',list:'select',tuple:'multiselect',bool:'checkbox'}
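# iso/tsp convert between Unix timestamps and ISO-8601 strings; tiper maps Python value types
# to the form-field type names sent to websocket clients ('string', 'select', 'multiselect', 'checkbox')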
class SimpleEcho(WebSocket):
def xsend(self,msg):
self.send_message(dumps(msg))
# print(f"Send: {msg}")
def xsend_all(self,msg):
for n in clients:
n.send_message(dumps(msg))
# print(f"Send all: {msg}")
def xsend_xall(self,msg):
for n in clients:
if self is not n:
n.send_message(dumps(msg))
# print(f"Send: {msg}")
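    # The get_* helpers below serialize scheduler entries into calendar-style event dicts
    # (id/title/start/end), presumably for a FullCalendar-like frontend widget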
def get_events(self,name):
events=[]
for n in self.gdata['x']['scheduler']:
if n['name']==name:
events.append({
'id':n['id'],
                    'title':f"#{n['id']} {n['name']}",
'start':iso(n['start']),
'end':iso(n['end'])
})
return events
def get_calendar_events(self):
events=[]
for n in self.gdata['x']['scheduler']:
events.append({
                'id':f"{n['id']}{n['name']}",
                'title':f"#{n['id']} {n['name']}",
'start':iso(n['start']),
'end':iso(n['end'])
})
return events
def get_event_integration(self,name):
events=[]
for n in self.gdata['x']['scheduler']:
if n['name']==name:
events.append({
'id':n['id'],
                    'title':f"#{n['id']} {n['name']}",
'start':iso(n['start']),
'end':iso(n['end']),
'vars':n['vars']
})
return events
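    # handle() dispatches on msg['type']; apart from the auth_token/authenticate messages,
    # every branch requires self.user to have been set by a successful authentication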
def handle(self):
try:
msg=loads(self.data)
print(f"Receive: {msg}")
if msg.get('type')=="auth_token":
if users.get(msg.get('token')):
self.user=users[msg['token']]
else:
self.xsend({'type':'auth_token','auth':False})
return
if msg.get('type')=="authenticate":
self.user=msg['user']
token=str(uuid4())
users[token]=msg['user']
self.xsend({'type':'authenticate', 'msg': { 'id': '1', 'username': msg['user'], 'role': 'admin', 'token': token}})
#self.xsend({'type':'authenticate', 'error': 'user dont exist'})
if not hasattr(self, 'user'):return
if msg.get('type')=="unsubscribe":
# print(f"Receive: {msg}")
if msg['msg']=="getCode":
self.gdata['imports']['loader'].q.put(1)
if msg['msg']=="getVars":
self.gdata['imports']['loader'].q.put(1)
if msg['msg']=="glu":
self.gdata['imports']['logs'].qsend.setdefault(self.lastname_run_id,[]).remove(self)
if msg.get('type')=="get_schedule_integrations":
self.xsend(self.get_event_integration('start_scenario'))
if msg.get('type')=="set_schedule_integrations":
job=self.gdata['x']['jobs'].get('start_scenario')
run_id=str(job['last_build_id'])
job['last_build_id']+=1
self.gdata['x']['scheduler'].append({'id':run_id,'start':tsp(msg['start']),'end':tsp(msg['end']),'name':'start_scenario','vars':msg['vars']})
self.xsend_all({'type':'get_schedule','events':self.get_events('start_scenario')})
if msg.get('type')=="get_schedule":
# TODO: n['date']
self.xsend({'type':'get_schedule','events':self.get_events(msg['name'])})
if msg.get('type')=="get_calendar":
# TODO: n['date']
self.xsend({'type':'get_calendar','events':self.get_calendar_events()})
if msg.get('type')=="schedule_select":
job=self.gdata['x']['jobs'].get(msg['name'])
run_id=str(job['last_build_id'])
job['last_build_id']+=1
job_vars={}
for n in msg['vars']:
if n.get('select') is not None:
job_vars[n['name']]=n.get('select')
elif n.get('selected') is not None:
job_vars[n['name']]=n.get('selected')
else:
job_vars[n['name']]=n['value']
scheduler=self.gdata['x']['scheduler']
scheduler.append({'id':run_id,'start':tsp(msg['start']),'end':tsp(msg['end']),'name':msg['name'],'vars':job_vars})
self.xsend_all({'type':'get_schedule','events':self.get_events(msg['name'])})
if msg.get('type')=="schedule_delete":
scheduler=self.gdata['x']['scheduler']
[scheduler.remove(n) for n in scheduler.copy() if n['id']==msg['id'] and n['name']==msg['name']]
self.xsend_all({'type':'get_schedule','events':self.get_events(msg['name'])})
if msg.get('type')=="schedule_move":
scheduler=self.gdata['x']['scheduler']
[n.update({'start':tsp(msg['start']),'end':tsp(msg['end'])}) for n in scheduler.copy() if n['id']==msg['id'] and n['name']==msg['name']]
self.xsend_xall({'type':'get_schedule','events':self.get_events(msg['name'])})
if msg.get('type')=="getCode":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name'])
self.xsend({'type':'getCode','code':job['code']})
if msg.get('type')=="getVars":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name'])
self.xsend({'type':'getVars','vars':job['vars']})
if msg.get('type')=="getLogs":
self.lastname=msg['name']
                run_id=f"{msg['id']}{msg['name']}"
logs=self.gdata['logs'][run_id]
self.xsend({'type':'getLogs','logs':'\n'.join(logs)})
if msg.get('type')=="glu":
                run_id=f"{msg['id']}{msg['name']}"
self.lastname_run_id=run_id
self.gdata['imports']['logs'].qsend.setdefault(run_id,[]).append(self)
if msg.get('type')=="setCode":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name'])
job['code']=msg['code']
self.xsend_xall({'type':'getCode','code':job['code']})
if msg.get('type')=="setVars":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name'])
job['vars']=msg['vars']
self.xsend_xall({'type':'getVars','vars':job['vars']})
if msg.get('type')=="jobs":
folder=['jobs']+msg.get('folder')
last_folder=self.gdata['x']['root']
for n in folder:
last_folder=last_folder.get(n)
jobs=[]
for n in last_folder:
if not last_folder.get(n):
job={'name':n,'type':'job'}
name='\\'.join(folder+[n])
history=self.gdata['x']['jobs'][name]['history']
if history:
job.update(history[str(max(int(n) for n in history.keys()))])
jobs.append(job)
else:
jobs.append({'name':n,'type':'folder'})
# jobs=[{'name':k } for k,v in self.gdata['x']['jobs'].items()]
# for job in jobs:
# history=self.gdata['x']['jobs'][job['name']]['history']
# if history:
# job.update(history[str(max(int(n) for n in history.keys()))])
# print(self.gdata['x']['scheduler'])
self.xsend({'type':'jobs','msg':jobs})
if msg.get('type')=="job":
self.lastname=msg['msg']
job=self.gdata['x']['jobs'].get(msg['msg']).copy()
job['name']=msg['msg']
self.xsend({'type':'job','msg':job})
if msg.get('type')=="hosts":
hosts={'master':self.gdata['x']['master'], 'servers':self.gdata['x']['servers']}
self.xsend({'type':'hosts','msg':hosts})
if msg.get('type')=="name":
if msg["old"]=='New Job':
self.gdata['x']['jobs'][msg['new']]={'vars':'','code':'','history':{},'status':1,'last_build_id':1}
else:
job=self.gdata['x']['jobs'].pop(msg['old'])
self.gdata['x']['jobs'][msg['new']]=job
print(f'Change job name from {msg["old"]} to {msg["new"]}')
if msg.get('type')=="build":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name']).copy()
job_vars=self.gdata['imports']['executor'].process.run(job['vars'],self.gdata,{},-1,msg['name'])
job_vars.pop('run_id')
job_vars.pop('job_name')
job_vars=[{'name':k,'value':v,'type':tiper.get(type(v),'xzchto')} for k,v in job_vars.items()]
self.xsend({'type':'build','msg':job_vars})
if msg.get('type')=="delete":
self.gdata['x']['jobs'].pop(msg['name'])
# TODO: Subscribe to scheduler updates
if msg.get('type')=="history":
job=self.gdata['x']['jobs'].get(msg['name'])
history=[{'id':k,'status':v['status'],'start':v.get('start',''),'end':v.get('end',''),'delta':v.get('delta','')} for k,v in job['history'].items()]
for k in self.gdata['x']['scheduler']:
if k['name'] == msg['name']:
history.append({'id':k['id'],'status':'sheduled','start':iso(k['start']),'end':iso(k['end'])})
print(history)
self.xsend({'type':'history','msg':history})
self.gdata['imports']['executor'].qhistory.setdefault(msg['name'],[]).append(self)
if msg.get('type')=='stop':
print(msg)
except:
print(self.data)
traceback.print_exc()
def connected(self):
print(f'WebSocket connected: {self.address}')
clients.append(self)
# Thread(target=run,args=(self,)).start()
def handle_close(self):
print(self.address, 'closed')
clients.remove(self)
clients=[]
users={}
def work(data):
server = WebSocketServer('0.0.0.0', 5124, SimpleEcho, data)
server.serve_forever()
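# Illustrative usage sketch (not part of the original application; the structure of
# `data` below is an assumption): `work(data)` blocks inside serve_forever(), so a
# caller would typically start it on a background thread and keep the shared dict
# (exposed to handlers as self.gdata) alive elsewhere.
#
#     shared = {'x': {'jobs': {}, 'scheduler': [], 'root': {'jobs': {}},
#                     'master': {}, 'servers': []},
#               'logs': {}, 'imports': {}}
#     Thread(target=work, args=(shared,), daemon=True).start()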
| from simple_websocket_server import WebSocketServer, WebSocket
from json import loads,dumps
from time import sleep
from dateutil import parser
from threading import Thread
from random import randint
from datetime import datetime,timedelta
from uuid import uuid4
import traceback
iso=lambda x:datetime.fromtimestamp(x).isoformat()
tsp=lambda x:parser.parse(x).timestamp()
tiper={int:'string',str:'string',list:'select',tuple:'multiselect',bool:'checkbox'}
class SimpleEcho(WebSocket):
def xsend(self,msg):
self.send_message(dumps(msg))
# print(f"Send: {msg}")
def xsend_all(self,msg):
for n in clients:
n.send_message(dumps(msg))
# print(f"Send all: {msg}")
def xsend_xall(self,msg):
for n in clients:
if self is not n:
n.send_message(dumps(msg))
# print(f"Send: {msg}")
def get_events(self,name):
events=[]
for n in self.gdata['x']['scheduler']:
if n['name']==name:
events.append({
'id':n['id'],
'title':f"#{n['id']} {n['name']}",
'start':iso(n['start']),
'end':iso(n['end'])
})
return events
def get_calendar_events(self):
events=[]
for n in self.gdata['x']['scheduler']:
events.append({
'id':f"{n['id']}{n['name']}",
'title':f"#{n['id']} {n['name']}",
'start':iso(n['start']),
'end':iso(n['end'])
})
return events
def get_event_integration(self,name):
events=[]
for n in self.gdata['x']['scheduler']:
if n['name']==name:
events.append({
'id':n['id'],
'title':f"#{n['id']} {n['name']}",
'start':iso(n['start']),
'end':iso(n['end']),
'vars':n['vars']
})
return events
def handle(self):
try:
msg=loads(self.data)
print(f"Receive: {msg}")
if msg.get('type')=="auth_token":
if users.get(msg.get('token')):
self.user=users[msg['token']]
else:
self.xsend({'type':'auth_token','auth':False})
return
if msg.get('type')=="authenticate":
self.user=msg['user']
token=str(uuid4())
users[token]=msg['user']
self.xsend({'type':'authenticate', 'msg': { 'id': '1', 'username': msg['user'], 'role': 'admin', 'token': token}})
#self.xsend({'type':'authenticate', 'error': 'user dont exist'})
if not hasattr(self, 'user'):return
if msg.get('type')=="unsubscribe":
# print(f"Receive: {msg}")
if msg['msg']=="getCode":
self.gdata['imports']['loader'].q.put(1)
if msg['msg']=="getVars":
self.gdata['imports']['loader'].q.put(1)
if msg['msg']=="glu":
self.gdata['imports']['logs'].qsend.setdefault(self.lastname_run_id,[]).remove(self)
if msg.get('type')=="get_schedule_integrations":
self.xsend(self.get_event_integration('start_scenario'))
if msg.get('type')=="set_schedule_integrations":
job=self.gdata['x']['jobs'].get('start_scenario')
run_id=str(job['last_build_id'])
job['last_build_id']+=1
self.gdata['x']['scheduler'].append({'id':run_id,'start':tsp(msg['start']),'end':tsp(msg['end']),'name':'start_scenario','vars':msg['vars']})
self.xsend_all({'type':'get_schedule','events':self.get_events('start_scenario')})
if msg.get('type')=="get_schedule":
# TODO: n['date']
self.xsend({'type':'get_schedule','events':self.get_events(msg['name'])})
if msg.get('type')=="get_calendar":
# TODO: n['date']
self.xsend({'type':'get_calendar','events':self.get_calendar_events()})
if msg.get('type')=="schedule_select":
job=self.gdata['x']['jobs'].get(msg['name'])
run_id=str(job['last_build_id'])
job['last_build_id']+=1
job_vars={}
for n in msg['vars']:
if n.get('select') is not None:
job_vars[n['name']]=n.get('select')
elif n.get('selected') is not None:
job_vars[n['name']]=n.get('selected')
else:
job_vars[n['name']]=n['value']
scheduler=self.gdata['x']['scheduler']
scheduler.append({'id':run_id,'start':tsp(msg['start']),'end':tsp(msg['end']),'name':msg['name'],'vars':job_vars})
self.xsend_all({'type':'get_schedule','events':self.get_events(msg['name'])})
if msg.get('type')=="schedule_delete":
scheduler=self.gdata['x']['scheduler']
[scheduler.remove(n) for n in scheduler.copy() if n['id']==msg['id'] and n['name']==msg['name']]
self.xsend_all({'type':'get_schedule','events':self.get_events(msg['name'])})
if msg.get('type')=="schedule_move":
scheduler=self.gdata['x']['scheduler']
[n.update({'start':tsp(msg['start']),'end':tsp(msg['end'])}) for n in scheduler.copy() if n['id']==msg['id'] and n['name']==msg['name']]
self.xsend_xall({'type':'get_schedule','events':self.get_events(msg['name'])})
if msg.get('type')=="getCode":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name'])
self.xsend({'type':'getCode','code':job['code']})
if msg.get('type')=="getVars":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name'])
self.xsend({'type':'getVars','vars':job['vars']})
if msg.get('type')=="getLogs":
self.lastname=msg['name']
run_id=f"{msg['id']}{msg['name']}"
logs=self.gdata['logs'][run_id]
self.xsend({'type':'getLogs','logs':'\n'.join(logs)})
if msg.get('type')=="glu":
run_id=f"{msg['id']}{msg['name']}"
self.lastname_run_id=run_id
self.gdata['imports']['logs'].qsend.setdefault(run_id,[]).append(self)
if msg.get('type')=="setCode":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name'])
job['code']=msg['code']
self.xsend_xall({'type':'getCode','code':job['code']})
if msg.get('type')=="setVars":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name'])
job['vars']=msg['vars']
self.xsend_xall({'type':'getVars','vars':job['vars']})
if msg.get('type')=="jobs":
folder=['jobs']+msg.get('folder')
last_folder=self.gdata['x']['root']
for n in folder:
last_folder=last_folder.get(n)
jobs=[]
for n in last_folder:
if not last_folder.get(n):
job={'name':n,'type':'job'}
name='\\'.join(folder+[n])
history=self.gdata['x']['jobs'][name]['history']
if history:
job.update(history[str(max(int(n) for n in history.keys()))])
jobs.append(job)
else:
jobs.append({'name':n,'type':'folder'})
# jobs=[{'name':k } for k,v in self.gdata['x']['jobs'].items()]
# for job in jobs:
# history=self.gdata['x']['jobs'][job['name']]['history']
# if history:
# job.update(history[str(max(int(n) for n in history.keys()))])
# print(self.gdata['x']['scheduler'])
self.xsend({'type':'jobs','msg':jobs})
if msg.get('type')=="job":
self.lastname=msg['msg']
job=self.gdata['x']['jobs'].get(msg['msg']).copy()
job['name']=msg['msg']
self.xsend({'type':'job','msg':job})
if msg.get('type')=="hosts":
hosts={'master':self.gdata['x']['master'], 'servers':self.gdata['x']['servers']}
self.xsend({'type':'hosts','msg':hosts})
if msg.get('type')=="name":
if msg["old"]=='New Job':
self.gdata['x']['jobs'][msg['new']]={'vars':'','code':'','history':{},'status':1,'last_build_id':1}
else:
job=self.gdata['x']['jobs'].pop(msg['old'])
self.gdata['x']['jobs'][msg['new']]=job
print(f'Change job name from {msg["old"]} to {msg["new"]}')
if msg.get('type')=="build":
self.lastname=msg['name']
job=self.gdata['x']['jobs'].get(msg['name']).copy()
job_vars=self.gdata['imports']['executor'].process.run(job['vars'],self.gdata,{},-1,msg['name'])
job_vars.pop('run_id')
job_vars.pop('job_name')
job_vars=[{'name':k,'value':v,'type':tiper.get(type(v),'xzchto')} for k,v in job_vars.items()]
self.xsend({'type':'build','msg':job_vars})
if msg.get('type')=="delete":
self.gdata['x']['jobs'].pop(msg['name'])
# TODO: Subscribe to scheduler updates
if msg.get('type')=="history":
job=self.gdata['x']['jobs'].get(msg['name'])
history=[{'id':k,'status':v['status'],'start':v.get('start',''),'end':v.get('end',''),'delta':v.get('delta','')} for k,v in job['history'].items()]
for k in self.gdata['x']['scheduler']:
if k['name'] == msg['name']:
history.append({'id':k['id'],'status':'sheduled','start':iso(k['start']),'end':iso(k['end'])})
print(history)
self.xsend({'type':'history','msg':history})
self.gdata['imports']['executor'].qhistory.setdefault(msg['name'],[]).append(self)
if msg.get('type')=='stop':
print(msg)
except:
print(self.data)
traceback.print_exc()
def connected(self):
print(f'WebSocket connected: {self.address}')
clients.append(self)
# Thread(target=run,args=(self,)).start()
def handle_close(self):
print(self.address, 'closed')
clients.remove(self)
clients=[]
users={}
def work(data):
server = WebSocketServer('0.0.0.0', 5124, SimpleEcho, data)
server.serve_forever()
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=0)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
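# Note / illustrative sketch (shapes are assumptions): LazyFrames defers the
# np.concatenate until it is first used as an array, so stacked observations share
# the underlying frame buffers while stored in the replay buffer.
#
#     frames = [np.zeros((1, 84, 84), dtype=np.uint8) for _ in range(4)]
#     lazy = LazyFrames(frames)
#     np.array(lazy).shape   # -> (4, 84, 84); convert only when building a batch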
def wrap_atari(env, max_episode_steps=None):
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
Convert image shape from (height, width, channels) to (channels, height, width)
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
env = ImageToPyTorch(env)
if frame_stack:
env = FrameStack(env, 4)
return env
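# Illustrative composition of the wrappers above (mirrors `make_env` further below;
# the environment id is an assumption):
#
#     env = wrap_deepmind(wrap_atari(gym.make("BreakoutNoFrameskip-v4")),
#                         clip_rewards=True, frame_stack=True, scale=False)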
# https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
Parameters
----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max)
must form a mathematical group together with the set of
possible values for array elements (i.e. be associative)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
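# Note: despite the docstring wording, an explicit `end` passed to reduce() behaves
# as exclusive (it is decremented before recursing). Small worked example, assuming
# a sum tree of capacity 4 holding [1, 2, 3, 4]:
#
#     tree.reduce(0)       # -> 10.0 (whole array)
#     tree.reduce(1, 3)    # -> 5.0, i.e. arr[1] + arr[2]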
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
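# Worked example of proportional sampling, assuming the same capacity-4 sum tree
# holding [1, 2, 3, 4] (cumulative sums 1, 3, 6, 10):
#
#     tree.find_prefixsum_idx(0.5)   # -> 0
#     tree.find_prefixsum_idx(2.5)   # -> 1
#     tree.find_prefixsum_idx(6.5)   # -> 3
#
# Drawing a uniform value in [0, total mass) therefore selects index i with
# probability proportional to its stored priority.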
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
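# Illustrative usage of the uniform buffer (transition contents are placeholders):
#
#     buf = ReplayBuffer(size=10000)
#     buf.add(obs_t, action, reward, obs_tp1, done)
#     obs_b, act_b, rew_b, next_obs_b, done_b = buf.sample(batch_size=32)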
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
p_total = self._it_sum.sum(0, len(self._storage) - 1)
every_range_len = p_total / batch_size
for i in range(batch_size):
mass = random.random() * every_range_len + i * every_range_len
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
idexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
def add_with_priority(self, priority, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
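# Illustrative prioritized-replay loop (a sketch of how the buffer is used in the
# training code further below; variable names are placeholders):
#
#     prb = PrioritizedReplayBuffer(size=10000, alpha=0.6)
#     (obs_b, act_b, rew_b, next_obs_b, done_b,
#      weights, batch_idxes) = prb.sample(batch_size=32, beta=0.4)
#     # ... compute per-sample td_errors for the batch ...
#     prb.update_priorities(batch_idxes, np.abs(td_errors) + 1e-6)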
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
import pybullet_envs
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
from stable_baselines3.common.atari_wrappers import (
NoopResetEnv, MaxAndSkipEnv, EpisodicLifeEnv, FireResetEnv, WarpFrame, ClipRewardEnv)
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnvWrapper
from stable_baselines3.common.vec_env import VecFrameStack
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='DQN agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='whether to set `torch.backends.cudnn.deterministic=True` (default: True)')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='whether to use CUDA when available (default: True)')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='whether to capture videos of agent performance (check out the `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--num-envs', type=int, default=8,
help='the number of parallel game environments')
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--pr-alpha', type=float, default=0.6,
help='alpha parameter for prioritized replay buffer')
parser.add_argument('--pr-beta0', type=float, default=0.4,
help='initial value of beta for prioritized replay buffer')
parser.add_argument('--pr-eps', type=float, default=1e-6,
help='epsilon to add to the TD errors when updating priorities.')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
def make_env(gym_id, seed, idx):
def thunk():
env = gym.make(gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
random.seed(args.seed)
return env
return thunk
envs = DummyVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)])
assert isinstance(envs.action_space, Discrete), "only discrete action space is supported"
# ALGO LOGIC: initialize agent here:
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
class QNetwork(nn.Module):
def __init__(self, env, frames=4):
super(QNetwork, self).__init__()
self.network = nn.Sequential(
Scale(1/255),
nn.Conv2d(frames, 32, 8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(3136, 512),
nn.ReLU(),
nn.Linear(512, env.action_space.n)
)
def forward(self, x):
return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
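# Worked example of the epsilon/beta schedule (values chosen for illustration):
#     linear_schedule(1.0, 0.02, 1000, 0)      # -> 1.0
#     linear_schedule(1.0, 0.02, 1000, 500)    # -> 0.51
#     linear_schedule(1.0, 0.02, 1000, 2000)   # -> 0.02 (clamped at end_e)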
rb = PrioritizedReplayBuffer(args.buffer_size, args.pr_alpha)
q_network = QNetwork(envs).to(device)
target_network = QNetwork(envs).to(device)
target_network.load_state_dict(q_network.state_dict())
optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
loss_fn = nn.MSELoss()
print(device.__repr__())
print(q_network)
# TRY NOT TO MODIFY: start the game
# c stands for combined
c_obs = np.zeros((envs.num_envs+args.batch_size,)+envs.observation_space.shape, dtype=np.float32)
c_actions = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)
c_rewards = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)
c_next_obses = np.zeros((envs.num_envs+args.batch_size,)+envs.observation_space.shape, dtype=np.float32)
c_dones = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)
update_step = 0
obs = envs.reset()
for global_step in range(0, args.total_timesteps, args.num_envs):
update_step += 1
epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
beta = linear_schedule(args.pr_beta0, 1.0, args.total_timesteps, global_step)
if global_step < args.learning_starts:
action = [envs.action_space.sample() for _ in range(envs.num_envs)]
next_obs, reward, done, infos = envs.step(action)
# TRY NOT TO MODIFY: record rewards for plotting purposes
for info in infos:
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info["episode"]["r"]}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
break
for o, a, r, n, d in zip(obs, action, reward, next_obs, done):
rb.add(o, a, r, n, d)
obs = next_obs
continue
# batch the sampled obs and the environment obs together to make it faster
experience = rb.sample(args.batch_size, beta=beta)
(s_obs, s_actions, s_rewards, s_next_obses, s_dones, s_weights, s_batch_idxes) = experience
c_obs[:args.batch_size], c_obs[args.batch_size:] = s_obs, obs
logits = q_network.forward(torch.from_numpy(c_obs).to(device))
_, env_obs_logits = logits[:args.batch_size], logits[args.batch_size:]
# env step
action = torch.argmax(env_obs_logits, dim=1)
random_action = torch.randint(0, envs.action_space.n, (envs.num_envs,), device=device)
random_action_flag = torch.rand(envs.num_envs, device=device) > epsilon
action = torch.where(random_action_flag, action, random_action)
cpu_action = action.cpu().numpy()
next_obs, reward, done, infos = envs.step(cpu_action)
c_actions[:args.batch_size], c_actions[args.batch_size:] = s_actions, cpu_action
c_rewards[:args.batch_size], c_rewards[args.batch_size:] = s_rewards, reward
c_next_obses[:args.batch_size], c_next_obses[args.batch_size:] = s_next_obses, next_obs
c_dones[:args.batch_size], c_dones[args.batch_size:] = s_dones, done
with torch.no_grad():
# target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
temp_c_next_obses = torch.from_numpy(c_next_obses).to(device)
current_value = q_network.forward(temp_c_next_obses)
target_value = target_network.forward(temp_c_next_obses)
target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
td_target = torch.from_numpy(c_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(c_dones).to(device))
old_val = logits.gather(1, torch.LongTensor(c_actions).view(-1,1).to(device)).squeeze()
td_errors = td_target - old_val
loss = (td_errors[:args.batch_size] ** 2).mean()
writer.add_scalar("losses/td_loss", loss, global_step)
if update_step % args.train_frequency == 0:
# update the weights in the prioritized replay
new_priorities = np.abs(td_errors[:args.batch_size].tolist()) + args.pr_eps
rb.update_priorities(s_batch_idxes, new_priorities)
# optimize the model
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
optimizer.step()
# update the target network
if update_step % args.target_network_frequency == 0:
print("updated")
target_network.load_state_dict(q_network.state_dict())
# TRY NOT TO MODIFY: record rewards for plotting purposes
for info in infos:
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info["episode"]["r"]}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
break
for p, o, a, r, n, d in zip(np.abs(td_errors[args.batch_size:].tolist()), obs, cpu_action, reward, next_obs, done):
rb.add_with_priority(p, o, a, r, n, d)
obs = next_obs
envs.close()
writer.close() | # https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=0)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
Convert image shape from (height, width, channels) to (channels, height, width)
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
env = ImageToPyTorch(env)
if frame_stack:
env = FrameStack(env, 4)
return env
# https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
Parameters
----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max)
must form a mathematical group together with the set of
possible values for array elements (i.e. be associative)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
p_total = self._it_sum.sum(0, len(self._storage) - 1)
every_range_len = p_total / batch_size
for i in range(batch_size):
mass = random.random() * every_range_len + i * every_range_len
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
idexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
def add_with_priority(self, priority, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
import pybullet_envs
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
from stable_baselines3.common.atari_wrappers import (
NoopResetEnv, MaxAndSkipEnv, EpisodicLifeEnv, FireResetEnv, WarpFrame, ClipRewardEnv)
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnvWrapper
from stable_baselines3.common.vec_env import VecFrameStack
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='DQN agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='whether to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--num-envs', type=int, default=8,
help='the number of parallel game environments')
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--pr-alpha', type=float, default=0.6,
help='alpha parameter for prioritized replay buffer')
parser.add_argument('--pr-beta0', type=float, default=0.4,
help='initial value of beta for prioritized replay buffer')
parser.add_argument('--pr-eps', type=float, default=1e-6,
help='epsilon to add to the TD errors when updating priorities.')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
def make_env(gym_id, seed, idx):
def thunk():
env = gym.make(gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
random.seed(args.seed)
return env
return thunk
envs = DummyVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)])
assert isinstance(envs.action_space, Discrete), "only discrete action space is supported"
# ALGO LOGIC: initialize agent here:
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
class QNetwork(nn.Module):
def __init__(self, env, frames=4):
super(QNetwork, self).__init__()
self.network = nn.Sequential(
Scale(1/255),
nn.Conv2d(frames, 32, 8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(3136, 512),
nn.ReLU(),
nn.Linear(512, env.action_space.n)
)
def forward(self, x):
return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
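# worked example (illustrative): with start_e=1.0, end_e=0.02 and duration=0.10*10_000_000=1_000_000,
# linear_schedule returns 1.0 at t=0, about 0.51 at t=500_000, and stays clamped at 0.02 for t >= 1_000_000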
rb = PrioritizedReplayBuffer(args.buffer_size, args.pr_alpha)
q_network = QNetwork(envs).to(device)
target_network = QNetwork(envs).to(device)
target_network.load_state_dict(q_network.state_dict())
optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
loss_fn = nn.MSELoss()
print(device.__repr__())
print(q_network)
# TRY NOT TO MODIFY: start the game
# c stands for combined
c_obs = np.zeros((envs.num_envs+args.batch_size,)+envs.observation_space.shape, dtype=np.float32)
c_actions = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)
c_rewards = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)
c_next_obses = np.zeros((envs.num_envs+args.batch_size,)+envs.observation_space.shape, dtype=np.float32)
c_dones = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)
update_step = 0
obs = envs.reset()
for global_step in range(0, args.total_timesteps, args.num_envs):
update_step += 1
epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
beta = linear_schedule(args.pr_beta0, 1.0, args.total_timesteps, global_step)
if global_step < args.learning_starts:
action = [envs.action_space.sample() for _ in range(envs.num_envs)]
next_obs, reward, done, infos = envs.step(action)
# TRY NOT TO MODIFY: record rewards for plotting purposes
for info in infos:
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
break
for o, a, r, n, d in zip(obs, action, reward, next_obs, done):
rb.add(o, a, r, n, d)
obs = next_obs
continue
# batch the sampled obs and the environment obs together to make it faster
experience = rb.sample(args.batch_size, beta=beta)
(s_obs, s_actions, s_rewards, s_next_obses, s_dones, s_weights, s_batch_idxes) = experience
c_obs[:args.batch_size], c_obs[args.batch_size:] = s_obs, obs
logits = q_network.forward(torch.from_numpy(c_obs).to(device))
_, env_obs_logits = logits[:args.batch_size], logits[args.batch_size:]
# env step
action = torch.argmax(env_obs_logits, dim=1)
random_action = torch.randint(0, envs.action_space.n, (envs.num_envs,), device=device)
random_action_flag = torch.rand(envs.num_envs, device=device) > epsilon
action = torch.where(random_action_flag, action, random_action)
cpu_action = action.cpu().numpy()
next_obs, reward, done, infos = envs.step(cpu_action)
c_actions[:args.batch_size], c_actions[args.batch_size:] = s_actions, cpu_action
c_rewards[:args.batch_size], c_rewards[args.batch_size:] = s_rewards, reward
c_next_obses[:args.batch_size], c_next_obses[args.batch_size:] = s_next_obses, next_obs
c_dones[:args.batch_size], c_dones[args.batch_size:] = s_dones, done
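# Double DQN target (descriptive note): the online q_network selects the argmax action on the next
# observations and target_network evaluates it; the commented-out line below is the vanilla DQN
# alternative that both selects and evaluates with the target network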
with torch.no_grad():
# target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
temp_c_next_obses = torch.from_numpy(c_next_obses).to(device)
current_value = q_network.forward(temp_c_next_obses)
target_value = target_network.forward(temp_c_next_obses)
target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
td_target = torch.from_numpy(c_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(c_dones).to(device))
old_val = logits.gather(1, torch.LongTensor(c_actions).view(-1,1).to(device)).squeeze()
td_errors = td_target - old_val
loss = (td_errors[:args.batch_size] ** 2).mean()
writer.add_scalar("losses/td_loss", loss, global_step)
if update_step % args.train_frequency == 0:
# update the weights in the prioritized replay
new_priorities = np.abs(td_errors[:args.batch_size].tolist()) + args.pr_eps
rb.update_priorities(s_batch_idxes, new_priorities)
# optimize the model
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
optimizer.step()
# update the target network
if update_step % args.target_network_frequency == 0:
print("updated")
target_network.load_state_dict(q_network.state_dict())
# TRY NOT TO MODIFY: record rewards for plotting purposes
for info in infos:
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
break
for p, o, a, r, n, d in zip(np.abs(td_errors[args.batch_size:].tolist()), obs, cpu_action, reward, next_obs, done):
rb.add_with_priority(p, o, a, r, n, d)
obs = next_obs
envs.close()
writer.close() |
""""
Copyright © Krypton 2021 - https://github.com/kkrypt0nn (https://krypt0n.co.uk)
Description:
This is a template to create your own discord bot in python.
Version: 4.0.1
"""
import json
import os
import platform
import random
import sys
import disnake
from disnake import ApplicationCommandInteraction
from disnake.ext import tasks, commands
from disnake.ext.commands import Bot
import exceptions
if not os.path.isfile("config.json"):
sys.exit("'config.json' not found! Please add it and try again.")
else:
with open("config.json") as file:
config = json.load(file)
"""
Setup bot intents (events restrictions)
For more information about intents, please go to the following websites:
https://docs.disnake.dev/en/latest/intents.html
https://docs.disnake.dev/en/latest/intents.html#privileged-intents
Default Intents:
intents.bans = True
intents.dm_messages = False
intents.dm_reactions = False
intents.dm_typing = False
intents.emojis = True
intents.guild_messages = True
intents.guild_reactions = True
intents.guild_typing = False
intents.guilds = True
intents.integrations = True
intents.invites = True
intents.reactions = True
intents.typing = False
intents.voice_states = False
intents.webhooks = False
Privileged Intents (Needs to be enabled on dev page), please use them only if you need them:
intents.members = True
intents.messages = True
intents.presences = True
"""
intents = disnake.Intents.default()
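# Example (assumption, not part of the template): privileged intents must also be enabled on the
# Discord developer portal before toggling them here, e.g.
# intents.members = True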
bot = Bot(command_prefix=config["prefix"], intents=intents)
# The code in this event is executed when the bot is ready
@bot.event
async def on_ready():
print(f"Logged in as {bot.user.name}")
print(f"disnake API version: {disnake.__version__}")
print(f"Python version: {platform.python_version()}")
print(f"Running on: {platform.system()} {platform.release()} ({os.name})")
print("-------------------")
status_task.start()
# Setup the game status task of the bot
@tasks.loop(minutes=1.0)
async def status_task():
statuses = ["with you!", "with discord!", "with humans!"]
await bot.change_presence(activity=disnake.Game(random.choice(statuses)))
# Removes the default help command of discord.py to be able to create our custom help command.
bot.remove_command("help")
if __name__ == "__main__":
for file in os.listdir("./cogs"):
if file.endswith(".py"):
extension = file[:-3]
try:
bot.load_extension(f"cogs.{extension}")
print(f"Loaded extension '{extension}'")
except Exception as e:
exception = f"{type(e).__name__}: {e}"
print(f"Failed to load extension {extension}\n{exception}")
# The code in this event is executed every time someone sends a message, with or without the prefix
@bot.event
async def on_message(message: disnake.Message):
# Ignores if a command is being executed by a bot or by the bot itself
if message.author == bot.user or message.author.bot:
return
await bot.process_commands(message)
# The code in this event is executed every time a slash command has been *successfully* executed
@bot.event
async def on_slash_command(interaction: ApplicationCommandInteraction):
print(
f"Executed {interaction.data.name} command in {interaction.guild.name} (ID: {interaction.guild.id}) by {interaction.author} (ID: {interaction.author.id})")
# The code in this event is executed every time a valid slash command catches an error
@bot.event
async def on_slash_command_error(interaction: ApplicationCommandInteraction, error: Exception):
if isinstance(error, exceptions.UserBlacklisted):
"""
The code here will only execute if the error is an instance of 'UserBlacklisted', which can occur when using
the @checks.is_owner() check in your command, or you can raise the error by yourself.
'hidden=True' will make it so that only the user who executed the command can see the message
"""
embed = disnake.Embed(
title="Error!",
description="You are blacklisted from using the bot.",
color=0xE02B2B
)
print("A blacklisted user tried to execute a command.")
return await interaction.send(embed=embed, ephemeral=True)
elif isinstance(error, commands.errors.MissingPermissions):
embed = disnake.Embed(
title="Error!",
description="You are missing the permission(s) `" + ", ".join(
error.missing_permissions) + "` to execute this command!",
color=0xE02B2B
)
print("A blacklisted user tried to execute a command.")
return await interaction.send(embed=embed, ephemeral=True)
raise error
# The code in this event is executed every time a normal command has been *successfully* executed
@bot.event
async def on_command_completion(ctx):
fullCommandName = ctx.command.qualified_name
split = fullCommandName.split(" ")
executedCommand = str(split[0])
print(
f"Executed {executedCommand} command in {ctx.guild.name} (ID: {ctx.message.guild.id}) by {ctx.message.author} (ID: {ctx.message.author.id})")
# The code in this event is executed every time a normal valid command catches an error
@bot.event
async def on_command_error(context, error):
if isinstance(error, commands.CommandOnCooldown):
minutes, seconds = divmod(error.retry_after, 60)
hours, minutes = divmod(minutes, 60)
hours = hours % 24
embed = disnake.Embed(
title="Hey, please slow down!",
description=f"You can use this command again in {f"{round(hours)} hours" if round(hours) > 0 else ""} {f"{round(minutes)} minutes" if round(minutes) > 0 else ""} {f"{round(seconds)} seconds" if round(seconds) > 0 else ""}.",
color=0xE02B2B
)
await context.send(embed=embed)
elif isinstance(error, commands.MissingPermissions):
embed = disnake.Embed(
title="Error!",
description="You are missing the permission(s) `" + ", ".join(
error.missing_permissions) + "` to execute this command!",
color=0xE02B2B
)
await context.send(embed=embed)
elif isinstance(error, commands.MissingRequiredArgument):
embed = disnake.Embed(
title="Error!",
description=str(error).capitalize(),
# We need to capitalize because the command arguments have no capital letter in the code.
color=0xE02B2B
)
await context.send(embed=embed)
raise error
# Run the bot with the token
bot.run(config["token"])
| """"
Copyright © Krypton 2021 - https://github.com/kkrypt0nn (https://krypt0n.co.uk)
Description:
This is a template to create your own discord bot in python.
Version: 4.0.1
"""
import json
import os
import platform
import random
import sys
import disnake
from disnake import ApplicationCommandInteraction
from disnake.ext import tasks, commands
from disnake.ext.commands import Bot
import exceptions
if not os.path.isfile("config.json"):
sys.exit("'config.json' not found! Please add it and try again.")
else:
with open("config.json") as file:
config = json.load(file)
"""
Setup bot intents (events restrictions)
For more information about intents, please go to the following websites:
https://docs.disnake.dev/en/latest/intents.html
https://docs.disnake.dev/en/latest/intents.html#privileged-intents
Default Intents:
intents.bans = True
intents.dm_messages = False
intents.dm_reactions = False
intents.dm_typing = False
intents.emojis = True
intents.guild_messages = True
intents.guild_reactions = True
intents.guild_typing = False
intents.guilds = True
intents.integrations = True
intents.invites = True
intents.reactions = True
intents.typing = False
intents.voice_states = False
intents.webhooks = False
Privileged Intents (Needs to be enabled on dev page), please use them only if you need them:
intents.members = True
intents.messages = True
intents.presences = True
"""
intents = disnake.Intents.default()
bot = Bot(command_prefix=config["prefix"], intents=intents)
# The code in this event is executed when the bot is ready
@bot.event
async def on_ready():
print(f"Logged in as {bot.user.name}")
print(f"disnake API version: {disnake.__version__}")
print(f"Python version: {platform.python_version()}")
print(f"Running on: {platform.system()} {platform.release()} ({os.name})")
print("-------------------")
status_task.start()
# Setup the game status task of the bot
@tasks.loop(minutes=1.0)
async def status_task():
statuses = ["with you!", "with discord!", "with humans!"]
await bot.change_presence(activity=disnake.Game(random.choice(statuses)))
# Removes the default help command of discord.py to be able to create our custom help command.
bot.remove_command("help")
if __name__ == "__main__":
for file in os.listdir("./cogs"):
if file.endswith(".py"):
extension = file[:-3]
try:
bot.load_extension(f"cogs.{extension}")
print(f"Loaded extension '{extension}'")
except Exception as e:
exception = f"{type(e).__name__}: {e}"
print(f"Failed to load extension {extension}\n{exception}")
# The code in this event is executed every time someone sends a message, with or without the prefix
@bot.event
async def on_message(message: disnake.Message):
# Ignores if a command is being executed by a bot or by the bot itself
if message.author == bot.user or message.author.bot:
return
await bot.process_commands(message)
# The code in this event is executed every time a slash command has been *successfully* executed
@bot.event
async def on_slash_command(interaction: ApplicationCommandInteraction):
print(
f"Executed {interaction.data.name} command in {interaction.guild.name} (ID: {interaction.guild.id}) by {interaction.author} (ID: {interaction.author.id})")
# The code in this event is executed every time a valid slash command catches an error
@bot.event
async def on_slash_command_error(interaction: ApplicationCommandInteraction, error: Exception):
if isinstance(error, exceptions.UserBlacklisted):
"""
The code here will only execute if the error is an instance of 'UserBlacklisted', which can occur when using
the @checks.is_owner() check in your command, or you can raise the error by yourself.
'hidden=True' will make it so that only the user who executed the command can see the message
"""
embed = disnake.Embed(
title="Error!",
description="You are blacklisted from using the bot.",
color=0xE02B2B
)
print("A blacklisted user tried to execute a command.")
return await interaction.send(embed=embed, ephemeral=True)
elif isinstance(error, commands.errors.MissingPermissions):
embed = disnake.Embed(
title="Error!",
description="You are missing the permission(s) `" + ", ".join(
error.missing_permissions) + "` to execute this command!",
color=0xE02B2B
)
print("A blacklisted user tried to execute a command.")
return await interaction.send(embed=embed, ephemeral=True)
raise error
# The code in this event is executed every time a normal command has been *successfully* executed
@bot.event
async def on_command_completion(ctx):
fullCommandName = ctx.command.qualified_name
split = fullCommandName.split(" ")
executedCommand = str(split[0])
print(
f"Executed {executedCommand} command in {ctx.guild.name} (ID: {ctx.message.guild.id}) by {ctx.message.author} (ID: {ctx.message.author.id})")
# The code in this event is executed every time a normal valid command catches an error
@bot.event
async def on_command_error(context, error):
if isinstance(error, commands.CommandOnCooldown):
minutes, seconds = divmod(error.retry_after, 60)
hours, minutes = divmod(minutes, 60)
hours = hours % 24
embed = disnake.Embed(
title="Hey, please slow down!",
description=f"You can use this command again in {f'{round(hours)} hours' if round(hours) > 0 else ''} {f'{round(minutes)} minutes' if round(minutes) > 0 else ''} {f'{round(seconds)} seconds' if round(seconds) > 0 else ''}.",
color=0xE02B2B
)
await context.send(embed=embed)
elif isinstance(error, commands.MissingPermissions):
embed = disnake.Embed(
title="Error!",
description="You are missing the permission(s) `" + ", ".join(
error.missing_permissions) + "` to execute this command!",
color=0xE02B2B
)
await context.send(embed=embed)
elif isinstance(error, commands.MissingRequiredArgument):
embed = disnake.Embed(
title="Error!",
description=str(error).capitalize(),
# We need to capitalize because the command arguments have no capital letter in the code.
color=0xE02B2B
)
await context.send(embed=embed)
raise error
# Run the bot with the token
bot.run(config["token"])
|
def metade(n, f=False):
if f:
return f'{moedas(n / 2)}'
else:
return n / 2
def dobro(n, f=False):
if f:
return f'{moedas(n * 2)}'
else:
return n * 2
def aumentar(n, p=10, f=False):
if f:
return f'{moedas(n + (n * p / 100))}'
else:
return n + (n * p / 100)
def diminuir(n, p=13, f=False):
if f:
return f'{moedas(n - (n * p / 100))}'
else:
return n - (n * p / 100)
def moedas(f):
return f'R${f:.2f}'.replace('.', ',')
def resumo(p, a, r):
print('--' * 20)
print(f'{"RESUMO DO VALOR":^30}')
print('--' * 20)
print(f'{"Preço Analisado":<25}: {moedas(p)}')
print(f'{"Dobro do preço":<25}: {dobro(p, True)}')
print(f'{"Metade do preço":<25}: {metade(p, True)}')
print(f'{str(a) + "% de Aumento":<25}: {aumentar(p, a, True)}')
print(f'{str(r) + "% de Redução":<25}: {diminuir(p, r, True)}')
print('__' * 20) | def metade(n, f=False):
if f:
return f'{moedas(n / 2)}'
else:
return n / 2
def dobro(n, f=False):
if f:
return f'{moedas(n * 2)}'
else:
return n * 2
def aumentar(n, p=10, f=False):
if f:
return f'{moedas(n + (n * p / 100))}'
else:
return n + (n * p / 100)
def diminuir(n, p=13, f=False):
if f:
return f'{moedas(n - (n * p / 100))}'
else:
return n - (n * p / 100)
def moedas(f):
return f'R${f:.2f}'.replace('.', ',')
def resumo(p, a, r):
print('--' * 20)
print(f'{"RESUMO DO VALOR":^30}')
print('--' * 20)
print(f'{"Preço Analisado":<25}: {moedas(p)}')
print(f'{"Dobro do preço":<25}: {dobro(p, True)}')
print(f'{"Metade do preço":<25}: {metade(p, True)}')
print(f'{str(a) + "% de Aumento":<25}: {aumentar(p, a, True)}')
print(f'{str(r) + "% de Redução":<25}: {diminuir(p, r, True)}')
print('__' * 20) |
import deluca.core
from deluca.lung.core import Controller, ControllerState
from deluca.lung.utils import BreathWaveform
from deluca.lung.controllers import Expiratory
from deluca.lung.environments._stitched_sim import StitchedSimObservation
from deluca.lung.utils.data.transform import ShiftScaleTransform
from deluca.lung.core import DEFAULT_DT
from deluca.lung.core import proper_time
import jax
import jax.numpy as jnp
import optax
import numpy as np
import itertools
import torch
import flax.linen as fnn
DEFAULT_DT = 0.03
class Deep_network(fnn.Module):
H: int = 100
kernel_size: int = 5
out_dim: int = 1
@fnn.compact
def __call__(self, x):
x = fnn.Conv(features=self.H, kernel_size=self.kernel_size, name=f"deep_conv")(x)
x = fnn.relu(x)
x = x.reshape((x.shape[0], -1)) # flatten
x = fnn.Dense(features=1, use_bias=True, name=f"deep_fc")(x)
return x
class DeepControllerState(deluca.Obj):
waveform: deluca.Obj # waveform has to be here because it is subject to change during training
errs: jnp.array
time: float = float("inf")
steps: int = 0
dt: float = DEFAULT_DT
class Deep(Controller):
params: list = deluca.field(jaxed=True)
model: fnn.module = deluca.field(Deep_network, jaxed=False)
featurizer: jnp.array = deluca.field(jaxed=False)
H: int = deluca.field(100, jaxed=False)
input_dim: int = deluca.field(1, jaxed=False)
history_len: int = deluca.field(10, jaxed=False)
kernel_size: int = deluca.field(5, jaxed=False)
clip: float = deluca.field(40.0, jaxed=False)
normalize: bool = deluca.field(False, jaxed=False)
u_scaler: ShiftScaleTransform = deluca.field(jaxed=False)
p_scaler: ShiftScaleTransform = deluca.field(jaxed=False)
# bptt: int = deluca.field(1, jaxed=False) not used right now
# TODO: add analogue of activation=torch.nn.ReLU
def setup(self):
self.model = Deep_network(H=self.H, kernel_size=self.kernel_size, out_dim=1)
if self.params is None:
self.params = self.model.init(
jax.random.PRNGKey(0), jnp.expand_dims(jnp.ones([self.history_len]), axis=(0,1))
)["params"]
# linear feature transform:
# errs -> [average of last h errs, ..., average of last 2 errs, last err]
# emulates low-pass filter bank
self.featurizer = jnp.tril(jnp.ones((self.history_len, self.history_len)))
self.featurizer /= jnp.expand_dims(jnp.arange(self.history_len, 0, -1), axis = 0)
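# worked example (illustrative): for history_len = 3 the featurizer is
# [[1/3, 0,   0],
#  [1/3, 1/2, 0],
#  [1/3, 1/2, 1]]
# so errs @ featurizer = [mean(errs[-3:]), mean(errs[-2:]), errs[-1]]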
if self.normalize:
self.u_scaler = u_scaler
self.p_scaler = p_scaler
def init(self, waveform=BreathWaveform.create()):
errs = jnp.array([0.0] * self.history_len)
state = DeepControllerState(errs=errs, waveform=waveform)
return state
def __call__(self, controller_state, obs):
state, t = obs.predicted_pressure, obs.time
errs, waveform = controller_state.errs, controller_state.waveform
target = waveform.at(t)
if self.normalize:
target_scaled = self.p_scaler(target).squeeze()
state_scaled = self.p_scaler(state).squeeze()
next_errs = jnp.roll(errs, shift=-1)
next_errs = next_errs.at[-1].set(target_scaled - state_scaled)
else:
next_errs = jnp.roll(errs, shift=-1)
next_errs = next_errs.at[-1].set(target - state)
controller_state = controller_state.replace(errs=next_errs)
decay = waveform.decay(t)
def true_func(null_arg):
trajectory = jnp.expand_dims(next_errs[-self.history_len:], axis=(0,1))
u_in = self.model.apply({"params": self.params}, (trajectory @ self.featurizer))
return u_in.squeeze().astype(jnp.float32)
# changed decay compare from None to float(inf) due to cond requirements
u_in = jax.lax.cond(jnp.isinf(decay),
true_func,
lambda x : jnp.array(decay),
None)
u_in = jax.lax.clamp(0.0, u_in.astype(jnp.float32), self.clip).squeeze()
# update controller_state
new_dt = jnp.max(jnp.array([DEFAULT_DT, t - proper_time(controller_state.time)]))
new_time = t
new_steps = controller_state.steps + 1
controller_state = controller_state.replace(time=new_time, steps=new_steps, dt=new_dt)
return controller_state, u_in
'''
def train_global(
self,
sims,
pip_feed="parallel",
duration=3,
dt=0.03,
epochs=100,
use_noise=False,
optimizer=torch.optim.Adam,
optimizer_params={"lr": 1e-3, "weight_decay": 1e-4},
loss_fn=torch.nn.L1Loss,
loss_fn_params={},
scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau,
scheduler_params={"factor": 0.9, "patience": 10},
use_tqdm=True,
print_loss=1,
shuffle=False,
device="cpu",
):
optimizer = optimizer(self.parameters(), **optimizer_params)
scheduler = scheduler(optimizer, **scheduler_params)
loss_fn = loss_fn(**loss_fn_params)
tt = torch.linspace(0, duration, int(duration / dt))
losses = []
torch.autograd.set_detect_anomaly(True)
PIPs = [10, 15, 20, 25, 30, 35]
PEEP = 5
# TODO: handle device-awareness
for epoch in range(epochs):
if pip_feed == "parallel":
self.zero_grad()
loss = torch.tensor(0.0, device=device, requires_grad=True)
for PIP, sim in itertools.product(PIPs, sims):
if pip_feed == "sequential":
self.zero_grad()
loss = torch.tensor(0.0, device=device, requires_grad=True)
self.waveform = BreathWaveform((PEEP, PIP))
expiratory = Expiratory(waveform=self.waveform)
self.reset()
sim.reset()
for t in tt:
sim.pressure += use_noise * torch.normal(mean=torch.tensor(1.5), std=1.0)
pressure = sim.pressure
u_in = self(pressure, self.waveform.at(t), t)
u_out = expiratory(pressure, self.waveform.at(t), t)
sim.step(
u_in, u_out
) # potentially add multiplicative noise by * torch.normal(mean=torch.tensor(1.5), std=0.5)
if u_out == 0:
loss = loss + loss_fn(torch.tensor(self.waveform.at(t)), pressure)
if pip_feed == "sequential":
loss.backward(retain_graph=True)
optimizer.step()
scheduler.step(loss)
per_step_loss = loss / len(tt)
losses.append(per_step_loss)
if epoch % print_loss == 0:
print(
f"Epoch: {epoch}, PIP: {PIP}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]["lr"]}"
)
if pip_feed == "parallel":
loss.backward(retain_graph=True)
optimizer.step()
scheduler.step(loss)
per_step_loss = loss / len(tt)
losses.append(per_step_loss)
if epoch % print_loss == 0:
print(
f"Epoch: {epoch}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]["lr"]}"
)
return losses
'''
def rollout(controller, sim, tt, use_noise, PEEP, PIP, loss_fn, loss):
waveform = BreathWaveform.create(custom_range=(PEEP, PIP))
expiratory = Expiratory.create(waveform=waveform)
controller_state = controller.init(waveform)
expiratory_state = expiratory.init()
sim_state, obs = sim.reset()
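# descriptive note: jax.lax.scan below carries (controller_state, expiratory_state, sim_state, obs, loss)
# across the time grid tt; the loss only accumulates while the expiratory valve is closed (u_out == 0)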
def loop_over_tt(ctrlState_expState_simState_obs_loss, t):
controller_state, expiratory_state, sim_state, obs, loss = ctrlState_expState_simState_obs_loss
mean = 1.5
std = 1.0
noise = mean + std * jax.random.normal(jax.random.PRNGKey(0), shape=())
pressure = sim_state.predicted_pressure + use_noise * noise
sim_state = sim_state.replace(predicted_pressure=pressure) # Need to update p_history as well or no?
obs = obs.replace(predicted_pressure=pressure, time=t)
controller_state, u_in = controller(controller_state, obs)
expiratory_state, u_out = expiratory(expiratory_state, obs)
sim_state, obs = sim(sim_state, (u_in, u_out))
loss = jax.lax.cond(u_out == 0,
lambda x: x + loss_fn(jnp.array(waveform.at(t)), pressure),
lambda x: x,
loss)
return (controller_state, expiratory_state, sim_state, obs, loss), None
(_, _, _, _, loss), _ = jax.lax.scan(loop_over_tt, (controller_state, expiratory_state, sim_state, obs, loss), tt)
return loss
def rollout_parallel(controller, sim, tt, use_noise, PEEP, PIPs, loss_fn):
loss = jnp.array(0.)
for PIP in PIPs:
loss = rollout(controller, sim, tt, use_noise, PEEP, PIP, loss_fn, loss)
return loss
# TODO: add scheduler and scheduler_params
# Question: Jax analogue of torch.autograd.set_detect_anomaly(True)?
def deep_train(
controller,
sim,
pip_feed="parallel",
duration=3,
dt=0.03,
epochs=100,
use_noise=False,
optimizer=optax.adamw,
optimizer_params={"learning_rate": 1e-3, "weight_decay": 1e-4},
loss_fn=lambda x, y: (jnp.abs(x - y)).mean(),
# scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau,
# scheduler_params={"factor": 0.9, "patience": 10},
print_loss=1,
):
optim = optimizer(**optimizer_params)
optim_state = optim.init(controller)
tt = jnp.linspace(0, duration, int(duration / dt))
losses = []
# torch.autograd.set_detect_anomaly(True)
PIPs = [10, 15, 20, 25, 30, 35]
PEEP = 5
# TODO: handle device-awareness
for epoch in range(epochs):
if pip_feed == "parallel":
value, grad = jax.value_and_grad(rollout_parallel)(controller, sim, tt, use_noise, PEEP, PIPs, loss_fn)
updates, optim_state = optim.update(grad, optim_state, controller)
controller = optax.apply_updates(controller, updates)
per_step_loss = value / len(tt)
losses.append(per_step_loss)
if epoch % print_loss == 0:
print(
f"Epoch: {epoch}\tLoss: {per_step_loss:.2f}"
)
if pip_feed == "sequential":
for PIP in PIPs:
value, grad = jax.value_and_grad(rollout)(controller, sim, tt, use_noise, PEEP, PIP, loss_fn, jnp.array(0.))
updates, optim_state = optim.update(grad, optim_state, controller)
controller = optax.apply_updates(controller, updates)
per_step_loss = value / len(tt)
losses.append(per_step_loss)
if epoch % print_loss == 0:
print(
f"Epoch: {epoch}, PIP: {PIP}\tLoss: {per_step_loss:.2f}"
)
return controller
| import deluca.core
from deluca.lung.core import Controller, ControllerState
from deluca.lung.utils import BreathWaveform
from deluca.lung.controllers import Expiratory
from deluca.lung.environments._stitched_sim import StitchedSimObservation
from deluca.lung.utils.data.transform import ShiftScaleTransform
from deluca.lung.core import DEFAULT_DT
from deluca.lung.core import proper_time
import jax
import jax.numpy as jnp
import optax
import numpy as np
import itertools
import torch
import flax.linen as fnn
DEFAULT_DT = 0.03
class Deep_network(fnn.Module):
H: int = 100
kernel_size: int = 5
out_dim: int = 1
@fnn.compact
def __call__(self, x):
x = fnn.Conv(features=self.H, kernel_size=self.kernel_size, name=f"deep_conv")(x)
x = fnn.relu(x)
x = x.reshape((x.shape[0], -1)) # flatten
x = fnn.Dense(features=1, use_bias=True, name=f"deep_fc")(x)
return x
class DeepControllerState(deluca.Obj):
waveform: deluca.Obj # waveform has to be here because it is subject to change during training
errs: jnp.array
time: float = float("inf")
steps: int = 0
dt: float = DEFAULT_DT
class Deep(Controller):
params: list = deluca.field(jaxed=True)
model: fnn.module = deluca.field(Deep_network, jaxed=False)
featurizer: jnp.array = deluca.field(jaxed=False)
H: int = deluca.field(100, jaxed=False)
input_dim: int = deluca.field(1, jaxed=False)
history_len: int = deluca.field(10, jaxed=False)
kernel_size: int = deluca.field(5, jaxed=False)
clip: float = deluca.field(40.0, jaxed=False)
normalize: bool = deluca.field(False, jaxed=False)
u_scaler: ShiftScaleTransform = deluca.field(jaxed=False)
p_scaler: ShiftScaleTransform = deluca.field(jaxed=False)
# bptt: int = deluca.field(1, jaxed=False) not used right now
# TODO: add analogue of activation=torch.nn.ReLU
def setup(self):
self.model = Deep_network(H=self.H, kernel_size=self.kernel_size, out_dim=1)
if self.params is None:
self.params = self.model.init(
jax.random.PRNGKey(0), jnp.expand_dims(jnp.ones([self.history_len]), axis=(0,1))
)["params"]
# linear feature transform:
# errs -> [average of last h errs, ..., average of last 2 errs, last err]
# emulates low-pass filter bank
self.featurizer = jnp.tril(jnp.ones((self.history_len, self.history_len)))
self.featurizer /= jnp.expand_dims(jnp.arange(self.history_len, 0, -1), axis = 0)
if self.normalize:
self.u_scaler = u_scaler
self.p_scaler = p_scaler
def init(self, waveform=BreathWaveform.create()):
errs = jnp.array([0.0] * self.history_len)
state = DeepControllerState(errs=errs, waveform=waveform)
return state
def __call__(self, controller_state, obs):
state, t = obs.predicted_pressure, obs.time
errs, waveform = controller_state.errs, controller_state.waveform
target = waveform.at(t)
if self.normalize:
target_scaled = self.p_scaler(target).squeeze()
state_scaled = self.p_scaler(state).squeeze()
next_errs = jnp.roll(errs, shift=-1)
next_errs = next_errs.at[-1].set(target_scaled - state_scaled)
else:
next_errs = jnp.roll(errs, shift=-1)
next_errs = next_errs.at[-1].set(target - state)
controller_state = controller_state.replace(errs=next_errs)
decay = waveform.decay(t)
def true_func(null_arg):
trajectory = jnp.expand_dims(next_errs[-self.history_len:], axis=(0,1))
u_in = self.model.apply({"params": self.params}, (trajectory @ self.featurizer))
return u_in.squeeze().astype(jnp.float32)
# changed decay compare from None to float(inf) due to cond requirements
u_in = jax.lax.cond(jnp.isinf(decay),
true_func,
lambda x : jnp.array(decay),
None)
u_in = jax.lax.clamp(0.0, u_in.astype(jnp.float32), self.clip).squeeze()
# update controller_state
new_dt = jnp.max(jnp.array([DEFAULT_DT, t - proper_time(controller_state.time)]))
new_time = t
new_steps = controller_state.steps + 1
controller_state = controller_state.replace(time=new_time, steps=new_steps, dt=new_dt)
return controller_state, u_in
'''
def train_global(
self,
sims,
pip_feed="parallel",
duration=3,
dt=0.03,
epochs=100,
use_noise=False,
optimizer=torch.optim.Adam,
optimizer_params={"lr": 1e-3, "weight_decay": 1e-4},
loss_fn=torch.nn.L1Loss,
loss_fn_params={},
scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau,
scheduler_params={"factor": 0.9, "patience": 10},
use_tqdm=True,
print_loss=1,
shuffle=False,
device="cpu",
):
optimizer = optimizer(self.parameters(), **optimizer_params)
scheduler = scheduler(optimizer, **scheduler_params)
loss_fn = loss_fn(**loss_fn_params)
tt = torch.linspace(0, duration, int(duration / dt))
losses = []
torch.autograd.set_detect_anomaly(True)
PIPs = [10, 15, 20, 25, 30, 35]
PEEP = 5
# TODO: handle device-awareness
for epoch in range(epochs):
if pip_feed == "parallel":
self.zero_grad()
loss = torch.tensor(0.0, device=device, requires_grad=True)
for PIP, sim in itertools.product(PIPs, sims):
if pip_feed == "sequential":
self.zero_grad()
loss = torch.tensor(0.0, device=device, requires_grad=True)
self.waveform = BreathWaveform((PEEP, PIP))
expiratory = Expiratory(waveform=self.waveform)
self.reset()
sim.reset()
for t in tt:
sim.pressure += use_noise * torch.normal(mean=torch.tensor(1.5), std=1.0)
pressure = sim.pressure
u_in = self(pressure, self.waveform.at(t), t)
u_out = expiratory(pressure, self.waveform.at(t), t)
sim.step(
u_in, u_out
) # potentially add multiplicative noise by * torch.normal(mean=torch.tensor(1.5), std=0.5)
if u_out == 0:
loss = loss + loss_fn(torch.tensor(self.waveform.at(t)), pressure)
if pip_feed == "sequential":
loss.backward(retain_graph=True)
optimizer.step()
scheduler.step(loss)
per_step_loss = loss / len(tt)
losses.append(per_step_loss)
if epoch % print_loss == 0:
print(
f"Epoch: {epoch}, PIP: {PIP}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]['lr']}"
)
if pip_feed == "parallel":
loss.backward(retain_graph=True)
optimizer.step()
scheduler.step(loss)
per_step_loss = loss / len(tt)
losses.append(per_step_loss)
if epoch % print_loss == 0:
print(
f"Epoch: {epoch}\tLoss: {per_step_loss:.2f}\tLR: {optimizer.param_groups[0]['lr']}"
)
return losses
'''
def rollout(controller, sim, tt, use_noise, PEEP, PIP, loss_fn, loss):
waveform = BreathWaveform.create(custom_range=(PEEP, PIP))
expiratory = Expiratory.create(waveform=waveform)
controller_state = controller.init(waveform)
expiratory_state = expiratory.init()
sim_state, obs = sim.reset()
def loop_over_tt(ctrlState_expState_simState_obs_loss, t):
controller_state, expiratory_state, sim_state, obs, loss = ctrlState_expState_simState_obs_loss
mean = 1.5
std = 1.0
noise = mean + std * jax.random.normal(jax.random.PRNGKey(0), shape=())
pressure = sim_state.predicted_pressure + use_noise * noise
sim_state = sim_state.replace(predicted_pressure=pressure) # Need to update p_history as well or no?
obs = obs.replace(predicted_pressure=pressure, time=t)
controller_state, u_in = controller(controller_state, obs)
expiratory_state, u_out = expiratory(expiratory_state, obs)
sim_state, obs = sim(sim_state, (u_in, u_out))
loss = jax.lax.cond(u_out == 0,
lambda x: x + loss_fn(jnp.array(waveform.at(t)), pressure),
lambda x: x,
loss)
return (controller_state, expiratory_state, sim_state, obs, loss), None
(_, _, _, _, loss), _ = jax.lax.scan(loop_over_tt, (controller_state, expiratory_state, sim_state, obs, loss), tt)
return loss
def rollout_parallel(controller, sim, tt, use_noise, PEEP, PIPs, loss_fn):
loss = jnp.array(0.)
for PIP in PIPs:
loss = rollout(controller, sim, tt, use_noise, PEEP, PIP, loss_fn, loss)
return loss
# TODO: add scheduler and scheduler_params
# Question: Jax analogue of torch.autograd.set_detect_anomaly(True)?
def deep_train(
controller,
sim,
pip_feed="parallel",
duration=3,
dt=0.03,
epochs=100,
use_noise=False,
optimizer=optax.adamw,
optimizer_params={"learning_rate": 1e-3, "weight_decay": 1e-4},
loss_fn=lambda x, y: (jnp.abs(x - y)).mean(),
# scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau,
# scheduler_params={"factor": 0.9, "patience": 10},
print_loss=1,
):
optim = optimizer(**optimizer_params)
optim_state = optim.init(controller)
tt = jnp.linspace(0, duration, int(duration / dt))
losses = []
# torch.autograd.set_detect_anomaly(True)
PIPs = [10, 15, 20, 25, 30, 35]
PEEP = 5
# TODO: handle device-awareness
for epoch in range(epochs):
if pip_feed == "parallel":
value, grad = jax.value_and_grad(rollout_parallel)(controller, sim, tt, use_noise, PEEP, PIPs, loss_fn)
updates, optim_state = optim.update(grad, optim_state, controller)
controller = optax.apply_updates(controller, updates)
per_step_loss = value / len(tt)
losses.append(per_step_loss)
if epoch % print_loss == 0:
print(
f"Epoch: {epoch}\tLoss: {per_step_loss:.2f}"
)
if pip_feed == "sequential":
for PIP in PIPs:
value, grad = jax.value_and_grad(rollout)(controller, sim, tt, use_noise, PEEP, PIP, loss_fn, jnp.array(0.))
updates, optim_state = optim.update(grad, optim_state, controller)
controller = optax.apply_updates(controller, updates)
per_step_loss = value / len(tt)
losses.append(per_step_loss)
if epoch % print_loss == 0:
print(
f"Epoch: {epoch}, PIP: {PIP}\tLoss: {per_step_loss:.2f}"
)
return controller
|
from pycamia import info_manager
__info__ = info_manager(
project = "PyZMyc",
package = "pyoverload",
fileinfo = "Useful tools for decorators."
)
__all__ = """
raw_function
return_type_wrapper
decorator
""".split()
import sys
from functools import wraps
def raw_function(func):
if hasattr(func, "__func__"):
return func.__func__
return func
def _get_wrapped(f):
while hasattr(f, '__wrapped__'): f = f.__wrapped__
return f
def decorator(wrapper_func):
if not callable(wrapper_func): raise TypeError("@decorator wrapping a non-wrapper")
def wrapper(*args, **kwargs):
if not kwargs and len(args) == 1:
func = args[0]
raw_func = raw_function(func)
if callable(raw_func):
func_name = f"{raw_func.__name__}[{wrapper_func.__qualname__.split('.')[0]}]"
wrapped_func = wraps(raw_func)(wrapper_func(raw_func))
wrapped_func.__name__ = func_name
wrapped_func.__doc__ = raw_func.__doc__
# return wrapped_func
if 'staticmethod' in str(type(func)): trans = staticmethod
elif 'classmethod' in str(type(func)): trans = classmethod
else: trans = lambda x: x
return trans(wrapped_func)
return decorator(wrapper_func(*args, **kwargs))
return wraps(wrapper_func)(wrapper)
def _mid(x): return x[1] if len(x) > 1 else x[0]
def _rawname(s): return _mid(str(s).split("'"))
stack_error = lambda x: TypeError(f"Unexpected function stack for {x}, please contact the developer for further information. ")
def _get_frames():
frames = []
frame = sys._getframe()
fname = frame.f_back.f_code.co_name
while frame is not None:
frame_file = _rawname(frame)
if frame_file.startswith('<') and frame_file.endswith('>') and frame_file != '<stdin>':
frame = frame.f_back
continue
frames.append(frame)
if len(frames) >= 4: return frames[2:]
frame = frame.f_back
raise stack_error(fname)
def get_environ_locals():
_, client_frame = _get_frames()
return client_frame.f_locals
def get_environ_globals():
_, client_frame = _get_frames()
return client_frame.f_globals
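# Usage sketch for `decorator` above (illustrative only, not part of the module):
#
#     @decorator
#     def traced(func):
#         def inner(*args, **kwargs):
#             print(f"calling {func.__name__}")
#             return func(*args, **kwargs)
#         return inner
#
#     @traced
#     def add(a, b):
#         return a + b
#
#     add(1, 2)  # prints "calling add" and returns 3; add.__doc__ is preserved
#                # while add.__name__ becomes "add[traced]"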
|
from pycamia import info_manager
__info__ = info_manager(
project = "PyZMyc",
package = "pyoverload",
fileinfo = "Useful tools for decorators."
)
__all__ = """
raw_function
return_type_wrapper
decorator
""".split()
import sys
from functools import wraps
def raw_function(func):
if hasattr(func, "__func__"):
return func.__func__
return func
def _get_wrapped(f):
while hasattr(f, '__wrapped__'): f = f.__wrapped__
return f
def decorator(wrapper_func):
if not callable(wrapper_func): raise TypeError("@decorator wrapping a non-wrapper")
def wrapper(*args, **kwargs):
if not kwargs and len(args) == 1:
func = args[0]
raw_func = raw_function(func)
if callable(raw_func):
func_name = f"{raw_func.__name__}[{wrapper_func.__qualname__.split('.')[0]}]"
wrapped_func = wraps(raw_func)(wrapper_func(raw_func))
wrapped_func.__name__ = func_name
wrapped_func.__doc__ = raw_func.__doc__
# return wrapped_func
if 'staticmethod' in str(type(func)): trans = staticmethod
elif 'classmethod' in str(type(func)): trans = classmethod
else: trans = lambda x: x
return trans(wrapped_func)
return decorator(wrapper_func(*args, **kwargs))
return wraps(wrapper_func)(wrapper)
def _mid(x): return x[1] if len(x) > 1 else x[0]
def _rawname(s): return _mid(str(s).split("'"))
stack_error = lambda x: TypeError(f"Unexpected function stack for {x}, please contact the developer for further information. ")
def _get_frames():
frames = []
frame = sys._getframe()
fname = frame.f_back.f_code.co_name
while frame is not None:
frame_file = _rawname(frame)
if frame_file.startswith('<') and frame_file.endswith('>') and frame_file != '<stdin>':
frame = frame.f_back
continue
frames.append(frame)
if len(frames) >= 4: return frames[2:]
frame = frame.f_back
raise stack_error(fname)
def get_environ_locals():
_, client_frame = _get_frames()
return client_frame.f_locals
def get_environ_globals():
_, client_frame = _get_frames()
return client_frame.f_globals
|
import inspect
import os
from unittest import TestCase
import boto3
from decouple import config
from loguru import logger
from moto.s3 import mock_s3
from constants import TEMP_FILE_FOLDER
from services.s3_service import S3Service
class TestS3Service(TestCase):
""" Test S3Service class """
@mock_s3
def test_upload_object(self):
"""
Test upload object to S3
:return: url of the uploaded object
"""
logger.debug(
f"{self.__class__.__name__}.{inspect.currentframe().f_code.co_name}"
)
s3_client = boto3.client("s3", region_name=config("AWS_REGION"))
s3_client.create_bucket(
Bucket=config("AWS_BUCKET"),
CreateBucketConfiguration={"LocationConstraint": config("AWS_REGION")},
)
s3_service = S3Service()
object_name = "index.jpg"
path = os.path.join(TEMP_FILE_FOLDER, object_name)
url = s3_service.upload_object(path, object_name, expiration_time=3600)
assert url is not None and (
    "s3.amazonaws.com" in url and "https://" in url and f"{config('AWS_BUCKET')}" in url
)
logger.info(url)
| import inspect
import os
from unittest import TestCase
import boto3
from decouple import config
from loguru import logger
from moto.s3 import mock_s3
from constants import TEMP_FILE_FOLDER
from services.s3_service import S3Service
class TestS3Service(TestCase):
""" Test S3Service class """
@mock_s3
def test_upload_object(self):
"""
Test upload object to S3
:return: url of the uploaded object
"""
logger.debug(
f"{self.__class__.__name__}.{inspect.currentframe().f_code.co_name}"
)
s3_client = boto3.client("s3", region_name=config("AWS_REGION"))
s3_client.create_bucket(
Bucket=config("AWS_BUCKET"),
CreateBucketConfiguration={"LocationConstraint": config("AWS_REGION")},
)
s3_service = S3Service()
object_name = "index.jpg"
path = os.path.join(TEMP_FILE_FOLDER, object_name)
url = s3_service.upload_object(path, object_name, expiration_time=3600)
assert url is not None and (
    "s3.amazonaws.com" in url and "https://" in url and f"{config('AWS_BUCKET')}" in url
)
logger.info(url)
|
import argparse
import base64
import json
import logging
import logging.handlers
import re
import requirements
import time
import http
import urllib.request
import xml.etree.ElementTree as ET
import os
import sys
from datetime import datetime
import github3
import warnings
import PyDigger.common
requirements_fields = ['requirements', 'test_requirements']
# Updated:
# 1) All the entries that don't have last_update field
# 2) All the entries that were updated more than N days ago
# 3) All the entries that were updated in the last N days ??
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--screen', help='Log to the screen', action='store_true')
parser.add_argument('--log', help='Set logging level to DEBUG or INFO (or keep it at the default WARNING)', default='WARNING')
parser.add_argument('--update', help='update the entries: rss - the ones received via rss; deps - dependencies; all - all of the packages already in the database; url - provide the url of a github repository')
parser.add_argument('--name', help='Name of the package to update')
parser.add_argument('--sleep', help='How many seconds to sleep between packages (Help avoiding the GitHub API limit)', type=float)
parser.add_argument('--url', help='URL of a github repository')
parser.add_argument('--limit', help='Max number of packages to investigate. (Used during testing and development)', type=int)
args = parser.parse_args()
return args
class PyPackage(object):
def __init__(self, name):
self.lcname = name.lower()
self.entry = {}
self.config = PyDigger.common.read_config()
self.setup_github()
def setup_github(self):
logger = logging.getLogger('PyDigger.fetch')
token = os.environ.get('GITHUB_TOKEN')
if not token:
token = self.config['github-token']
if not token:
logger.error("No github token found")
self.github = None
return
self.github = github3.login(token=token)
def get_details(self):
logger = logging.getLogger('PyDigger.fetch')
logger.debug("get_details of " + self.lcname)
url = 'https://pypi.org/pypi/' + self.lcname + '/json'
logger.debug(f"Fetching url {url}")
try:
f = urllib.request.urlopen(url)
json_data = f.read()
f.close()
#print(json_data)
except (urllib.request.HTTPError, urllib.request.URLError, http.client.InvalidURL, ConnectionError) as err:
logger.error(f"Could not fetch details of PyPI package from '{url}'. Error: {type(err)}: {err}")
#self.entry['json_missing'] = True
#self.save()
return
except Exception:
logger.exception(f"Could not fetch details of PyPI package from '{url}'")
return
package_data = json.loads(json_data)
#logger.debug(f'package_data: {package_data}'))
if 'info' in package_data:
info = package_data['info']
if 'home_page' in info:
self.entry['home_page'] = info['home_page']
# package_url we can deduct this from the name
# _pypi_hidden
# _pypi_ordering
# release_url
# downloads - a hash, but as we are monitoring recent uploads, this will be mostly 0
# classifiers - an array of stuff
# releases
# urls
for field in ['name', 'maintainer', 'docs_url', 'requires_python', 'maintainer_email',
'cheesecake_code_kwalitee_id', 'cheesecake_documentation_id', 'cheesecake_installability_id',
'keywords', 'author', 'author_email', 'download_url', 'platform', 'description', 'bugtrack_url',
'license', 'summary', 'version']:
if field in info:
self.entry[field] = info[field]
self.entry['split_keywords'] = []
if 'keywords' in info:
keywords = info['keywords']
if keywords is not None and keywords != "":
logger.debug(f"keywords '{keywords}'")
logger.debug(f"keywords type '{keywords.__class__.__name__}'")
#if keywords.__class__.__name__ == 'bytes':
# keywords = keywords.decode('utf8')
#keywords = keywords.encode('utf-8')
keywords = keywords.lower()
if re.search(',', keywords):
self.entry['split_keywords'] = keywords.split(',')
else:
self.entry['split_keywords'] = keywords.split(' ')
self.process_release(package_data)
self.extract_vcs()
if self.entry['github']:
try:
self.check_github()
except github3.exceptions.NotFoundError:
logger.error(f"404 NotFountError while trying to get data from GitHub: '{self.entry["home_page"]}'")
except Exception:
logger.exception(f"Error while trying to get data from GitHub: '{self.entry["home_page"]}'")
self.entry['lcname'] = self.entry['name'].lower()
self.download_pkg()
self.save()
def extract_vcs(self):
logger = logging.getLogger('PyDigger.fetch')
vcs_found = False
# https://github.com/szabgab/pydigger.com/
# https://bitbucket.org/ensighten-ondemand/dataintelligence-exports-cli
# http://gitlab.com/dimasmjunior/classic
vcs_es = {
'github': {
'host': 'github.com',
'regex': r'^https?://(www\.)?github.com/([^/]+)/([^/]+)/?$',
},
'gitlab': {
'host': 'gitlab.com',
'regex': r'^https?://(www\.)?gitlab.com/([^/]+)/([^/]+)/?$',
},
'bitbucket': {
'host': 'bitbucket.org',
'regex': r'^https?://(www\.)?bitbucket.org/([^/]+)/([^/]+)/?$',
}
}
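# worked example (illustrative): 'https://github.com/szabgab/pydigger.com/' matches the github
# regex with group(2) == 'szabgab' and group(3) == 'pydigger.com', which become
# entry['github_user'] and entry['github_project'] below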
for vcs in vcs_es:
self.entry[vcs] = False
for vcs in vcs_es:
if 'home_page' in self.entry and self.entry['home_page'] is not None:
vcs_url = self.entry['home_page']
match = re.search(vcs_es[vcs]['regex'], self.entry['home_page'])
if match:
self.entry[vcs] = True
self.entry[f'{vcs}_user'] = match.group(2)
self.entry[f'{vcs}_project'] = match.group(3)
logger.info(f"Project {self.lcname} Version {self.entry["version"]} has VCS {vcs}: {vcs_url}")
vcs_found = True
break
if not vcs_found:
logger.info(f"No VCS found for project {self.lcname} Version {self.entry["version"]}")
def process_release(self, package_data):
logger = logging.getLogger('PyDigger.fetch')
version = self.entry['version']
if 'urls' in package_data:
self.entry['urls'] = package_data['urls']
if 'releases' not in package_data:
logger.error(f"There are no releases in package '{self.lcname}' --- {package_data}")
elif version not in package_data['releases']:
logger.error(f"Version '{version}' is not in the releases of package '{self.lcname}' --- {package_data}")
elif len(package_data['releases'][version]) == 0:
logger.error(f"Version '{version}' has no elements in the releases of package {self.lcname} --- {package_data}")
else:
# find the one that has python_version: "source",
# actually we find the first one that has python_version: source
# maybe there are more?
source = package_data['releases'][version][0]
for version_pack in package_data['releases'][version]:
if 'python_version' in version_pack and version_pack['python_version'] == 'source':
if 'url' in version_pack:
self.entry['download_url'] = version_pack['url']
else:
logger.error(f"Version '{version}' has no download_url in the releases of package {self.lcname} --- {package_data}")
source = version_pack
break
#url: https://pypi.org/packages/ce/c7/6431a8ba802bf93d611bfd53c05abcc078165b8aad3603d66c02a847af7d/codacy-coverage-1.2.10.tar.gz
#filename: codacy-coverage-1.2.10.tar.gz
#url: https://pypi.org/packages/84/85/5ce28077fbf455ddf0ba2506cdfdc2e5caa0822b8a4a2747da41b683fad8/purepng-0.1.3.zip
if 'upload_time' not in source:
logger.error(f"upload_time is missing from version {version} in the releases of package {self.name} --- {package_data}")
else:
upload_time = source['upload_time']
self.entry['upload_time'] = datetime.strptime(upload_time, "%Y-%m-%dT%H:%M:%S")
def check_github(self):
logger = logging.getLogger('PyDigger.fetch')
logger.debug("check_github user='{}', project='{}".format(self.entry['github_user'], self.entry['github_project']))
if not self.github:
return
repo = self.github.repository(self.entry['github_user'], self.entry['github_project'])
if not repo:
logger.error("Could not fetch GitHub repository for {}".format(self.entry['name']))
self.entry['error'] = "Could not fetch GitHub repository"
return
logger.debug(f"default_branch: {repo.default_branch}")
# get the last commit of the default branch
branch = repo.branch(repo.default_branch)
if not branch:
logger.error("Could not fetch GitHub branch {} for {}".format(repo.default_branch, self.entry['name']))
self.entry['error'] = "Could not fetch GitHub branch"
return
last_sha = branch.commit.sha
logger.debug(f"last_sha: {last_sha}")
t = repo.tree(last_sha, recursive=True)
self.entry['travis_ci'] = False
self.entry['coveralls'] = False
self.entry['github_actions'] = False
for e in t.tree:
if e.path == '.travis.yml':
self.entry['travis_ci'] = True
if re.search(r'^.github/workflows/.*\.ya?ml$',e.path):
self.entry['github_actions'] = True
if e.path == '.coveragerc':
self.entry['coveralls'] = True
if e.path == 'tox.ini':
self.entry['tox'] = True # http://codespeak.net/tox/
if e.path == 'circle.yml':
self.entry['circle'] = True # https://circleci.com/
if e.path == 'appveyor.yml':
self.entry['appveyor'] = True # https://www.appveyor.com/
if e.path == '.appveyor.yml':
self.entry['appveyor'] = True # https://www.appveyor.com/
if e.path == '.editorconfig':
self.entry['editconfig'] = True # http://editorconfig.org/
if e.path == 'dockbot.json':
self.entry['dockbot'] = True # https://github.com/CauldronDevelopmentLLC/dockbot
if e.path == '.landscape.yml':
self.entry['landscape'] = True # https://help.ubuntu.com/lts/clouddocs/en/Installing-Landscape.html
for field in requirements_fields:
if e.path == field + '.txt':
self.entry[field] = []
try:
fh = urllib.request.urlopen(e.url)
as_json = fh.read()
file_info = json.loads(as_json)
content = base64.b64decode(file_info['content'])
logger.debug(f"content type: {content.__class__.__name__}")
logger.debug(f"content: {content}")
if content.__class__.__name__ == 'bytes':
content = content.decode('utf8')
# https://github.com/ingresso-group/pyticketswitch/blob/master/requirements.txt
# contains -r requirements/common.txt which means we need to fetch that file as well
# for now let's just skip this
match = re.search(r'^\s*-r', content)
if not match:
# Capture: UserWarning: Private repos not supported. Skipping.
with warnings.catch_warnings(record=True) as warn:
warnings.simplefilter("always")
for req in requirements.parse(content):
logger.debug(f"{field}: {req.name} {req.specs} {req.extras}")
# we cannot use the req.name as a key in the dictionary as some of the package names have a . in them
# and MongoDB does not allow . in fieldnames.
self.entry[field].append({ 'name' : req.name, 'specs' : req.specs })
for w in warn:
logger.warning(str(w))
except urllib.error.HTTPError as err:
logger.error(f"Exception when handling the {field}.txt: {err}")
if "rate limit exceeded" in err:
time.sleep(2)
except Exception:
logger.exception(f"Exception when handling the {field}.txt")
logger.debug("github finished")
return
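# --- Illustration (not part of the original code) -------------------------------
# A minimal sketch of the requirements parsing that check_github() performs above:
# the file content is handed to requirements.parse() and stored as a list of
# {'name', 'specs'} dicts, because MongoDB does not allow '.' in field names.
# The sample text and the helper name below are made up for demonstration.
def _demo_parse_requirements():
    sample = "flask>=1.0\nrequests==2.25.1\nzope.interface\n"
    # yields e.g. [{'name': 'flask', 'specs': [('>=', '1.0')]}, ...]
    return [{'name': req.name, 'specs': req.specs} for req in requirements.parse(sample)]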
# In the database have a mark that says if the package was already
# downloaded (or not)
# extracted (or not)
def download_pkg(self):
"""Use ``urllib.request.urlretrieve`` to download package to file in sandbox
dir.
"""
logger = logging.getLogger('PyDigger.fetch')
if 'download_url' not in self.entry or self.entry['download_url'] is None:
logger.info("No download_url")
return()
logger.info('download_url {}'.format(self.entry['download_url']))
#if 'local_dir' in self.entry:
# logger.info('')
match = re.search(r'/([^/]+)(\.tar\.gz)$', self.entry['download_url'])
if match:
# local_dir is the name of the file that should be the name of the local directory
local_dir = match.group(1)
extension = match.group(2)
else:
logger.warning("Unsupported download file format: '{}'".format(self.entry['download_url']))
return()
logger.info(f"local_dir '{local_dir}' extension '{extension}'")
src_dir = PyDigger.common.get_source_dir()
logger.info(f"Source directory: {src_dir}")
# TODO use the requests module to download the zipfile
# self.downloaded_from_url = True
def save(self):
logger = logging.getLogger('PyDigger.fetch')
entry = self.entry
logger.info("save_entry: '{}'".format(entry['name']))
#logger.debug("save_entry: {}".format(e)
#my_entries.append(e)
#print(e)
# TODO make sure we only add newer version!
# Version numbers I've seen:
# 1.0.3
# 20160325.161225
# 0.2.0.dev20160325161211
# 3.1.0a12
# 2.0.0.dev11
#doc = db.packages.find_one({'name' : e['name']})
#if doc:
#print(doc)
db.packages.remove({'name' : entry['name']})
db.packages.remove({'name' : entry['name'].lower()})
res = db.packages.insert(entry)
logger.info("INSERT res='{}'".format(res))
def setup_logger(args):
if args.log and args.log.upper() in ['DEBUG', 'INFO', 'WARNING']:
log_level = getattr(logging, args.log.upper())
else:
exit(f'Invalid --log parameter {args.log}')
logger = logging.getLogger('PyDigger')
logger.setLevel(log_level)
log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)-10s - %(message)s')
if args.screen:
sh = logging.StreamHandler()
sh.setLevel(log_level)
sh.setFormatter(log_format)
logger.addHandler(sh)
else:
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
log_file = os.path.join(project_root, 'log', 'fetch.log')
ch = logging.handlers.RotatingFileHandler(log_file, maxBytes=10_000_000, backupCount=10)
ch.setLevel(log_level)
ch.setFormatter(log_format)
logger.addHandler(ch)
logger.info("======================== Starting =================================")
def setup_db():
global db
db = PyDigger.common.get_db()
def setup(args):
setup_db()
setup_logger(args)
def main():
args = get_args()
setup(args)
logger = logging.getLogger('PyDigger.fetch')
logger.info("Starting main")
src_dir = PyDigger.common.get_source_dir()
logger.info("Source directory: {}".format(src_dir))
names = []
packages = None
if args.update:
logger.debug("update: {}".format(args.update))
if args.update == 'rss':
packages = get_from_rss()
elif args.update == 'url':
package = PyPackage("foo")
package.entry['home_page'] = args.url
package.entry['version'] = 0
package.extract_vcs()
package.check_github()
elif args.update == 'deps':
logger.info("Listing dependencies")
seen = {}
for field in requirements_fields:
packages_with_requirements = db.packages.find({field : { '$exists' : True }}, { 'name' : True, field : True})
for p in packages_with_requirements:
for r in p[field]:
name = r['name']
if not name:
logger.info("{} {} found without a name in package {}".format(field, r, p))
continue
if name not in seen:
seen[name] = True
p = db.packages.find_one({'lcname': name.lower()})
if not p:
names.append(name)
elif args.update == 'all':
packages = db.packages.find({}, {'name': True})
elif re.search(r'^\d+$', args.update):
packages = db.packages.find().sort([('pubDate', 1)]).limit(int(args.update))
else:
logger.error("The update option '{}' is not implemented yet".format(args.update))
if packages:
names = [ p['name'] for p in packages ]
elif args.name:
names.append(args.name)
else:
exit(f"Missing --update or --name. Run '{sys.argv[0]} -h' to get help.")
update_packages(args, names)
PyDigger.common.update_cache()
logger.info("Finished")
def update_packages(args, names):
logger = logging.getLogger('PyDigger.fetch')
count = 0
logger.info("Start updating packages")
for name in names:
count += 1
if args.limit and count > args.limit:
break
package = PyPackage(name)
package.get_details()
if args.sleep:
#logger.debug('sleeping {args.sleep}')
time.sleep(args.sleep)
# going over the RSS feed most recent first
def get_from_rss():
logger = logging.getLogger('PyDigger.fetch')
logger.debug("get_from_rss")
rss_data = get_rss()
packages = []
seen_names = []
try:
root = ET.fromstring(rss_data)
except Exception as err:
logger.error(f"Could not parse rss_data\n{err}")
return packages
# seen: xml.etree.ElementTree.ParseError: not well-formed (invalid token)
for item in root.iter('item'):
title = item.find('title')
name, version = title.text.split(' ')
logger.debug(f"Processing '{name}' '{version}'")
lcname = name.lower()
# The same package can appear in the RSS feed twice. We only need to process it once.
if lcname in seen_names:
continue
description = item.find('description').text
pubDate = item.find('pubDate').text
logger.debug(f"Description {description}")
logger.debug(f"pubDate {pubDate}")
# Tue, 01 Oct 2019 18:14:51 GMT
try:
if pubDate[-4:] == ' GMT':
upload_time = datetime.strptime(pubDate[0:-4], "%a, %d %b %Y %H:%M:%S")
else:
upload_time = datetime.strptime(pubDate, "%d %b %Y %H:%M:%S %Z")
except Exception as err:
logger.error(f"Could not parse time '{pubDate}'\n{err}")
continue
entry = {
'name' : name,
'lcname' : lcname,
'summary' : description,
'upload_time' : upload_time,
}
# If this package is already in the database we only need to process if
# the one coming in the RSS feed has a different (hopefully newer) version
# number but if it is not in the database we can already save it
# This still does not solve the problem of packages that have no upload_time
# in their JSON file. Especially if we try to add such a package by name
# and not from the RSS feed
# TODO: check if the new version number is higher than the old one!
doc = db.packages.find_one({'lcname' : lcname})
if doc:
old_version = doc.get('version', '')
if version == old_version:
logger.debug("Skipping '{name}' '{version}'. It is already in the database with this version")
continue
logger.debug("Update '{name}' from '{old_version}' to '{version}'. It is already in the database with this version")
seen_names.append(lcname)
packages.append(entry)
return packages
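# --- Illustration (not part of the original code) -------------------------------
# A quick check of the pubDate handling in get_from_rss() above, using the sample
# date quoted in its comment. The helper name is made up for demonstration.
def _demo_parse_pubdate():
    pubDate = "Tue, 01 Oct 2019 18:14:51 GMT"
    if pubDate[-4:] == ' GMT':
        return datetime.strptime(pubDate[0:-4], "%a, %d %b %Y %H:%M:%S")
    return datetime.strptime(pubDate, "%d %b %Y %H:%M:%S %Z")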
def get_rss():
logger = logging.getLogger('PyDigger.fetch')
latest_url = 'https://pypi.org/rss/updates.xml'
logger.debug('get_rss from ' + latest_url)
try:
f = urllib.request.urlopen(latest_url)
rss_data = f.read()
f.close()
#raise Exception("hello")
except (urllib.error.HTTPError, urllib.error.URLError):
logger.exception('Error while fetching ' + latest_url)
raise Exception('Could not fetch RSS feed ' + latest_url)
#logger.debug(rss_data)
return rss_data
| import argparse
import base64
import json
import logging
import logging.handlers
import re
import requirements
import time
import http.client
import urllib.request
import xml.etree.ElementTree as ET
import os
import sys
from datetime import datetime
import github3
import warnings
import PyDigger.common
requirements_fields = ['requirements', 'test_requirements']
# Updated:
# 1) All the entries that don't have last_update field
# 2) All the entries that were updated more than N days ago
# 3) All the entries that were updated in the last N days ??
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--screen', help='Log to the screen', action='store_true')
parser.add_argument('--log', help='Set logging level to DEBUG or INFO (or keep it at the default WARNING)', default='WARNING')
parser.add_argument('--update', help='update the entries: rss - the ones received via rss; deps - dependencies; all - all of the packages already in the database; url - provide the url of a github repository')
parser.add_argument('--name', help='Name of the package to update')
parser.add_argument('--sleep', help='How many seconds to sleep between packages (Help avoiding the GitHub API limit)', type=float)
parser.add_argument('--url', help='URL of a github repository')
parser.add_argument('--limit', help='Max number of packages to investigate. (Used during testing and development)', type=int)
args = parser.parse_args()
return args
class PyPackage(object):
def __init__(self, name):
self.lcname = name.lower()
self.entry = {}
self.config = PyDigger.common.read_config()
self.setup_github()
def setup_github(self):
logger = logging.getLogger('PyDigger.fetch')
token = os.environ.get('GITHUB_TOKEN')
if not token:
token = self.config['github-token']
if not token:
logger.error("No github token found")
self.github = None
return
self.github = github3.login(token=token)
def get_details(self):
logger = logging.getLogger('PyDigger.fetch')
logger.debug("get_details of " + self.lcname)
url = 'https://pypi.org/pypi/' + self.lcname + '/json'
logger.debug(f"Fetching url {url}")
try:
f = urllib.request.urlopen(url)
json_data = f.read()
f.close()
#print(json_data)
except (urllib.request.HTTPError, urllib.request.URLError, http.client.InvalidURL, ConnectionError) as err:
logger.error(f"Could not fetch details of PyPI package from '{url}'. Error: {type(err)}: {err}")
#self.entry['json_missing'] = True
#self.save()
return
except Exception:
logger.exception(f"Could not fetch details of PyPI package from '{url}'")
return
package_data = json.loads(json_data)
#logger.debug(f'package_data: {package_data}'))
if 'info' in package_data:
info = package_data['info']
if 'home_page' in info:
self.entry['home_page'] = info['home_page']
# package_url we can deduct this from the name
# _pypi_hidden
# _pypi_ordering
# release_url
# downloads - a hash, but as we are monitoring recent uploads, this will be mostly 0
# classifiers - an array of stuff
# releases
# urls
for field in ['name', 'maintainer', 'docs_url', 'requires_python', 'maintainer_email',
'cheesecake_code_kwalitee_id', 'cheesecake_documentation_id', 'cheesecake_installability_id',
'keywords', 'author', 'author_email', 'download_url', 'platform', 'description', 'bugtrack_url',
'license', 'summary', 'version']:
if field in info:
self.entry[field] = info[field]
self.entry['split_keywords'] = []
if 'keywords' in info:
keywords = info['keywords']
if keywords is not None and keywords != "":
logger.debug(f"keywords '{keywords}'")
logger.debug(f"keywords type '{keywords.__class__.__name__}'")
#if keywords.__class__.__name__ == 'bytes':
# keywords = keywords.decode('utf8')
#keywords = keywords.encode('utf-8')
keywords = keywords.lower()
if re.search(',', keywords):
self.entry['split_keywords'] = keywords.split(',')
else:
self.entry['split_keywords'] = keywords.split(' ')
self.process_release(package_data)
self.extract_vcs()
if self.entry['github']:
try:
self.check_github()
except github3.exceptions.NotFoundError:
logger.error(f"404 NotFountError while trying to get data from GitHub: '{self.entry['home_page']}'")
except Exception:
logger.exception(f"Error while trying to get data from GitHub: '{self.entry['home_page']}'")
self.entry['lcname'] = self.entry['name'].lower()
self.download_pkg()
self.save()
def extract_vcs(self):
logger = logging.getLogger('PyDigger.fetch')
vcs_found = False
# https://github.com/szabgab/pydigger.com/
# https://bitbucket.org/ensighten-ondemand/dataintelligence-exports-cli
# http://gitlab.com/dimasmjunior/classic
vcs_es = {
'github': {
'host': 'github.com',
'regex': r'^https?://(www\.)?github.com/([^/]+)/([^/]+)/?$',
},
'gitlab': {
'host': 'gitlab.com',
'regex': r'^https?://(www\.)?gitlab.com/([^/]+)/([^/]+)/?$',
},
'bitbucket': {
'host': 'bitbucket.org',
'regex': r'^https?://(www\.)?bitbucket.org/([^/]+)/([^/]+)/?$',
}
}
for vcs in vcs_es:
self.entry[vcs] = False
for vcs in vcs_es:
if 'home_page' in self.entry and self.entry['home_page'] is not None:
vcs_url = self.entry['home_page']
match = re.search(vcs_es[vcs]['regex'], self.entry['home_page'])
if match:
self.entry[vcs] = True
self.entry[f'{vcs}_user'] = match.group(2)
self.entry[f'{vcs}_project'] = match.group(3)
logger.info(f"Project {self.lcname} Version {self.entry['version']} has VCS {vcs}: {vcs_url}")
vcs_found = True
break
if not vcs_found:
logger.info(f"No VCS found for project {self.lcname} Version {self.entry['version']}")
def process_release(self, package_data):
logger = logging.getLogger('PyDigger.fetch')
version = self.entry['version']
if 'urls' in package_data:
self.entry['urls'] = package_data['urls']
if 'releases' not in package_data:
logger.error(f"There are no releases in package '{self.lcname}' --- {package_data}")
elif version not in package_data['releases']:
logger.error(f"Version '{version}' is not in the releases of package '{self.lcname}' --- {package_data}")
elif len(package_data['releases'][version]) == 0:
logger.error(f"Version '{version}' has no elements in the releases of package {self.lcname} --- {package_data}")
else:
# find the one that has python_version: "source",
# actually we find the first one that has python_version: source
# maybe there are more?
source = package_data['releases'][version][0]
for version_pack in package_data['releases'][version]:
if 'python_version' in version_pack and version_pack['python_version'] == 'source':
if 'url' in version_pack:
self.entry['download_url'] = version_pack['url']
else:
logger.error(f"Version '{version}' has no download_url in the releases of package {self.lcname} --- {package_data}")
source = version_pack
break
#url: https://pypi.org/packages/ce/c7/6431a8ba802bf93d611bfd53c05abcc078165b8aad3603d66c02a847af7d/codacy-coverage-1.2.10.tar.gz
#filename: codacy-coverage-1.2.10.tar.gz
#url: https://pypi.org/packages/84/85/5ce28077fbf455ddf0ba2506cdfdc2e5caa0822b8a4a2747da41b683fad8/purepng-0.1.3.zip
if 'upload_time' not in source:
logger.error(f"upload_time is missing from version {version} in the releases of package {self.name} --- {package_data}")
else:
upload_time = source['upload_time']
self.entry['upload_time'] = datetime.strptime(upload_time, "%Y-%m-%dT%H:%M:%S")
def check_github(self):
logger = logging.getLogger('PyDigger.fetch')
logger.debug("check_github user='{}', project='{}".format(self.entry['github_user'], self.entry['github_project']))
if not self.github:
return
repo = self.github.repository(self.entry['github_user'], self.entry['github_project'])
if not repo:
logger.error("Could not fetch GitHub repository for {}".format(self.entry['name']))
self.entry['error'] = "Could not fetch GitHub repository"
return
logger.debug(f"default_branch: {repo.default_branch}")
# get the last commit of the default branch
branch = repo.branch(repo.default_branch)
if not branch:
logger.error("Could not fetch GitHub branch {} for {}".format(repo.default_branch, self.entry['name']))
self.entry['error'] = "Could not fetch GitHub branch"
return
last_sha = branch.commit.sha
logger.debug(f"last_sha: {last_sha}")
t = repo.tree(last_sha, recursive=True)
self.entry['travis_ci'] = False
self.entry['coveralls'] = False
self.entry['github_actions'] = False
for e in t.tree:
if e.path == '.travis.yml':
self.entry['travis_ci'] = True
if re.search(r'^\.github/workflows/.*\.ya?ml$', e.path):
self.entry['github_actions'] = True
if e.path == '.coveragerc':
self.entry['coveralls'] = True
if e.path == 'tox.ini':
self.entry['tox'] = True # http://codespeak.net/tox/
if e.path == 'circle.yml':
self.entry['circle'] = True # https://circleci.com/
if e.path == 'appveyor.yml':
self.entry['appveyor'] = True # https://www.appveyor.com/
if e.path == '.appveyor.yml':
self.entry['appveyor'] = True # https://www.appveyor.com/
if e.path == '.editorconfig':
self.entry['editconfig'] = True # http://editorconfig.org/
if e.path == 'dockbot.json':
self.entry['dockbot'] = True # https://github.com/CauldronDevelopmentLLC/dockbot
if e.path == '.landscape.yml':
self.entry['landscape'] = True # https://help.ubuntu.com/lts/clouddocs/en/Installing-Landscape.html
for field in requirements_fields:
if e.path == field + '.txt':
self.entry[field] = []
try:
fh = urllib.request.urlopen(e.url)
as_json = fh.read()
file_info = json.loads(as_json)
content = base64.b64decode(file_info['content'])
logger.debug(f"content type: {content.__class__.__name__}")
logger.debug(f"content: {content}")
if content.__class__.__name__ == 'bytes':
content = content.decode('utf8')
# https://github.com/ingresso-group/pyticketswitch/blob/master/requirements.txt
# contains -r requirements/common.txt which means we need to fetch that file as well
# for now let's just skip this
match = re.search(r'^\s*-r', content)
if not match:
# Capture: UserWarning: Private repos not supported. Skipping.
with warnings.catch_warnings(record=True) as warn:
warnings.simplefilter("always")
for req in requirements.parse(content):
logger.debug(f"{field}: {req.name} {req.specs} {req.extras}")
# we cannot use the req.name as a key in the dictionary as some of the package names have a . in them
# and MongoDB does not allow . in fieldnames.
self.entry[field].append({ 'name' : req.name, 'specs' : req.specs })
for w in warn:
logger.warning(str(w))
except urllib.error.HTTPError as err:
logger.error(f"Exception when handling the {field}.txt: {err}")
if "rate limit exceeded" in err:
time.sleep(2)
except Exception:
logger.exception(f"Exception when handling the {field}.txt")
logger.debug("github finished")
return
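# --- Illustration (not part of the original code) -------------------------------
# The tree-entry URLs fetched in check_github() above return Git blob JSON whose
# 'content' field is base64 encoded; the decoding below mirrors that code path.
# The payload is a made-up minimal example, not a real GitHub response.
def _demo_decode_blob_content():
    fake_blob = json.dumps({'content': base64.b64encode(b"requests==2.25.1\n").decode('ascii')})
    file_info = json.loads(fake_blob)
    content = base64.b64decode(file_info['content'])
    if content.__class__.__name__ == 'bytes':
        content = content.decode('utf8')
    return content   # "requests==2.25.1\n"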
# In the database have a mark that says if the package was already
# downloaded (or not)
# extracted (or not)
def download_pkg(self):
"""Use ``urllib.request.urlretrieve`` to download package to file in sandbox
dir.
"""
logger = logging.getLogger('PyDigger.fetch')
if 'download_url' not in self.entry or self.entry['download_url'] is None:
logger.info("No download_url")
return()
logger.info('download_url {}'.format(self.entry['download_url']))
#if 'local_dir' in self.entry:
# logger.info('')
match = re.search(r'/([^/]+)(\.tar\.gz)$', self.entry['download_url'])
if match:
# local_dir is the name of the file that should be the name of the local directory
local_dir = match.group(1)
extension = match.group(2)
else:
logger.warning("Unsupported download file format: '{}'".format(self.entry['download_url']))
return()
logger.info(f"local_dir '{local_dir}' extension '{extension}'")
src_dir = PyDigger.common.get_source_dir()
logger.info(f"Source directory: {src_dir}")
# TODO use the requests module to download the zipfile
# self.downloaded_from_url = True
def save(self):
logger = logging.getLogger('PyDigger.fetch')
entry = self.entry
logger.info("save_entry: '{}'".format(entry['name']))
#logger.debug("save_entry: {}".format(e)
#my_entries.append(e)
#print(e)
# TODO make sure we only add newer version!
# Version numbers I've seen:
# 1.0.3
# 20160325.161225
# 0.2.0.dev20160325161211
# 3.1.0a12
# 2.0.0.dev11
#doc = db.packages.find_one({'name' : e['name']})
#if doc:
#print(doc)
db.packages.remove({'name' : entry['name']})
db.packages.remove({'name' : entry['name'].lower()})
res = db.packages.insert(entry)
logger.info("INSERT res='{}'".format(res))
def setup_logger(args):
if args.log and args.log.upper() in ['DEBUG', 'INFO', 'WARNING']:
log_level = getattr(logging, args.log.upper())
else:
exit(f'Invalid --log parameter {args.log}')
logger = logging.getLogger('PyDigger')
logger.setLevel(log_level)
log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)-10s - %(message)s')
if args.screen:
sh = logging.StreamHandler()
sh.setLevel(log_level)
sh.setFormatter(log_format)
logger.addHandler(sh)
else:
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
log_file = os.path.join(project_root, 'log', 'fetch.log')
ch = logging.handlers.RotatingFileHandler(log_file, maxBytes=10_000_000, backupCount=10)
ch.setLevel(log_level)
ch.setFormatter(log_format)
logger.addHandler(ch)
logger.info("======================== Starting =================================")
def setup_db():
global db
db = PyDigger.common.get_db()
def setup(args):
setup_db()
setup_logger(args)
def main():
args = get_args()
setup(args)
logger = logging.getLogger('PyDigger.fetch')
logger.info("Starting main")
src_dir = PyDigger.common.get_source_dir()
logger.info("Source directory: {}".format(src_dir))
names = []
packages = None
if args.update:
logger.debug("update: {}".format(args.update))
if args.update == 'rss':
packages = get_from_rss()
elif args.update == 'url':
package = PyPackage("foo")
package.entry['home_page'] = args.url
package.entry['version'] = 0
package.extract_vcs()
package.check_github()
elif args.update == 'deps':
logger.info("Listing dependencies")
seen = {}
for field in requirements_fields:
packages_with_requirements = db.packages.find({field : { '$exists' : True }}, { 'name' : True, field : True})
for p in packages_with_requirements:
for r in p[field]:
name = r['name']
if not name:
logger.info("{} {} found without a name in package {}".format(field, r, p))
continue
if name not in seen:
seen[name] = True
p = db.packages.find_one({'lcname': name.lower()})
if not p:
names.append(name)
elif args.update == 'all':
packages = db.packages.find({}, {'name': True})
elif re.search(r'^\d+$', args.update):
packages = db.packages.find().sort([('pubDate', 1)]).limit(int(args.update))
else:
logger.error("The update option '{}' is not implemented yet".format(args.update))
if packages:
names = [ p['name'] for p in packages ]
elif args.name:
names.append(args.name)
else:
exit(f"Missing --update or --name. Run '{sys.argv[0]} -h' to get help.")
update_packages(args, names)
PyDigger.common.update_cache()
logger.info("Finished")
def update_packages(args, names):
logger = logging.getLogger('PyDigger.fetch')
count = 0
logger.info("Start updating packages")
for name in names:
count += 1
if args.limit and count > args.limit:
break
package = PyPackage(name)
package.get_details()
if args.sleep:
#logger.debug('sleeping {args.sleep}')
time.sleep(args.sleep)
# going over the RSS feed most recent first
def get_from_rss():
logger = logging.getLogger('PyDigger.fetch')
logger.debug("get_from_rss")
rss_data = get_rss()
packages = []
seen_names = []
try:
root = ET.fromstring(rss_data)
except Exception as err:
logger.error(f"Could not parse rss_data\n{err}")
return packages
# seen: xml.etree.ElementTree.ParseError: not well-formed (invalid token)
for item in root.iter('item'):
title = item.find('title')
name, version = title.text.split(' ')
logger.debug(f"Processing '{name}' '{version}'")
lcname = name.lower()
# The same package can appear in the RSS feed twice. We only need to process it once.
if lcname in seen_names:
continue
description = item.find('description').text
pubDate = item.find('pubDate').text
logger.debug(f"Description {description}")
logger.debug(f"pubDate {pubDate}")
# Tue, 01 Oct 2019 18:14:51 GMT
try:
if pubDate[-4:] == ' GMT':
upload_time = datetime.strptime(pubDate[0:-4], "%a, %d %b %Y %H:%M:%S")
else:
upload_time = datetime.strptime(pubDate, "%d %b %Y %H:%M:%S %Z")
except Exception as err:
logger.error(f"Could not parse time '{pubDate}'\n{err}")
continue
entry = {
'name' : name,
'lcname' : lcname,
'summary' : description,
'upload_time' : upload_time,
}
# If this package is already in the database we only need to process if
# the one coming in the RSS feed has a different (hopefully newer) version
# number but if it is not in the database we can already save it
# This still does not solve the problem of packages that have no upload_time
# in their JSON file. Especially if we try to add such a package by name
# and not from the RSS feed
# TODO: check if the new version number is higher than the old one!
doc = db.packages.find_one({'lcname' : lcname})
if doc:
old_version = doc.get('version', '')
if version == old_version:
logger.debug("Skipping '{name}' '{version}'. It is already in the database with this version")
continue
logger.debug("Update '{name}' from '{old_version}' to '{version}'. It is already in the database with this version")
seen_names.append(lcname)
packages.append(entry)
return packages
def get_rss():
logger = logging.getLogger('PyDigger.fetch')
latest_url = 'https://pypi.org/rss/updates.xml'
logger.debug('get_rss from ' + latest_url)
try:
f = urllib.request.urlopen(latest_url)
rss_data = f.read()
f.close()
#raise Exception("hello")
except (urllib.error.HTTPError, urllib.error.URLError):
logger.exception('Error while fetching ' + latest_url)
raise Exception('Could not fetch RSS feed ' + latest_url)
#logger.debug(rss_data)
return rss_data
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import re
import shutil
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import WEIGHTS_NAME, is_apex_available, is_datasets_available, is_in_notebook, is_torch_tpu_available
from .modeling_utils import PreTrainedModel
from .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
TrainOutput,
default_compute_objective,
default_hp_space,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
def _model_unwrap(model: nn.Module) -> nn.Module:
# since there could be multiple levels of wrapping, unwrap recursively
if hasattr(model, "module"):
return _model_unwrap(model.module)
else:
return model
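# --- Illustration (not part of the original file) --------------------------------
# A minimal sketch of what _model_unwrap() does: wrappers such as torch.nn.DataParallel
# expose the inner model on a `.module` attribute, and the helper peels those layers off
# recursively. The demo function below is an addition for illustration only.
def _demo_model_unwrap():
    inner = nn.Linear(4, 2)
    wrapped = nn.DataParallel(inner)  # adds a `.module` attribute around `inner`
    assert _model_unwrap(wrapped) is inner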
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
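Example (a minimal usage sketch added for illustration; the checkpoint name and the
``train_ds``/``eval_ds`` datasets are placeholders, not part of the original docstring)::
    from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    training_args = TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1)
    trainer = Trainer(model=model, args=training_args, train_dataset=train_ds, eval_dataset=eval_ds)
    trainer.train()
    metrics = trainer.evaluate()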
"""
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# Model parallel
if not self.is_model_parallel:
model = model.to(args.device)
else:
# Force n_gpu to 1 to avoid DataParallel.
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):
# Set an xla_device flag on the model's config.
# We'll find a more elegant way to do this and not need the flag in the future.
self.model.config.xla_device = True
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Setup Sharded DDP training
self.sharded_dpp = False
if args.sharded_ddp:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
else:
self.sharded_dpp = True
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
signature_columns += ["label", "label_ids"]
columns = [k for k in signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description}don"t have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {", ".join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Gather the number of processes and this process index.
if self.args.parallel_mode == ParallelMode.TPU:
num_processes = xm.xrt_world_size()
process_index = xm.get_ordinal()
elif self.args.parallel_mode == ParallelMode.DISTRIBUTED:
num_processes = torch.distributed.get_world_size()
process_index = torch.distributed.get_rank()
else:
num_processes = 1
process_index = 0
# Build the sampler.
if self.args.group_by_length:
if num_processes <= 1:
return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)
else:
return DistributedLengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index
)
else:
if num_processes <= 1:
return RandomSampler(self.train_dataset)
else:
return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_dpp:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=num_training_steps,
)
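# --- Illustration (not part of the original class) --------------------------------
# The docstring above suggests subclassing to swap the optimizer. A hypothetical,
# commented-out sketch (SGD chosen arbitrarily; it is kept as a comment because a real
# subclass cannot be defined inside the Trainer body itself):
#
#     class SGDTrainer(Trainer):
#         def create_optimizer_and_scheduler(self, num_training_steps: int):
#             if self.optimizer is None:
#                 self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.learning_rate)
#             if self.lr_scheduler is None:
#                 self.lr_scheduler = get_scheduler(
#                     self.args.lr_scheduler_type,
#                     self.optimizer,
#                     num_warmup_steps=self.args.warmup_steps,
#                     num_training_steps=num_training_steps,
#                 )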
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.state.global_step % self.args.save_steps == 0:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
self.args.output_dir = checkpoint_dir
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None):
"""
Main training entry point.
Args:
model_path (:obj:`str`, `optional`):
Local path to the model if the model to train has been instantiated from a local path. If present,
training will resume from the optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
"""
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
model = self.call_model_init(trial)
if not self.is_model_parallel:
model = model.to(self.args.device)
self.model = model
self.model_wrapped = model
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(model_path)
model = self.model_wrapped
# Mixed precision training with apex (torch < 1.6)
if self.use_apex:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if self.sharded_dpp:
model = ShardedDDP(model, self.optimizer)
elif self.args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=(
not getattr(model.config, "gradient_checkpointing", False)
if isinstance(model, PreTrainedModel)
else True
),
)
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.
# Train!
if is_torch_tpu_available():
total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
else:
total_train_batch_size = (
self.args.train_batch_size
* self.args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
)
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")):
self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = 0
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += self.floating_point_ops(inputs)
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
self.deepspeed.step()
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
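# Internal helper shared by the training loop: depending on the current control flags it logs the
# accumulated loss and learning rate, runs evaluation (reporting to any hyperparameter search backend),
# and saves a checkpoint.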
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
# backward compatibility for pytorch schedulers
logs["learning_rate"] = (
self.lr_scheduler.get_last_lr()[0]
if version.parse(torch.__version__) >= version.parse("1.4")
else self.lr_scheduler.get_lr()[0]
)
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save.
assert _model_unwrap(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
else:
output_dir = os.path.join(self.args.output_dir, checkpoint_folder)
self.store_flos()
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_dpp:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True)
def _load_optimizer_and_scheduler(self, model_path):
"""If optimizer and scheduler states exist, load them."""
if model_path is None:
return
if os.path.isfile(os.path.join(model_path, "optimizer.pt")) and os.path.isfile(
os.path.join(model_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(model_path, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(model_path, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
# Not sure how to check whether a deepspeed checkpoint was saved, but since `load_checkpoint` just returns None if it fails to find one, this is effectively a check-and-load function
self.deepspeed.load_checkpoint(model_path, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs
) -> BestRun:
"""
Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether to maximize or minimize the objective. Can be :obj:`"minimize"` or :obj:`"maximize"`; you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
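Example (a minimal sketch, assuming ``my_model_init``, ``training_args`` and the datasets already
exist; the search space below is illustrative, not a library default)::

    def my_hp_space(trial):
        # tune only the learning rate in this toy space
        return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True)}

    trainer = Trainer(
        model_init=my_model_init,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )
    best_run = trainer.hyperparameter_search(hp_space=my_hp_space, n_trials=10, direction="minimize")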
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
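Example of an override in a subclass (a hedged sketch; ``MyTrainer`` and the plain cross-entropy
loss are illustrative, not part of the library)::

    class MyTrainer(Trainer):
        def compute_loss(self, model, inputs):
            labels = inputs.pop("labels")
            outputs = model(**inputs)
            logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
            # plain cross-entropy over flattened logits as an example replacement
            return torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))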
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
return self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
return outputs["loss"] if isinstance(outputs, dict) else outputs[0]
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local main process (e.g., the main process on one machine when training
in a distributed fashion on several machines).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or torch.distributed.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the world_master process (unless in TPUs).
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_process_zero():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
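Example (a sketch; ``trainer`` is assumed to be an already-constructed :class:`~transformers.Trainer`
with an ``eval_dataset``)::

    metrics = trainer.evaluate()
    print(metrics["eval_loss"])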
"""
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
return output.metrics
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (default).
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
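Example (a sketch; ``trainer`` and ``test_dataset`` are assumed to exist, and the ``argmax`` assumes
a classification model whose predictions are logits)::

    output = trainer.predict(test_dataset)
    predicted_class_ids = output.predictions.argmax(axis=-1)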
"""
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
return output
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = 1
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = torch.distributed.get_world_size()
world_size = max(1, world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather values of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy arrays
before concatenating them.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
with torch.no_grad():
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if has_labels:
if self.label_smoother is not None and "labels" in inputs:
loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
else:
loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import re
import shutil
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import WEIGHTS_NAME, is_apex_available, is_datasets_available, is_in_notebook, is_torch_tpu_available
from .modeling_utils import PreTrainedModel
from .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
TrainOutput,
default_compute_objective,
default_hp_space,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
def _model_unwrap(model: nn.Module) -> nn.Module:
# since there could be multiple levels of wrapping, unwrap recursively
if hasattr(model, "module"):
return _model_unwrap(model.module)
else:
return model
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
maximum length when batching inputs, and it will be saved along with the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
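Example (a minimal sketch; the checkpoint name is just a common public model and ``train_ds``/``eval_ds``
are placeholder datasets assumed to exist)::

    from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

    model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    args = TrainingArguments(output_dir="out", num_train_epochs=1, per_device_train_batch_size=8)
    trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
    trainer.train()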
"""
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# Model parallel
if not self.is_model_parallel:
model = model.to(args.device)
else:
# Force n_gpu to 1 to avoid DataParallel.
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):
# Set an xla_device flag on the model's config.
# We'll find a more elegant way and won't need to do this in the future.
self.model.config.xla_device = True
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Setup Sharded DDP training
self.sharded_dpp = False
if args.sharded_ddp:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
else:
self.sharded_dpp = True
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
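Example (a sketch; :class:`~transformers.PrinterCallback` is one of the callbacks shipped with the
library)::

    trainer.add_callback(PrinterCallback)    # pass the class and let the handler instantiate it
    trainer.add_callback(PrinterCallback())  # or pass an instance directly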
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
signature_columns += ["label", "label_ids"]
columns = [k for k in signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Gather the number of processes and this process index.
if self.args.parallel_mode == ParallelMode.TPU:
num_processes = xm.xrt_world_size()
process_index = xm.get_ordinal()
elif self.args.parallel_mode == ParallelMode.DISTRIBUTED:
num_processes = torch.distributed.get_world_size()
process_index = torch.distributed.get_rank()
else:
num_processes = 1
process_index = 0
# Build the sampler.
if self.args.group_by_length:
if num_processes <= 1:
return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)
else:
return DistributedLengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index
)
else:
if num_processes <= 1:
return RandomSampler(self.train_dataset)
else:
return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method.
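Example of supplying your own optimizer and scheduler through the init instead (a sketch; ``model``
and ``args`` are assumed to exist)::

    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0)
    trainer = Trainer(model=model, args=args, optimizers=(optimizer, scheduler))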
"""
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_dpp:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.state.global_step % self.args.save_steps == 0:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
self.args.output_dir = checkpoint_dir
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
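# Hedged illustration of the two model_init signatures accepted above (MyModel is a
# hypothetical class, not defined in this file):
#
#     def model_init():          # zero-argument form
#         return MyModel()
#
#     def model_init(trial):     # one-argument form; trial is None outside of HP search
#         dropout = trial.suggest_float("dropout", 0.0, 0.3) if trial is not None else 0.1
#         return MyModel(dropout=dropout)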
def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None):
"""
Main training entry point.
Args:
model_path (:obj:`str`, `optional`):
Local path to the model if the model to train has been instantiated from a local path. If present,
training will resume from the optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
"""
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
model = self.call_model_init(trial)
if not self.is_model_parallel:
model = model.to(self.args.device)
self.model = model
self.model_wrapped = model
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Keep track of whether we can use len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(model_path)
model = self.model_wrapped
# Mixed precision training with apex (torch < 1.6)
if self.use_apex:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if self.sharded_dpp:
model = ShardedDDP(model, self.optimizer)
elif self.args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=(
not getattr(model.config, "gradient_checkpointing", False)
if isinstance(model, PreTrainedModel)
else True
),
)
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.
# Train!
if is_torch_tpu_available():
total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
else:
total_train_batch_size = (
self.args.train_batch_size
* self.args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
)
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")):
self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
# _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = 0
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += self.floating_point_ops(inputs)
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
self.deepspeed.step()
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if not self.is_model_parallel:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
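# Worked example of the step bookkeeping above (illustrative numbers, not from the source):
# with train_batch_size=8, gradient_accumulation_steps=4 and a world size of 2,
# total_train_batch_size = 8 * 4 * 2 = 64. For a sized dataloader with len(train_dataloader)=625
# and num_train_epochs=3, num_update_steps_per_epoch = 625 // 4 = 156 and
# max_steps = math.ceil(3 * 156) = 468 optimizer updates in total.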
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
# backward compatibility for pytorch schedulers
logs["learning_rate"] = (
self.lr_scheduler.get_last_lr()[0]
if version.parse(torch.__version__) >= version.parse("1.4")
else self.lr_scheduler.get_lr()[0]
)
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save.
assert _model_unwrap(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)
else:
output_dir = os.path.join(self.args.output_dir, checkpoint_folder)
self.store_flos()
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_dpp:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True)
def _load_optimizer_and_scheduler(self, model_path):
"""If optimizer and scheduler states exist, load them."""
if model_path is None:
return
if os.path.isfile(os.path.join(model_path, "optimizer.pt")) and os.path.isfile(
os.path.join(model_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(model_path, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(model_path, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
# There is no clean way to check whether a saved DeepSpeed checkpoint exists, but load_checkpoint simply returns None when it cannot find one, so this effectively acts as a check-and-load.
self.deepspeed.load_checkpoint(model_path, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs
) -> BestRun:
"""
Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided
and the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether the objective should be minimized or maximized. Can be :obj:`"minimize"` or :obj:`"maximize"`; you
should pick :obj:`"minimize"` when optimizing the validation loss and :obj:`"maximize"` when optimizing one
or several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
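# A hedged usage sketch (my_model_init and the search space below are hypothetical):
#
#     trainer = Trainer(model_init=my_model_init, args=training_args,
#                       train_dataset=train_ds, eval_dataset=eval_ds)
#     best_run = trainer.hyperparameter_search(
#         hp_space=lambda trial: {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)},
#         direction="minimize",
#         n_trials=20,
#     )
#     print(best_run.run_id, best_run.objective, best_run.hyperparameters)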
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
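# Editorial note: when gradient accumulation is enabled, the detached loss returned here
# has already been divided by gradient_accumulation_steps, so summing it into tr_loss in
# train() accumulates (approximately) one full-batch loss per optimizer step rather than
# one loss per micro-batch.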
def compute_loss(self, model, inputs):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
return self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
return outputs["loss"] if isinstance(outputs, dict) else outputs[0]
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or torch.distributed.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the world_master process (unless in TPUs).
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_process_zero():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
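# Illustrative example (hypothetical paths): with save_total_limit=2 and checkpoints sorted
# as ["checkpoint-500", "checkpoint-1000", "checkpoint-1500"], one checkpoint (3 - 2 = 1)
# is deleted, starting from the oldest, unless _sorted_checkpoints moved it to the end of
# the list because it holds the best model.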
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
return output.metrics
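# A hedged usage sketch (my_eval_ds is hypothetical; extra metric keys only appear when a
# compute_metrics function was passed to the Trainer):
#
#     metrics = trainer.evaluate(eval_dataset=my_eval_ds)
#     # e.g. {"eval_loss": 0.41, "eval_accuracy": 0.87, "eval_runtime": 12.3, ..., "epoch": 3.0}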
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
.. note::
If your predictions or labels have different sequence lengths (for instance, because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
return output
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = 1
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = torch.distributed.get_world_size()
world_size = max(1, world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather the values of `tensors` (a tensor or a list/tuple of nested tensors) across processes and
convert them to numpy arrays before concatenation.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
with torch.no_grad():
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if has_labels:
if self.label_smoother is not None and "labels" in inputs:
loss = self.label_smoother(outputs, inputs["labels"]).mean().detach()
else:
loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
|
import argparse
import re
import sys
import boto3
from determined_deploy.aws import aws, constants
from determined_deploy.aws.deployment_types import secure, simple, vpc
def make_down_subparser(subparsers: argparse._SubParsersAction):
subparser = subparsers.add_parser("down", help="delete CloudFormation stack")
require_named = subparser.add_argument_group("required named arguments")
require_named.add_argument(
"--cluster-id", type=str, help="stack name for CloudFormation cluster", required=True
)
subparser.add_argument(
"--region", type=str, default=None, help="AWS region",
)
subparser.add_argument("--aws-profile", type=str, default=None, help=argparse.SUPPRESS)
def make_up_subparser(subparsers: argparse._SubParsersAction):
subparser = subparsers.add_parser("up", help="deploy/update CloudFormation stack")
require_named = subparser.add_argument_group("required named arguments")
require_named.add_argument(
"--cluster-id", type=str, help="stack name for CloudFormation cluster", required=True
)
require_named.add_argument(
"--keypair", type=str, help="aws ec2 keypair for master and agent", required=True
)
subparser.add_argument(
"--master-instance-type", type=str, help="instance type for master",
)
subparser.add_argument(
"--agent-instance-type", type=str, help="instance type for agent",
)
subparser.add_argument(
"--deployment-type",
type=str,
choices=constants.deployment_types.DEPLOYMENT_TYPES,
default=constants.defaults.DEPLOYMENT_TYPE,
help=f"deployment type - "
f'must be one of [{", ".join(constants.deployment_types.DEPLOYMENT_TYPES)}]',
)
subparser.add_argument("--aws-profile", type=str, default=None, help=argparse.SUPPRESS)
subparser.add_argument(
"--inbound-cidr", type=str, help="inbound IP Range in CIDR format",
)
subparser.add_argument(
"--det-version", type=str, help=argparse.SUPPRESS,
)
subparser.add_argument(
"--db-password",
type=str,
default=constants.defaults.DB_PASSWORD,
help="password for master database",
)
subparser.add_argument(
"--region", type=str, default=None, help="AWS region",
)
subparser.add_argument(
"--max-idle-agent-period", type=str, help="max agent idle time",
)
subparser.add_argument(
"--max-agent-starting-period", type=str, help="max agent starting time",
)
subparser.add_argument(
"--max-dynamic-agents",
type=int,
help="maximum number of dynamic agent instances at one time",
)
subparser.add_argument(
"--dry-run", action="store_true", help="print deployment template",
)
def make_aws_parser(subparsers: argparse._SubParsersAction):
parser_aws = subparsers.add_parser("aws", help="AWS help")
aws_subparsers = parser_aws.add_subparsers(help="command", dest="command")
make_down_subparser(aws_subparsers)
make_up_subparser(aws_subparsers)
def deploy_aws(args: argparse.Namespace) -> None:
if args.aws_profile:
boto3_session = boto3.Session(profile_name=args.aws_profile, region_name=args.region)
else:
boto3_session = boto3.Session(region_name=args.region)
if boto3_session.region_name not in constants.misc.SUPPORTED_REGIONS:
print(
f"det-deploy is only supported in {constants.misc.SUPPORTED_REGIONS} - "
f"tried to deploy to {boto3_session.region_name}"
)
print("use the --region argument to deploy to a supported region")
sys.exit(1)
if not re.match(constants.misc.CLOUDFORMATION_REGEX, args.cluster_id):
print("Deployment Failed - cluster-id much match ^[a-zA-Z][-a-zA-Z0-9]*$")
sys.exit(1)
if args.command == "down":
try:
aws.delete(args.cluster_id, boto3_session)
except Exception as e:
print(e)
print("Stack Deletion Failed. Check the AWS CloudFormation Console for details.")
print("Delete Successful")
return
deployment_type_map = {
constants.deployment_types.SIMPLE: simple.Simple,
constants.deployment_types.SECURE: secure.Secure,
constants.deployment_types.VPC: vpc.VPC,
}
det_configs = {
constants.cloudformation.KEYPAIR: args.keypair,
constants.cloudformation.MASTER_INSTANCE_TYPE: args.master_instance_type,
constants.cloudformation.AGENT_INSTANCE_TYPE: args.agent_instance_type,
constants.cloudformation.CLUSTER_ID: args.cluster_id,
constants.cloudformation.BOTO3_SESSION: boto3_session,
constants.cloudformation.VERSION: args.det_version,
constants.cloudformation.INBOUND_CIDR: args.inbound_cidr,
constants.cloudformation.DB_PASSWORD: args.db_password,
constants.cloudformation.MAX_IDLE_AGENT_PERIOD: args.max_idle_agent_period,
constants.cloudformation.MAX_AGENT_STARTING_PERIOD: args.max_agent_starting_period,
constants.cloudformation.MAX_DYNAMIC_AGENTS: args.max_dynamic_agents,
}
deployment_object = deployment_type_map[args.deployment_type](det_configs)
if args.dry_run:
deployment_object.print()
return
print("Starting Determined Deployment")
try:
deployment_object.deploy()
except Exception as e:
print(e)
print("Stack Deployment Failed. Check the AWS CloudFormation Console for details.")
sys.exit(1)
print("Determined Deployment Successful")
| import argparse
import re
import sys
import boto3
from determined_deploy.aws import aws, constants
from determined_deploy.aws.deployment_types import secure, simple, vpc
def make_down_subparser(subparsers: argparse._SubParsersAction):
subparser = subparsers.add_parser("down", help="delete CloudFormation stack")
require_named = subparser.add_argument_group("required named arguments")
require_named.add_argument(
"--cluster-id", type=str, help="stack name for CloudFormation cluster", required=True
)
subparser.add_argument(
"--region", type=str, default=None, help="AWS region",
)
subparser.add_argument("--aws-profile", type=str, default=None, help=argparse.SUPPRESS)
def make_up_subparser(subparsers: argparse._SubParsersAction):
subparser = subparsers.add_parser("up", help="deploy/update CloudFormation stack")
require_named = subparser.add_argument_group("required named arguments")
require_named.add_argument(
"--cluster-id", type=str, help="stack name for CloudFormation cluster", required=True
)
require_named.add_argument(
"--keypair", type=str, help="aws ec2 keypair for master and agent", required=True
)
subparser.add_argument(
"--master-instance-type", type=str, help="instance type for master",
)
subparser.add_argument(
"--agent-instance-type", type=str, help="instance type for agent",
)
subparser.add_argument(
"--deployment-type",
type=str,
choices=constants.deployment_types.DEPLOYMENT_TYPES,
default=constants.defaults.DEPLOYMENT_TYPE,
help=f"deployment type - "
f'must be one of [{", ".join(constants.deployment_types.DEPLOYMENT_TYPES)}]',
)
subparser.add_argument("--aws-profile", type=str, default=None, help=argparse.SUPPRESS)
subparser.add_argument(
"--inbound-cidr", type=str, help="inbound IP Range in CIDR format",
)
subparser.add_argument(
"--det-version", type=str, help=argparse.SUPPRESS,
)
subparser.add_argument(
"--db-password",
type=str,
default=constants.defaults.DB_PASSWORD,
help="password for master database",
)
subparser.add_argument(
"--region", type=str, default=None, help="AWS region",
)
subparser.add_argument(
"--max-idle-agent-period", type=str, help="max agent idle time",
)
subparser.add_argument(
"--max-agent-starting-period", type=str, help="max agent starting time",
)
subparser.add_argument(
"--max-dynamic-agents",
type=int,
help="maximum number of dynamic agent instances at one time",
)
subparser.add_argument(
"--dry-run", action="store_true", help="print deployment template",
)
def make_aws_parser(subparsers: argparse._SubParsersAction):
parser_aws = subparsers.add_parser("aws", help="AWS help")
aws_subparsers = parser_aws.add_subparsers(help="command", dest="command")
make_down_subparser(aws_subparsers)
make_up_subparser(aws_subparsers)
def deploy_aws(args: argparse.Namespace) -> None:
if args.aws_profile:
boto3_session = boto3.Session(profile_name=args.aws_profile, region_name=args.region)
else:
boto3_session = boto3.Session(region_name=args.region)
if boto3_session.region_name not in constants.misc.SUPPORTED_REGIONS:
print(
f"det-deploy is only supported in {constants.misc.SUPPORTED_REGIONS} - "
f"tried to deploy to {boto3_session.region_name}"
)
print("use the --region argument to deploy to a supported region")
sys.exit(1)
if not re.match(constants.misc.CLOUDFORMATION_REGEX, args.cluster_id):
print("Deployment Failed - cluster-id much match ^[a-zA-Z][-a-zA-Z0-9]*$")
sys.exit(1)
if args.command == "down":
try:
aws.delete(args.cluster_id, boto3_session)
except Exception as e:
print(e)
print("Stack Deletion Failed. Check the AWS CloudFormation Console for details.")
print("Delete Successful")
return
deployment_type_map = {
constants.deployment_types.SIMPLE: simple.Simple,
constants.deployment_types.SECURE: secure.Secure,
constants.deployment_types.VPC: vpc.VPC,
}
det_configs = {
constants.cloudformation.KEYPAIR: args.keypair,
constants.cloudformation.MASTER_INSTANCE_TYPE: args.master_instance_type,
constants.cloudformation.AGENT_INSTANCE_TYPE: args.agent_instance_type,
constants.cloudformation.CLUSTER_ID: args.cluster_id,
constants.cloudformation.BOTO3_SESSION: boto3_session,
constants.cloudformation.VERSION: args.det_version,
constants.cloudformation.INBOUND_CIDR: args.inbound_cidr,
constants.cloudformation.DB_PASSWORD: args.db_password,
constants.cloudformation.MAX_IDLE_AGENT_PERIOD: args.max_idle_agent_period,
constants.cloudformation.MAX_AGENT_STARTING_PERIOD: args.max_agent_starting_period,
constants.cloudformation.MAX_DYNAMIC_AGENTS: args.max_dynamic_agents,
}
deployment_object = deployment_type_map[args.deployment_type](det_configs)
if args.dry_run:
deployment_object.print()
return
print("Starting Determined Deployment")
try:
deployment_object.deploy()
except Exception as e:
print(e)
print("Stack Deployment Failed. Check the AWS CloudFormation Console for details.")
sys.exit(1)
print("Determined Deployment Successful")
|
""" manage PyTables query interface via Expressions """
import ast
from functools import partial
from typing import Any, Dict, Optional, Tuple
import numpy as np
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas as pd
import pandas.core.common as com
from pandas.core.computation import expr, ops, scope as _scope
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class PyTablesScope(_scope.Scope):
__slots__ = ("queryables",)
queryables: Dict[str, Any]
def __init__(
self,
level: int,
global_dict=None,
local_dict=None,
queryables: Optional[Dict[str, Any]] = None,
):
super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
env: PyTablesScope
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
return object.__new__(klass)
def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryable
if self.side == "left":
# Note: The behavior of __new__ ensures that self.name is a str here
if self.name not in self.env.queryables:
raise NameError(f"name {repr(self.name)} is not defined")
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
# read-only property overwriting read/write property
@property # type: ignore
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
assert isinstance(env, PyTablesScope), type(env)
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
op: str
queryables: Dict[str, Any]
def __init__(self, op: str, lhs, rhs, queryables: Dict[str, Any], encoding):
super().__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if isinstance(right, ConditionBinOp):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if isinstance(right, FilterBinOp):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(
self.op, left, right, queryables=self.queryables, encoding=self.encoding
).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self) -> bool:
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self) -> bool:
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), "kind", None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), "meta", None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), "metadata", None)
def generate(self, v) -> str:
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return f"({self.lhs} {self.op} {val})"
def convert_value(self, v) -> "TermValue":
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded, encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
return TermValue(v, v.value, kind)
elif kind == "timedelta64" or kind == "timedelta":
v = Timedelta(v, unit="s").value
return TermValue(int(v), v, kind)
elif meta == "category":
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side="left")
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, "integer")
elif kind == "integer":
v = int(float(v))
return TermValue(v, v, kind)
elif kind == "float":
v = float(v)
return TermValue(v, v, kind)
elif kind == "bool":
if isinstance(v, str):
v = not v.strip().lower() in [
"false",
"f",
"no",
"n",
"none",
"0",
"[]",
"{}",
"",
]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), "string")
else:
raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
def convert_values(self):
pass
class FilterBinOp(BinOp):
filter: Optional[Tuple[Any, Any, pd.Index]] = None
def __repr__(self) -> str:
if self.filter is None:
return "Filter: Not Initialized"
return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
rhs = self.conform(self.rhs)
values = list(rhs)
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ["==", "!="] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
return self
return None
# equality conditions
if self.op in ["==", "!="]:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
else:
raise TypeError(
f"passing a filterable condition to a non-table indexer [{self}]"
)
return self
def generate_filter_op(self, invert: bool = False):
if (self.op == "!=" and not invert) or (self.op == "==" and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
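    # Hedged example (hypothetical values): the callable returned above is later
    # applied as filter_op(axis_values, vals); "==" keeps values present in vals,
    # while "!=" (or an inverted "==") keeps the complement:
    #   pd.Index([1, 3]).isin([1, 2])   -> array([ True, False])
    #   ~pd.Index([1, 3]).isin([1, 2])  -> array([False,  True])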
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __repr__(self) -> str:
return pprint_thing(f"[Condition : [{self.condition}]]")
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError(
"cannot use an invert condition when passing to numexpr"
)
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ["==", "!="]:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = f"({" | ".join(vs)})"
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
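    # Illustrative sketch (hypothetical column and values): with lhs "A",
    # op "==" and string values ["foo", "bar"], each TermValue is rendered by
    # generate() and the clauses are OR-joined into a numexpr condition:
    #   ((A == "foo") | (A == "bar"))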
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != "~":
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
class PyTablesExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super().__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(
self,
f"visit_{bin_node}",
lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
)
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp("~", self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError("Unary addition not supported")
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(
ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
)
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError(f"cannot subscript {repr(value)} with {repr(slobj)}")
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = type(node.ctx)
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError(f"Invalid Attribute context {ctx.__name__}")
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w
class PyTablesExpr(expr.Expr):
"""
Hold a pytables-like expression, comprised of possibly multiple 'terms'.
Parameters
----------
where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
a PyTablesExpr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
_visitor: Optional[PyTablesExprVisitor]
env: PyTablesScope
def __init__(
self,
where,
queryables: Optional[Dict[str, Any]] = None,
encoding=None,
scope_level: int = 0,
):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict: DeepChainMap[Any, Any] = DeepChainMap()
if isinstance(where, PyTablesExpr):
local_dict = where.env.scope
_where = where.expr
elif isinstance(where, (list, tuple)):
where = list(where)
for idx, w in enumerate(where):
if isinstance(w, PyTablesExpr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
_where = " & ".join((f"({w})" for w in com.flatten(where)))
else:
_where = where
self.expr = _where
self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, str):
self.env.queryables.update(queryables)
self._visitor = PyTablesExprVisitor(
self.env,
queryables=queryables,
parser="pytables",
engine="pytables",
encoding=encoding,
)
self.terms = self.parse()
def __repr__(self) -> str:
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid condition"
)
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid filter"
)
return self.condition, self.filter
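# Hedged usage sketch (names below are hypothetical, not taken from the pandas
# docs): HDFStore-style callers build a PyTablesExpr from the user's ``where``
# string plus a queryables map describing the table's columns; evaluate() then
# splits the parsed terms into a numexpr condition (pushed down to PyTables)
# and a post-read filter:
#   expr_obj = PyTablesExpr("index>=5 & columns=['A']", queryables=table_queryables)
#   condition, filter_ = expr_obj.evaluate()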
class TermValue:
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind: str):
assert isinstance(kind, str), kind
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding) -> str:
""" quote the string if not encoded
else encode and return """
if self.kind == "string":
if encoding is not None:
return str(self.converted)
return f'"{self.converted}"'
elif self.kind == "float":
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return str(self.converted)
def maybe_expression(s) -> bool:
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, str):
return False
ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
# make sure we have an op at least
return any(op in s for op in ops)
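# Illustrative note (hypothetical inputs): maybe_expression is only a loose
# screen for operator-like strings, e.g.
#   maybe_expression("index>=5")       -> True   (contains ">=")
#   maybe_expression("a plain string") -> False  (no operator and no "=")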
| """ manage PyTables query interface via Expressions """
import ast
from functools import partial
from typing import Any, Dict, Optional, Tuple
import numpy as np
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas as pd
import pandas.core.common as com
from pandas.core.computation import expr, ops, scope as _scope
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class PyTablesScope(_scope.Scope):
__slots__ = ("queryables",)
queryables: Dict[str, Any]
def __init__(
self,
level: int,
global_dict=None,
local_dict=None,
queryables: Optional[Dict[str, Any]] = None,
):
super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
env: PyTablesScope
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
return object.__new__(klass)
def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
super().__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryables
if self.side == "left":
# Note: The behavior of __new__ ensures that self.name is a str here
if self.name not in self.env.queryables:
raise NameError(f"name {repr(self.name)} is not defined")
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
# read-only property overwriting read/write property
@property # type: ignore
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
assert isinstance(env, PyTablesScope), type(env)
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
op: str
queryables: Dict[str, Any]
def __init__(self, op: str, lhs, rhs, queryables: Dict[str, Any], encoding):
super().__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if isinstance(right, ConditionBinOp):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if isinstance(right, FilterBinOp):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(
self.op, left, right, queryables=self.queryables, encoding=self.encoding
).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self) -> bool:
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self) -> bool:
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), "kind", None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), "meta", None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), "metadata", None)
def generate(self, v) -> str:
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return f"({self.lhs} {self.op} {val})"
def convert_value(self, v) -> "TermValue":
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded, encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == "datetime64" or kind == "datetime":
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert("UTC")
return TermValue(v, v.value, kind)
elif kind == "timedelta64" or kind == "timedelta":
v = Timedelta(v, unit="s").value
return TermValue(int(v), v, kind)
elif meta == "category":
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side="left")
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, "integer")
elif kind == "integer":
v = int(float(v))
return TermValue(v, v, kind)
elif kind == "float":
v = float(v)
return TermValue(v, v, kind)
elif kind == "bool":
if isinstance(v, str):
v = not v.strip().lower() in [
"false",
"f",
"no",
"n",
"none",
"0",
"[]",
"{}",
"",
]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), "string")
else:
raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
def convert_values(self):
pass
class FilterBinOp(BinOp):
filter: Optional[Tuple[Any, Any, pd.Index]] = None
def __repr__(self) -> str:
if self.filter is None:
return "Filter: Not Initialized"
return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
rhs = self.conform(self.rhs)
values = list(rhs)
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ["==", "!="] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
return self
return None
# equality conditions
if self.op in ["==", "!="]:
filter_op = self.generate_filter_op()
self.filter = (self.lhs, filter_op, pd.Index(values))
else:
raise TypeError(
f"passing a filterable condition to a non-table indexer [{self}]"
)
return self
def generate_filter_op(self, invert: bool = False):
if (self.op == "!=" and not invert) or (self.op == "==" and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __repr__(self) -> str:
return pprint_thing(f"[Condition : [{self.condition}]]")
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError(
"cannot use an invert condition when passing to numexpr"
)
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError(f"query term is not valid [{self}]")
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ["==", "!="]:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = f"({' | '.join(vs)})"
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != "~":
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
class PyTablesExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super().__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(
self,
f"visit_{bin_node}",
lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
)
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp("~", self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError("Unary addition not supported")
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(
ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
)
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError(f"cannot subscript {repr(value)} with {repr(slobj)}")
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = type(node.ctx)
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError(f"Invalid Attribute context {ctx.__name__}")
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w
class PyTablesExpr(expr.Expr):
"""
Hold a pytables-like expression, comprised of possibly multiple 'terms'.
Parameters
----------
where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
a PyTablesExpr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
_visitor: Optional[PyTablesExprVisitor]
env: PyTablesScope
def __init__(
self,
where,
queryables: Optional[Dict[str, Any]] = None,
encoding=None,
scope_level: int = 0,
):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict: DeepChainMap[Any, Any] = DeepChainMap()
if isinstance(where, PyTablesExpr):
local_dict = where.env.scope
_where = where.expr
elif isinstance(where, (list, tuple)):
where = list(where)
for idx, w in enumerate(where):
if isinstance(w, PyTablesExpr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
_where = " & ".join((f"({w})" for w in com.flatten(where)))
else:
_where = where
self.expr = _where
self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, str):
self.env.queryables.update(queryables)
self._visitor = PyTablesExprVisitor(
self.env,
queryables=queryables,
parser="pytables",
engine="pytables",
encoding=encoding,
)
self.terms = self.parse()
def __repr__(self) -> str:
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid condition"
)
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError(
f"cannot process expression [{self.expr}], [{self}] "
"is not a valid filter"
)
return self.condition, self.filter
class TermValue:
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind: str):
assert isinstance(kind, str), kind
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding) -> str:
""" quote the string if not encoded
else encode and return """
if self.kind == "string":
if encoding is not None:
return str(self.converted)
return f'"{self.converted}"'
elif self.kind == "float":
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return str(self.converted)
def maybe_expression(s) -> bool:
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, str):
return False
ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
# make sure we have an op at least
return any(op in s for op in ops)
|
import os
import sys
sys.path.append(os.getcwd())
# Our infrastructure files
# from utils_data import *
# from utils_nn import *
from learn.utils.data import *
from learn.utils.nn import *
# neural nets
from learn.models.model_general_nn import GeneralNN
from learn.models.model_ensemble_nn import EnsembleNN
# Torch Packages
import torch
# numpy / pandas are referenced below; import them explicitly in case the
# wildcard imports above do not re-export them
import numpy as np
import pandas as pd
# timing etc
import os
import hydra
# Plotting
import matplotlib.pyplot as plt
import logging
log = logging.getLogger(__name__)
def save_file(obj, filename):
    path = os.path.join(os.getcwd(), filename)
    log.info(f"Saving File: {filename}")
    torch.save(obj, path)
def create_model_params(df, model_cfg):
# only take targets from robot.yaml
target_keys = []
for typ in model_cfg.delta_state_targets:
target_keys.append(typ + '_0dx')
for typ in model_cfg.true_state_targets:
target_keys.append(typ + '_1fx')
# grab variables
history_states = df.filter(regex='tx')
history_actions = df.filter(regex='tu')
# add extra inputs like objective function
extra_inputs = []
if model_cfg.extra_inputs:
for extra in model_cfg.extra_inputs:
extra_inputs.append(extra)
# trim past states to be what we want
history = int(history_states.columns[-1][-3])
if history > model_cfg.history:
for i in range(history, model_cfg.history, -1):
str_remove = str(i) + 't'
for state in history_states.columns:
if str_remove in state:
history_states.drop(columns=state, inplace=True)
for action in history_actions.columns:
if str_remove in action:
history_actions.drop(columns=action, inplace=True)
# ignore states not helpful to prediction
for ignore in model_cfg.ignore_in:
for state in history_states.columns:
if ignore in state:
history_states.drop(columns=state, inplace=True)
params = dict()
params['targets'] = df.loc[:, target_keys]
params['states'] = history_states
params['inputs'] = history_actions
# TODO add extra inputs to these parameters
return params
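# Hedged note on the column-name convention assumed above (example names are
# illustrative): target columns end in '_0dx' (delta-state) or '_1fx'
# (true-state), past states match 'tx' and past actions match 'tu'
# (e.g. 'pitch_0tx', 'm1_0tu'), with the digit third from the end giving the
# history index that the trimming loop strips down to model_cfg.history.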
def params_to_training(data):
X = data['states'].values
U = data['inputs'].values
dX = data['targets'].values
return X, U, dX
def train_model(X, U, dX, model_cfg):
log.info("Training Model")
dx = np.shape(X)[1]
du = np.shape(U)[1]
dt = np.shape(dX)[1]
# if set dimensions, double check them here
if model_cfg.training.dx != -1:
assert model_cfg.training.dx == dx, "model dimensions in cfg do not match data given"
if model_cfg.training.du != -1:
        assert model_cfg.training.du == du, "model dimensions in cfg do not match data given"
if model_cfg.training.dt != -1:
        assert model_cfg.training.dt == dt, "model dimensions in cfg do not match data given"
train_log = dict()
nn_params = { # all should be pretty self-explanatory
'dx': dx,
'du': du,
'dt': dt,
'hid_width': model_cfg.training.hid_width,
'hid_depth': model_cfg.training.hid_depth,
'bayesian_flag': model_cfg.training.probl,
'activation': Swish(), # TODO use hydra.utils.instantiate
'dropout': model_cfg.training.extra.dropout,
'split_flag': False,
'ensemble': model_cfg.ensemble
}
train_params = {
'epochs': model_cfg.optimizer.epochs,
'batch_size': model_cfg.optimizer.batch,
'optim': model_cfg.optimizer.name,
'split': model_cfg.optimizer.split,
'lr': model_cfg.optimizer.lr, # bayesian .00175, mse: .0001
'lr_schedule': model_cfg.optimizer.lr_schedule,
'test_loss_fnc': [],
'preprocess': model_cfg.optimizer.preprocess,
}
train_log['nn_params'] = nn_params
train_log['train_params'] = train_params
if model_cfg.ensemble:
newNN = EnsembleNN(nn_params, model_cfg.training.E)
acctest, acctrain = newNN.train_cust((X, U, dX), train_params)
else:
newNN = GeneralNN(nn_params)
newNN.init_weights_orth()
if nn_params['bayesian_flag']: newNN.init_loss_fnc(dX, l_mean=1, l_cov=1) # data for std,
acctest, acctrain = newNN.train_cust((X, U, dX), train_params)
if model_cfg.ensemble:
min_err = np.min(acctrain, 0)
min_err_test = np.min(acctest, 0)
else:
min_err = np.min(acctrain)
min_err_test = np.min(acctest)
train_log['testerror'] = acctest
train_log['trainerror'] = acctrain
train_log['min_trainerror'] = min_err
train_log['min_testerror'] = min_err_test
return newNN, train_log
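# Illustrative note: train_cust is assumed to return per-epoch test/train error
# arrays; for an ensemble these are 2-D (epochs x E), so np.min(acc, 0) above
# gives one minimum per ensemble member, while the single-model case reduces to
# a scalar minimum over epochs.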
######################################################################
@hydra.main(config_path='conf/trainer.yaml')
def trainer(cfg):
log.info("============= Configuration =============")
log.info(f"Config:\n{cfg.pretty()}")
log.info("=========================================")
######################################################################
log.info('Training a new model')
data_dir = cfg.load.base_dir
avail_data = os.path.join(os.getcwd()[:os.getcwd().rfind('outputs')-1]+f"/ex_data/SAS/{cfg.robot}.csv")
if os.path.isfile(avail_data):
df = pd.read_csv(avail_data)
log.info(f"Loaded preprocessed data from {avail_data}")
else:
if cfg.robot =='iono':
df, log_load = preprocess_iono(data_dir, cfg.load)
else:
df, log_load = preprocess_cf(data_dir, cfg.load)
msg = f"Loading Data"
if 'dir' in log_load is not None:
msg += f", dir={log_load["dir"]}"
        if 'num_files' in log_load:
msg += f", num_files={log_load["num_files"]}"
if 'datapoints' in log_load:
msg += f", datapoints={log_load["datapoints"]}"
log.info(msg)
data = create_model_params(df, cfg.model)
X, U, dX = params_to_training(data)
model, train_log = train_model(X, U, dX, cfg.model)
model.store_training_lists(list(data['states'].columns),
list(data['inputs'].columns),
list(data['targets'].columns))
msg = "Trained Model..."
msg += "Prediction List" + str(list(data['targets'].columns)) + "\n"
msg += "Min test error: " + str(train_log['min_testerror']) + "\n"
msg += "Mean Min test error: " + str(np.mean(train_log['min_testerror'])) + "\n"
msg += "Min train error: " + str(train_log['min_trainerror']) + "\n"
log.info(msg)
if cfg.model.training.plot_loss:
ax1 = plt.subplot(211)
ax1.plot(train_log['testerror'], label='Test Loss')
plt.title('Test Loss')
ax2 = plt.subplot(212)
ax2.plot(train_log['trainerror'], label='Train Loss')
plt.title('Training Loss')
ax1.legend()
# plt.show()
        plt.savefig(os.path.join(os.getcwd(), 'modeltraining.pdf'))
# Saves NN params
if cfg.save:
save_file(model, cfg.model.name + '.pth')
normX, normU, normdX = model.getNormScalers()
save_file((normX, normU, normdX), cfg.model.name + "_normparams.pkl")
# Saves data file
save_file(data, cfg.model.name + "_data.pkl")
if __name__ == '__main__':
sys.exit(trainer())
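# Hedged usage sketch (the script name and flag values are illustrative, not
# verified against conf/trainer.yaml): hydra lets the config keys referenced
# above be overridden on the command line, e.g.
#   python trainer.py robot=iono model.ensemble=true model.optimizer.epochs=50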
| import os
import sys
sys.path.append(os.getcwd())
# Our infrastructure files
# from utils_data import *
# from utils_nn import *
from learn.utils.data import *
from learn.utils.nn import *
# neural nets
from learn.models.model_general_nn import GeneralNN
from learn.models.model_ensemble_nn import EnsembleNN
# Torch Packages
import torch
# numpy / pandas are referenced below; import them explicitly in case the
# wildcard imports above do not re-export them
import numpy as np
import pandas as pd
# timing etc
import os
import hydra
# Plotting
import matplotlib.pyplot as plt
import logging
log = logging.getLogger(__name__)
def save_file(obj, filename):
    path = os.path.join(os.getcwd(), filename)
    log.info(f"Saving File: {filename}")
    torch.save(obj, path)
def create_model_params(df, model_cfg):
# only take targets from robot.yaml
target_keys = []
for typ in model_cfg.delta_state_targets:
target_keys.append(typ + '_0dx')
for typ in model_cfg.true_state_targets:
target_keys.append(typ + '_1fx')
# grab variables
history_states = df.filter(regex='tx')
history_actions = df.filter(regex='tu')
# add extra inputs like objective function
extra_inputs = []
if model_cfg.extra_inputs:
for extra in model_cfg.extra_inputs:
extra_inputs.append(extra)
# trim past states to be what we want
history = int(history_states.columns[-1][-3])
if history > model_cfg.history:
for i in range(history, model_cfg.history, -1):
str_remove = str(i) + 't'
for state in history_states.columns:
if str_remove in state:
history_states.drop(columns=state, inplace=True)
for action in history_actions.columns:
if str_remove in action:
history_actions.drop(columns=action, inplace=True)
# ignore states not helpful to prediction
for ignore in model_cfg.ignore_in:
for state in history_states.columns:
if ignore in state:
history_states.drop(columns=state, inplace=True)
params = dict()
params['targets'] = df.loc[:, target_keys]
params['states'] = history_states
params['inputs'] = history_actions
# TODO add extra inputs to these parameters
return params
def params_to_training(data):
X = data['states'].values
U = data['inputs'].values
dX = data['targets'].values
return X, U, dX
def train_model(X, U, dX, model_cfg):
log.info("Training Model")
dx = np.shape(X)[1]
du = np.shape(U)[1]
dt = np.shape(dX)[1]
# if set dimensions, double check them here
if model_cfg.training.dx != -1:
assert model_cfg.training.dx == dx, "model dimensions in cfg do not match data given"
if model_cfg.training.du != -1:
        assert model_cfg.training.du == du, "model dimensions in cfg do not match data given"
if model_cfg.training.dt != -1:
        assert model_cfg.training.dt == dt, "model dimensions in cfg do not match data given"
train_log = dict()
nn_params = { # all should be pretty self-explanatory
'dx': dx,
'du': du,
'dt': dt,
'hid_width': model_cfg.training.hid_width,
'hid_depth': model_cfg.training.hid_depth,
'bayesian_flag': model_cfg.training.probl,
'activation': Swish(), # TODO use hydra.utils.instantiate
'dropout': model_cfg.training.extra.dropout,
'split_flag': False,
'ensemble': model_cfg.ensemble
}
train_params = {
'epochs': model_cfg.optimizer.epochs,
'batch_size': model_cfg.optimizer.batch,
'optim': model_cfg.optimizer.name,
'split': model_cfg.optimizer.split,
'lr': model_cfg.optimizer.lr, # bayesian .00175, mse: .0001
'lr_schedule': model_cfg.optimizer.lr_schedule,
'test_loss_fnc': [],
'preprocess': model_cfg.optimizer.preprocess,
}
train_log['nn_params'] = nn_params
train_log['train_params'] = train_params
if model_cfg.ensemble:
newNN = EnsembleNN(nn_params, model_cfg.training.E)
acctest, acctrain = newNN.train_cust((X, U, dX), train_params)
else:
newNN = GeneralNN(nn_params)
newNN.init_weights_orth()
if nn_params['bayesian_flag']: newNN.init_loss_fnc(dX, l_mean=1, l_cov=1) # data for std,
acctest, acctrain = newNN.train_cust((X, U, dX), train_params)
if model_cfg.ensemble:
min_err = np.min(acctrain, 0)
min_err_test = np.min(acctest, 0)
else:
min_err = np.min(acctrain)
min_err_test = np.min(acctest)
train_log['testerror'] = acctest
train_log['trainerror'] = acctrain
train_log['min_trainerror'] = min_err
train_log['min_testerror'] = min_err_test
return newNN, train_log
######################################################################
@hydra.main(config_path='conf/trainer.yaml')
def trainer(cfg):
log.info("============= Configuration =============")
log.info(f"Config:\n{cfg.pretty()}")
log.info("=========================================")
######################################################################
log.info('Training a new model')
data_dir = cfg.load.base_dir
avail_data = os.path.join(os.getcwd()[:os.getcwd().rfind('outputs')-1]+f"/ex_data/SAS/{cfg.robot}.csv")
if os.path.isfile(avail_data):
df = pd.read_csv(avail_data)
log.info(f"Loaded preprocessed data from {avail_data}")
else:
if cfg.robot =='iono':
df, log_load = preprocess_iono(data_dir, cfg.load)
else:
df, log_load = preprocess_cf(data_dir, cfg.load)
msg = f"Loading Data"
if 'dir' in log_load is not None:
msg += f", dir={log_load['dir']}"
        if 'num_files' in log_load:
msg += f", num_files={log_load['num_files']}"
if 'datapoints' in log_load:
msg += f", datapoints={log_load['datapoints']}"
log.info(msg)
data = create_model_params(df, cfg.model)
X, U, dX = params_to_training(data)
model, train_log = train_model(X, U, dX, cfg.model)
model.store_training_lists(list(data['states'].columns),
list(data['inputs'].columns),
list(data['targets'].columns))
msg = "Trained Model..."
msg += "Prediction List" + str(list(data['targets'].columns)) + "\n"
msg += "Min test error: " + str(train_log['min_testerror']) + "\n"
msg += "Mean Min test error: " + str(np.mean(train_log['min_testerror'])) + "\n"
msg += "Min train error: " + str(train_log['min_trainerror']) + "\n"
log.info(msg)
if cfg.model.training.plot_loss:
ax1 = plt.subplot(211)
ax1.plot(train_log['testerror'], label='Test Loss')
plt.title('Test Loss')
ax2 = plt.subplot(212)
ax2.plot(train_log['trainerror'], label='Train Loss')
plt.title('Training Loss')
ax1.legend()
# plt.show()
        plt.savefig(os.path.join(os.getcwd(), 'modeltraining.pdf'))
# Saves NN params
if cfg.save:
save_file(model, cfg.model.name + '.pth')
normX, normU, normdX = model.getNormScalers()
save_file((normX, normU, normdX), cfg.model.name + "_normparams.pkl")
# Saves data file
save_file(data, cfg.model.name + "_data.pkl")
if __name__ == '__main__':
sys.exit(trainer())
|
import asyncio
import logging
from time import time
from typing import Dict, List, Optional, Tuple, Callable
import pytest
import covid.server.ws_connection as ws
from covid.full_node.mempool import Mempool
from covid.full_node.full_node_api import FullNodeAPI
from covid.protocols import full_node_protocol
from covid.simulator.simulator_protocol import FarmNewBlockProtocol
from covid.types.announcement import Announcement
from covid.types.blockchain_format.coin import Coin
from covid.types.blockchain_format.sized_bytes import bytes32
from covid.types.coin_spend import CoinSpend
from covid.types.condition_opcodes import ConditionOpcode
from covid.types.condition_with_args import ConditionWithArgs
from covid.types.spend_bundle import SpendBundle
from covid.types.mempool_item import MempoolItem
from covid.util.clvm import int_to_bytes
from covid.util.condition_tools import conditions_for_solution
from covid.util.errors import Err
from covid.util.ints import uint64
from covid.util.hash import std_hash
from covid.types.mempool_inclusion_status import MempoolInclusionStatus
from covid.util.api_decorators import api_request, peer_required, bytes_required
from covid.full_node.mempool_check_conditions import get_name_puzzle_conditions
from covid.full_node.pending_tx_cache import PendingTxCache
from blspy import G2Element
from covid.util.recursive_replace import recursive_replace
from tests.connection_utils import connect_and_get_peer
from tests.core.node_height import node_height_at_least
from tests.setup_nodes import bt, setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
from covid.types.blockchain_format.program import Program, INFINITE_COST
from covid.consensus.cost_calculator import NPCResult
from covid.types.blockchain_format.program import SerializedProgram
from clvm_tools import binutils
from covid.types.generator_types import BlockGenerator
from clvm.casts import int_from_bytes
BURN_PUZZLE_HASH = b"0" * 32
BURN_PUZZLE_HASH_2 = b"1" * 32
WALLET_A = bt.get_pool_wallet_tool()
log = logging.getLogger(__name__)
def generate_test_spend_bundle(
coin: Coin,
condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]] = None,
fee: uint64 = uint64(0),
amount: uint64 = uint64(1000),
new_puzzle_hash=BURN_PUZZLE_HASH,
) -> SpendBundle:
if condition_dic is None:
condition_dic = {}
transaction = WALLET_A.generate_signed_transaction(amount, new_puzzle_hash, coin, condition_dic, fee)
assert transaction is not None
return transaction
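# Hedged usage sketch (values are illustrative): tests below pass a reward coin
# from a farmed block plus an optional opcode -> [ConditionWithArgs] dict, e.g.
#   cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(1)])
#   sb = generate_test_spend_bundle(coin, {cvp.opcode: [cvp]}, fee=uint64(10))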
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
@pytest.fixture(scope="module")
async def two_nodes():
async_gen = setup_simulators_and_wallets(2, 1, {})
nodes, _ = await async_gen.__anext__()
full_node_1 = nodes[0]
full_node_2 = nodes[1]
server_1 = full_node_1.full_node.server
server_2 = full_node_2.full_node.server
yield full_node_1, full_node_2, server_1, server_2
async for _ in async_gen:
yield _
def make_item(idx: int, cost: uint64 = uint64(80)) -> MempoolItem:
spend_bundle_name = bytes([idx] * 32)
return MempoolItem(
SpendBundle([], G2Element()),
uint64(0),
NPCResult(None, [], cost),
cost,
spend_bundle_name,
[],
[],
SerializedProgram(),
)
class TestPendingTxCache:
def test_recall(self):
c = PendingTxCache(100)
item = make_item(1)
c.add(item)
tx = c.drain()
assert tx == {item.spend_bundle_name: item}
def test_fifo_limit(self):
c = PendingTxCache(200)
# each item has cost 80
items = [make_item(i) for i in range(1, 4)]
for i in items:
c.add(i)
# the max cost is 200, only two transactions will fit
        # we evict items FIFO, so the two most recently added will be left
tx = c.drain()
assert tx == {items[-2].spend_bundle_name: items[-2], items[-1].spend_bundle_name: items[-1]}
def test_drain(self):
c = PendingTxCache(100)
item = make_item(1)
c.add(item)
tx = c.drain()
assert tx == {item.spend_bundle_name: item}
# drain will clear the cache, so a second call will be empty
tx = c.drain()
assert tx == {}
def test_cost(self):
c = PendingTxCache(200)
assert c.cost() == 0
item1 = make_item(1)
c.add(item1)
# each item has cost 80
assert c.cost() == 80
item2 = make_item(2)
c.add(item2)
assert c.cost() == 160
# the first item is evicted, so the cost stays the same
item3 = make_item(3)
c.add(item3)
assert c.cost() == 160
tx = c.drain()
assert tx == {item2.spend_bundle_name: item2, item3.spend_bundle_name: item3}
assert c.cost() == 0
item4 = make_item(4)
c.add(item4)
assert c.cost() == 80
tx = c.drain()
assert tx == {item4.spend_bundle_name: item4}
class TestMempool:
@pytest.mark.asyncio
async def test_basic_mempool(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
3,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
full_node_1, _, server_1, _ = two_nodes
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, blocks[-1].height)
max_mempool_cost = 40000000 * 5
mempool = Mempool(max_mempool_cost)
assert mempool.get_min_fee_rate(104000) == 0
with pytest.raises(ValueError):
mempool.get_min_fee_rate(max_mempool_cost + 1)
spend_bundle = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
assert spend_bundle is not None
@peer_required
@api_request
@bytes_required
async def respond_transaction(
node: FullNodeAPI,
tx: full_node_protocol.RespondTransaction,
peer: ws.WSCovidConnection,
tx_bytes: bytes = b"",
test: bool = False,
) -> Tuple[MempoolInclusionStatus, Optional[Err]]:
"""
Receives a full transaction from peer.
If tx is added to mempool, send tx_id to others. (new_transaction)
"""
assert tx_bytes != b""
spend_name = std_hash(tx_bytes)
if spend_name in node.full_node.full_node_store.pending_tx_request:
node.full_node.full_node_store.pending_tx_request.pop(spend_name)
if spend_name in node.full_node.full_node_store.peers_with_tx:
node.full_node.full_node_store.peers_with_tx.pop(spend_name)
return await node.full_node.respond_transaction(tx.transaction, spend_name, peer, test)
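# Note (sketch; names as used in the tests below): the decorator stack above
# appears to mirror the node's API wrapping so that tests can call the endpoint
# directly and unpack the inclusion status and error, e.g.
#   status, err = await respond_transaction(full_node_1, tx1, peer)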
class TestMempoolManager:
@pytest.mark.asyncio
async def test_basic_mempool_manager(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
5,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
full_node_1, full_node_2, server_1, server_2 = two_nodes
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_2, blocks[-1].height)
spend_bundle = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
assert spend_bundle is not None
tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle)
res = await full_node_1.respond_transaction(tx, peer)
log.info(f"Res {res}")
await time_out_assert(
10,
full_node_1.full_node.mempool_manager.get_spendbundle,
spend_bundle,
spend_bundle.name(),
)
    # this test makes sure that one spend successfully asserts the announcement
    # from another spend, even though the assert condition is duplicated 100 times
@pytest.mark.asyncio
async def test_coin_announcement_duplicate_consumed(self, two_nodes):
def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp] * 100}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
return bundle
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
    # this test makes sure that one spend successfully asserts the announcement
    # from another spend, even though the create announcement is duplicated 100 times
@pytest.mark.asyncio
async def test_coin_duplicate_announcement_consumed(self, two_nodes):
def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2] * 100}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
return bundle
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_double_spend(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
spend_bundle1 = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
assert spend_bundle1 is not None
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
status, err = await respond_transaction(full_node_1, tx1, peer)
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
spend_bundle2 = generate_test_spend_bundle(
list(blocks[-1].get_included_reward_coins())[0],
new_puzzle_hash=BURN_PUZZLE_HASH_2,
)
assert spend_bundle2 is not None
tx2: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle2)
status, err = await respond_transaction(full_node_1, tx2, peer)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
sb2 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle2.name())
assert sb1 == spend_bundle1
assert sb2 is None
assert status == MempoolInclusionStatus.PENDING
assert err == Err.MEMPOOL_CONFLICT
async def send_sb(self, node, peer, sb):
tx = full_node_protocol.RespondTransaction(sb)
await node.respond_transaction(tx, peer)
async def gen_and_send_sb(self, node, peer, *args, **kwargs):
sb = generate_test_spend_bundle(*args, **kwargs)
assert sb is not None
await self.send_sb(node, peer, sb)
return sb
def assert_sb_in_pool(self, node, sb):
assert sb == node.full_node.mempool_manager.get_spendbundle(sb.name())
def assert_sb_not_in_pool(self, node, sb):
assert node.full_node.mempool_manager.get_spendbundle(sb.name()) is None
@pytest.mark.asyncio
async def test_double_spend_with_higher_fee(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
coins = iter(blocks[-1].get_included_reward_coins())
coin1, coin2 = next(coins), next(coins)
coins = iter(blocks[-2].get_included_reward_coins())
coin3, coin4 = next(coins), next(coins)
sb1_1 = await self.gen_and_send_sb(full_node_1, peer, coin1)
sb1_2 = await self.gen_and_send_sb(full_node_1, peer, coin1, fee=uint64(1))
# Fee increase is insufficient, the old spendbundle must stay
self.assert_sb_in_pool(full_node_1, sb1_1)
self.assert_sb_not_in_pool(full_node_1, sb1_2)
min_fee_increase = full_node_1.full_node.mempool_manager.get_min_fee_increase()
sb1_3 = await self.gen_and_send_sb(full_node_1, peer, coin1, fee=uint64(min_fee_increase))
# Fee increase is sufficiently high, sb1_1 gets replaced with sb1_3
self.assert_sb_not_in_pool(full_node_1, sb1_1)
self.assert_sb_in_pool(full_node_1, sb1_3)
sb2 = generate_test_spend_bundle(coin2, fee=uint64(min_fee_increase))
sb12 = SpendBundle.aggregate((sb2, sb1_3))
await self.send_sb(full_node_1, peer, sb12)
# Aggregated spendbundle sb12 replaces sb1_3 since it spends a superset
# of coins spent in sb1_3
self.assert_sb_in_pool(full_node_1, sb12)
self.assert_sb_not_in_pool(full_node_1, sb1_3)
sb3 = generate_test_spend_bundle(coin3, fee=uint64(min_fee_increase * 2))
sb23 = SpendBundle.aggregate((sb2, sb3))
await self.send_sb(full_node_1, peer, sb23)
# sb23 must not replace existing sb12 as the former does not spend all
# coins that are spent in the latter (specifically, coin1)
self.assert_sb_in_pool(full_node_1, sb12)
self.assert_sb_not_in_pool(full_node_1, sb23)
await self.send_sb(full_node_1, peer, sb3)
# Adding non-conflicting sb3 should succeed
self.assert_sb_in_pool(full_node_1, sb3)
sb4_1 = generate_test_spend_bundle(coin4, fee=uint64(min_fee_increase))
sb1234_1 = SpendBundle.aggregate((sb12, sb3, sb4_1))
await self.send_sb(full_node_1, peer, sb1234_1)
# sb1234_1 should not be in pool as it decreases total fees per cost
self.assert_sb_not_in_pool(full_node_1, sb1234_1)
sb4_2 = generate_test_spend_bundle(coin4, fee=uint64(min_fee_increase * 2))
sb1234_2 = SpendBundle.aggregate((sb12, sb3, sb4_2))
await self.send_sb(full_node_1, peer, sb1234_2)
# sb1234_2 has a higher fee per cost than its conflicts and should get
# into mempool
self.assert_sb_in_pool(full_node_1, sb1234_2)
self.assert_sb_not_in_pool(full_node_1, sb12)
self.assert_sb_not_in_pool(full_node_1, sb3)
async def condition_tester(
self,
two_nodes,
dic: Dict[ConditionOpcode, List[ConditionWithArgs]],
fee: int = 0,
num_blocks: int = 3,
coin: Optional[Coin] = None,
):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
num_blocks,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + num_blocks)
spend_bundle1 = generate_test_spend_bundle(
coin or list(blocks[-num_blocks + 2].get_included_reward_coins())[0], dic, uint64(fee)
)
assert spend_bundle1 is not None
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
status, err = await respond_transaction(full_node_1, tx1, peer)
return blocks, spend_bundle1, peer, status, err
@pytest.mark.asyncio
async def condition_tester2(self, two_nodes, test_fun: Callable[[Coin, Coin], SpendBundle]):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height if len(blocks) > 0 else -1
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
coin_1 = list(blocks[-2].get_included_reward_coins())[0]
coin_2 = list(blocks[-1].get_included_reward_coins())[0]
bundle = test_fun(coin_1, coin_2)
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(bundle)
status, err = await respond_transaction(full_node_1, tx1, peer)
return blocks, bundle, status, err
@pytest.mark.asyncio
async def test_invalid_block_index(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
cvp = ConditionWithArgs(
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
[int_to_bytes(start_height + 5)],
)
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
# the transaction may become valid later
assert status == MempoolInclusionStatus.PENDING
assert err == Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
@pytest.mark.asyncio
async def test_block_index_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
        # a missing argument makes the condition invalid, so the spend is rejected outright
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_correct_block_index(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(1)])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_block_index_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
# garbage at the end of the argument list is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(1), b"garbage"])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_negative_block_index(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(-1)])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_invalid_block_age(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(5)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
# the transaction may become valid later
assert status == MempoolInclusionStatus.PENDING
assert err == Err.ASSERT_HEIGHT_RELATIVE_FAILED
@pytest.mark.asyncio
async def test_block_age_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
# a condition with a missing argument is invalid outright and can never become valid later
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_correct_block_age(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, num_blocks=4)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_block_age_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
# garbage at the end of the argument list is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(1), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, num_blocks=4)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_negative_block_age(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(-1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, num_blocks=4)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_correct_my_id(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [coin.name()])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_id_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
# garbage at the end of the argument list is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [coin.name(), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_invalid_my_id(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
coin_2 = list(blocks[-2].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [coin_2.name()])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_COIN_ID_FAILED
@pytest.mark.asyncio
async def test_my_id_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_assert_time_exceeds(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
# a timestamp 5 seconds past the current peak should still be earlier than the next block's timestamp
time_now = full_node_1.full_node.blockchain.get_peak().timestamp + 5
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(time_now)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_fail(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_now = full_node_1.full_node.blockchain.get_peak().timestamp + 1000
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(time_now)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_SECONDS_ABSOLUTE_FAILED
@pytest.mark.asyncio
async def test_assert_height_pending(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
print(full_node_1.full_node.blockchain.get_peak())
current_height = full_node_1.full_node.blockchain.get_peak().height
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(current_height + 4)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.PENDING
assert err == Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
@pytest.mark.asyncio
async def test_assert_time_negative(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_now = -1
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(time_now)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_assert_time_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_now = full_node_1.full_node.blockchain.get_peak().timestamp + 5
# garbage at the end of the argument list is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(time_now), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_relative_exceeds(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_relative = 3
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [int_to_bytes(time_relative)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_SECONDS_RELATIVE_FAILED
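# farming a few more transaction blocks moves the chain's timestamp past the
# 3-second relative lock, so resubmitting the same bundle below is accepted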
for i in range(0, 4):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
tx2: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
status, err = await respond_transaction(full_node_1, tx2, peer)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_relative_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_relative = 0
# garbage at the end of the arguments is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [int_to_bytes(time_relative), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_relative_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_assert_time_relative_negative(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_relative = -3
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [int_to_bytes(time_relative)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
# ensure one spend can assert a coin announcement from another spend
@pytest.mark.asyncio
async def test_correct_coin_announcement_consumed(self, two_nodes):
def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
announce = Announcement(coin_2.name(), b"test")
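# note (assumption, per the Announcement helper): announce.name() hashes the announcing
# coin's id together with the message, so the assert below only matches a
# CREATE_COIN_ANNOUNCEMENT made by coin_2 with this exact message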
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
return bundle
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
# ensure one spend can assert a coin announcement from another spend, even
# though the conditions have garbage (ignored) at the end
@pytest.mark.asyncio
async def test_coin_announcement_garbage(self, two_nodes):
def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
announce = Announcement(coin_2.name(), b"test")
# garbage at the end is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name(), b"garbage"])
dic = {cvp.opcode: [cvp]}
# garbage at the end is ignored
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test", b"garbage"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
return bundle
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_coin_announcement_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
# missing arg here
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
assert full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name()) is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_coin_announcement_missing_arg2(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
# missing arg here
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
assert full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name()) is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_coin_announcement_too_big(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.name(), bytes([1] * 10000))
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
assert full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name()) is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
blocks = bt.get_consecutive_blocks(
1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=bundle
)
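# the bundle is also invalid at the blockchain level, so a block carrying it is
# expected to be rejected by receive_block below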
try:
await full_node_1.full_node.blockchain.receive_block(blocks[-1])
assert False
except AssertionError:
pass
# ensure an assert coin announcement is rejected if it doesn't match the
# create announcement
@pytest.mark.asyncio
async def test_invalid_coin_announcement_rejected(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
# mismatching message
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_COIN_ANNOUNCEMENT,
[b"wrong test"],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
@pytest.mark.asyncio
async def test_invalid_coin_announcement_rejected_two(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_1.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
# coin 2 is making the announcement: right message, wrong coin
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
@pytest.mark.asyncio
async def test_correct_puzzle_announcement(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, bytes(0x80))
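# note that bytes(0x80) is 128 zero bytes, not b"\x80"; the same value is used in the
# CREATE_PUZZLE_ANNOUNCEMENT below, so the announcement still matches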
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, [bytes(0x80)])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_puzzle_announcement_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, bytes(0x80))
# garbage at the end is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name(), b"garbage"])
dic = {cvp.opcode: [cvp]}
# garbage at the end is ignored
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, [bytes(0x80), b"garbage"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_puzzle_announcement_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
# missing arg here
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT,
[b"test"],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_puzzle_announcement_missing_arg2(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
# missing arg here
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT,
[],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_invalid_puzzle_announcement_rejected(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, bytes("test", "utf-8"))
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT,
[b"wrong test"],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
@pytest.mark.asyncio
async def test_invalid_puzzle_announcement_rejected_two(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
# wrong announcement type: CREATE_COIN_ANNOUNCEMENT instead of CREATE_PUZZLE_ANNOUNCEMENT
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_COIN_ANNOUNCEMENT,
[b"test"],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
@pytest.mark.asyncio
async def test_assert_fee_condition(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert mempool_bundle is not None
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_fee_condition_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
# garbage at the end of the arguments is ignored
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert mempool_bundle is not None
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_fee_condition_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_assert_fee_condition_negative_fee(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(-1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
assert status == MempoolInclusionStatus.FAILED
assert err == Err.RESERVE_FEE_CONDITION_FAILED
blocks = bt.get_consecutive_blocks(
1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=spend_bundle1
)
assert full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name()) is None
assert (await full_node_1.full_node.blockchain.receive_block(blocks[-1]))[1] == Err.RESERVE_FEE_CONDITION_FAILED
@pytest.mark.asyncio
async def test_assert_fee_condition_fee_too_large(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
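# a RESERVE_FEE of 2**64 can never be met (it also exceeds the uint64 range), so the
# bundle must fail both in the mempool and at block validation below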
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(2 ** 64)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
assert status == MempoolInclusionStatus.FAILED
assert err == Err.RESERVE_FEE_CONDITION_FAILED
blocks = bt.get_consecutive_blocks(
1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=spend_bundle1
)
assert full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name()) is None
assert (await full_node_1.full_node.blockchain.receive_block(blocks[-1]))[1] == Err.RESERVE_FEE_CONDITION_FAILED
@pytest.mark.asyncio
async def test_assert_fee_condition_wrong_fee(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=9)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.RESERVE_FEE_CONDITION_FAILED
@pytest.mark.asyncio
async def test_stealing_fee(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
5,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
full_node_1, full_node_2, server_1, server_2 = two_nodes
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 5)
receiver_puzzlehash = BURN_PUZZLE_HASH
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
dic = {cvp.opcode: [cvp]}
fee = 9
coin_1 = list(blocks[-2].get_included_reward_coins())[0]
coin_2 = None
for coin in list(blocks[-1].get_included_reward_coins()):
if coin.amount == coin_1.amount:
coin_2 = coin
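# spend_bundle1 asserts RESERVE_FEE 10 but only contributes a fee of 9; the "stealing"
# spend pays out more than coin_2 is worth, dragging the aggregate fee down to 4, so
# the reserve-fee condition cannot be satisfied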
spend_bundle1 = generate_test_spend_bundle(coin_1, dic, uint64(fee))
steal_fee_spendbundle = WALLET_A.generate_signed_transaction(
coin_1.amount + fee - 4, receiver_puzzlehash, coin_2
)
assert spend_bundle1 is not None
assert steal_fee_spendbundle is not None
combined = SpendBundle.aggregate([spend_bundle1, steal_fee_spendbundle])
assert combined.fees() == 4
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
status, err = await respond_transaction(full_node_1, tx1, peer)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.RESERVE_FEE_CONDITION_FAILED
@pytest.mark.asyncio
async def test_double_spend_same_bundle(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
coin = list(blocks[-1].get_included_reward_coins())[0]
spend_bundle1 = generate_test_spend_bundle(coin)
assert spend_bundle1 is not None
spend_bundle2 = generate_test_spend_bundle(
coin,
new_puzzle_hash=BURN_PUZZLE_HASH_2,
)
assert spend_bundle2 is not None
spend_bundle_combined = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle_combined)
status, err = await respond_transaction(full_node_1, tx, peer)
sb = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle_combined.name())
assert sb is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.DOUBLE_SPEND
@pytest.mark.asyncio
async def test_agg_sig_condition(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
# this code has been changed to use generate_test_spend_bundle
# not quite sure why all the gymnastics are being performed
coin = list(blocks[-1].get_included_reward_coins())[0]
spend_bundle_0 = generate_test_spend_bundle(coin)
unsigned: List[CoinSpend] = spend_bundle_0.coin_spends
assert len(unsigned) == 1
coin_spend: CoinSpend = unsigned[0]
err, con, cost = conditions_for_solution(coin_spend.puzzle_reveal, coin_spend.solution, INFINITE_COST)
assert con is not None
# TODO(straya): fix this test
# puzzle, solution = list(coin_spend.solution.as_iter())
# conditions_dict = conditions_by_opcode(con)
# pkm_pairs = pkm_pairs_for_conditions_dict(conditions_dict, coin_spend.coin.name())
# assert len(pkm_pairs) == 1
#
# assert pkm_pairs[0][1] == solution.rest().first().get_tree_hash() + coin_spend.coin.name()
#
# spend_bundle = WALLET_A.sign_transaction(unsigned)
# assert spend_bundle is not None
#
# tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle)
# await full_node_1.respond_transaction(tx, peer)
#
# sb = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle.name())
# assert sb is spend_bundle
@pytest.mark.asyncio
async def test_correct_my_parent(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PARENT_ID, [coin.parent_coin_info])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_parent_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
# garbage at the end of the arguments list is allowed but stripped
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PARENT_ID, [coin.parent_coin_info, b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_parent_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PARENT_ID, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_invalid_my_parent(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
coin_2 = list(blocks[-2].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PARENT_ID, [coin_2.parent_coin_info])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_PARENT_ID_FAILED
@pytest.mark.asyncio
async def test_correct_my_puzhash(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PUZZLEHASH, [coin.puzzle_hash])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_puzhash_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
# garbage at the end of the arguments list is allowed but stripped
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PUZZLEHASH, [coin.puzzle_hash, b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_puzhash_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PUZZLEHASH, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_invalid_my_puzhash(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PUZZLEHASH, [Program.to([]).get_tree_hash()])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_PUZZLEHASH_FAILED
@pytest.mark.asyncio
async def test_correct_my_amount(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(coin.amount)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_amount_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
# garbage at the end of the arguments list is allowed but stripped
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(coin.amount), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_amount_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_invalid_my_amount(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(1000)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_AMOUNT_FAILED
@pytest.mark.asyncio
async def test_negative_my_amount(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(-1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_AMOUNT_FAILED
@pytest.mark.asyncio
async def test_my_amount_too_large(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(2 ** 64)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_AMOUNT_FAILED
# the following tests generate generator programs and run them through get_name_puzzle_conditions()
COST_PER_BYTE = 12000
MAX_BLOCK_COST_CLVM = 11000000000
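# these mirror the mainnet consensus defaults: COST_PER_BYTE is the CLVM cost charged per
# byte of the serialized generator and MAX_BLOCK_COST_CLVM is the per-block cost limit;
# both are passed to get_name_puzzle_conditions() below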
def generator_condition_tester(
conditions: str,
*,
safe_mode: bool = False,
quote: bool = True,
max_cost: int = MAX_BLOCK_COST_CLVM,
) -> NPCResult:
prg = f"(q ((0x0101010101010101010101010101010101010101010101010101010101010101 {"(q " if quote else ""} {conditions} {")" if quote else ""} 123 (() (q . ())))))" # noqa
print(f"program: {prg}")
program = SerializedProgram.from_bytes(binutils.assemble(prg).as_bin())
generator = BlockGenerator(program, [])
print(f"len: {len(bytes(program))}")
npc_result: NPCResult = get_name_puzzle_conditions(
generator, max_cost, cost_per_byte=COST_PER_BYTE, safe_mode=safe_mode
)
return npc_result
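# a minimal usage sketch (illustrative values): a single ASSERT_SECONDS_RELATIVE
# condition (opcode 80) run through the helper above should parse into one spend
# with one condition, e.g.:
#
#   npc_result = generator_condition_tester("(80 100)")
#   assert npc_result.error is None
#   assert len(npc_result.npc_list) == 1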
class TestGeneratorConditions:
def test_invalid_condition_args_terminator(self):
# note how the condition argument list isn't correctly terminated with a
# NIL atom. This is allowed, and all arguments beyond the ones we look
# at are ignored, including the termination of the list
npc_result = generator_condition_tester("(80 50 . 1)")
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode(bytes([80]))
assert len(npc_result.npc_list[0].conditions) == 1
assert npc_result.npc_list[0].conditions[0][0] == opcode
assert len(npc_result.npc_list[0].conditions[0][1]) == 1
c = npc_result.npc_list[0].conditions[0][1][0]
assert c == ConditionWithArgs(opcode=ConditionOpcode.ASSERT_SECONDS_RELATIVE, vars=[bytes([50])])
def test_invalid_condition_list_terminator(self):
# note how the list of conditions isn't correctly terminated with a
# NIL atom. This is a failure
npc_result = generator_condition_tester("(80 50) . 3")
assert npc_result.error in [Err.INVALID_CONDITION.value, Err.GENERATOR_RUNTIME_ERROR.value]
def test_duplicate_height_time_conditions(self):
# ASSERT_SECONDS_RELATIVE
# ASSERT_SECONDS_ABSOLUTE
# ASSERT_HEIGHT_RELATIVE
# ASSERT_HEIGHT_ABSOLUTE
for cond in [80, 81, 82, 83]:
# even though the generator outputs multiple conditions, we only
# need to return the highest one (i.e. most strict)
npc_result = generator_condition_tester(" ".join([f"({cond} {i})" for i in range(50, 101)]))
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode(bytes([cond]))
max_arg = 0
assert npc_result.npc_list[0].conditions[0][0] == opcode
for c in npc_result.npc_list[0].conditions[0][1]:
assert c.opcode == opcode
max_arg = max(max_arg, int_from_bytes(c.vars[0]))
assert max_arg == 100
def test_just_announcement(self):
# CREATE_COIN_ANNOUNCEMENT
# CREATE_PUZZLE_ANNOUNCEMENT
for cond in [60, 62]:
message = "a" * 1024
# announcements are validated on the Rust side and never returned
# back. They are either satisfied or cause an immediate failure
npc_result = generator_condition_tester(f'({cond} "{message}") ' * 50)
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
# create-announcements and assert-announcements are dropped once
# validated
assert npc_result.npc_list[0].conditions == []
def test_assert_announcement_fail(self):
# ASSERT_COIN_ANNOUNCEMENT
# ASSERT_PUZZLE_ANNOUNCEMENT
for cond in [61, 63]:
message = "a" * 1024
# announcements are validated on the Rust side and never returned
# back. They are either satisfied or cause an immediate failure
# in this test we just assert announcements, we never make them, so
# these should fail
npc_result = generator_condition_tester(f'({cond} "{message}") ')
assert npc_result.error == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED.value
assert npc_result.npc_list == []
def test_multiple_reserve_fee(self):
# RESERVE_FEE
cond = 52
# even though the generator outputs 3 conditions, we only need to return one copy
# with all the fees accumulated
npc_result = generator_condition_tester(f"({cond} 100) " * 3)
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode(bytes([cond]))
reserve_fee = 0
assert len(npc_result.npc_list[0].conditions) == 1
assert npc_result.npc_list[0].conditions[0][0] == opcode
for c in npc_result.npc_list[0].conditions[0][1]:
assert c.opcode == opcode
reserve_fee += int_from_bytes(c.vars[0])
assert reserve_fee == 300
assert len(npc_result.npc_list[0].conditions[0][1]) == 1
def test_duplicate_outputs(self):
# CREATE_COIN
# creating multiple coins with the same properties (same parent, same
# target puzzle hash and same amount) is not allowed. That's a consensus
# failure.
puzzle_hash = "abababababababababababababababab"
npc_result = generator_condition_tester(f'(51 "{puzzle_hash}" 10) ' * 2)
assert npc_result.error == Err.DUPLICATE_OUTPUT.value
assert npc_result.npc_list == []
def test_create_coin_cost(self):
# CREATE_COIN
puzzle_hash = "abababababababababababababababab"
# this max cost is exactly enough for the create coin condition
npc_result = generator_condition_tester(
f'(51 "{puzzle_hash}" 10) ', max_cost=20470 + 95 * COST_PER_BYTE + 1800000
)
assert npc_result.error is None
assert npc_result.clvm_cost == 20470
assert len(npc_result.npc_list) == 1
# if we subtract one from max cost, this should fail
npc_result = generator_condition_tester(
f'(51 "{puzzle_hash}" 10) ', max_cost=20470 + 95 * COST_PER_BYTE + 1800000 - 1
)
assert npc_result.error in [Err.BLOCK_COST_EXCEEDS_MAX.value, Err.INVALID_BLOCK_COST.value]
def test_agg_sig_cost(self):
# AGG_SIG_ME
pubkey = "abababababababababababababababababababababababab"
# this max cost is exactly enough for the AGG_SIG condition
npc_result = generator_condition_tester(
f'(49 "{pubkey}" "foobar") ', max_cost=20512 + 117 * COST_PER_BYTE + 1200000
)
assert npc_result.error is None
assert npc_result.clvm_cost == 20512
assert len(npc_result.npc_list) == 1
# if we subtract one from max cost, this should fail
npc_result = generator_condition_tester(
f'(49 "{pubkey}" "foobar") ', max_cost=20512 + 117 * COST_PER_BYTE + 1200000 - 1
)
assert npc_result.error in [Err.BLOCK_COST_EXCEEDS_MAX.value, Err.INVALID_BLOCK_COST.value]
def test_create_coin_different_parent(self):
# if the coins we create have different parents, they are never
# considered duplicate, even when they have the same puzzle hash and
# amount
puzzle_hash = "abababababababababababababababab"
program = SerializedProgram.from_bytes(
binutils.assemble(
f'(q ((0x0101010101010101010101010101010101010101010101010101010101010101 (q (51 "{puzzle_hash}" 10)) 123 (() (q . ())))(0x0101010101010101010101010101010101010101010101010101010101010102 (q (51 "{puzzle_hash}" 10)) 123 (() (q . ()))) ))' # noqa
).as_bin()
)
generator = BlockGenerator(program, [])
npc_result: NPCResult = get_name_puzzle_conditions(
generator, MAX_BLOCK_COST_CLVM, cost_per_byte=COST_PER_BYTE, safe_mode=False
)
assert npc_result.error is None
assert len(npc_result.npc_list) == 2
opcode = ConditionOpcode.CREATE_COIN
for c in npc_result.npc_list:
assert c.conditions == [
(
opcode.value,
[ConditionWithArgs(opcode, [puzzle_hash.encode("ascii"), bytes([10]), b""])],
)
]
def test_create_coin_different_puzzhash(self):
# CREATE_COIN
# coins with different puzzle hashes are not considered duplicate
puzzle_hash_1 = "abababababababababababababababab"
puzzle_hash_2 = "cbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcb"
npc_result = generator_condition_tester(f'(51 "{puzzle_hash_1}" 5) (51 "{puzzle_hash_2}" 5)')
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode.CREATE_COIN
assert (
ConditionWithArgs(opcode, [puzzle_hash_1.encode("ascii"), bytes([5]), b""])
in npc_result.npc_list[0].conditions[0][1]
)
assert (
ConditionWithArgs(opcode, [puzzle_hash_2.encode("ascii"), bytes([5]), b""])
in npc_result.npc_list[0].conditions[0][1]
)
def test_create_coin_different_amounts(self):
# CREATE_COIN
# coins with different amounts are not considered duplicate
puzzle_hash = "abababababababababababababababab"
npc_result = generator_condition_tester(f'(51 "{puzzle_hash}" 5) (51 "{puzzle_hash}" 4)')
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode.CREATE_COIN
assert (
ConditionWithArgs(opcode, [puzzle_hash.encode("ascii"), bytes([5]), b""])
in npc_result.npc_list[0].conditions[0][1]
)
assert (
ConditionWithArgs(opcode, [puzzle_hash.encode("ascii"), bytes([4]), b""])
in npc_result.npc_list[0].conditions[0][1]
)
def test_create_coin_with_hint(self):
# CREATE_COIN
puzzle_hash_1 = "abababababababababababababababab"
hint = "12341234123412341234213421341234"
npc_result = generator_condition_tester(f'(51 "{puzzle_hash_1}" 5 ("{hint}"))')
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode.CREATE_COIN
assert npc_result.npc_list[0].conditions[0][1][0] == ConditionWithArgs(
opcode, [puzzle_hash_1.encode("ascii"), bytes([5]), hint.encode("ascii")]
)
def test_unknown_condition(self):
for sm in [True, False]:
for c in ['(1 100 "foo" "bar")', "(100)", "(1 1) (2 2) (3 3)", '("foobar")']:
npc_result = generator_condition_tester(c, safe_mode=sm)
print(npc_result)
if sm:
assert npc_result.error == Err.INVALID_CONDITION.value
assert npc_result.npc_list == []
else:
assert npc_result.error is None
assert npc_result.npc_list[0].conditions == []
# the tests below are malicious generator programs
# this program:
# (mod (A B)
# (defun large_string (V N)
# (if N (large_string (concat V V) (- N 1)) V)
# )
# (defun iter (V N)
# (if N (c V (iter V (- N 1))) ())
# )
# (iter (c (q . 83) (c (concat (large_string 0x00 A) (q . 100)) ())) B)
# )
# with A=28 and B specified as {num}
SINGLE_ARG_INT_COND = "(a (q 2 4 (c 2 (c (c (q . {opcode}) (c (concat (a 6 (c 2 (c (q . {filler}) (c 5 ())))) (q . {val})) ())) (c 11 ())))) (c (q (a (i 11 (q 4 5 (a 4 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) 2 (i 11 (q 2 6 (c 2 (c (concat 5 5) (c (- 11 (q . 1)) ())))) (q . 5)) 1) (q 28 {num})))" # noqa
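# the tests below instantiate this template with str.format(), e.g.
#   SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0x00")
# producing a generator that emits {num} copies of the condition, each carrying a huge
# integer argument built by repeatedly doubling the filler byte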
# this program:
# (mod (A B)
# (defun large_string (V N)
# (if N (large_string (concat V V) (- N 1)) V)
# )
# (defun iter (V N)
# (if N (c (c (q . 83) (c V ())) (iter (substr V 1) (- N 1))) ())
# )
# (iter (concat (large_string 0x00 A) (q . 100)) B)
# )
# truncates the first byte of the large string being passed down for each
# iteration, in an attempt to defeat any caching of integers by node ID.
# substr is cheap, and no memory is copied, so we can perform a lot of these
SINGLE_ARG_INT_SUBSTR_COND = "(a (q 2 4 (c 2 (c (concat (a 6 (c 2 (c (q . {filler}) (c 5 ())))) (q . {val})) (c 11 ())))) (c (q (a (i 11 (q 4 (c (q . {opcode}) (c 5 ())) (a 4 (c 2 (c (substr 5 (q . 1)) (c (- 11 (q . 1)) ()))))) ()) 1) 2 (i 11 (q 2 6 (c 2 (c (concat 5 5) (c (- 11 (q . 1)) ())))) (q . 5)) 1) (q 28 {num})))" # noqa
# this program:
# (mod (A B)
# (defun large_string (V N)
# (if N (large_string (concat V V) (- N 1)) V)
# )
# (defun iter (V N)
# (if N (c (c (q . 83) (c V ())) (iter (substr V 0 (- (strlen V) 1)) (- N 1))) ())
# )
# (iter (concat (large_string 0x00 A) (q . 0xffffffff)) B)
# )
SINGLE_ARG_INT_SUBSTR_TAIL_COND = "(a (q 2 4 (c 2 (c (concat (a 6 (c 2 (c (q . {filler}) (c 5 ())))) (q . {val})) (c 11 ())))) (c (q (a (i 11 (q 4 (c (q . {opcode}) (c 5 ())) (a 4 (c 2 (c (substr 5 () (- (strlen 5) (q . 1))) (c (- 11 (q . 1)) ()))))) ()) 1) 2 (i 11 (q 2 6 (c 2 (c (concat 5 5) (c (- 11 (q . 1)) ())))) (q . 5)) 1) (q 25 {num})))" # noqa
# (mod (A B)
# (defun large_string (V N)
# (if N (large_string (concat V V) (- N 1)) V)
# )
# (defun iter (V N)
# (if N (c (c (q . 83) (c (concat V N) ())) (iter V (- N 1))) ())
# )
# (iter (large_string 0x00 A) B)
# )
SINGLE_ARG_INT_LADDER_COND = "(a (q 2 4 (c 2 (c (a 6 (c 2 (c (q . {filler}) (c 5 ())))) (c 11 ())))) (c (q (a (i 11 (q 4 (c (q . {opcode}) (c (concat 5 11) ())) (a 4 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) 2 (i 11 (q 2 6 (c 2 (c (concat 5 5) (c (- 11 (q . 1)) ())))) (q . 5)) 1) (q 24 {num})))" # noqa
# this program:
# (mod (A B)
# (defun large_message (N)
# (lsh (q . "a") N)
# )
# (defun iter (V N)
# (if N (c V (iter V (- N 1))) ())
# )
# (iter (c (q . 60) (c (large_message A) ())) B)
# )
# with B set to {num}
CREATE_ANNOUNCE_COND = "(a (q 2 4 (c 2 (c (c (q . {opcode}) (c (a 6 (c 2 (c 5 ()))) ())) (c 11 ())))) (c (q (a (i 11 (q 4 5 (a 4 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) 23 (q . 97) 5) (q 8184 {num})))" # noqa
# this program:
# (mod (A)
# (defun iter (V N)
# (if N (c V (iter V (- N 1))) ())
# )
# (iter (q 51 "abababababababababababababababab" 1) A)
# )
CREATE_COIN = '(a (q 2 2 (c 2 (c (q 51 "abababababababababababababababab" 1) (c 5 ())))) (c (q 2 (i 11 (q 4 5 (a 2 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) (q {num})))' # noqa
# this program:
# (mod (A)
# (defun append (L B)
# (if L
# (c (f L) (append (r L) B))
# (c B ())
# )
# )
# (defun iter (V N)
# (if N (c (append V N) (iter V (- N 1))) ())
# )
# (iter (q 51 "abababababababababababababababab") A)
# )
# creates {num} CREATE_COIN conditions, each with a different amount
CREATE_UNIQUE_COINS = '(a (q 2 6 (c 2 (c (q 51 "abababababababababababababababab") (c 5 ())))) (c (q (a (i 5 (q 4 9 (a 4 (c 2 (c 13 (c 11 ()))))) (q 4 11 ())) 1) 2 (i 11 (q 4 (a 4 (c 2 (c 5 (c 11 ())))) (a 6 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) (q {num})))' # noqa
class TestMaliciousGenerators:
# TODO: create a lot of announcements. The messages can be made different by
# using substr on a large buffer
# for all the height/time locks, we should only return the most strict
# condition, not all of them
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer_ladder(self, opcode):
condition = SINGLE_ARG_INT_LADDER_COND.format(opcode=opcode.value[0], num=28, filler="0x00")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == [
(
opcode,
[ConditionWithArgs(opcode, [int_to_bytes(28)])],
)
]
assert run_time < 1.5
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer(self, opcode):
condition = SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0x00")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == [
(
opcode,
[ConditionWithArgs(opcode, [bytes([100])])],
)
]
assert run_time < 2.5
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer_substr(self, opcode):
condition = SINGLE_ARG_INT_SUBSTR_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0x00")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == [
(
opcode,
[ConditionWithArgs(opcode, [bytes([100])])],
)
]
assert run_time < 3
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer_substr_tail(self, opcode):
condition = SINGLE_ARG_INT_SUBSTR_TAIL_COND.format(
opcode=opcode.value[0], num=280, val="0xffffffff", filler="0x00"
)
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
print(npc_result.npc_list[0].conditions[0][1])
assert ConditionWithArgs(opcode, [int_to_bytes(0xFFFFFFFF)]) in npc_result.npc_list[0].conditions[0][1]
assert run_time < 1
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer_negative(self, opcode):
condition = SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0xff")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
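        # negative height/time locks are treated as trivially satisfied, so no conditions are reported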
assert npc_result.npc_list[0].conditions == []
assert run_time < 2
print(f"run time:{run_time}")
def test_duplicate_reserve_fee(self):
opcode = ConditionOpcode.RESERVE_FEE
condition = SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0x00")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == [
(
opcode.value,
[ConditionWithArgs(opcode, [int_to_bytes(100 * 280000)])],
)
]
assert run_time < 2
print(f"run time:{run_time}")
def test_duplicate_reserve_fee_negative(self):
opcode = ConditionOpcode.RESERVE_FEE
condition = SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=200000, val=100, filler="0xff")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
# RESERVE_FEE conditions fail unconditionally if they have a negative
# amount
assert npc_result.error == Err.RESERVE_FEE_CONDITION_FAILED.value
assert len(npc_result.npc_list) == 0
assert run_time < 1.5
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode", [ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT]
)
def test_duplicate_coin_announces(self, opcode):
condition = CREATE_ANNOUNCE_COND.format(opcode=opcode.value[0], num=5950000)
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
# coin announcements are not propagated to python, but validated in rust
assert len(npc_result.npc_list[0].conditions) == 0
# TODO: optimize clvm to make this run in < 1 second
assert run_time < 16
print(f"run time:{run_time}")
def test_create_coin_duplicates(self):
# CREATE_COIN
        # this program will emit 600000 identical CREATE_COIN conditions. However,
# we'll just end up looking at two of them, and fail at the first
# duplicate
condition = CREATE_COIN.format(num=600000)
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error == Err.DUPLICATE_OUTPUT.value
assert len(npc_result.npc_list) == 0
assert run_time < 2
print(f"run time:{run_time}")
def test_many_create_coin(self):
# CREATE_COIN
# this program will emit many CREATE_COIN conditions, all with different
# amounts.
        # the number 6094 was chosen carefully to not exceed the maximum cost
condition = CREATE_UNIQUE_COINS.format(num=6094)
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert len(npc_result.npc_list[0].conditions) == 1
assert npc_result.npc_list[0].conditions[0][0] == ConditionOpcode.CREATE_COIN.value
assert len(npc_result.npc_list[0].conditions[0][1]) == 6094
assert run_time < 1
print(f"run time:{run_time}")
@pytest.mark.asyncio
async def test_invalid_coin_spend_coin(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
5,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
full_node_1, full_node_2, server_1, server_2 = two_nodes
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_2, blocks[-1].height)
spend_bundle = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
coin_spend_0 = recursive_replace(spend_bundle.coin_spends[0], "coin.puzzle_hash", bytes32([1] * 32))
new_bundle = recursive_replace(spend_bundle, "coin_spends", [coin_spend_0] + spend_bundle.coin_spends[1:])
assert spend_bundle is not None
res = await full_node_1.full_node.respond_transaction(new_bundle, new_bundle.name())
assert res == (MempoolInclusionStatus.FAILED, Err.INVALID_SPEND_BUNDLE)
| import asyncio
import logging
from time import time
from typing import Dict, List, Optional, Tuple, Callable
import pytest
import covid.server.ws_connection as ws
from covid.full_node.mempool import Mempool
from covid.full_node.full_node_api import FullNodeAPI
from covid.protocols import full_node_protocol
from covid.simulator.simulator_protocol import FarmNewBlockProtocol
from covid.types.announcement import Announcement
from covid.types.blockchain_format.coin import Coin
from covid.types.blockchain_format.sized_bytes import bytes32
from covid.types.coin_spend import CoinSpend
from covid.types.condition_opcodes import ConditionOpcode
from covid.types.condition_with_args import ConditionWithArgs
from covid.types.spend_bundle import SpendBundle
from covid.types.mempool_item import MempoolItem
from covid.util.clvm import int_to_bytes
from covid.util.condition_tools import conditions_for_solution
from covid.util.errors import Err
from covid.util.ints import uint64
from covid.util.hash import std_hash
from covid.types.mempool_inclusion_status import MempoolInclusionStatus
from covid.util.api_decorators import api_request, peer_required, bytes_required
from covid.full_node.mempool_check_conditions import get_name_puzzle_conditions
from covid.full_node.pending_tx_cache import PendingTxCache
from blspy import G2Element
from covid.util.recursive_replace import recursive_replace
from tests.connection_utils import connect_and_get_peer
from tests.core.node_height import node_height_at_least
from tests.setup_nodes import bt, setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
from covid.types.blockchain_format.program import Program, INFINITE_COST
from covid.consensus.cost_calculator import NPCResult
from covid.types.blockchain_format.program import SerializedProgram
from clvm_tools import binutils
from covid.types.generator_types import BlockGenerator
from clvm.casts import int_from_bytes
BURN_PUZZLE_HASH = b"0" * 32
BURN_PUZZLE_HASH_2 = b"1" * 32
WALLET_A = bt.get_pool_wallet_tool()
log = logging.getLogger(__name__)
def generate_test_spend_bundle(
coin: Coin,
    condition_dic: Optional[Dict[ConditionOpcode, List[ConditionWithArgs]]] = None,
fee: uint64 = uint64(0),
amount: uint64 = uint64(1000),
new_puzzle_hash=BURN_PUZZLE_HASH,
) -> SpendBundle:
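    """Create a signed spend bundle that spends `coin`, paying `amount` to
    `new_puzzle_hash` with the given conditions and fee."""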
if condition_dic is None:
condition_dic = {}
transaction = WALLET_A.generate_signed_transaction(amount, new_puzzle_hash, coin, condition_dic, fee)
assert transaction is not None
return transaction
@pytest.fixture(scope="module")
def event_loop():
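    # module-scoped event loop, matching the scope of the two_nodes fixture below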
loop = asyncio.get_event_loop()
yield loop
@pytest.fixture(scope="module")
async def two_nodes():
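    # spin up two simulated full nodes and yield (full_node_1, full_node_2, server_1, server_2)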
async_gen = setup_simulators_and_wallets(2, 1, {})
nodes, _ = await async_gen.__anext__()
full_node_1 = nodes[0]
full_node_2 = nodes[1]
server_1 = full_node_1.full_node.server
server_2 = full_node_2.full_node.server
yield full_node_1, full_node_2, server_1, server_2
async for _ in async_gen:
yield _
def make_item(idx: int, cost: uint64 = uint64(80)) -> MempoolItem:
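    # build a minimal MempoolItem whose spend bundle name is 32 bytes of `idx`, with the given cost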
spend_bundle_name = bytes([idx] * 32)
return MempoolItem(
SpendBundle([], G2Element()),
uint64(0),
NPCResult(None, [], cost),
cost,
spend_bundle_name,
[],
[],
SerializedProgram(),
)
class TestPendingTxCache:
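    """Tests for PendingTxCache: a cost-capped holding area for transactions that is
    drained in bulk and evicts the oldest entries (FIFO) when the cost limit is exceeded."""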
def test_recall(self):
c = PendingTxCache(100)
item = make_item(1)
c.add(item)
tx = c.drain()
assert tx == {item.spend_bundle_name: item}
def test_fifo_limit(self):
c = PendingTxCache(200)
# each item has cost 80
items = [make_item(i) for i in range(1, 4)]
for i in items:
c.add(i)
# the max cost is 200, only two transactions will fit
        # we evict items FIFO, so the two most recently added will be left
tx = c.drain()
assert tx == {items[-2].spend_bundle_name: items[-2], items[-1].spend_bundle_name: items[-1]}
def test_drain(self):
c = PendingTxCache(100)
item = make_item(1)
c.add(item)
tx = c.drain()
assert tx == {item.spend_bundle_name: item}
# drain will clear the cache, so a second call will be empty
tx = c.drain()
assert tx == {}
def test_cost(self):
c = PendingTxCache(200)
assert c.cost() == 0
item1 = make_item(1)
c.add(item1)
# each item has cost 80
assert c.cost() == 80
item2 = make_item(2)
c.add(item2)
assert c.cost() == 160
# the first item is evicted, so the cost stays the same
item3 = make_item(3)
c.add(item3)
assert c.cost() == 160
tx = c.drain()
assert tx == {item2.spend_bundle_name: item2, item3.spend_bundle_name: item3}
assert c.cost() == 0
item4 = make_item(4)
c.add(item4)
assert c.cost() == 80
tx = c.drain()
assert tx == {item4.spend_bundle_name: item4}
class TestMempool:
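    """Basic tests of the Mempool object itself (minimum fee-rate queries and the cost limit)."""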
@pytest.mark.asyncio
async def test_basic_mempool(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
3,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
full_node_1, _, server_1, _ = two_nodes
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, blocks[-1].height)
max_mempool_cost = 40000000 * 5
mempool = Mempool(max_mempool_cost)
assert mempool.get_min_fee_rate(104000) == 0
with pytest.raises(ValueError):
mempool.get_min_fee_rate(max_mempool_cost + 1)
spend_bundle = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
assert spend_bundle is not None
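# Module-level helper that mirrors the node's respond_transaction API handler,
# forwarding an extra `test` flag to FullNode.respond_transaction so tests can
# submit spend bundles directly and inspect the (status, error) result.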
@peer_required
@api_request
@bytes_required
async def respond_transaction(
node: FullNodeAPI,
tx: full_node_protocol.RespondTransaction,
peer: ws.WSCovidConnection,
tx_bytes: bytes = b"",
test: bool = False,
) -> Tuple[MempoolInclusionStatus, Optional[Err]]:
"""
Receives a full transaction from peer.
If tx is added to mempool, send tx_id to others. (new_transaction)
"""
assert tx_bytes != b""
spend_name = std_hash(tx_bytes)
if spend_name in node.full_node.full_node_store.pending_tx_request:
node.full_node.full_node_store.pending_tx_request.pop(spend_name)
if spend_name in node.full_node.full_node_store.peers_with_tx:
node.full_node.full_node_store.peers_with_tx.pop(spend_name)
return await node.full_node.respond_transaction(tx.transaction, spend_name, peer, test)
class TestMempoolManager:
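    """End-to-end mempool manager tests: condition validation, announcement handling,
    fee rules, and double-spend/replacement behaviour."""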
@pytest.mark.asyncio
async def test_basic_mempool_manager(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
5,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
full_node_1, full_node_2, server_1, server_2 = two_nodes
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_2, blocks[-1].height)
spend_bundle = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
assert spend_bundle is not None
tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle)
res = await full_node_1.respond_transaction(tx, peer)
log.info(f"Res {res}")
await time_out_assert(
10,
full_node_1.full_node.mempool_manager.get_spendbundle,
spend_bundle,
spend_bundle.name(),
)
# this test makes sure that one spend successfully asserts the announce from
# another spend, even though the assert condition is duplicated 100 times
@pytest.mark.asyncio
async def test_coin_announcement_duplicate_consumed(self, two_nodes):
def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp] * 100}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
return bundle
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
# this test makes sure that one spend successfully asserts the announce from
# another spend, even though the create announcement is duplicated 100 times
@pytest.mark.asyncio
async def test_coin_duplicate_announcement_consumed(self, two_nodes):
def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2] * 100}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
return bundle
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_double_spend(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
spend_bundle1 = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
assert spend_bundle1 is not None
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
status, err = await respond_transaction(full_node_1, tx1, peer)
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
spend_bundle2 = generate_test_spend_bundle(
list(blocks[-1].get_included_reward_coins())[0],
new_puzzle_hash=BURN_PUZZLE_HASH_2,
)
assert spend_bundle2 is not None
tx2: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle2)
status, err = await respond_transaction(full_node_1, tx2, peer)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
sb2 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle2.name())
assert sb1 == spend_bundle1
assert sb2 is None
assert status == MempoolInclusionStatus.PENDING
assert err == Err.MEMPOOL_CONFLICT
async def send_sb(self, node, peer, sb):
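        # wrap the spend bundle in a RespondTransaction message and submit it to the node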
tx = full_node_protocol.RespondTransaction(sb)
await node.respond_transaction(tx, peer)
async def gen_and_send_sb(self, node, peer, *args, **kwargs):
sb = generate_test_spend_bundle(*args, **kwargs)
assert sb is not None
await self.send_sb(node, peer, sb)
return sb
def assert_sb_in_pool(self, node, sb):
assert sb == node.full_node.mempool_manager.get_spendbundle(sb.name())
def assert_sb_not_in_pool(self, node, sb):
assert node.full_node.mempool_manager.get_spendbundle(sb.name()) is None
@pytest.mark.asyncio
async def test_double_spend_with_higher_fee(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
coins = iter(blocks[-1].get_included_reward_coins())
coin1, coin2 = next(coins), next(coins)
coins = iter(blocks[-2].get_included_reward_coins())
coin3, coin4 = next(coins), next(coins)
sb1_1 = await self.gen_and_send_sb(full_node_1, peer, coin1)
sb1_2 = await self.gen_and_send_sb(full_node_1, peer, coin1, fee=uint64(1))
# Fee increase is insufficient, the old spendbundle must stay
self.assert_sb_in_pool(full_node_1, sb1_1)
self.assert_sb_not_in_pool(full_node_1, sb1_2)
min_fee_increase = full_node_1.full_node.mempool_manager.get_min_fee_increase()
sb1_3 = await self.gen_and_send_sb(full_node_1, peer, coin1, fee=uint64(min_fee_increase))
# Fee increase is sufficiently high, sb1_1 gets replaced with sb1_3
self.assert_sb_not_in_pool(full_node_1, sb1_1)
self.assert_sb_in_pool(full_node_1, sb1_3)
sb2 = generate_test_spend_bundle(coin2, fee=uint64(min_fee_increase))
sb12 = SpendBundle.aggregate((sb2, sb1_3))
await self.send_sb(full_node_1, peer, sb12)
# Aggregated spendbundle sb12 replaces sb1_3 since it spends a superset
# of coins spent in sb1_3
self.assert_sb_in_pool(full_node_1, sb12)
self.assert_sb_not_in_pool(full_node_1, sb1_3)
sb3 = generate_test_spend_bundle(coin3, fee=uint64(min_fee_increase * 2))
sb23 = SpendBundle.aggregate((sb2, sb3))
await self.send_sb(full_node_1, peer, sb23)
# sb23 must not replace existing sb12 as the former does not spend all
# coins that are spent in the latter (specifically, coin1)
self.assert_sb_in_pool(full_node_1, sb12)
self.assert_sb_not_in_pool(full_node_1, sb23)
await self.send_sb(full_node_1, peer, sb3)
# Adding non-conflicting sb3 should succeed
self.assert_sb_in_pool(full_node_1, sb3)
sb4_1 = generate_test_spend_bundle(coin4, fee=uint64(min_fee_increase))
sb1234_1 = SpendBundle.aggregate((sb12, sb3, sb4_1))
await self.send_sb(full_node_1, peer, sb1234_1)
# sb1234_1 should not be in pool as it decreases total fees per cost
self.assert_sb_not_in_pool(full_node_1, sb1234_1)
sb4_2 = generate_test_spend_bundle(coin4, fee=uint64(min_fee_increase * 2))
sb1234_2 = SpendBundle.aggregate((sb12, sb3, sb4_2))
await self.send_sb(full_node_1, peer, sb1234_2)
# sb1234_2 has a higher fee per cost than its conflicts and should get
# into mempool
self.assert_sb_in_pool(full_node_1, sb1234_2)
self.assert_sb_not_in_pool(full_node_1, sb12)
self.assert_sb_not_in_pool(full_node_1, sb3)
async def condition_tester(
self,
two_nodes,
dic: Dict[ConditionOpcode, List[ConditionWithArgs]],
fee: int = 0,
num_blocks: int = 3,
coin: Optional[Coin] = None,
):
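        """Farm `num_blocks` reward blocks, build a spend bundle carrying the conditions in
        `dic` (spending `coin` or a freshly farmed reward coin), submit it, and return
        (blocks, spend_bundle, peer, status, err)."""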
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
num_blocks,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + num_blocks)
spend_bundle1 = generate_test_spend_bundle(
coin or list(blocks[-num_blocks + 2].get_included_reward_coins())[0], dic, uint64(fee)
)
assert spend_bundle1 is not None
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
status, err = await respond_transaction(full_node_1, tx1, peer)
return blocks, spend_bundle1, peer, status, err
@pytest.mark.asyncio
async def condition_tester2(self, two_nodes, test_fun: Callable[[Coin, Coin], SpendBundle]):
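        """Farm reward blocks, let `test_fun` build a spend bundle from two reward coins,
        submit it, and return (blocks, bundle, status, err)."""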
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height if len(blocks) > 0 else -1
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
coin_1 = list(blocks[-2].get_included_reward_coins())[0]
coin_2 = list(blocks[-1].get_included_reward_coins())[0]
bundle = test_fun(coin_1, coin_2)
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(bundle)
status, err = await respond_transaction(full_node_1, tx1, peer)
return blocks, bundle, status, err
@pytest.mark.asyncio
async def test_invalid_block_index(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
cvp = ConditionWithArgs(
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
[int_to_bytes(start_height + 5)],
)
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
# the transaction may become valid later
assert status == MempoolInclusionStatus.PENDING
assert err == Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
@pytest.mark.asyncio
async def test_block_index_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
        # a missing argument makes the condition invalid, so the transaction is rejected outright
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_correct_block_index(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(1)])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_block_index_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
# garbage at the end of the argument list is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(1), b"garbage"])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_negative_block_index(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(-1)])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_invalid_block_age(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(5)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
# the transaction may become valid later
assert status == MempoolInclusionStatus.PENDING
assert err == Err.ASSERT_HEIGHT_RELATIVE_FAILED
@pytest.mark.asyncio
async def test_block_age_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
        # a missing argument makes the condition invalid, so the transaction is rejected outright
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_correct_block_age(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, num_blocks=4)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_block_age_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
# garbage at the end of the argument list is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(1), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, num_blocks=4)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_negative_block_age(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(-1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, num_blocks=4)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_correct_my_id(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [coin.name()])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_id_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
# garbage at the end of the argument list is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [coin.name(), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_invalid_my_id(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
coin_2 = list(blocks[-2].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [coin_2.name()])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_COIN_ID_FAILED
@pytest.mark.asyncio
async def test_my_id_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_assert_time_exceeds(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
# 5 seconds should be before the next block
time_now = full_node_1.full_node.blockchain.get_peak().timestamp + 5
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(time_now)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_fail(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_now = full_node_1.full_node.blockchain.get_peak().timestamp + 1000
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(time_now)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_SECONDS_ABSOLUTE_FAILED
@pytest.mark.asyncio
async def test_assert_height_pending(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
print(full_node_1.full_node.blockchain.get_peak())
current_height = full_node_1.full_node.blockchain.get_peak().height
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(current_height + 4)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.PENDING
assert err == Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
@pytest.mark.asyncio
async def test_assert_time_negative(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_now = -1
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(time_now)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_assert_time_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_now = full_node_1.full_node.blockchain.get_peak().timestamp + 5
# garbage at the end of the argument list is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, [int_to_bytes(time_now), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_relative_exceeds(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_relative = 3
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [int_to_bytes(time_relative)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_SECONDS_RELATIVE_FAILED
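        # farm a few transaction blocks so the relative time lock can pass, then resubmit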
for i in range(0, 4):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
tx2: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
status, err = await respond_transaction(full_node_1, tx2, peer)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_relative_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_relative = 0
# garbage at the end of the arguments is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [int_to_bytes(time_relative), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_time_relative_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_assert_time_relative_negative(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
time_relative = -3
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_SECONDS_RELATIVE, [int_to_bytes(time_relative)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
# ensure one spend can assert a coin announcement from another spend
@pytest.mark.asyncio
async def test_correct_coin_announcement_consumed(self, two_nodes):
def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
return bundle
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
# ensure one spend can assert a coin announcement from another spend, even
# though the conditions have garbage (ignored) at the end
@pytest.mark.asyncio
async def test_coin_announcement_garbage(self, two_nodes):
def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
announce = Announcement(coin_2.name(), b"test")
# garbage at the end is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name(), b"garbage"])
dic = {cvp.opcode: [cvp]}
# garbage at the end is ignored
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test", b"garbage"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
return bundle
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_coin_announcement_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
# missing arg here
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
assert full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name()) is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_coin_announcement_missing_arg2(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
# missing arg here
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
assert full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name()) is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_coin_announcement_too_big(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.name(), bytes([1] * 10000))
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
assert full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name()) is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
blocks = bt.get_consecutive_blocks(
1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=bundle
)
        # the block carrying the unsatisfiable announcement assertion must also be rejected
        with pytest.raises(AssertionError):
            await full_node_1.full_node.blockchain.receive_block(blocks[-1])
# ensure an assert coin announcement is rejected if it doesn't match the
# create announcement
@pytest.mark.asyncio
async def test_invalid_coin_announcement_rejected(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
# mismatching message
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_COIN_ANNOUNCEMENT,
[b"wrong test"],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
@pytest.mark.asyncio
async def test_invalid_coin_announcement_rejected_two(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_1.name(), b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
# coin 2 is making the announcement, right message wrong coin
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
@pytest.mark.asyncio
async def test_correct_puzzle_announcement(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, bytes(0x80))
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, [bytes(0x80)])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_puzzle_announcement_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, bytes(0x80))
# garbage at the end is ignored
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name(), b"garbage"])
dic = {cvp.opcode: [cvp]}
# garbage at the end is ignored
cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, [bytes(0x80), b"garbage"])
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is bundle
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_puzzle_announcement_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
# missing arg here
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT,
[b"test"],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_puzzle_announcement_missing_arg2(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
# missing arg here
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT,
[],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_invalid_puzzle_announcement_rejected(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, bytes("test", "utf-8"))
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT,
[b"wrong test"],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
@pytest.mark.asyncio
async def test_invalid_puzzle_announcement_rejected_two(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
def test_fun(coin_1: Coin, coin_2: Coin):
announce = Announcement(coin_2.puzzle_hash, b"test")
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, [announce.name()])
dic = {cvp.opcode: [cvp]}
# Wrong type of Create_announcement
cvp2 = ConditionWithArgs(
ConditionOpcode.CREATE_COIN_ANNOUNCEMENT,
[b"test"],
)
dic2 = {cvp.opcode: [cvp2]}
spend_bundle1 = generate_test_spend_bundle(coin_1, dic)
spend_bundle2 = generate_test_spend_bundle(coin_2, dic2)
return SpendBundle.aggregate([spend_bundle1, spend_bundle2])
blocks, bundle, status, err = await self.condition_tester2(two_nodes, test_fun)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED
@pytest.mark.asyncio
async def test_assert_fee_condition(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert mempool_bundle is not None
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_fee_condition_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
# garbage at the end of the arguments is ignored
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert mempool_bundle is not None
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_assert_fee_condition_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_assert_fee_condition_negative_fee(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(-1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
assert status == MempoolInclusionStatus.FAILED
assert err == Err.RESERVE_FEE_CONDITION_FAILED
blocks = bt.get_consecutive_blocks(
1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=spend_bundle1
)
assert full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name()) is None
assert (await full_node_1.full_node.blockchain.receive_block(blocks[-1]))[1] == Err.RESERVE_FEE_CONDITION_FAILED
@pytest.mark.asyncio
async def test_assert_fee_condition_fee_too_large(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(2 ** 64)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=10)
assert status == MempoolInclusionStatus.FAILED
assert err == Err.RESERVE_FEE_CONDITION_FAILED
blocks = bt.get_consecutive_blocks(
1, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=spend_bundle1
)
assert full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name()) is None
assert (await full_node_1.full_node.blockchain.receive_block(blocks[-1]))[1] == Err.RESERVE_FEE_CONDITION_FAILED
@pytest.mark.asyncio
async def test_assert_fee_condition_wrong_fee(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, fee=9)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.RESERVE_FEE_CONDITION_FAILED
@pytest.mark.asyncio
async def test_stealing_fee(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
5,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
full_node_1, full_node_2, server_1, server_2 = two_nodes
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 5)
receiver_puzzlehash = BURN_PUZZLE_HASH
cvp = ConditionWithArgs(ConditionOpcode.RESERVE_FEE, [int_to_bytes(10)])
dic = {cvp.opcode: [cvp]}
fee = 9
coin_1 = list(blocks[-2].get_included_reward_coins())[0]
coin_2 = None
for coin in list(blocks[-1].get_included_reward_coins()):
if coin.amount == coin_1.amount:
coin_2 = coin
spend_bundle1 = generate_test_spend_bundle(coin_1, dic, uint64(fee))
steal_fee_spendbundle = WALLET_A.generate_signed_transaction(
coin_1.amount + fee - 4, receiver_puzzlehash, coin_2
)
assert spend_bundle1 is not None
assert steal_fee_spendbundle is not None
combined = SpendBundle.aggregate([spend_bundle1, steal_fee_spendbundle])
assert combined.fees() == 4
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
status, err = await respond_transaction(full_node_1, tx1, peer)
mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert mempool_bundle is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.RESERVE_FEE_CONDITION_FAILED
@pytest.mark.asyncio
async def test_double_spend_same_bundle(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
peer = await connect_and_get_peer(server_1, server_2)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
coin = list(blocks[-1].get_included_reward_coins())[0]
spend_bundle1 = generate_test_spend_bundle(coin)
assert spend_bundle1 is not None
spend_bundle2 = generate_test_spend_bundle(
coin,
new_puzzle_hash=BURN_PUZZLE_HASH_2,
)
assert spend_bundle2 is not None
spend_bundle_combined = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle_combined)
status, err = await respond_transaction(full_node_1, tx, peer)
sb = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle_combined.name())
assert sb is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.DOUBLE_SPEND
@pytest.mark.asyncio
async def test_agg_sig_condition(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
# this code has been changed to use generate_test_spend_bundle
# not quite sure why all the gymnastics are being performed
coin = list(blocks[-1].get_included_reward_coins())[0]
spend_bundle_0 = generate_test_spend_bundle(coin)
unsigned: List[CoinSpend] = spend_bundle_0.coin_spends
assert len(unsigned) == 1
coin_spend: CoinSpend = unsigned[0]
err, con, cost = conditions_for_solution(coin_spend.puzzle_reveal, coin_spend.solution, INFINITE_COST)
assert con is not None
# TODO(straya): fix this test
# puzzle, solution = list(coin_spend.solution.as_iter())
# conditions_dict = conditions_by_opcode(con)
# pkm_pairs = pkm_pairs_for_conditions_dict(conditions_dict, coin_spend.coin.name())
# assert len(pkm_pairs) == 1
#
# assert pkm_pairs[0][1] == solution.rest().first().get_tree_hash() + coin_spend.coin.name()
#
# spend_bundle = WALLET_A.sign_transaction(unsigned)
# assert spend_bundle is not None
#
# tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle)
# await full_node_1.respond_transaction(tx, peer)
#
# sb = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle.name())
# assert sb is spend_bundle
@pytest.mark.asyncio
async def test_correct_my_parent(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PARENT_ID, [coin.parent_coin_info])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_parent_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
# garbage at the end of the arguments list is allowed but stripped
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PARENT_ID, [coin.parent_coin_info, b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_parent_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PARENT_ID, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_invalid_my_parent(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
coin_2 = list(blocks[-2].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PARENT_ID, [coin_2.parent_coin_info])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_PARENT_ID_FAILED
@pytest.mark.asyncio
async def test_correct_my_puzhash(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PUZZLEHASH, [coin.puzzle_hash])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_puzhash_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
# garbage at the end of the arguments list is allowed but stripped
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PUZZLEHASH, [coin.puzzle_hash, b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_puzhash_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PUZZLEHASH, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_invalid_my_puzhash(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_PUZZLEHASH, [Program.to([]).get_tree_hash()])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_PUZZLEHASH_FAILED
@pytest.mark.asyncio
async def test_correct_my_amount(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(coin.amount)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_amount_garbage(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
coin = list(blocks[-1].get_included_reward_coins())[0]
# garbage at the end of the arguments list is allowed but stripped
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(coin.amount), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic, coin=coin)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
assert err is None
@pytest.mark.asyncio
async def test_my_amount_missing_arg(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.INVALID_CONDITION
@pytest.mark.asyncio
async def test_invalid_my_amount(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(1000)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_AMOUNT_FAILED
@pytest.mark.asyncio
async def test_negative_my_amount(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(-1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_AMOUNT_FAILED
@pytest.mark.asyncio
async def test_my_amount_too_large(self, two_nodes):
full_node_1, full_node_2, server_1, server_2 = two_nodes
blocks = await full_node_1.get_all_full_blocks()
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_AMOUNT, [int_to_bytes(2 ** 64)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(two_nodes, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
assert err == Err.ASSERT_MY_AMOUNT_FAILED
# the following tests generate generator programs and run them through get_name_puzzle_conditions()
COST_PER_BYTE = 12000
MAX_BLOCK_COST_CLVM = 11000000000
def generator_condition_tester(
conditions: str,
*,
safe_mode: bool = False,
quote: bool = True,
max_cost: int = MAX_BLOCK_COST_CLVM,
) -> NPCResult:
prg = f"(q ((0x0101010101010101010101010101010101010101010101010101010101010101 {'(q ' if quote else ''} {conditions} {')' if quote else ''} 123 (() (q . ())))))" # noqa
print(f"program: {prg}")
program = SerializedProgram.from_bytes(binutils.assemble(prg).as_bin())
generator = BlockGenerator(program, [])
print(f"len: {len(bytes(program))}")
npc_result: NPCResult = get_name_puzzle_conditions(
generator, max_cost, cost_per_byte=COST_PER_BYTE, safe_mode=safe_mode
)
return npc_result
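# A minimal usage sketch of the helper above, which the tests below rely on: a single
# ASSERT_SECONDS_RELATIVE condition (opcode 80) should parse without error, e.g.
#   npc_result = generator_condition_tester("(80 50)")
#   assert npc_result.error is None
#   assert len(npc_result.npc_list) == 1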
class TestGeneratorConditions:
def test_invalid_condition_args_terminator(self):
# note how the condition argument list isn't correctly terminated with a
# NIL atom. This is allowed, and all arguments beyond the ones we look
# at are ignored, including the termination of the list
npc_result = generator_condition_tester("(80 50 . 1)")
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode(bytes([80]))
assert len(npc_result.npc_list[0].conditions) == 1
assert npc_result.npc_list[0].conditions[0][0] == opcode
assert len(npc_result.npc_list[0].conditions[0][1]) == 1
c = npc_result.npc_list[0].conditions[0][1][0]
assert c == ConditionWithArgs(opcode=ConditionOpcode.ASSERT_SECONDS_RELATIVE, vars=[bytes([50])])
def test_invalid_condition_list_terminator(self):
# note how the list of conditions isn't correctly terminated with a
# NIL atom. This is a failure
npc_result = generator_condition_tester("(80 50) . 3")
assert npc_result.error in [Err.INVALID_CONDITION.value, Err.GENERATOR_RUNTIME_ERROR.value]
def test_duplicate_height_time_conditions(self):
# ASSERT_SECONDS_RELATIVE
# ASSERT_SECONDS_ABSOLUTE
# ASSERT_HEIGHT_RELATIVE
# ASSERT_HEIGHT_ABSOLUTE
for cond in [80, 81, 82, 83]:
# even though the generator outputs multiple conditions, we only
# need to return the highest one (i.e. most strict)
npc_result = generator_condition_tester(" ".join([f"({cond} {i})" for i in range(50, 101)]))
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode(bytes([cond]))
max_arg = 0
assert npc_result.npc_list[0].conditions[0][0] == opcode
for c in npc_result.npc_list[0].conditions[0][1]:
assert c.opcode == opcode
max_arg = max(max_arg, int_from_bytes(c.vars[0]))
assert max_arg == 100
def test_just_announcement(self):
# CREATE_COIN_ANNOUNCEMENT
# CREATE_PUZZLE_ANNOUNCEMENT
for cond in [60, 62]:
message = "a" * 1024
# announcements are validated on the Rust side and never returned
            # back. They are either satisfied or cause an immediate failure
npc_result = generator_condition_tester(f'({cond} "{message}") ' * 50)
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
# create-announcements and assert-announcements are dropped once
# validated
assert npc_result.npc_list[0].conditions == []
def test_assert_announcement_fail(self):
# ASSERT_COIN_ANNOUNCEMENT
# ASSERT_PUZZLE_ANNOUNCEMENT
for cond in [61, 63]:
message = "a" * 1024
# announcements are validated on the Rust side and never returned
            # back. They are either satisfied or cause an immediate failure
# in this test we just assert announcements, we never make them, so
# these should fail
npc_result = generator_condition_tester(f'({cond} "{message}") ')
assert npc_result.error == Err.ASSERT_ANNOUNCE_CONSUMED_FAILED.value
assert npc_result.npc_list == []
def test_multiple_reserve_fee(self):
# RESERVE_FEE
cond = 52
# even though the generator outputs 3 conditions, we only need to return one copy
# with all the fees accumulated
npc_result = generator_condition_tester(f"({cond} 100) " * 3)
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode(bytes([cond]))
reserve_fee = 0
assert len(npc_result.npc_list[0].conditions) == 1
assert npc_result.npc_list[0].conditions[0][0] == opcode
for c in npc_result.npc_list[0].conditions[0][1]:
assert c.opcode == opcode
reserve_fee += int_from_bytes(c.vars[0])
assert reserve_fee == 300
assert len(npc_result.npc_list[0].conditions[0][1]) == 1
def test_duplicate_outputs(self):
# CREATE_COIN
# creating multiple coins with the same properties (same parent, same
# target puzzle hash and same amount) is not allowed. That's a consensus
# failure.
puzzle_hash = "abababababababababababababababab"
npc_result = generator_condition_tester(f'(51 "{puzzle_hash}" 10) ' * 2)
assert npc_result.error == Err.DUPLICATE_OUTPUT.value
assert npc_result.npc_list == []
def test_create_coin_cost(self):
# CREATE_COIN
puzzle_hash = "abababababababababababababababab"
# this max cost is exactly enough for the create coin condition
npc_result = generator_condition_tester(
f'(51 "{puzzle_hash}" 10) ', max_cost=20470 + 95 * COST_PER_BYTE + 1800000
)
assert npc_result.error is None
assert npc_result.clvm_cost == 20470
assert len(npc_result.npc_list) == 1
# if we subtract one from max cost, this should fail
npc_result = generator_condition_tester(
f'(51 "{puzzle_hash}" 10) ', max_cost=20470 + 95 * COST_PER_BYTE + 1800000 - 1
)
assert npc_result.error in [Err.BLOCK_COST_EXCEEDS_MAX.value, Err.INVALID_BLOCK_COST.value]
def test_agg_sig_cost(self):
# AGG_SIG_ME
pubkey = "abababababababababababababababababababababababab"
# this max cost is exactly enough for the AGG_SIG condition
npc_result = generator_condition_tester(
f'(49 "{pubkey}" "foobar") ', max_cost=20512 + 117 * COST_PER_BYTE + 1200000
)
assert npc_result.error is None
assert npc_result.clvm_cost == 20512
assert len(npc_result.npc_list) == 1
# if we subtract one from max cost, this should fail
npc_result = generator_condition_tester(
f'(49 "{pubkey}" "foobar") ', max_cost=20512 + 117 * COST_PER_BYTE + 1200000 - 1
)
assert npc_result.error in [Err.BLOCK_COST_EXCEEDS_MAX.value, Err.INVALID_BLOCK_COST.value]
def test_create_coin_different_parent(self):
# if the coins we create have different parents, they are never
# considered duplicate, even when they have the same puzzle hash and
# amount
puzzle_hash = "abababababababababababababababab"
program = SerializedProgram.from_bytes(
binutils.assemble(
f'(q ((0x0101010101010101010101010101010101010101010101010101010101010101 (q (51 "{puzzle_hash}" 10)) 123 (() (q . ())))(0x0101010101010101010101010101010101010101010101010101010101010102 (q (51 "{puzzle_hash}" 10)) 123 (() (q . ()))) ))' # noqa
).as_bin()
)
generator = BlockGenerator(program, [])
npc_result: NPCResult = get_name_puzzle_conditions(
generator, MAX_BLOCK_COST_CLVM, cost_per_byte=COST_PER_BYTE, safe_mode=False
)
assert npc_result.error is None
assert len(npc_result.npc_list) == 2
opcode = ConditionOpcode.CREATE_COIN
for c in npc_result.npc_list:
assert c.conditions == [
(
opcode.value,
[ConditionWithArgs(opcode, [puzzle_hash.encode("ascii"), bytes([10]), b""])],
)
]
def test_create_coin_different_puzzhash(self):
# CREATE_COIN
# coins with different puzzle hashes are not considered duplicate
puzzle_hash_1 = "abababababababababababababababab"
puzzle_hash_2 = "cbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcb"
npc_result = generator_condition_tester(f'(51 "{puzzle_hash_1}" 5) (51 "{puzzle_hash_2}" 5)')
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode.CREATE_COIN
assert (
ConditionWithArgs(opcode, [puzzle_hash_1.encode("ascii"), bytes([5]), b""])
in npc_result.npc_list[0].conditions[0][1]
)
assert (
ConditionWithArgs(opcode, [puzzle_hash_2.encode("ascii"), bytes([5]), b""])
in npc_result.npc_list[0].conditions[0][1]
)
def test_create_coin_different_amounts(self):
# CREATE_COIN
# coins with different amounts are not considered duplicate
puzzle_hash = "abababababababababababababababab"
npc_result = generator_condition_tester(f'(51 "{puzzle_hash}" 5) (51 "{puzzle_hash}" 4)')
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode.CREATE_COIN
assert (
ConditionWithArgs(opcode, [puzzle_hash.encode("ascii"), bytes([5]), b""])
in npc_result.npc_list[0].conditions[0][1]
)
assert (
ConditionWithArgs(opcode, [puzzle_hash.encode("ascii"), bytes([4]), b""])
in npc_result.npc_list[0].conditions[0][1]
)
def test_create_coin_with_hint(self):
# CREATE_COIN
puzzle_hash_1 = "abababababababababababababababab"
hint = "12341234123412341234213421341234"
npc_result = generator_condition_tester(f'(51 "{puzzle_hash_1}" 5 ("{hint}"))')
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
opcode = ConditionOpcode.CREATE_COIN
assert npc_result.npc_list[0].conditions[0][1][0] == ConditionWithArgs(
opcode, [puzzle_hash_1.encode("ascii"), bytes([5]), hint.encode("ascii")]
)
def test_unknown_condition(self):
for sm in [True, False]:
for c in ['(1 100 "foo" "bar")', "(100)", "(1 1) (2 2) (3 3)", '("foobar")']:
npc_result = generator_condition_tester(c, safe_mode=sm)
print(npc_result)
if sm:
assert npc_result.error == Err.INVALID_CONDITION.value
assert npc_result.npc_list == []
else:
assert npc_result.error is None
assert npc_result.npc_list[0].conditions == []
# the tests below are malicious generator programs
# this program:
# (mod (A B)
# (defun large_string (V N)
# (if N (large_string (concat V V) (- N 1)) V)
# )
# (defun iter (V N)
# (if N (c V (iter V (- N 1))) ())
# )
# (iter (c (q . 83) (c (concat (large_string 0x00 A) (q . 100)) ())) B)
# )
# with A=28 and B specified as {num}
SINGLE_ARG_INT_COND = "(a (q 2 4 (c 2 (c (c (q . {opcode}) (c (concat (a 6 (c 2 (c (q . {filler}) (c 5 ())))) (q . {val})) ())) (c 11 ())))) (c (q (a (i 11 (q 4 5 (a 4 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) 2 (i 11 (q 2 6 (c 2 (c (concat 5 5) (c (- 11 (q . 1)) ())))) (q . 5)) 1) (q 28 {num})))" # noqa
# this program:
# (mod (A B)
# (defun large_string (V N)
# (if N (large_string (concat V V) (- N 1)) V)
# )
# (defun iter (V N)
# (if N (c (c (q . 83) (c V ())) (iter (substr V 1) (- N 1))) ())
# )
# (iter (concat (large_string 0x00 A) (q . 100)) B)
# )
# truncates the first byte of the large string being passed down for each
# iteration, in an attempt to defeat any caching of integers by node ID.
# substr is cheap, and no memory is copied, so we can perform a lot of these
SINGLE_ARG_INT_SUBSTR_COND = "(a (q 2 4 (c 2 (c (concat (a 6 (c 2 (c (q . {filler}) (c 5 ())))) (q . {val})) (c 11 ())))) (c (q (a (i 11 (q 4 (c (q . {opcode}) (c 5 ())) (a 4 (c 2 (c (substr 5 (q . 1)) (c (- 11 (q . 1)) ()))))) ()) 1) 2 (i 11 (q 2 6 (c 2 (c (concat 5 5) (c (- 11 (q . 1)) ())))) (q . 5)) 1) (q 28 {num})))" # noqa
# this program:
# (mod (A B)
# (defun large_string (V N)
# (if N (large_string (concat V V) (- N 1)) V)
# )
# (defun iter (V N)
# (if N (c (c (q . 83) (c V ())) (iter (substr V 0 (- (strlen V) 1)) (- N 1))) ())
# )
# (iter (concat (large_string 0x00 A) (q . 0xffffffff)) B)
# )
SINGLE_ARG_INT_SUBSTR_TAIL_COND = "(a (q 2 4 (c 2 (c (concat (a 6 (c 2 (c (q . {filler}) (c 5 ())))) (q . {val})) (c 11 ())))) (c (q (a (i 11 (q 4 (c (q . {opcode}) (c 5 ())) (a 4 (c 2 (c (substr 5 () (- (strlen 5) (q . 1))) (c (- 11 (q . 1)) ()))))) ()) 1) 2 (i 11 (q 2 6 (c 2 (c (concat 5 5) (c (- 11 (q . 1)) ())))) (q . 5)) 1) (q 25 {num})))" # noqa
# (mod (A B)
# (defun large_string (V N)
# (if N (large_string (concat V V) (- N 1)) V)
# )
# (defun iter (V N)
# (if N (c (c (q . 83) (c (concat V N) ())) (iter V (- N 1))) ())
# )
# (iter (large_string 0x00 A) B)
# )
SINGLE_ARG_INT_LADDER_COND = "(a (q 2 4 (c 2 (c (a 6 (c 2 (c (q . {filler}) (c 5 ())))) (c 11 ())))) (c (q (a (i 11 (q 4 (c (q . {opcode}) (c (concat 5 11) ())) (a 4 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) 2 (i 11 (q 2 6 (c 2 (c (concat 5 5) (c (- 11 (q . 1)) ())))) (q . 5)) 1) (q 24 {num})))" # noqa
# this program:
# (mod (A B)
# (defun large_message (N)
# (lsh (q . "a") N)
# )
# (defun iter (V N)
# (if N (c V (iter V (- N 1))) ())
# )
# (iter (c (q . 60) (c (large_message A) ())) B)
# )
# with B set to {num}
CREATE_ANNOUNCE_COND = "(a (q 2 4 (c 2 (c (c (q . {opcode}) (c (a 6 (c 2 (c 5 ()))) ())) (c 11 ())))) (c (q (a (i 11 (q 4 5 (a 4 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) 23 (q . 97) 5) (q 8184 {num})))" # noqa
# this program:
# (mod (A)
# (defun iter (V N)
# (if N (c V (iter V (- N 1))) ())
# )
# (iter (q 51 "abababababababababababababababab" 1) A)
# )
CREATE_COIN = '(a (q 2 2 (c 2 (c (q 51 "abababababababababababababababab" 1) (c 5 ())))) (c (q 2 (i 11 (q 4 5 (a 2 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) (q {num})))' # noqa
# this program:
# (mod (A)
# (defun append (L B)
# (if L
# (c (f L) (append (r L) B))
# (c B ())
# )
# )
# (defun iter (V N)
# (if N (c (append V N) (iter V (- N 1))) ())
# )
# (iter (q 51 "abababababababababababababababab") A)
# )
# creates {num} CREATE_COIN conditions, each with a different amount
CREATE_UNIQUE_COINS = '(a (q 2 6 (c 2 (c (q 51 "abababababababababababababababab") (c 5 ())))) (c (q (a (i 5 (q 4 9 (a 4 (c 2 (c 13 (c 11 ()))))) (q 4 11 ())) 1) 2 (i 11 (q 4 (a 4 (c 2 (c 5 (c 11 ())))) (a 6 (c 2 (c 5 (c (- 11 (q . 1)) ()))))) ()) 1) (q {num})))' # noqa
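# Each generator template above is instantiated with str.format() (filling in the
# opcode/filler/val/num placeholders as applicable) and passed to
# generator_condition_tester(..., quote=False) by the tests below, so the program is
# executed rather than quoted as literal conditions.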
class TestMaliciousGenerators:
# TODO: create a lot of announcements. The messages can be made different by
# using substr on a large buffer
# for all the height/time locks, we should only return the most strict
# condition, not all of them
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer_ladder(self, opcode):
condition = SINGLE_ARG_INT_LADDER_COND.format(opcode=opcode.value[0], num=28, filler="0x00")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == [
(
opcode,
[ConditionWithArgs(opcode, [int_to_bytes(28)])],
)
]
assert run_time < 1.5
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer(self, opcode):
condition = SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0x00")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == [
(
opcode,
[ConditionWithArgs(opcode, [bytes([100])])],
)
]
assert run_time < 2.5
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer_substr(self, opcode):
condition = SINGLE_ARG_INT_SUBSTR_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0x00")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == [
(
opcode,
[ConditionWithArgs(opcode, [bytes([100])])],
)
]
assert run_time < 3
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer_substr_tail(self, opcode):
condition = SINGLE_ARG_INT_SUBSTR_TAIL_COND.format(
opcode=opcode.value[0], num=280, val="0xffffffff", filler="0x00"
)
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
print(npc_result.npc_list[0].conditions[0][1])
assert ConditionWithArgs(opcode, [int_to_bytes(0xFFFFFFFF)]) in npc_result.npc_list[0].conditions[0][1]
assert run_time < 1
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode",
[
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE,
ConditionOpcode.ASSERT_SECONDS_RELATIVE,
],
)
def test_duplicate_large_integer_negative(self, opcode):
condition = SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0xff")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == []
assert run_time < 2
print(f"run time:{run_time}")
def test_duplicate_reserve_fee(self):
opcode = ConditionOpcode.RESERVE_FEE
condition = SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=280000, val=100, filler="0x00")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert npc_result.npc_list[0].conditions == [
(
opcode.value,
[ConditionWithArgs(opcode, [int_to_bytes(100 * 280000)])],
)
]
assert run_time < 2
print(f"run time:{run_time}")
def test_duplicate_reserve_fee_negative(self):
opcode = ConditionOpcode.RESERVE_FEE
condition = SINGLE_ARG_INT_COND.format(opcode=opcode.value[0], num=200000, val=100, filler="0xff")
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
# RESERVE_FEE conditions fail unconditionally if they have a negative
# amount
assert npc_result.error == Err.RESERVE_FEE_CONDITION_FAILED.value
assert len(npc_result.npc_list) == 0
assert run_time < 1.5
print(f"run time:{run_time}")
@pytest.mark.parametrize(
"opcode", [ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT]
)
def test_duplicate_coin_announces(self, opcode):
condition = CREATE_ANNOUNCE_COND.format(opcode=opcode.value[0], num=5950000)
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
# coin announcements are not propagated to python, but validated in rust
assert len(npc_result.npc_list[0].conditions) == 0
# TODO: optimize clvm to make this run in < 1 second
assert run_time < 16
print(f"run time:{run_time}")
def test_create_coin_duplicates(self):
# CREATE_COIN
        # this program will emit 600000 identical CREATE_COIN conditions. However,
# we'll just end up looking at two of them, and fail at the first
# duplicate
condition = CREATE_COIN.format(num=600000)
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error == Err.DUPLICATE_OUTPUT.value
assert len(npc_result.npc_list) == 0
assert run_time < 2
print(f"run time:{run_time}")
def test_many_create_coin(self):
# CREATE_COIN
# this program will emit many CREATE_COIN conditions, all with different
# amounts.
        # the number 6094 was chosen carefully to not exceed the maximum cost
condition = CREATE_UNIQUE_COINS.format(num=6094)
start_time = time()
npc_result = generator_condition_tester(condition, quote=False)
run_time = time() - start_time
assert npc_result.error is None
assert len(npc_result.npc_list) == 1
assert len(npc_result.npc_list[0].conditions) == 1
assert npc_result.npc_list[0].conditions[0][0] == ConditionOpcode.CREATE_COIN.value
assert len(npc_result.npc_list[0].conditions[0][1]) == 6094
assert run_time < 1
print(f"run time:{run_time}")
@pytest.mark.asyncio
async def test_invalid_coin_spend_coin(self, two_nodes):
reward_ph = WALLET_A.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
5,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
)
full_node_1, full_node_2, server_1, server_2 = two_nodes
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_2, blocks[-1].height)
spend_bundle = generate_test_spend_bundle(list(blocks[-1].get_included_reward_coins())[0])
coin_spend_0 = recursive_replace(spend_bundle.coin_spends[0], "coin.puzzle_hash", bytes32([1] * 32))
new_bundle = recursive_replace(spend_bundle, "coin_spends", [coin_spend_0] + spend_bundle.coin_spends[1:])
assert spend_bundle is not None
res = await full_node_1.full_node.respond_transaction(new_bundle, new_bundle.name())
assert res == (MempoolInclusionStatus.FAILED, Err.INVALID_SPEND_BUNDLE)
|
# -*- coding: utf-8 -*-
from typing import List
import pandas as pd
from zvt.api.utils import to_report_period_type, value_to_pct
from zvt.contract import ActorType
from zvt.contract.api import df_to_db
from zvt.contract.recorder import TimestampsDataRecorder
from zvt.domain import Stock, ActorMeta
from zvt.domain.actor.stock_actor import StockTopTenHolder, StockInstitutionalInvestorHolder
from zvt.recorders.em.em_api import get_holder_report_dates, get_holders
from zvt.utils import to_pd_timestamp, to_time_str
class EMStockTopTenRecorder(TimestampsDataRecorder):
entity_provider = 'joinquant'
entity_schema = Stock
provider = 'em'
data_schema = StockTopTenHolder
def init_timestamps(self, entity_item) -> List[pd.Timestamp]:
result = get_holder_report_dates(code=entity_item.code)
if result:
return [to_pd_timestamp(item['END_DATE']) for item in result]
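    # After all report dates of an entity have been recorded, on_finish_entity below
    # back-fills holding_values for rows that only carry a ratio, scaling against an
    # institutional-investor row from the same report date.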
def on_finish_entity(self, entity):
        super().on_finish_entity(entity)
holders = StockTopTenHolder.query_data(entity_id=entity.id,
filters=[StockTopTenHolder.holding_values == None],
session=self.session, return_type='domain')
for holder in holders:
ii = StockInstitutionalInvestorHolder.query_data(entity_id=entity.id,
filters=[
StockInstitutionalInvestorHolder.holding_values > 1,
StockInstitutionalInvestorHolder.holding_ratio > 0.01,
StockInstitutionalInvestorHolder.timestamp == holder.timestamp],
limit=1, return_type='domain')
if ii:
holder.holding_values = holder.holding_ratio * ii[0].holding_values / ii[0].holding_ratio
self.session.commit()
def record(self, entity, start, end, size, timestamps):
for timestamp in timestamps:
the_date = to_time_str(timestamp)
result = get_holders(code=entity.code, end_date=the_date)
if result:
holders = []
new_actors = []
for item in result:
                    # institutional holder
if item['IS_HOLDORG'] == '1':
domains: List[ActorMeta] = ActorMeta.query_data(filters=[ActorMeta.code == item['HOLDER_CODE']],
return_type='domain')
if not domains:
actor_type = ActorType.corporation.value
                            actor = ActorMeta(entity_id=f'{actor_type}_cn_{item["HOLDER_CODE"]}',
                                              id=f'{actor_type}_cn_{item["HOLDER_CODE"]}',
entity_type=actor_type,
exchange='cn',
code=item["HOLDER_CODE"],
name=item["HOLDER_NAME"])
else:
actor = domains[0]
else:
actor_type = ActorType.individual.value
                        actor = ActorMeta(entity_id=f'{actor_type}_cn_{item["HOLDER_NAME"]}',
                                          id=f'{actor_type}_cn_{item["HOLDER_NAME"]}',
entity_type=actor_type,
exchange='cn',
code=item["HOLDER_NAME"],
name=item["HOLDER_NAME"])
new_actors.append(actor.__dict__)
holder = {'id': f'{entity.entity_id}_{the_date}_{actor.entity_id}',
'entity_id': entity.entity_id,
'timestamp': timestamp,
'code': entity.code,
'name': entity.name,
'actor_id': actor.entity_id,
'actor_type': actor.entity_type,
'actor_code': actor.code,
'actor_name': actor.name,
'report_date': timestamp,
'report_period': to_report_period_type(timestamp),
'holding_numbers': item['HOLD_NUM'],
'holding_ratio': value_to_pct(item['HOLD_NUM_RATIO'], default=0)}
holders.append(holder)
if holders:
df = pd.DataFrame.from_records(holders)
df_to_db(data_schema=self.data_schema, df=df, provider=self.provider,
force_update=True)
if new_actors:
df = pd.DataFrame.from_records(new_actors)
df_to_db(data_schema=ActorMeta, df=df, provider=self.provider,
force_update=False)
if __name__ == '__main__':
EMStockTopTenRecorder(codes=['000002']).run()
# the __all__ is generated
__all__ = ['EMStockTopTenRecorder'] | # -*- coding: utf-8 -*-
from typing import List
import pandas as pd
from zvt.api.utils import to_report_period_type, value_to_pct
from zvt.contract import ActorType
from zvt.contract.api import df_to_db
from zvt.contract.recorder import TimestampsDataRecorder
from zvt.domain import Stock, ActorMeta
from zvt.domain.actor.stock_actor import StockTopTenHolder, StockInstitutionalInvestorHolder
from zvt.recorders.em.em_api import get_holder_report_dates, get_holders
from zvt.utils import to_pd_timestamp, to_time_str
class EMStockTopTenRecorder(TimestampsDataRecorder):
entity_provider = 'joinquant'
entity_schema = Stock
provider = 'em'
data_schema = StockTopTenHolder
def init_timestamps(self, entity_item) -> List[pd.Timestamp]:
result = get_holder_report_dates(code=entity_item.code)
if result:
return [to_pd_timestamp(item['END_DATE']) for item in result]
def on_finish_entity(self, entity):
        super().on_finish_entity(entity)
holders = StockTopTenHolder.query_data(entity_id=entity.id,
filters=[StockTopTenHolder.holding_values == None],
session=self.session, return_type='domain')
for holder in holders:
ii = StockInstitutionalInvestorHolder.query_data(entity_id=entity.id,
filters=[
StockInstitutionalInvestorHolder.holding_values > 1,
StockInstitutionalInvestorHolder.holding_ratio > 0.01,
StockInstitutionalInvestorHolder.timestamp == holder.timestamp],
limit=1, return_type='domain')
if ii:
holder.holding_values = holder.holding_ratio * ii[0].holding_values / ii[0].holding_ratio
self.session.commit()
def record(self, entity, start, end, size, timestamps):
for timestamp in timestamps:
the_date = to_time_str(timestamp)
result = get_holders(code=entity.code, end_date=the_date)
if result:
holders = []
new_actors = []
for item in result:
                    # institutional holder
if item['IS_HOLDORG'] == '1':
domains: List[ActorMeta] = ActorMeta.query_data(filters=[ActorMeta.code == item['HOLDER_CODE']],
return_type='domain')
if not domains:
actor_type = ActorType.corporation.value
actor = ActorMeta(entity_id=f'{actor_type}_cn_{item["HOLDER_CODE"]}',
id=f'{actor_type}_cn_{item["HOLDER_CODE"]}',
entity_type=actor_type,
exchange='cn',
code=item["HOLDER_CODE"],
name=item["HOLDER_NAME"])
else:
actor = domains[0]
else:
actor_type = ActorType.individual.value
actor = ActorMeta(entity_id=f'{actor_type}_cn_{item["HOLDER_NAME"]}',
id=f'{actor_type}_cn_{item["HOLDER_NAME"]}',
entity_type=actor_type,
exchange='cn',
code=item["HOLDER_NAME"],
name=item["HOLDER_NAME"])
new_actors.append(actor.__dict__)
holder = {'id': f'{entity.entity_id}_{the_date}_{actor.entity_id}',
'entity_id': entity.entity_id,
'timestamp': timestamp,
'code': entity.code,
'name': entity.name,
'actor_id': actor.entity_id,
'actor_type': actor.entity_type,
'actor_code': actor.code,
'actor_name': actor.name,
'report_date': timestamp,
'report_period': to_report_period_type(timestamp),
'holding_numbers': item['HOLD_NUM'],
'holding_ratio': value_to_pct(item['HOLD_NUM_RATIO'], default=0)}
holders.append(holder)
if holders:
df = pd.DataFrame.from_records(holders)
df_to_db(data_schema=self.data_schema, df=df, provider=self.provider,
force_update=True)
if new_actors:
df = pd.DataFrame.from_records(new_actors)
df_to_db(data_schema=ActorMeta, df=df, provider=self.provider,
force_update=False)
if __name__ == '__main__':
EMStockTopTenRecorder(codes=['000002']).run()
# the __all__ is generated
__all__ = ['EMStockTopTenRecorder'] |
import datetime
import keras
import numpy as np
import tokenization
import tensorflow as tf
import tensorflow_hub as hub
from config import *
def model_train(model_type, train, test, is_training=False):
if model_type == "bert":
bert_layer = hub.KerasLayer(mBERT_MODULE_URL, trainable=True)
else:
bert_layer = hub.KerasLayer(MuRIL_MODULE_URL, trainable=True)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
test_input = bert_encode(test.text.values, tokenizer, max_len=MAX_SEQ_LENGTH)
label_list = list(range(len(train["label"].unique())))
model = build_model(bert_layer, num_classes=len(label_list))
if is_training:
train_input = bert_encode(train.text.values, tokenizer, max_len=MAX_SEQ_LENGTH)
train_labels = keras.utils.to_categorical(
train.label.values, num_classes=len(label_list)
)
checkpoint = tf.keras.callbacks.ModelCheckpoint(
f"{model_type}_model_{datetime.datetime.now().strftime("%Y%m%d%H%M%S")}.h5",
monitor="val_accuracy",
save_best_only=True,
verbose=1,
)
earlystopping = tf.keras.callbacks.EarlyStopping(
monitor="val_accuracy", patience=5, verbose=1
)
model.fit(
train_input,
train_labels,
epochs=NUM_TRAIN_EPOCHS,
callbacks=[checkpoint, earlystopping],
batch_size=BATCH_SIZE,
verbose=1,
)
else:
model.load_weights(f"{model_type}_model.h5")
return model, test_input
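# bert_encode turns raw texts into the three parallel arrays the TF-Hub BERT layer
# expects: token ids, attention masks and segment ids, each padded/truncated to max_len.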
def bert_encode(texts, tokenizer, max_len=512):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[: max_len - 2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len - len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len
pad_masks = [1] * len(input_sequence) + [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segment_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
def build_model(bert_layer, num_classes):
if num_classes == 2:
loss = "binary_crossentropy"
else:
loss = "categorical_crossentropy"
inputs = dict(
input_word_ids=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),
input_mask=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),
input_type_ids=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),
)
output = bert_layer(inputs)
clf_output = output["sequence_output"][:, 0, :]
net = tf.keras.layers.Dense(64, activation="relu")(clf_output)
net = tf.keras.layers.Dropout(0.2)(net)
net = tf.keras.layers.Dense(BATCH_SIZE, activation="relu")(net)
net = tf.keras.layers.Dropout(0.2)(net)
out = tf.keras.layers.Dense(num_classes, activation="softmax")(net)
model = tf.keras.models.Model(inputs=inputs, outputs=out)
model.compile(
tf.keras.optimizers.Adam(lr=LEARNING_RATE),
loss=loss,
metrics=["accuracy"],
)
return model
| import datetime
import keras
import numpy as np
import tokenization
import tensorflow as tf
import tensorflow_hub as hub
from config import *
def model_train(model_type, train, test, is_training=False):
if model_type == "bert":
bert_layer = hub.KerasLayer(mBERT_MODULE_URL, trainable=True)
else:
bert_layer = hub.KerasLayer(MuRIL_MODULE_URL, trainable=True)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
test_input = bert_encode(test.text.values, tokenizer, max_len=MAX_SEQ_LENGTH)
label_list = list(range(len(train["label"].unique())))
model = build_model(bert_layer, num_classes=len(label_list))
if is_training:
train_input = bert_encode(train.text.values, tokenizer, max_len=MAX_SEQ_LENGTH)
train_labels = keras.utils.to_categorical(
train.label.values, num_classes=len(label_list)
)
checkpoint = tf.keras.callbacks.ModelCheckpoint(
f"{model_type}_model_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.h5",
monitor="val_accuracy",
save_best_only=True,
verbose=1,
)
earlystopping = tf.keras.callbacks.EarlyStopping(
monitor="val_accuracy", patience=5, verbose=1
)
model.fit(
train_input,
train_labels,
epochs=NUM_TRAIN_EPOCHS,
callbacks=[checkpoint, earlystopping],
batch_size=BATCH_SIZE,
verbose=1,
)
else:
model.load_weights(f"{model_type}_model.h5")
return model, test_input
def bert_encode(texts, tokenizer, max_len=512):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[: max_len - 2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len - len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence) + [0] * pad_len
pad_masks = [1] * len(input_sequence) + [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segment_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments)
def build_model(bert_layer, num_classes):
if num_classes == 2:
loss = "binary_crossentropy"
else:
loss = "categorical_crossentropy"
inputs = dict(
input_word_ids=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),
input_mask=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),
input_type_ids=tf.keras.layers.Input(shape=(MAX_SEQ_LENGTH,), dtype=tf.int32),
)
output = bert_layer(inputs)
clf_output = output["sequence_output"][:, 0, :]
net = tf.keras.layers.Dense(64, activation="relu")(clf_output)
net = tf.keras.layers.Dropout(0.2)(net)
net = tf.keras.layers.Dense(BATCH_SIZE, activation="relu")(net)
net = tf.keras.layers.Dropout(0.2)(net)
out = tf.keras.layers.Dense(num_classes, activation="softmax")(net)
model = tf.keras.models.Model(inputs=inputs, outputs=out)
model.compile(
tf.keras.optimizers.Adam(lr=LEARNING_RATE),
loss=loss,
metrics=["accuracy"],
)
return model
|
from utility_functions.date_period import date_period, bound_date_check
from utility_functions.benchmark import timer
from utility_functions.databricks_uf import clone
from utility_functions.custom_errors import *
from connect2Databricks.read2Databricks import redshift_cdw_read
from pyspark.sql.functions import split, explode, col, ltrim, rtrim, coalesce, countDistinct, broadcast
from pyspark.sql.types import StructField, StringType, DecimalType, DateType, IntegerType, TimestampType, StructType
from pyspark.sql.window import Window
import pyspark.sql.functions as func
import pickle
from connect2Databricks.spark_init import spark_init
import logging
module_logger = logging.getLogger('CVM.cvm_pre_processing')
if 'spark' not in locals():
print('Environment: Databricks-Connect')
spark, sqlContext, setting = spark_init()
class MarketBasketPullHistory:
def __init__(self,
start_date: str,
period: int,
env: str,
debug: bool = False,
):
self.start_date = start_date
self.period = period
self.env = env
self.debug = debug
def __repr__(self):
return f'Pull history for {self.period} days from the {self.start_date} in {self.env} instance.'
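    # lsg_omni pulls Omniture hit data, explodes prod_list into one row per SKU, drops
    # discontinued SKUs and attaches each SKU's coupon group (falling back to the SKU
    # itself when no group is defined).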
@timer
def lsg_omni(self):
start_date, end_date = date_period(self.period, self.start_date)
table_name = 'datalake_omni.omni_hit_data'
dt_col_name = 'hit_time_gmt_dt_key'
_, bound_end_date = date_period(-1, end_date)
bound_date_check(table_name, dt_col_name, start_date, bound_end_date, self.env, 'YYYYMMDD', 'LSG')
query = 'SELECT ' \
'VS.visit_session_key AS session_key, ' \
'HIT.post_visid_combined AS visit_id, ' \
'HIT.visit_return_count AS visit_number, ' \
'UPPER(TRIM(prod_list)) AS prod_list, ' \
'HIT.hit_time_gmt_ts AS time_stamp, ' \
"TRIM(SUBSTRING(TRIM(DEMANDBASE), 0, POSITION('|' IN TRIM(DEMANDBASE)))) AS " \
"account_no " \
'FROM datalake_omni.omni_hit_data HIT ' \
'LEFT JOIN CDWDS.D_OMNI_VISIT_SESSION VS ON ' \
' VS.VISIT_RETURN_COUNT=HIT.VISIT_RETURN_COUNT AND VS.POST_VISID_COMBINED=HIT.POST_VISID_COMBINED ' \
f'WHERE HIT.hit_time_gmt_dt_key<{start_date} AND HIT.hit_time_gmt_dt_key>={end_date} ' \
'AND HIT.post_visid_combined IS NOT NULL ' \
"AND prod_list IS NOT NULL AND prod_list NOT LIKE '%shipping-handling%' " \
"AND TRIM(SUBSTRING(TRIM(DEMANDBASE), 0, POSITION('|' IN TRIM(DEMANDBASE)))) <> '' "
schema = StructType([
StructField('session_key', IntegerType(), True),
StructField('visit_id', StringType(), True),
StructField('visit_number', IntegerType(), True),
StructField('time_stamp', StringType(), True),
StructField('prod_list', StringType(), True),
StructField('account_no', StringType(), True),
])
df = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env, schema = schema). \
withColumn('prod_id_untrimmed', explode(split('prod_list', ','))). \
withColumn('prod_id', ltrim(rtrim(col('prod_id_untrimmed')))). \
drop('prod_id_untrimmed'). \
drop('prod_list'). \
filter(col('prod_id').isNotNull()). \
filter(col('prod_id') != ''). \
distinct()
if self.debug:
print(f'row count for df = {df.count()}')
# find active products
query = 'SELECT sku as prod_id, stk_type_cd '\
'FROM cdwds.lsg_prod_v ' \
"WHERE stk_type_cd = 'D'"
discontinued_prods = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env)
df = df.join(discontinued_prods, ['prod_id'], how = 'left').\
filter(col('stk_type_cd').isNull()).\
drop('stk_type_cd')
if self.debug:
print(f'After filtering out discontinued SKUs, row count for df = {df.count()}')
query = 'SELECT UPPER(sku_nbr) AS prod_id, size_grp AS coupon ' \
'FROM cdwds.f_web_prod_feature ' \
"WHERE size_grp IS NOT NULL AND size_grp <> 'T' " \
'GROUP BY sku_nbr, size_grp'
coupons = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env)
if coupons.count() == 0:
raise DataValidityError('No coupon information. Please check the validity of size_grp column '
'on cdwds.f_web_prod_feature.')
df = df.join(broadcast(coupons), ['prod_id'], how = 'left').\
withColumn('coupon', coalesce('coupon', 'prod_id'))
prod_list = df.select('prod_id').distinct()
coupons = coupons.union(df.select('prod_id', 'coupon')).\
filter(col('prod_id').isNotNull()).\
distinct().\
withColumn("coupon_key", func.dense_rank().over(Window.orderBy('coupon')))
df = df.join(coupons, ['prod_id', 'coupon'], how = 'left')
if self.debug:
coupons.show()
df.show()
            print(f'row count for coupons = {coupons.select(col("coupon_key")).distinct().count()}')
return df, prod_list, coupons
@timer
def ccg_omni(self):
start_date = self.start_date
period = self.period
env = self.env
df = []
prod_list = df.select('prod_id').distinct()
coupons = []
return df, prod_list, coupons
@timer
def lsg_sales(self, prod_list, coupons):
start_date, end_date = date_period(self.period, self.start_date)
# Check bound date
table_name = 'cdwds.lsg_f_sls_invc'
dt_col_name = 'invc_dt_key'
_, bound_end_date = date_period(-1, end_date)
bound_date_check(table_name, dt_col_name, start_date, bound_end_date, self.env, 'YYYYMMDD', 'LSG')
query = 'SELECT '\
'UPPER(prod_prc_ref_sku) AS prod_id, sum(ext_net_sls_pmar_amt) AS sales ' \
                'FROM cdwds.lsg_f_sls_invc I ' \
'LEFT JOIN cdwds.lsg_prod_v P ON P.sku = prod_prc_ref_sku ' \
f'WHERE invc_dt_key<{start_date} AND invc_dt_key>={end_date} ' \
'AND UPPER(prod_prc_ref_sku) IS NOT NULL ' \
"AND P.stk_type_cd <> 'D' " \
f'GROUP BY UPPER(prod_prc_ref_sku)'
sales = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env)
if prod_list:
print(f'There are {prod_list.count()} products.')
sales = sales.\
join(broadcast(prod_list), ['prod_id'], how='inner')
else:
print('Product list is not defined for pulling sales.')
if coupons:
coupons_count = coupons.select("coupon_key").distinct().count()
print(f'There are {coupons_count} rows in coupons table.')
sales = sales. \
join(broadcast(coupons), ['prod_id'], how = 'left'). \
withColumn('coupon', coalesce('coupon', 'prod_id'))
else:
print('Coupons is not defined for pulling sales.')
coupon_sales = sales.groupby('coupon', 'coupon_key').agg({'sales': 'sum'}). \
withColumnRenamed('sum(sales)', 'coupon_sales'). \
filter(col('coupon_sales') > 0)
if sales.count() == 0:
raise OutputOutOfBoundError('Sales count is 0. Check the data validity of cdwds.lsg_f_sls_invc.')
if self.debug:
print(f'Total rows in SKU sales count: {sales.count()}')
print(f'Total number of coupons with sales: {coupon_sales.count()}')
return sales, coupon_sales
@timer
def ccg_sales(self, prod_list, coupons):
start_date, end_date = date_period(self.period, self.start_date)
# Check bound date
table_name = 'cdwds.lsg_f_sls_invc'
dt_col_name = 'invc_dt_key'
_, bound_end_date = date_period(-1, end_date)
bound_date_check(table_name, dt_col_name, start_date, bound_end_date, self.env, 'YYYYMMDD', 'LSG')
query = 'SELECT '\
'UPPER(prod_prc_ref_sku) AS prod_id, sum(ext_net_sls_pmar_amt) AS sales ' \
'FROM cdwds.lsg_f_sls_invc ' \
f'WHERE invc_dt_key<{start_date} AND invc_dt_key>={end_date} ' \
f'and prod_prc_ref_sku IS NOT NULL ' \
f'GROUP BY UPPER(prod_prc_ref_sku)'
sales = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env)
if prod_list:
if self.debug:
print(f'There are {prod_list.count()} products.')
sales = sales.\
join(broadcast(prod_list), ['prod_id'], how='inner')
else:
print('Product list is not defined for pulling sales.')
if coupons:
if self.debug:
print(f'There are {coupons.count()} rows in coupons table.')
sales = sales. \
join(broadcast(coupons), ['prod_id'], how = 'inner'). \
withColumn('coupon', coalesce('coupon', 'prod_id'))
else:
print('Coupons is not defined for pulling sales.')
coupon_sales = sales.groupby('coupon', 'coupon_key').agg({'sales': 'sum'}). \
withColumnRenamed('sum(sales)', 'coupon_sales'). \
filter(col('coupon_sales') > 0)
print(f'Total rows in SKU sales count: {sales.count()}')
print(f'Total number of coupons with sales: {coupon_sales.count()}')
return sales, coupon_sales
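# cvm_pre_processing ties the pulls together for the requested division: it fetches
# clickstream baskets and sales, then keeps only sessions with 2-30 distinct coupons and
# more than one distinct product, filtering out scraper-like and trivial sessions.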
@timer
def cvm_pre_processing(
start_date: str,
period: int,
env: str,
division: str = 'LSG',
debug: bool = False,
):
module_logger.info(f'===== cvm_pre_processing for {division} in {env} : START ======')
module_logger.info(f'===== cvm_pre_processing : start_date = {start_date} ======')
module_logger.info(f'===== cvm_pre_processing : period = {period} days ======')
pull_history = MarketBasketPullHistory(start_date, period, env, debug = debug)
if division == 'LSG':
df, prod_list, coupons = pull_history.lsg_omni()
sales, coupon_sales = pull_history.lsg_sales(prod_list, coupons)
else:
df, prod_list, coupons = pull_history.ccg_omni()
sales, coupon_sales = pull_history.ccg_sales(prod_list, coupons)
# find scraper sessions: sessions with more than 30 clicks
if df:
session_prod_counts = df.groupBy('session_key').\
agg(countDistinct(col('prod_id'))).\
withColumnRenamed('count(DISTINCT prod_id)', 'prod_count')
session_coup_counts = df.groupBy('session_key').\
agg(countDistinct(col('coupon'))).\
withColumnRenamed('count(DISTINCT coupon)', 'coupon_count')
sessions_that_matter = session_prod_counts.\
join(session_coup_counts, ['session_key'], how='inner')
sessions_that_matter = sessions_that_matter. \
filter(col('coupon_count') <= 30). \
filter(col('coupon_count') > 1). \
filter(col('prod_count') > 1)
sessions_that_matter = clone(sessions_that_matter)
df = df.join(broadcast(sessions_that_matter), ['session_key'], how = 'inner').\
withColumnRenamed('session_key', 'basket_key')
sales_count = sales.count()
row_count = df.count()
module_logger.info(f'===== cvm_pre_processing : total_row_count for df = {row_count} ======')
module_logger.info(f'===== cvm_pre_processing : number of SKUs with sales = {sales_count} ======')
module_logger.info('===== cvm_pre_processing : END ======')
return sessions_that_matter, sales, coupon_sales, df
| from utility_functions.date_period import date_period, bound_date_check
from utility_functions.benchmark import timer
from utility_functions.databricks_uf import clone
from utility_functions.custom_errors import *
from connect2Databricks.read2Databricks import redshift_cdw_read
from pyspark.sql.functions import split, explode, col, ltrim, rtrim, coalesce, countDistinct, broadcast
from pyspark.sql.types import StructField, StringType, DecimalType, DateType, IntegerType, TimestampType, StructType
from pyspark.sql.window import Window
import pyspark.sql.functions as func
import pickle
from connect2Databricks.spark_init import spark_init
import logging
module_logger = logging.getLogger('CVM.cvm_pre_processing')
if 'spark' not in locals():
print('Environment: Databricks-Connect')
spark, sqlContext, setting = spark_init()
class MarketBasketPullHistory:
def __init__(self,
start_date: str,
period: int,
env: str,
debug: bool = False,
):
self.start_date = start_date
self.period = period
self.env = env
self.debug = debug
def __repr__(self):
return f'Pull history for {self.period} days from the {self.start_date} in {self.env} instance.'
@timer
def lsg_omni(self):
start_date, end_date = date_period(self.period, self.start_date)
table_name = 'datalake_omni.omni_hit_data'
dt_col_name = 'hit_time_gmt_dt_key'
_, bound_end_date = date_period(-1, end_date)
bound_date_check(table_name, dt_col_name, start_date, bound_end_date, self.env, 'YYYYMMDD', 'LSG')
query = 'SELECT ' \
'VS.visit_session_key AS session_key, ' \
'HIT.post_visid_combined AS visit_id, ' \
'HIT.visit_return_count AS visit_number, ' \
'UPPER(TRIM(prod_list)) AS prod_list, ' \
'HIT.hit_time_gmt_ts AS time_stamp, ' \
"TRIM(SUBSTRING(TRIM(DEMANDBASE), 0, POSITION('|' IN TRIM(DEMANDBASE)))) AS " \
"account_no " \
'FROM datalake_omni.omni_hit_data HIT ' \
'LEFT JOIN CDWDS.D_OMNI_VISIT_SESSION VS ON ' \
' VS.VISIT_RETURN_COUNT=HIT.VISIT_RETURN_COUNT AND VS.POST_VISID_COMBINED=HIT.POST_VISID_COMBINED ' \
f'WHERE HIT.hit_time_gmt_dt_key<{start_date} AND HIT.hit_time_gmt_dt_key>={end_date} ' \
'AND HIT.post_visid_combined IS NOT NULL ' \
"AND prod_list IS NOT NULL AND prod_list NOT LIKE '%shipping-handling%' " \
"AND TRIM(SUBSTRING(TRIM(DEMANDBASE), 0, POSITION('|' IN TRIM(DEMANDBASE)))) <> '' "
schema = StructType([
StructField('session_key', IntegerType(), True),
StructField('visit_id', StringType(), True),
StructField('visit_number', IntegerType(), True),
StructField('time_stamp', StringType(), True),
StructField('prod_list', StringType(), True),
StructField('account_no', StringType(), True),
])
df = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env, schema = schema). \
withColumn('prod_id_untrimmed', explode(split('prod_list', ','))). \
withColumn('prod_id', ltrim(rtrim(col('prod_id_untrimmed')))). \
drop('prod_id_untrimmed'). \
drop('prod_list'). \
filter(col('prod_id').isNotNull()). \
filter(col('prod_id') != ''). \
distinct()
if self.debug:
print(f'row count for df = {df.count()}')
# find active products
query = 'SELECT sku as prod_id, stk_type_cd '\
'FROM cdwds.lsg_prod_v ' \
"WHERE stk_type_cd = 'D'"
discontinued_prods = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env)
df = df.join(discontinued_prods, ['prod_id'], how = 'left').\
filter(col('stk_type_cd').isNull()).\
drop('stk_type_cd')
if self.debug:
print(f'After filtering out discontinued SKUs, row count for df = {df.count()}')
query = 'SELECT UPPER(sku_nbr) AS prod_id, size_grp AS coupon ' \
'FROM cdwds.f_web_prod_feature ' \
"WHERE size_grp IS NOT NULL AND size_grp <> 'T' " \
'GROUP BY sku_nbr, size_grp'
coupons = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env)
if coupons.count() == 0:
raise DataValidityError('No coupon information. Please check the validity of size_grp column '
'on cdwds.f_web_prod_feature.')
df = df.join(broadcast(coupons), ['prod_id'], how = 'left').\
withColumn('coupon', coalesce('coupon', 'prod_id'))
prod_list = df.select('prod_id').distinct()
coupons = coupons.union(df.select('prod_id', 'coupon')).\
filter(col('prod_id').isNotNull()).\
distinct().\
withColumn("coupon_key", func.dense_rank().over(Window.orderBy('coupon')))
df = df.join(coupons, ['prod_id', 'coupon'], how = 'left')
if self.debug:
coupons.show()
df.show()
print(f'row count for coupons = {coupons.select(col("coupon_key")).distinct().count()}')
return df, prod_list, coupons
@timer
def ccg_omni(self):
start_date = self.start_date
period = self.period
env = self.env
df = []
prod_list = df.select('prod_id').distinct()
coupons = []
return df, prod_list, coupons
@timer
def lsg_sales(self, prod_list, coupons):
start_date, end_date = date_period(self.period, self.start_date)
# Check bound date
table_name = 'cdwds.lsg_f_sls_invc'
dt_col_name = 'invc_dt_key'
_, bound_end_date = date_period(-1, end_date)
bound_date_check(table_name, dt_col_name, start_date, bound_end_date, self.env, 'YYYYMMDD', 'LSG')
query = 'SELECT '\
'UPPER(prod_prc_ref_sku) AS prod_id, sum(ext_net_sls_pmar_amt) AS sales ' \
                'FROM cdwds.lsg_f_sls_invc I ' \
'LEFT JOIN cdwds.lsg_prod_v P ON P.sku = prod_prc_ref_sku ' \
f'WHERE invc_dt_key<{start_date} AND invc_dt_key>={end_date} ' \
'AND UPPER(prod_prc_ref_sku) IS NOT NULL ' \
"AND P.stk_type_cd <> 'D' " \
f'GROUP BY UPPER(prod_prc_ref_sku)'
sales = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env)
if prod_list:
print(f'There are {prod_list.count()} products.')
sales = sales.\
join(broadcast(prod_list), ['prod_id'], how='inner')
else:
print('Product list is not defined for pulling sales.')
if coupons:
coupons_count = coupons.select("coupon_key").distinct().count()
print(f'There are {coupons_count} rows in coupons table.')
sales = sales. \
join(broadcast(coupons), ['prod_id'], how = 'left'). \
withColumn('coupon', coalesce('coupon', 'prod_id'))
else:
print('Coupons is not defined for pulling sales.')
coupon_sales = sales.groupby('coupon', 'coupon_key').agg({'sales': 'sum'}). \
withColumnRenamed('sum(sales)', 'coupon_sales'). \
filter(col('coupon_sales') > 0)
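# agg({'sales': 'sum'}) emits a column literally named 'sum(sales)', hence the rename to
# 'coupon_sales'; coupon groups with zero or negative net sales are then dropped.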
if sales.count() == 0:
raise OutputOutOfBoundError('Sales count is 0. Check the data validity of cdwds.lsg_f_sls_invc.')
if self.debug:
print(f'Total rows in SKU sales count: {sales.count()}')
print(f'Total number of coupons with sales: {coupon_sales.count()}')
return sales, coupon_sales
@timer
def ccg_sales(self, prod_list, coupons):
start_date, end_date = date_period(self.period, self.start_date)
# Check bound date
table_name = 'cdwds.lsg_f_sls_invc'
dt_col_name = 'invc_dt_key'
_, bound_end_date = date_period(-1, end_date)
bound_date_check(table_name, dt_col_name, start_date, bound_end_date, self.env, 'YYYYMMDD', 'LSG')
query = 'SELECT '\
'UPPER(prod_prc_ref_sku) AS prod_id, sum(ext_net_sls_pmar_amt) AS sales ' \
'FROM cdwds.lsg_f_sls_invc ' \
f'WHERE invc_dt_key<{start_date} AND invc_dt_key>={end_date} ' \
f'and prod_prc_ref_sku IS NOT NULL ' \
f'GROUP BY UPPER(prod_prc_ref_sku)'
sales = redshift_cdw_read(query, db_type = 'RS', database = 'CDWDS', env = self.env)
if prod_list:
if self.debug:
print(f'There are {prod_list.count()} products.')
sales = sales.\
join(broadcast(prod_list), ['prod_id'], how='inner')
else:
print('Product list is not defined for pulling sales.')
if coupons:
if self.debug:
print(f'There are {coupons.count()} rows in coupons table.')
sales = sales. \
join(broadcast(coupons), ['prod_id'], how = 'inner'). \
withColumn('coupon', coalesce('coupon', 'prod_id'))
else:
print('Coupons is not defined for pulling sales.')
coupon_sales = sales.groupby('coupon', 'coupon_key').agg({'sales': 'sum'}). \
withColumnRenamed('sum(sales)', 'coupon_sales'). \
filter(col('coupon_sales') > 0)
print(f'Total rows in SKU sales count: {sales.count()}')
print(f'Total number of coupons with sales: {coupon_sales.count()}')
return sales, coupon_sales
@timer
def cvm_pre_processing(
start_date: str,
period: int,
env: str,
division: str = 'LSG',
debug: bool = False,
):
module_logger.info(f'===== cvm_pre_processing for {division} in {env} : START ======')
module_logger.info(f'===== cvm_pre_processing : start_date = {start_date} ======')
module_logger.info(f'===== cvm_pre_processing : period = {period} days ======')
pull_history = MarketBasketPullHistory(start_date, period, env, debug = debug)
if division == 'LSG':
df, prod_list, coupons = pull_history.lsg_omni()
sales, coupon_sales = pull_history.lsg_sales(prod_list, coupons)
else:
df, prod_list, coupons = pull_history.ccg_omni()
sales, coupon_sales = pull_history.ccg_sales(prod_list, coupons)
# find scraper sessions: sessions with more than 30 clicks
if df:
session_prod_counts = df.groupBy('session_key').\
agg(countDistinct(col('prod_id'))).\
withColumnRenamed('count(DISTINCT prod_id)', 'prod_count')
session_coup_counts = df.groupBy('session_key').\
agg(countDistinct(col('coupon'))).\
withColumnRenamed('count(DISTINCT coupon)', 'coupon_count')
sessions_that_matter = session_prod_counts.\
join(session_coup_counts, ['session_key'], how='inner')
sessions_that_matter = sessions_that_matter. \
filter(col('coupon_count') <= 30). \
filter(col('coupon_count') > 1). \
filter(col('prod_count') > 1)
sessions_that_matter = clone(sessions_that_matter)
df = df.join(broadcast(sessions_that_matter), ['session_key'], how = 'inner').\
withColumnRenamed('session_key', 'basket_key')
sales_count = sales.count()
row_count = df.count()
module_logger.info(f'===== cvm_pre_processing : total_row_count for df = {row_count} ======')
module_logger.info(f'===== cvm_pre_processing : number of SKUs with sales = {sales_count} ======')
module_logger.info('===== cvm_pre_processing : END ======')
return sessions_that_matter, sales, coupon_sales, df
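# A minimal usage sketch (hypothetical argument values; assumes a live Spark session and the
# helper imports used by this module are available):
#     sessions, sales, coupon_sales, baskets = cvm_pre_processing(
#         start_date='20210101', period=30, env='dev', division='LSG', debug=True)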
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import pickle
import re
import time
from argparse import Namespace
from datetime import timedelta
from logging import INFO
from pathlib import Path
from typing import Union
from unittest import mock
from unittest.mock import call, MagicMock, Mock, patch
import cloudpickle
import pytest
import torch
import yaml
from torch import optim
import pytorch_lightning as pl
import tests.helpers.utils as tutils
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if _OMEGACONF_AVAILABLE:
from omegaconf import Container, OmegaConf
def test_model_checkpoint_state_key():
early_stopping = ModelCheckpoint(monitor="val_loss")
expected_id = (
"ModelCheckpoint{'monitor': 'val_loss', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': None}"
)
assert early_stopping.state_key == expected_id
class LogInTwoMethods(BoringModel):
def training_step(self, batch, batch_idx):
out = super().training_step(batch, batch_idx)
self.log("early_stop_on", out["loss"])
return out
def validation_epoch_end(self, outputs):
outs = torch.stack([x["x"] for x in outputs]).mean()
self.log("val_acc", outs)
def mock_training_epoch_loop(trainer):
# do not use `unittest.Mock` because we need to store the return value
calls = {}
old_get_monitor_value = trainer.fit_loop.epoch_loop._get_monitor_value
def mock(key):
value = old_get_monitor_value(key)
calls[trainer.current_epoch] = {key: value}
return value
trainer.fit_loop.epoch_loop._get_monitor_value = mock
return calls
@pytest.mark.parametrize(
"validation_step_none,val_dataloaders_none,monitor",
[(False, False, "val_log"), (True, False, "train_log_epoch"), (False, True, "val_log")],
)
@pytest.mark.parametrize("reduce_lr_on_plateau", [False, True])
def test_model_checkpoint_score_and_ckpt(
tmpdir, validation_step_none: bool, val_dataloaders_none: bool, monitor: str, reduce_lr_on_plateau: bool
):
"""Test that when a model checkpoint is saved, it saves with the correct score appended to ckpt_path and
checkpoint data."""
max_epochs = 3
limit_train_batches = 5
limit_val_batches = 7
lr, gamma = 1e-1, 2
class CustomBoringModel(BoringModel):
def __init__(self):
super().__init__()
self.train_log_epochs = torch.randn(max_epochs, limit_train_batches)
self.val_logs = torch.randn(max_epochs, limit_val_batches)
self.scores = []
def training_step(self, batch, batch_idx):
log_value = self.train_log_epochs[self.current_epoch, batch_idx]
self.log("train_log", log_value, on_epoch=True)
return super().training_step(batch, batch_idx)
def validation_step(self, batch, batch_idx):
log_value = self.val_logs[self.current_epoch, batch_idx]
self.log("val_log", log_value)
self.log("epoch", self.current_epoch, on_epoch=True)
return super().validation_step(batch, batch_idx)
def configure_optimizers(self):
optimizer = optim.SGD(self.parameters(), lr=lr)
if reduce_lr_on_plateau:
lr_scheduler = {
"scheduler": optim.lr_scheduler.ReduceLROnPlateau(optimizer),
"monitor": monitor,
"strict": True,
}
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)
return [optimizer], [lr_scheduler]
def on_train_epoch_end(self):
if "train" in monitor:
self.scores.append(self.trainer.logged_metrics[monitor])
def on_validation_epoch_end(self):
if not self.trainer.sanity_checking and "val" in monitor:
self.scores.append(self.trainer.logged_metrics[monitor])
filename = "{" + f"{monitor}" + ":.4f}-{epoch}"
checkpoint = ModelCheckpoint(dirpath=tmpdir, filename=filename, monitor=monitor, save_top_k=-1)
model = CustomBoringModel()
if validation_step_none:
model.validation_step = None
if val_dataloaders_none:
model.val_dataloaders = None
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint],
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
max_epochs=max_epochs,
enable_progress_bar=False,
)
calls = mock_training_epoch_loop(trainer)
trainer.fit(model)
ckpt_files = list(Path(tmpdir).glob("*.ckpt"))
assert len(ckpt_files) == len(model.scores) == max_epochs
for epoch in range(max_epochs):
score = model.scores[epoch]
expected_score = getattr(model, f"{monitor}s")[epoch].mean().item()
expected_filename = f"{monitor}={score:.4f}-epoch={epoch}.ckpt"
assert math.isclose(score, expected_score, rel_tol=1e-4)
chk = pl_load(os.path.join(checkpoint.dirpath, expected_filename))
assert chk["epoch"] == epoch + 1
assert chk["global_step"] == limit_train_batches * (epoch + 1)
mc_specific_data = chk["callbacks"][
f"ModelCheckpoint{{"monitor": "{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': True}"
]
assert mc_specific_data["dirpath"] == checkpoint.dirpath
assert mc_specific_data["monitor"] == monitor
assert mc_specific_data["current_score"] == score
if not reduce_lr_on_plateau:
actual_step_count = chk["lr_schedulers"][0]["_step_count"]
actual_lr = chk["lr_schedulers"][0]["_last_lr"][0]
# checkpoint is saved after updating lr_scheduler states
assert actual_step_count == epoch + 2 # step_count starts at 1
assert actual_lr == lr * gamma ** (epoch + 1)
else:
assert calls[epoch] == {monitor: score}
@pytest.mark.parametrize(
"val_check_interval,reduce_lr_on_plateau,epoch_aligned",
[(0.25, True, True), (0.25, False, True), (0.42, False, False)],
)
def test_model_checkpoint_score_and_ckpt_val_check_interval(
tmpdir, val_check_interval, reduce_lr_on_plateau, epoch_aligned
):
"""Test that when a model checkpoint is saved, it saves with the correct score appended to ckpt_path and
checkpoint data with val_check_interval."""
seed_everything(0)
max_epochs = 3
limit_train_batches = 12
limit_val_batches = 7
lr, gamma = 1e-1, 2
monitor = "val_log"
per_val_train_batches = int(limit_train_batches * val_check_interval)
per_epoch_val_checks, leftover_train_batches = divmod(limit_train_batches, per_val_train_batches)
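# For the parametrized cases: val_check_interval=0.25 -> per_val_train_batches=3 and
# divmod(12, 3) == (4, 0) (epoch aligned); val_check_interval=0.42 -> per_val_train_batches=5
# and divmod(12, 5) == (2, 2), i.e. two val checks per epoch with two leftover train batches.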
class CustomBoringModel(BoringModel):
def __init__(self):
super().__init__()
self.val_logs = torch.randn(per_epoch_val_checks * max_epochs, limit_val_batches)
self.val_loop_count = 0
self.scores = []
def validation_step(self, batch, batch_idx):
log_value = self.val_logs[self.val_loop_count, batch_idx]
self.log("val_log", log_value)
return super().validation_step(batch, batch_idx)
def validation_epoch_end(self, outputs):
self.val_loop_count += 1
super().validation_epoch_end(outputs)
self.scores.append(self.trainer.logged_metrics[monitor])
def configure_optimizers(self):
optimizer = optim.SGD(self.parameters(), lr=lr)
if reduce_lr_on_plateau:
lr_scheduler = {
"scheduler": optim.lr_scheduler.ReduceLROnPlateau(optimizer),
"monitor": monitor,
"strict": True,
}
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)
return [optimizer], [lr_scheduler]
filename = "{" + f"{monitor}" + ":.4f}-{epoch}"
checkpoint = ModelCheckpoint(dirpath=tmpdir, filename=filename, monitor=monitor, save_top_k=-1)
model = CustomBoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint],
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
max_epochs=max_epochs,
val_check_interval=val_check_interval,
enable_progress_bar=False,
num_sanity_val_steps=0,
)
calls = mock_training_epoch_loop(trainer)
trainer.fit(model)
def _make_assertions(epoch, ix):
global_ix = ix + per_epoch_val_checks * epoch
# checkpoint saved at the end of training epoch will have updated lr_scheduler states
epoch_end_checkpoint = epoch_aligned and ix == (per_epoch_val_checks - 1)
score = model.scores[global_ix]
expected_score = getattr(model, f"{monitor}s")[global_ix].mean().item()
expected_filename = f"{monitor}={score:.4f}-epoch={epoch}.ckpt"
assert math.isclose(score, expected_score, rel_tol=1e-4)
chk = pl_load(os.path.join(checkpoint.dirpath, expected_filename))
assert chk["epoch"] == epoch + 1
expected_global_step = per_val_train_batches * (global_ix + 1) + (leftover_train_batches * epoch)
assert chk["global_step"] == expected_global_step
mc_specific_data = chk["callbacks"][
f"ModelCheckpoint{{"monitor": "{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': False}"
]
assert mc_specific_data["dirpath"] == checkpoint.dirpath
assert mc_specific_data["monitor"] == monitor
assert mc_specific_data["current_score"] == score
if not reduce_lr_on_plateau:
actual_step_count = chk["lr_schedulers"][0]["_step_count"]
actual_lr = chk["lr_schedulers"][0]["_last_lr"][0]
assert actual_step_count == epoch + 1 + epoch_end_checkpoint
assert actual_lr == lr * gamma ** (epoch + epoch_end_checkpoint)
return score
ckpt_files = list(Path(tmpdir).glob("*.ckpt"))
assert len(ckpt_files) == len(model.scores) == per_epoch_val_checks * max_epochs
for epoch in range(max_epochs):
for i in range(per_epoch_val_checks):
score = _make_assertions(epoch, i)
if reduce_lr_on_plateau:
assert calls[epoch] == {monitor: score}
@pytest.mark.parametrize("save_top_k", [-1, 0, 1, 2])
def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k: int):
"""Test that dirpath=None in checkpoint callback is valid and that ckpt_path is set correctly."""
tutils.reset_seed()
model = LogInTwoMethods()
checkpoint = ModelCheckpoint(monitor="early_stop_on", dirpath=None, filename="{epoch}", save_top_k=save_top_k)
max_epochs = 2
trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs)
trainer.fit(model)
assert checkpoint.dirpath == tmpdir / trainer.logger.name / "version_0" / "checkpoints"
if save_top_k == -1:
ckpt_files = os.listdir(checkpoint.dirpath)
expected_ckpt_files = [f"epoch={i}.ckpt" for i in range(max_epochs)]
assert len(ckpt_files) == len(expected_ckpt_files) == max_epochs
assert set(ckpt_files) == set(expected_ckpt_files)
@pytest.mark.parametrize("save_top_k", [-1, 0, 1, 2])
def test_model_checkpoint_to_yaml(tmpdir, save_top_k: int):
"""Test that None in checkpoint callback is valid and that chkp_path is set correctly."""
tutils.reset_seed()
model = LogInTwoMethods()
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_top_k=save_top_k)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=2)
trainer.fit(model)
path_yaml = os.path.join(tmpdir, "best_k_models.yaml")
checkpoint.to_yaml(path_yaml)
d = yaml.full_load(open(path_yaml))
best_k = dict(checkpoint.best_k_models.items())
assert d == best_k
@pytest.mark.parametrize("logger_version,expected", [(None, "version_0"), (1, "version_1"), ("awesome", "awesome")])
def test_model_checkpoint_path(tmpdir, logger_version: Union[None, int, str], expected: str):
"""Test that "version_" prefix is only added when logger's version is an integer."""
tutils.reset_seed()
model = LogInTwoMethods()
logger = TensorBoardLogger(str(tmpdir), version=logger_version)
trainer = Trainer(default_root_dir=tmpdir, overfit_batches=0.2, max_epochs=2, logger=logger)
trainer.fit(model)
ckpt_version = Path(trainer.checkpoint_callback.dirpath).parent.name
assert ckpt_version == expected
def test_pickling(tmpdir):
ckpt = ModelCheckpoint(dirpath=tmpdir)
ckpt_pickled = pickle.dumps(ckpt)
ckpt_loaded = pickle.loads(ckpt_pickled)
assert vars(ckpt) == vars(ckpt_loaded)
ckpt_pickled = cloudpickle.dumps(ckpt)
ckpt_loaded = cloudpickle.loads(ckpt_pickled)
assert vars(ckpt) == vars(ckpt_loaded)
class ModelCheckpointTestInvocations(ModelCheckpoint):
# this class has to be defined outside the test function, otherwise we get pickle error
# due to the way ddp process is launched
def __init__(self, expected_count, *args, **kwargs):
super().__init__(*args, **kwargs)
self.expected_count = expected_count
self.on_save_checkpoint_count = 0
def on_train_start(self, trainer, pl_module):
torch.save = Mock(wraps=torch.save)
def on_save_checkpoint(self, trainer, pl_module, checkpoint):
# only rank 0 will call ``torch.save``
super().on_save_checkpoint(trainer, pl_module, checkpoint)
self.on_save_checkpoint_count += 1
def on_train_end(self, trainer, pl_module):
super().on_train_end(trainer, pl_module)
assert self.best_model_path
assert self.best_model_score
assert self.on_save_checkpoint_count == self.expected_count
if trainer.is_global_zero:
assert torch.save.call_count == self.expected_count
else:
assert torch.save.call_count == 0
@RunIf(skip_windows=True, skip_49370=True)
def test_model_checkpoint_no_extraneous_invocations(tmpdir):
"""Test to ensure that the model callback saves the checkpoints only once in distributed mode."""
model = LogInTwoMethods()
num_epochs = 4
model_checkpoint = ModelCheckpointTestInvocations(monitor="early_stop_on", expected_count=num_epochs, save_top_k=-1)
trainer = Trainer(
strategy="ddp_spawn",
accelerator="cpu",
devices=2,
default_root_dir=tmpdir,
callbacks=[model_checkpoint],
max_epochs=num_epochs,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
def test_model_checkpoint_format_checkpoint_name(tmpdir):
# empty filename:
ckpt_name = ModelCheckpoint._format_checkpoint_name("", {"epoch": 3, "step": 2})
assert ckpt_name == "epoch=3-step=2"
ckpt_name = ModelCheckpoint._format_checkpoint_name(None, {"epoch": 3, "step": 2}, prefix="test")
assert ckpt_name == "test-epoch=3-step=2"
# no groups case:
ckpt_name = ModelCheckpoint._format_checkpoint_name("ckpt", {}, prefix="test")
assert ckpt_name == "test-ckpt"
# no prefix
ckpt_name = ModelCheckpoint._format_checkpoint_name("{epoch:03d}-{acc}", {"epoch": 3, "acc": 0.03})
assert ckpt_name == "epoch=003-acc=0.03"
# prefix
char_org = ModelCheckpoint.CHECKPOINT_JOIN_CHAR
ModelCheckpoint.CHECKPOINT_JOIN_CHAR = "@"
ckpt_name = ModelCheckpoint._format_checkpoint_name("{epoch},{acc:.5f}", {"epoch": 3, "acc": 0.03}, prefix="test")
assert ckpt_name == "test@epoch=3,acc=0.03000"
ModelCheckpoint.CHECKPOINT_JOIN_CHAR = char_org
# no dirpath set
ckpt_name = ModelCheckpoint(monitor="early_stop_on", dirpath=None).format_checkpoint_name({"epoch": 3, "step": 2})
assert ckpt_name == "epoch=3-step=2.ckpt"
ckpt_name = ModelCheckpoint(monitor="early_stop_on", dirpath="").format_checkpoint_name({"epoch": 5, "step": 4})
assert ckpt_name == "epoch=5-step=4.ckpt"
# CWD
ckpt_name = ModelCheckpoint(monitor="early_stop_on", dirpath=".").format_checkpoint_name({"epoch": 3, "step": 4})
assert ckpt_name == str(Path(".").resolve() / "epoch=3-step=4.ckpt")
# with version
ckpt = ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, filename="name")
ckpt_name = ckpt.format_checkpoint_name({}, ver=3)
assert ckpt_name == tmpdir / "name-v3.ckpt"
# using slashes
ckpt = ModelCheckpoint(monitor="early_stop_on", dirpath=None, filename="{epoch}_{val/loss:.5f}")
ckpt_name = ckpt.format_checkpoint_name({"epoch": 4, "val/loss": 0.03})
assert ckpt_name == "epoch=4_val/loss=0.03000.ckpt"
# auto_insert_metric_name=False
ckpt_name = ModelCheckpoint._format_checkpoint_name(
"epoch={epoch:03d}-val_acc={val/acc}", {"epoch": 3, "val/acc": 0.03}, auto_insert_metric_name=False
)
assert ckpt_name == "epoch=003-val_acc=0.03"
class ModelCheckpointExtensionTest(ModelCheckpoint):
FILE_EXTENSION = ".tpkc"
def test_model_checkpoint_file_extension(tmpdir):
"""Test ModelCheckpoint with different file extension."""
model = LogInTwoMethods()
model_checkpoint = ModelCheckpointExtensionTest(
monitor="early_stop_on", dirpath=tmpdir, save_top_k=1, save_last=True
)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[model_checkpoint], max_steps=1, logger=False)
trainer.fit(model)
expected = ["epoch=0-step=0.tpkc", "last.tpkc"]
assert set(expected) == set(os.listdir(tmpdir))
def test_model_checkpoint_save_last(tmpdir):
"""Tests that save_last produces only one last checkpoint."""
seed_everything()
model = LogInTwoMethods()
epochs = 3
ModelCheckpoint.CHECKPOINT_NAME_LAST = "last-{epoch}"
model_checkpoint = ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, save_top_k=-1, save_last=True)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[model_checkpoint],
max_epochs=epochs,
limit_train_batches=10,
limit_val_batches=10,
logger=False,
)
trainer.fit(model)
last_filename = model_checkpoint._format_checkpoint_name(
ModelCheckpoint.CHECKPOINT_NAME_LAST, {"epoch": trainer.current_epoch}
)
last_filename = last_filename + ".ckpt"
assert str(tmpdir / last_filename) == model_checkpoint.last_model_path
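# With limit_train_batches=10, the epoch-end checkpoints carry the zero-based global step of
# each epoch's last batch (9, 19, 29), plus the single "last-{epoch}" checkpoint.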
assert set(os.listdir(tmpdir)) == set(
[f"epoch={i}-step={j}.ckpt" for i, j in zip(range(epochs), [9, 19, 29])] + [last_filename]
)
ModelCheckpoint.CHECKPOINT_NAME_LAST = "last"
def test_invalid_top_k(tmpdir):
"""Make sure that a MisconfigurationException is raised for a negative save_top_k argument."""
with pytest.raises(MisconfigurationException, match=r".*Must be >= -1"):
ModelCheckpoint(dirpath=tmpdir, save_top_k=-3)
def test_none_monitor_top_k(tmpdir):
"""Test that a warning appears for positive top_k with monitor=None."""
with pytest.raises(
MisconfigurationException, match=r"ModelCheckpoint\(save_top_k=3, monitor=None\) is not a valid*"
):
ModelCheckpoint(dirpath=tmpdir, save_top_k=3)
# These should not fail
ModelCheckpoint(dirpath=tmpdir, save_top_k=-1)
ModelCheckpoint(dirpath=tmpdir, save_top_k=0)
ModelCheckpoint(dirpath=tmpdir, save_top_k=1)
def test_invalid_every_n_epochs(tmpdir):
"""Make sure that a MisconfigurationException is raised for a negative every_n_epochs argument."""
with pytest.raises(MisconfigurationException, match=r".*Must be >= 0"):
ModelCheckpoint(dirpath=tmpdir, every_n_epochs=-3)
# These should not fail
ModelCheckpoint(dirpath=tmpdir, every_n_epochs=0)
ModelCheckpoint(dirpath=tmpdir, every_n_epochs=1)
ModelCheckpoint(dirpath=tmpdir, every_n_epochs=2)
def test_invalid_every_n_train_steps(tmpdir):
"""Make sure that a MisconfigurationException is raised for a negative every_n_epochs argument."""
with pytest.raises(MisconfigurationException, match=r".*Must be >= 0"):
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=-3)
# These should not fail
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0)
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=1)
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=2)
def test_invalid_trigger_combination(tmpdir):
"""Test that a MisconfigurationException is raised if more than one of every_n_epochs, every_n_train_steps, and
train_time_interval are enabled together."""
with pytest.raises(MisconfigurationException, match=r".*Combination of parameters every_n_train_steps"):
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=1, every_n_epochs=2)
with pytest.raises(MisconfigurationException, match=r".*Combination of parameters every_n_train_steps"):
ModelCheckpoint(train_time_interval=timedelta(minutes=1), every_n_epochs=2)
with pytest.raises(MisconfigurationException, match=r".*Combination of parameters every_n_train_steps"):
ModelCheckpoint(train_time_interval=timedelta(minutes=1), every_n_train_steps=2)
# These should not fail
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0, every_n_epochs=3)
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=4, every_n_epochs=0)
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0, every_n_epochs=0, train_time_interval=timedelta(minutes=1))
def test_none_every_n_train_steps_val_epochs(tmpdir):
checkpoint_callback = ModelCheckpoint(dirpath=tmpdir)
assert checkpoint_callback.every_n_epochs == 1
assert checkpoint_callback._every_n_train_steps == 0
def test_model_checkpoint_save_last_none_monitor(tmpdir, caplog):
"""Test that it is possible to save all checkpoints when monitor=None."""
seed_everything()
model = LogInTwoMethods()
epochs = 2
checkpoint_callback = ModelCheckpoint(monitor=None, dirpath=tmpdir, save_top_k=-1, save_last=True)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint_callback],
limit_train_batches=10,
limit_val_batches=10,
max_epochs=epochs,
logger=False,
)
with caplog.at_level(INFO):
trainer.fit(model)
assert "will duplicate the last checkpoint saved" in caplog.text
# these should not be set if monitor is None
assert checkpoint_callback.monitor is None
assert checkpoint_callback.best_model_path == tmpdir / "epoch=1-step=19.ckpt"
assert checkpoint_callback.last_model_path == tmpdir / "last.ckpt"
assert checkpoint_callback.best_model_score is None
assert checkpoint_callback.best_k_models == {}
assert checkpoint_callback.kth_best_model_path == ""
# check that the correct ckpts were created
expected = [f"epoch={i}-step={j}.ckpt" for i, j in zip(range(epochs), [9, 19])]
expected.append("last.ckpt")
assert set(os.listdir(tmpdir)) == set(expected)
@pytest.mark.parametrize("every_n_epochs", list(range(4)))
def test_model_checkpoint_every_n_epochs(tmpdir, every_n_epochs):
model = LogInTwoMethods()
epochs = 5
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir, filename="{epoch}", save_top_k=-1, every_n_epochs=every_n_epochs
)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint_callback],
max_epochs=epochs,
limit_train_batches=1,
limit_val_batches=1,
logger=False,
)
trainer.fit(model)
# check that the correct ckpts were created
expected = [f"epoch={e}.ckpt" for e in range(epochs) if not (e + 1) % every_n_epochs] if every_n_epochs > 0 else []
assert set(os.listdir(tmpdir)) == set(expected)
def test_ckpt_every_n_train_steps(tmpdir):
"""Tests that the checkpoints are saved every n training steps."""
model = LogInTwoMethods()
every_n_train_steps = 16
max_epochs = 2
epoch_length = 64
checkpoint_callback = ModelCheckpoint(
filename="{step}",
every_n_epochs=0,
every_n_train_steps=every_n_train_steps,
dirpath=tmpdir,
save_top_k=-1,
save_last=False,
)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
enable_progress_bar=False,
callbacks=[checkpoint_callback],
logger=False,
)
trainer.fit(model)
expected = [
f"step={i}.ckpt" for i in range(every_n_train_steps - 1, max_epochs * epoch_length, every_n_train_steps)
]
assert set(os.listdir(tmpdir)) == set(expected)
@mock.patch("pytorch_lightning.callbacks.model_checkpoint.time")
def test_model_checkpoint_train_time_interval(mock_datetime, tmpdir) -> None:
"""Tests that the checkpoints are saved at the specified time interval."""
seconds_per_batch = 7
start_time = time.monotonic()
batches_per_epoch = 64
num_epochs = 2
max_batches = batches_per_epoch * num_epochs + 1
mock_datetime.monotonic.side_effect = [start_time + seconds_per_batch * i for i in range(max_batches)]
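# The mocked monotonic clock advances 7 s per call, so with train_time_interval=1 minute a
# checkpoint becomes due roughly every ceil(60 / 7) == 9 batches; 2 * 64 = 128 training
# batches therefore produce the 14 checkpoints asserted below.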
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
min_epochs=num_epochs,
max_epochs=num_epochs,
enable_progress_bar=False,
callbacks=[
ModelCheckpoint(
filename="{epoch}-{step}",
dirpath=tmpdir,
train_time_interval=timedelta(minutes=1),
save_top_k=-1,
save_last=False,
)
],
logger=False,
)
trainer.fit(model)
# Each batch takes 7 sec and we checkpoint every minute. There are 64
# batches per epoch, so total time to run is 7*64*2 = 896 sec < 14.96 minutes,
# so we should have 14 checkpoints.
assert len(os.listdir(tmpdir)) == 14
def test_model_checkpoint_topk_zero(tmpdir):
"""Test that no checkpoints are saved when save_top_k=0."""
model = LogInTwoMethods()
checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, save_top_k=0, save_last=True)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint_callback], max_epochs=2, logger=False)
trainer.fit(model)
# these should not be set if monitor is None
assert checkpoint_callback.monitor is None
assert checkpoint_callback.best_model_path == ""
assert checkpoint_callback.best_model_score is None
assert checkpoint_callback.best_k_models == {}
assert checkpoint_callback.kth_best_model_path == ""
# check that only the last ckpt was created
assert os.listdir(tmpdir) == ["last.ckpt"]
assert checkpoint_callback.last_model_path == tmpdir / "last.ckpt"
def test_model_checkpoint_topk_all(tmpdir):
"""Test that save_top_k=-1 tracks the best models when monitor key is provided."""
seed_everything(1000)
epochs = 3
model = BoringModel()
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir, filename="{epoch}", monitor="epoch", mode="max", save_top_k=-1
)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint_callback],
max_epochs=epochs,
logger=False,
val_check_interval=1.0,
)
trainer.fit(model)
assert checkpoint_callback.monitor == "epoch"
assert checkpoint_callback.best_model_path == tmpdir / "epoch=2.ckpt"
assert checkpoint_callback.best_model_score == epochs - 1
assert len(os.listdir(tmpdir)) == len(checkpoint_callback.best_k_models) == epochs
assert set(checkpoint_callback.best_k_models.keys()) == {str(tmpdir / f"epoch={i}.ckpt") for i in range(epochs)}
assert checkpoint_callback.kth_best_model_path == tmpdir / "epoch=0.ckpt"
def test_ckpt_metric_names(tmpdir):
model = LogInTwoMethods()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
gradient_clip_val=1.0,
overfit_batches=0.20,
enable_progress_bar=False,
limit_train_batches=0.01,
limit_val_batches=0.01,
callbacks=[ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, filename="{val_loss:.2f}")],
)
trainer.fit(model)
# make sure the checkpoint we saved has the metric in the name
ckpts = os.listdir(tmpdir)
ckpts = [x for x in ckpts if "val_loss" in x]
assert len(ckpts) == 1
val = re.sub("[^0-9.]", "", ckpts[0])
assert len(val) > 3
def test_default_checkpoint_behavior(tmpdir):
seed_everything(1234)
model = LogInTwoMethods()
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=3, enable_progress_bar=False, limit_train_batches=5, limit_val_batches=5
)
with patch.object(trainer, "save_checkpoint", wraps=trainer.save_checkpoint) as save_mock:
trainer.fit(model)
results = trainer.test()
assert len(results) == 1
save_dir = tmpdir / "lightning_logs" / "version_0" / "checkpoints"
save_weights_only = trainer.checkpoint_callback.save_weights_only
save_mock.assert_has_calls(
[
call(save_dir / "epoch=0-step=4.ckpt", save_weights_only),
call(save_dir / "epoch=1-step=9.ckpt", save_weights_only),
call(save_dir / "epoch=2-step=14.ckpt", save_weights_only),
]
)
ckpts = os.listdir(save_dir)
assert len(ckpts) == 1
assert ckpts[0] == "epoch=2-step=14.ckpt"
@pytest.mark.parametrize("max_epochs", [1, 2])
@pytest.mark.parametrize("should_validate", [True, False])
@pytest.mark.parametrize("save_last", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
def test_model_checkpoint_save_last_warning(
tmpdir, caplog, max_epochs: int, should_validate: bool, save_last: bool, verbose: bool
):
"""Tests 'Saving latest checkpoint...' log."""
model = LogInTwoMethods()
if not should_validate:
model.validation_step = None
ckpt = ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, save_top_k=0, save_last=save_last, verbose=verbose)
trainer = Trainer(
default_root_dir=tmpdir, callbacks=[ckpt], max_epochs=max_epochs, limit_train_batches=1, limit_val_batches=1
)
with caplog.at_level(logging.INFO):
trainer.fit(model)
assert caplog.messages.count("Saving latest checkpoint...") == (verbose and save_last)
def test_model_checkpoint_save_last_checkpoint_contents(tmpdir):
"""Tests that the save_last checkpoint contains the latest information."""
seed_everything(100)
model = LogInTwoMethods()
num_epochs = 3
model_checkpoint = ModelCheckpoint(
monitor="early_stop_on", dirpath=tmpdir, filename="{epoch}", save_top_k=num_epochs, save_last=True
)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[model_checkpoint],
max_epochs=num_epochs,
limit_train_batches=2,
limit_val_batches=2,
)
trainer.fit(model)
path_last_epoch = str(tmpdir / f"epoch={num_epochs - 1}.ckpt")
path_last = str(tmpdir / "last.ckpt")
assert path_last == model_checkpoint.last_model_path
assert os.path.isfile(path_last_epoch)
ckpt_last_epoch = torch.load(path_last_epoch)
ckpt_last = torch.load(path_last)
assert ckpt_last_epoch["epoch"] == ckpt_last["epoch"]
assert ckpt_last_epoch["global_step"] == ckpt_last["global_step"]
ckpt_id = (
"ModelCheckpoint{'monitor': 'early_stop_on', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': True}"
)
assert ckpt_last["callbacks"][ckpt_id] == ckpt_last_epoch["callbacks"][ckpt_id]
# it is easier to load the model objects than to iterate over the raw dict of tensors
model_last_epoch = LogInTwoMethods.load_from_checkpoint(path_last_epoch)
model_last = LogInTwoMethods.load_from_checkpoint(model_checkpoint.last_model_path)
for w0, w1 in zip(model_last_epoch.parameters(), model_last.parameters()):
assert w0.eq(w1).all()
@pytest.mark.parametrize("mode", ["min", "max"])
def test_checkpointing_with_nan_as_first(tmpdir, mode):
monitor = [float("nan")]
monitor += [5, 7, 8] if mode == "max" else [8, 7, 5]
class CurrentModel(LogInTwoMethods):
def validation_epoch_end(self, outputs):
val_loss = monitor[self.current_epoch]
self.log("abc", val_loss)
model = CurrentModel()
callback = ModelCheckpoint(monitor="abc", mode=mode, save_top_k=1, dirpath=tmpdir)
trainer = Trainer(
callbacks=[callback],
default_root_dir=tmpdir,
val_check_interval=1.0,
max_epochs=len(monitor),
)
trainer.save_checkpoint = MagicMock()
trainer.fit(model)
# check that last one is also the best one
assert trainer.save_checkpoint.call_count == len(monitor)
assert mode == "min" and callback.best_model_score == 5 or mode == "max" and callback.best_model_score == 8
def test_checkpoint_repeated_strategy(tmpdir):
"""This test validates checkpoint can be called several times without increasing internally its global step if
nothing run."""
checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=tmpdir, filename="{epoch:02d}")
class ExtendedBoringModel(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("val_loss", loss)
model = ExtendedBoringModel()
model.validation_epoch_end = None
trainer_kwargs = {
"max_epochs": 1,
"limit_train_batches": 2,
"limit_val_batches": 2,
"limit_test_batches": 2,
"enable_progress_bar": False,
"enable_model_summary": False,
}
trainer = Trainer(**trainer_kwargs, callbacks=[checkpoint_callback])
trainer.fit(model)
assert os.listdir(tmpdir) == ["epoch=00.ckpt"]
for idx in range(4):
# load from checkpoint
trainer = pl.Trainer(**trainer_kwargs, default_root_dir=tmpdir)
trainer.fit(model, ckpt_path=checkpoint_callback.best_model_path)
trainer.test(ckpt_path=checkpoint_callback.best_model_path, verbose=False)
assert set(os.listdir(tmpdir)) == {"epoch=00.ckpt", "lightning_logs"}
assert set(os.listdir(tmpdir / "lightning_logs")) == {f"version_{i}" for i in range(4)}
def test_checkpoint_repeated_strategy_extended(tmpdir):
"""This test validates checkpoint can be called several times without increasing internally its global step if
nothing run."""
class ExtendedBoringModel(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"val_loss": loss}
def validation_epoch_end(self, *_):
...
def assert_trainer_init(trainer):
assert trainer.global_step == 0
assert trainer.current_epoch == 0
def get_last_checkpoint(ckpt_dir):
last = ckpt_dir.listdir(sort=True)[-1]
return str(last)
def assert_checkpoint_content(ckpt_dir):
chk = pl_load(get_last_checkpoint(ckpt_dir))
assert chk["epoch"] == epochs
assert chk["global_step"] == 4
def assert_checkpoint_log_dir(idx):
lightning_logs = tmpdir / "lightning_logs"
actual = [d.basename for d in lightning_logs.listdir(sort=True)]
assert actual == [f"version_{i}" for i in range(idx + 1)]
actual = [d.basename for d in ckpt_dir.listdir()]
assert len(actual) == epochs, actual
ckpt_dir = tmpdir / "checkpoints"
checkpoint_cb = ModelCheckpoint(dirpath=ckpt_dir, save_top_k=-1)
epochs = 2
limit_train_batches = 2
trainer_config = dict(
default_root_dir=tmpdir,
max_epochs=epochs,
limit_train_batches=limit_train_batches,
limit_val_batches=3,
limit_test_batches=4,
callbacks=[checkpoint_cb],
)
trainer = pl.Trainer(**trainer_config)
assert_trainer_init(trainer)
model = ExtendedBoringModel()
trainer.fit(model)
assert trainer.global_step == epochs * limit_train_batches
assert trainer.current_epoch == epochs - 1
assert_checkpoint_log_dir(0)
assert_checkpoint_content(ckpt_dir)
trainer.validate(model)
assert trainer.current_epoch == epochs - 1
trainer.test(model)
assert trainer.current_epoch == epochs - 1
for idx in range(1, 5):
chk = get_last_checkpoint(ckpt_dir)
assert_checkpoint_content(ckpt_dir)
# load from checkpoint
trainer_config["callbacks"] = [ModelCheckpoint(dirpath=ckpt_dir, save_top_k=-1)]
trainer = pl.Trainer(**trainer_config)
assert_trainer_init(trainer)
model = ExtendedBoringModel()
trainer.test(model)
assert trainer.global_step == 0
assert trainer.current_epoch == 0
trainer.fit(model, ckpt_path=chk)
assert trainer.global_step == epochs * limit_train_batches
assert trainer.current_epoch == epochs
trainer.validate(model)
assert trainer.global_step == epochs * limit_train_batches
assert trainer.current_epoch == epochs
trainer.fit(model)
assert trainer.global_step == epochs * limit_train_batches
assert trainer.current_epoch == epochs
assert_checkpoint_log_dir(idx)
def test_configure_model_checkpoint(tmpdir):
"""Test all valid and invalid ways a checkpoint callback can be passed to the Trainer."""
kwargs = dict(default_root_dir=tmpdir)
callback1 = ModelCheckpoint()
callback2 = ModelCheckpoint()
# no callbacks
trainer = Trainer(enable_checkpointing=False, callbacks=[], **kwargs)
assert not any(isinstance(c, ModelCheckpoint) for c in trainer.callbacks)
assert trainer.checkpoint_callback is None
# default configuration
trainer = Trainer(callbacks=[], **kwargs)
assert sum(1 for c in trainer.callbacks if isinstance(c, ModelCheckpoint)) == 1
assert isinstance(trainer.checkpoint_callback, ModelCheckpoint)
# custom callback passed to callbacks list, enable_checkpointing=True is ignored
trainer = Trainer(enable_checkpointing=True, callbacks=[callback1], **kwargs)
assert [c for c in trainer.callbacks if isinstance(c, ModelCheckpoint)] == [callback1]
assert trainer.checkpoint_callback == callback1
# multiple checkpoint callbacks
trainer = Trainer(callbacks=[callback1, callback2], **kwargs)
assert trainer.checkpoint_callback == callback1
assert trainer.checkpoint_callbacks == [callback1, callback2]
with pytest.raises(MisconfigurationException, match="`enable_checkpointing=False` but found `ModelCheckpoint`"):
Trainer(enable_checkpointing=False, callbacks=[callback1], **kwargs)
def test_val_check_interval_checkpoint_files(tmpdir):
"""Test correct checkpoint naming when validating/checkpointing multiple times per epoch."""
model = LogInTwoMethods()
model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor="val_acc", mode="max")
trainer = Trainer(
default_root_dir=tmpdir,
val_check_interval=0.2,
max_epochs=1,
limit_train_batches=10,
callbacks=[model_checkpoint],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
files = {p.basename for p in tmpdir.listdir()}
assert files == {f"epoch=0-step={s}.ckpt" for s in [1, 3, 5, 7, 9]}
def test_current_score(tmpdir):
"""Check that the current_score value is correct and was saved."""
class TestModel(BoringModel):
def training_step(self, *args):
self.log("foo", (self.current_epoch + 1) / 10)
return super().training_step(*args)
model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=3, monitor="foo", mode="min")
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
limit_train_batches=1,
limit_val_batches=1,
callbacks=[model_checkpoint],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(TestModel())
assert model_checkpoint.current_score == 0.3
ckpts = [torch.load(str(ckpt)) for ckpt in tmpdir.listdir()]
ckpts = [
ckpt["callbacks"][
"ModelCheckpoint{'monitor': 'foo', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': True}"
]
for ckpt in ckpts
]
assert sorted(ckpt["current_score"] for ckpt in ckpts) == [0.1, 0.2, 0.3]
@pytest.mark.parametrize("mode", ["min", "max"])
def test_current_score_when_nan(tmpdir, mode: str):
"""Check that ModelCheckpoint handles NaN values correctly."""
class TestModel(BoringModel):
def training_step(self, *args):
self.log("foo", float("nan"))
return super().training_step(*args)
model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=1, monitor="foo", mode=mode)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=1,
limit_val_batches=1,
callbacks=[model_checkpoint],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(TestModel())
expected = float("inf" if mode == "min" else "-inf")
assert model_checkpoint.best_model_score == expected
assert model_checkpoint.current_score == expected
@pytest.mark.parametrize("use_omegaconf", [False, pytest.param(True, marks=RunIf(omegaconf=True))])
def test_hparams_type(tmpdir, use_omegaconf):
class TestModel(BoringModel):
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=1, monitor="foo")
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
callbacks=[model_checkpoint],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
hp = {"test_hp_0": 1, "test_hp_1": 2}
hp = OmegaConf.create(hp) if use_omegaconf else Namespace(**hp)
model = TestModel(hp)
trainer.fit(model)
ckpt = trainer.checkpoint_connector.dump_checkpoint()
if use_omegaconf:
assert isinstance(ckpt[model.CHECKPOINT_HYPER_PARAMS_KEY], Container)
else:
# make sure it's not AttributeDict
assert type(ckpt[model.CHECKPOINT_HYPER_PARAMS_KEY]) is dict
def test_ckpt_version_after_rerun_new_trainer(tmpdir):
"""Check that previous checkpoints are renamed to have the correct version suffix when new trainer instances
are used."""
epochs = 2
for i in range(epochs):
mc = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor="epoch", filename="{epoch}")
trainer = Trainer(
max_epochs=epochs,
limit_train_batches=1,
limit_val_batches=1,
default_root_dir=tmpdir,
callbacks=[mc],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(BoringModel())
# check best_k_models state
expected = {"epoch=0-v1.ckpt", "epoch=1-v1.ckpt"} if i else {"epoch=0.ckpt", "epoch=1.ckpt"}
assert {Path(f).name for f in mc.best_k_models} == expected
# check created ckpts
actual = {f.basename for f in tmpdir.listdir()}
assert actual == {"epoch=0.ckpt", "epoch=1.ckpt", "epoch=0-v1.ckpt", "epoch=1-v1.ckpt"}
def test_ckpt_version_after_rerun_same_trainer(tmpdir):
"""Check that previous checkpoints are renamed to have the correct version suffix when the same trainer
instance is used."""
mc = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor="epoch", filename="test")
mc.STARTING_VERSION = 9
trainer = Trainer(
max_epochs=2,
limit_train_batches=1,
limit_val_batches=1,
default_root_dir=tmpdir,
callbacks=[mc],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(BoringModel())
trainer.fit_loop.max_epochs = 4
trainer.fit(BoringModel())
ckpt_range = range(mc.STARTING_VERSION, trainer.max_epochs + mc.STARTING_VERSION)
expected = {"test.ckpt", *(f"test-v{i}.ckpt" for i in ckpt_range)}
# check best_k_models state
assert {Path(f).name for f in mc.best_k_models} == expected
# check created ckpts
assert set(os.listdir(tmpdir)) == expected
def test_model_checkpoint_mode_options():
with pytest.raises(MisconfigurationException, match="`mode` can be .* but got unknown_option"):
ModelCheckpoint(mode="unknown_option")
def test_check_val_every_n_epochs_top_k_integration(tmpdir):
model = BoringModel()
mc = ModelCheckpoint(dirpath=tmpdir, monitor="epoch", save_top_k=-1, filename="{epoch}")
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
num_sanity_val_steps=0,
max_epochs=5,
check_val_every_n_epoch=2,
callbacks=mc,
enable_model_summary=False,
logger=False,
)
trainer.fit(model)
assert set(os.listdir(tmpdir)) == {"epoch=1.ckpt", "epoch=3.ckpt"}
def test_model_checkpoint_saveload_ckpt(tmpdir):
ckpt = {
"monitor": "random_value",
"best_model_path": "epoch=10-step=1436.ckpt",
"best_model_score": torch.tensor(2.246),
"current_score": torch.tensor(1.5),
"dirpath": tmpdir,
"best_k_models": {"epoch=10-step=1436.ckpt": torch.tensor(2.246)},
"kth_best_model_path": "epoch=10-step=1436.ckpt",
"kth_value": torch.tensor(2.246),
"last_model_path": "last2245.ckpt",
}
# test on_save_checkpoint
cb_write = ModelCheckpoint(dirpath=tmpdir, monitor="random_value", save_top_k=-1, save_last=True)
for key, val in ckpt.items():
setattr(cb_write, key, val)
written_ckpt = cb_write.on_save_checkpoint("", "", "")
for state in ckpt:
assert ckpt[state] == written_ckpt[state]
# test on_load_checkpoint
# Note: "current_score", "dirpath" and "monitor" are currently not restored by on_load_checkpoint.
# We therefore set "dirpath" and "monitor" to something different than for ckpt/cb_write so we can assert them.
# "current_score" is left as initialized, i.e. None, and can therefore also be asserted
cb_restore = ModelCheckpoint(dirpath=tmpdir + "restore", monitor=None, save_top_k=-1, save_last=True)
cb_restore.on_load_checkpoint("", "", written_ckpt)
for key, val in written_ckpt.items():
if key not in ("current_score", "dirpath", "monitor"):
assert getattr(cb_restore, key) == val
else:
assert getattr(cb_restore, key) != val
| # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import pickle
import re
import time
from argparse import Namespace
from datetime import timedelta
from logging import INFO
from pathlib import Path
from typing import Union
from unittest import mock
from unittest.mock import call, MagicMock, Mock, patch
import cloudpickle
import pytest
import torch
import yaml
from torch import optim
import pytorch_lightning as pl
import tests.helpers.utils as tutils
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if _OMEGACONF_AVAILABLE:
from omegaconf import Container, OmegaConf
def test_model_checkpoint_state_key():
early_stopping = ModelCheckpoint(monitor="val_loss")
expected_id = (
"ModelCheckpoint{'monitor': 'val_loss', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': None}"
)
assert early_stopping.state_key == expected_id
class LogInTwoMethods(BoringModel):
def training_step(self, batch, batch_idx):
out = super().training_step(batch, batch_idx)
self.log("early_stop_on", out["loss"])
return out
def validation_epoch_end(self, outputs):
outs = torch.stack([x["x"] for x in outputs]).mean()
self.log("val_acc", outs)
def mock_training_epoch_loop(trainer):
# do not use `unittest.Mock` because we need to store the return value
calls = {}
old_get_monitor_value = trainer.fit_loop.epoch_loop._get_monitor_value
def mock(key):
value = old_get_monitor_value(key)
calls[trainer.current_epoch] = {key: value}
return value
trainer.fit_loop.epoch_loop._get_monitor_value = mock
return calls
@pytest.mark.parametrize(
"validation_step_none,val_dataloaders_none,monitor",
[(False, False, "val_log"), (True, False, "train_log_epoch"), (False, True, "val_log")],
)
@pytest.mark.parametrize("reduce_lr_on_plateau", [False, True])
def test_model_checkpoint_score_and_ckpt(
tmpdir, validation_step_none: bool, val_dataloaders_none: bool, monitor: str, reduce_lr_on_plateau: bool
):
"""Test that when a model checkpoint is saved, it saves with the correct score appended to ckpt_path and
checkpoint data."""
max_epochs = 3
limit_train_batches = 5
limit_val_batches = 7
lr, gamma = 1e-1, 2
class CustomBoringModel(BoringModel):
def __init__(self):
super().__init__()
self.train_log_epochs = torch.randn(max_epochs, limit_train_batches)
self.val_logs = torch.randn(max_epochs, limit_val_batches)
self.scores = []
def training_step(self, batch, batch_idx):
log_value = self.train_log_epochs[self.current_epoch, batch_idx]
self.log("train_log", log_value, on_epoch=True)
return super().training_step(batch, batch_idx)
def validation_step(self, batch, batch_idx):
log_value = self.val_logs[self.current_epoch, batch_idx]
self.log("val_log", log_value)
self.log("epoch", self.current_epoch, on_epoch=True)
return super().validation_step(batch, batch_idx)
def configure_optimizers(self):
optimizer = optim.SGD(self.parameters(), lr=lr)
if reduce_lr_on_plateau:
lr_scheduler = {
"scheduler": optim.lr_scheduler.ReduceLROnPlateau(optimizer),
"monitor": monitor,
"strict": True,
}
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)
return [optimizer], [lr_scheduler]
def on_train_epoch_end(self):
if "train" in monitor:
self.scores.append(self.trainer.logged_metrics[monitor])
def on_validation_epoch_end(self):
if not self.trainer.sanity_checking and "val" in monitor:
self.scores.append(self.trainer.logged_metrics[monitor])
filename = "{" + f"{monitor}" + ":.4f}-{epoch}"
checkpoint = ModelCheckpoint(dirpath=tmpdir, filename=filename, monitor=monitor, save_top_k=-1)
model = CustomBoringModel()
if validation_step_none:
model.validation_step = None
if val_dataloaders_none:
model.val_dataloaders = None
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint],
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
max_epochs=max_epochs,
enable_progress_bar=False,
)
calls = mock_training_epoch_loop(trainer)
trainer.fit(model)
ckpt_files = list(Path(tmpdir).glob("*.ckpt"))
assert len(ckpt_files) == len(model.scores) == max_epochs
for epoch in range(max_epochs):
score = model.scores[epoch]
expected_score = getattr(model, f"{monitor}s")[epoch].mean().item()
expected_filename = f"{monitor}={score:.4f}-epoch={epoch}.ckpt"
assert math.isclose(score, expected_score, rel_tol=1e-4)
chk = pl_load(os.path.join(checkpoint.dirpath, expected_filename))
assert chk["epoch"] == epoch + 1
assert chk["global_step"] == limit_train_batches * (epoch + 1)
mc_specific_data = chk["callbacks"][
f"ModelCheckpoint{{'monitor': '{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': True}"
]
assert mc_specific_data["dirpath"] == checkpoint.dirpath
assert mc_specific_data["monitor"] == monitor
assert mc_specific_data["current_score"] == score
if not reduce_lr_on_plateau:
actual_step_count = chk["lr_schedulers"][0]["_step_count"]
actual_lr = chk["lr_schedulers"][0]["_last_lr"][0]
# checkpoint is saved after updating lr_scheduler states
assert actual_step_count == epoch + 2 # step_count starts at 1
assert actual_lr == lr * gamma ** (epoch + 1)
else:
assert calls[epoch] == {monitor: score}
@pytest.mark.parametrize(
"val_check_interval,reduce_lr_on_plateau,epoch_aligned",
[(0.25, True, True), (0.25, False, True), (0.42, False, False)],
)
def test_model_checkpoint_score_and_ckpt_val_check_interval(
tmpdir, val_check_interval, reduce_lr_on_plateau, epoch_aligned
):
"""Test that when a model checkpoint is saved, it saves with the correct score appended to ckpt_path and
checkpoint data with val_check_interval."""
seed_everything(0)
max_epochs = 3
limit_train_batches = 12
limit_val_batches = 7
lr, gamma = 1e-1, 2
monitor = "val_log"
per_val_train_batches = int(limit_train_batches * val_check_interval)
per_epoch_val_checks, leftover_train_batches = divmod(limit_train_batches, per_val_train_batches)
class CustomBoringModel(BoringModel):
def __init__(self):
super().__init__()
self.val_logs = torch.randn(per_epoch_val_checks * max_epochs, limit_val_batches)
self.val_loop_count = 0
self.scores = []
def validation_step(self, batch, batch_idx):
log_value = self.val_logs[self.val_loop_count, batch_idx]
self.log("val_log", log_value)
return super().validation_step(batch, batch_idx)
def validation_epoch_end(self, outputs):
self.val_loop_count += 1
super().validation_epoch_end(outputs)
self.scores.append(self.trainer.logged_metrics[monitor])
def configure_optimizers(self):
optimizer = optim.SGD(self.parameters(), lr=lr)
if reduce_lr_on_plateau:
lr_scheduler = {
"scheduler": optim.lr_scheduler.ReduceLROnPlateau(optimizer),
"monitor": monitor,
"strict": True,
}
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma)
return [optimizer], [lr_scheduler]
filename = "{" + f"{monitor}" + ":.4f}-{epoch}"
checkpoint = ModelCheckpoint(dirpath=tmpdir, filename=filename, monitor=monitor, save_top_k=-1)
model = CustomBoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint],
limit_train_batches=limit_train_batches,
limit_val_batches=limit_val_batches,
max_epochs=max_epochs,
val_check_interval=val_check_interval,
enable_progress_bar=False,
num_sanity_val_steps=0,
)
calls = mock_training_epoch_loop(trainer)
trainer.fit(model)
def _make_assertions(epoch, ix):
global_ix = ix + per_epoch_val_checks * epoch
# checkpoint saved at the end of training epoch will have updated lr_scheduler states
epoch_end_checkpoint = epoch_aligned and ix == (per_epoch_val_checks - 1)
score = model.scores[global_ix]
expected_score = getattr(model, f"{monitor}s")[global_ix].mean().item()
expected_filename = f"{monitor}={score:.4f}-epoch={epoch}.ckpt"
assert math.isclose(score, expected_score, rel_tol=1e-4)
chk = pl_load(os.path.join(checkpoint.dirpath, expected_filename))
assert chk["epoch"] == epoch + 1
expected_global_step = per_val_train_batches * (global_ix + 1) + (leftover_train_batches * epoch)
assert chk["global_step"] == expected_global_step
mc_specific_data = chk["callbacks"][
f"ModelCheckpoint{{'monitor': '{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': False}"
]
assert mc_specific_data["dirpath"] == checkpoint.dirpath
assert mc_specific_data["monitor"] == monitor
assert mc_specific_data["current_score"] == score
if not reduce_lr_on_plateau:
actual_step_count = chk["lr_schedulers"][0]["_step_count"]
actual_lr = chk["lr_schedulers"][0]["_last_lr"][0]
assert actual_step_count == epoch + 1 + epoch_end_checkpoint
assert actual_lr == lr * gamma ** (epoch + epoch_end_checkpoint)
return score
ckpt_files = list(Path(tmpdir).glob("*.ckpt"))
assert len(ckpt_files) == len(model.scores) == per_epoch_val_checks * max_epochs
for epoch in range(max_epochs):
for i in range(per_epoch_val_checks):
score = _make_assertions(epoch, i)
if reduce_lr_on_plateau:
assert calls[epoch] == {monitor: score}
@pytest.mark.parametrize("save_top_k", [-1, 0, 1, 2])
def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k: int):
"""Test that dirpath=None in checkpoint callback is valid and that ckpt_path is set correctly."""
tutils.reset_seed()
model = LogInTwoMethods()
checkpoint = ModelCheckpoint(monitor="early_stop_on", dirpath=None, filename="{epoch}", save_top_k=save_top_k)
max_epochs = 2
trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs)
trainer.fit(model)
assert checkpoint.dirpath == tmpdir / trainer.logger.name / "version_0" / "checkpoints"
if save_top_k == -1:
ckpt_files = os.listdir(checkpoint.dirpath)
expected_ckpt_files = [f"epoch={i}.ckpt" for i in range(max_epochs)]
assert len(ckpt_files) == len(expected_ckpt_files) == max_epochs
assert set(ckpt_files) == set(expected_ckpt_files)
@pytest.mark.parametrize("save_top_k", [-1, 0, 1, 2])
def test_model_checkpoint_to_yaml(tmpdir, save_top_k: int):
"""Test that None in checkpoint callback is valid and that chkp_path is set correctly."""
tutils.reset_seed()
model = LogInTwoMethods()
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_top_k=save_top_k)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=2)
trainer.fit(model)
path_yaml = os.path.join(tmpdir, "best_k_models.yaml")
checkpoint.to_yaml(path_yaml)
    with open(path_yaml) as fp:
        d = yaml.full_load(fp)
best_k = dict(checkpoint.best_k_models.items())
assert d == best_k
@pytest.mark.parametrize("logger_version,expected", [(None, "version_0"), (1, "version_1"), ("awesome", "awesome")])
def test_model_checkpoint_path(tmpdir, logger_version: Union[None, int, str], expected: str):
"""Test that "version_" prefix is only added when logger's version is an integer."""
tutils.reset_seed()
model = LogInTwoMethods()
logger = TensorBoardLogger(str(tmpdir), version=logger_version)
trainer = Trainer(default_root_dir=tmpdir, overfit_batches=0.2, max_epochs=2, logger=logger)
trainer.fit(model)
ckpt_version = Path(trainer.checkpoint_callback.dirpath).parent.name
assert ckpt_version == expected
def test_pickling(tmpdir):
ckpt = ModelCheckpoint(dirpath=tmpdir)
ckpt_pickled = pickle.dumps(ckpt)
ckpt_loaded = pickle.loads(ckpt_pickled)
assert vars(ckpt) == vars(ckpt_loaded)
ckpt_pickled = cloudpickle.dumps(ckpt)
ckpt_loaded = cloudpickle.loads(ckpt_pickled)
assert vars(ckpt) == vars(ckpt_loaded)
class ModelCheckpointTestInvocations(ModelCheckpoint):
# this class has to be defined outside the test function, otherwise we get pickle error
# due to the way ddp process is launched
def __init__(self, expected_count, *args, **kwargs):
super().__init__(*args, **kwargs)
self.expected_count = expected_count
self.on_save_checkpoint_count = 0
def on_train_start(self, trainer, pl_module):
torch.save = Mock(wraps=torch.save)
def on_save_checkpoint(self, trainer, pl_module, checkpoint):
# only rank 0 will call ``torch.save``
super().on_save_checkpoint(trainer, pl_module, checkpoint)
self.on_save_checkpoint_count += 1
def on_train_end(self, trainer, pl_module):
super().on_train_end(trainer, pl_module)
assert self.best_model_path
assert self.best_model_score
assert self.on_save_checkpoint_count == self.expected_count
if trainer.is_global_zero:
assert torch.save.call_count == self.expected_count
else:
assert torch.save.call_count == 0
@RunIf(skip_windows=True, skip_49370=True)
def test_model_checkpoint_no_extraneous_invocations(tmpdir):
"""Test to ensure that the model callback saves the checkpoints only once in distributed mode."""
model = LogInTwoMethods()
num_epochs = 4
model_checkpoint = ModelCheckpointTestInvocations(monitor="early_stop_on", expected_count=num_epochs, save_top_k=-1)
trainer = Trainer(
strategy="ddp_spawn",
accelerator="cpu",
devices=2,
default_root_dir=tmpdir,
callbacks=[model_checkpoint],
max_epochs=num_epochs,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
def test_model_checkpoint_format_checkpoint_name(tmpdir):
# empty filename:
ckpt_name = ModelCheckpoint._format_checkpoint_name("", {"epoch": 3, "step": 2})
assert ckpt_name == "epoch=3-step=2"
ckpt_name = ModelCheckpoint._format_checkpoint_name(None, {"epoch": 3, "step": 2}, prefix="test")
assert ckpt_name == "test-epoch=3-step=2"
# no groups case:
ckpt_name = ModelCheckpoint._format_checkpoint_name("ckpt", {}, prefix="test")
assert ckpt_name == "test-ckpt"
# no prefix
ckpt_name = ModelCheckpoint._format_checkpoint_name("{epoch:03d}-{acc}", {"epoch": 3, "acc": 0.03})
assert ckpt_name == "epoch=003-acc=0.03"
# prefix
char_org = ModelCheckpoint.CHECKPOINT_JOIN_CHAR
ModelCheckpoint.CHECKPOINT_JOIN_CHAR = "@"
ckpt_name = ModelCheckpoint._format_checkpoint_name("{epoch},{acc:.5f}", {"epoch": 3, "acc": 0.03}, prefix="test")
assert ckpt_name == "test@epoch=3,acc=0.03000"
ModelCheckpoint.CHECKPOINT_JOIN_CHAR = char_org
# no dirpath set
ckpt_name = ModelCheckpoint(monitor="early_stop_on", dirpath=None).format_checkpoint_name({"epoch": 3, "step": 2})
assert ckpt_name == "epoch=3-step=2.ckpt"
ckpt_name = ModelCheckpoint(monitor="early_stop_on", dirpath="").format_checkpoint_name({"epoch": 5, "step": 4})
assert ckpt_name == "epoch=5-step=4.ckpt"
# CWD
ckpt_name = ModelCheckpoint(monitor="early_stop_on", dirpath=".").format_checkpoint_name({"epoch": 3, "step": 4})
assert ckpt_name == str(Path(".").resolve() / "epoch=3-step=4.ckpt")
# with version
ckpt = ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, filename="name")
ckpt_name = ckpt.format_checkpoint_name({}, ver=3)
assert ckpt_name == tmpdir / "name-v3.ckpt"
# using slashes
ckpt = ModelCheckpoint(monitor="early_stop_on", dirpath=None, filename="{epoch}_{val/loss:.5f}")
ckpt_name = ckpt.format_checkpoint_name({"epoch": 4, "val/loss": 0.03})
assert ckpt_name == "epoch=4_val/loss=0.03000.ckpt"
# auto_insert_metric_name=False
ckpt_name = ModelCheckpoint._format_checkpoint_name(
"epoch={epoch:03d}-val_acc={val/acc}", {"epoch": 3, "val/acc": 0.03}, auto_insert_metric_name=False
)
assert ckpt_name == "epoch=003-val_acc=0.03"
class ModelCheckpointExtensionTest(ModelCheckpoint):
FILE_EXTENSION = ".tpkc"
def test_model_checkpoint_file_extension(tmpdir):
"""Test ModelCheckpoint with different file extension."""
model = LogInTwoMethods()
model_checkpoint = ModelCheckpointExtensionTest(
monitor="early_stop_on", dirpath=tmpdir, save_top_k=1, save_last=True
)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[model_checkpoint], max_steps=1, logger=False)
trainer.fit(model)
expected = ["epoch=0-step=0.tpkc", "last.tpkc"]
assert set(expected) == set(os.listdir(tmpdir))
def test_model_checkpoint_save_last(tmpdir):
"""Tests that save_last produces only one last checkpoint."""
seed_everything()
model = LogInTwoMethods()
epochs = 3
ModelCheckpoint.CHECKPOINT_NAME_LAST = "last-{epoch}"
model_checkpoint = ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, save_top_k=-1, save_last=True)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[model_checkpoint],
max_epochs=epochs,
limit_train_batches=10,
limit_val_batches=10,
logger=False,
)
trainer.fit(model)
last_filename = model_checkpoint._format_checkpoint_name(
ModelCheckpoint.CHECKPOINT_NAME_LAST, {"epoch": trainer.current_epoch}
)
last_filename = last_filename + ".ckpt"
assert str(tmpdir / last_filename) == model_checkpoint.last_model_path
assert set(os.listdir(tmpdir)) == set(
[f"epoch={i}-step={j}.ckpt" for i, j in zip(range(epochs), [9, 19, 29])] + [last_filename]
)
ModelCheckpoint.CHECKPOINT_NAME_LAST = "last"
def test_invalid_top_k(tmpdir):
"""Make sure that a MisconfigurationException is raised for a negative save_top_k argument."""
with pytest.raises(MisconfigurationException, match=r".*Must be >= -1"):
ModelCheckpoint(dirpath=tmpdir, save_top_k=-3)
def test_none_monitor_top_k(tmpdir):
"""Test that a warning appears for positive top_k with monitor=None."""
with pytest.raises(
MisconfigurationException, match=r"ModelCheckpoint\(save_top_k=3, monitor=None\) is not a valid*"
):
ModelCheckpoint(dirpath=tmpdir, save_top_k=3)
# These should not fail
ModelCheckpoint(dirpath=tmpdir, save_top_k=-1)
ModelCheckpoint(dirpath=tmpdir, save_top_k=0)
ModelCheckpoint(dirpath=tmpdir, save_top_k=1)
def test_invalid_every_n_epochs(tmpdir):
"""Make sure that a MisconfigurationException is raised for a negative every_n_epochs argument."""
with pytest.raises(MisconfigurationException, match=r".*Must be >= 0"):
ModelCheckpoint(dirpath=tmpdir, every_n_epochs=-3)
# These should not fail
ModelCheckpoint(dirpath=tmpdir, every_n_epochs=0)
ModelCheckpoint(dirpath=tmpdir, every_n_epochs=1)
ModelCheckpoint(dirpath=tmpdir, every_n_epochs=2)
def test_invalid_every_n_train_steps(tmpdir):
"""Make sure that a MisconfigurationException is raised for a negative every_n_epochs argument."""
with pytest.raises(MisconfigurationException, match=r".*Must be >= 0"):
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=-3)
# These should not fail
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0)
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=1)
    ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=2)
def test_invalid_trigger_combination(tmpdir):
"""Test that a MisconfigurationException is raised if more than one of every_n_epochs, every_n_train_steps, and
train_time_interval are enabled together."""
with pytest.raises(MisconfigurationException, match=r".*Combination of parameters every_n_train_steps"):
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=1, every_n_epochs=2)
with pytest.raises(MisconfigurationException, match=r".*Combination of parameters every_n_train_steps"):
ModelCheckpoint(train_time_interval=timedelta(minutes=1), every_n_epochs=2)
with pytest.raises(MisconfigurationException, match=r".*Combination of parameters every_n_train_steps"):
ModelCheckpoint(train_time_interval=timedelta(minutes=1), every_n_train_steps=2)
# These should not fail
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0, every_n_epochs=3)
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=4, every_n_epochs=0)
ModelCheckpoint(dirpath=tmpdir, every_n_train_steps=0, every_n_epochs=0, train_time_interval=timedelta(minutes=1))
def test_none_every_n_train_steps_val_epochs(tmpdir):
checkpoint_callback = ModelCheckpoint(dirpath=tmpdir)
assert checkpoint_callback.every_n_epochs == 1
assert checkpoint_callback._every_n_train_steps == 0
def test_model_checkpoint_save_last_none_monitor(tmpdir, caplog):
"""Test that it is possible to save all checkpoints when monitor=None."""
seed_everything()
model = LogInTwoMethods()
epochs = 2
checkpoint_callback = ModelCheckpoint(monitor=None, dirpath=tmpdir, save_top_k=-1, save_last=True)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint_callback],
limit_train_batches=10,
limit_val_batches=10,
max_epochs=epochs,
logger=False,
)
with caplog.at_level(INFO):
trainer.fit(model)
assert "will duplicate the last checkpoint saved" in caplog.text
# these should not be set if monitor is None
assert checkpoint_callback.monitor is None
assert checkpoint_callback.best_model_path == tmpdir / "epoch=1-step=19.ckpt"
assert checkpoint_callback.last_model_path == tmpdir / "last.ckpt"
assert checkpoint_callback.best_model_score is None
assert checkpoint_callback.best_k_models == {}
assert checkpoint_callback.kth_best_model_path == ""
# check that the correct ckpts were created
expected = [f"epoch={i}-step={j}.ckpt" for i, j in zip(range(epochs), [9, 19])]
expected.append("last.ckpt")
assert set(os.listdir(tmpdir)) == set(expected)
@pytest.mark.parametrize("every_n_epochs", list(range(4)))
def test_model_checkpoint_every_n_epochs(tmpdir, every_n_epochs):
model = LogInTwoMethods()
epochs = 5
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir, filename="{epoch}", save_top_k=-1, every_n_epochs=every_n_epochs
)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint_callback],
max_epochs=epochs,
limit_train_batches=1,
limit_val_batches=1,
logger=False,
)
trainer.fit(model)
# check that the correct ckpts were created
expected = [f"epoch={e}.ckpt" for e in range(epochs) if not (e + 1) % every_n_epochs] if every_n_epochs > 0 else []
assert set(os.listdir(tmpdir)) == set(expected)
def test_ckpt_every_n_train_steps(tmpdir):
"""Tests that the checkpoints are saved every n training steps."""
model = LogInTwoMethods()
every_n_train_steps = 16
max_epochs = 2
epoch_length = 64
checkpoint_callback = ModelCheckpoint(
filename="{step}",
every_n_epochs=0,
every_n_train_steps=every_n_train_steps,
dirpath=tmpdir,
save_top_k=-1,
save_last=False,
)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
enable_progress_bar=False,
callbacks=[checkpoint_callback],
logger=False,
)
trainer.fit(model)
expected = [
f"step={i}.ckpt" for i in range(every_n_train_steps - 1, max_epochs * epoch_length, every_n_train_steps)
]
assert set(os.listdir(tmpdir)) == set(expected)
@mock.patch("pytorch_lightning.callbacks.model_checkpoint.time")
def test_model_checkpoint_train_time_interval(mock_datetime, tmpdir) -> None:
"""Tests that the checkpoints are saved at the specified time interval."""
seconds_per_batch = 7
start_time = time.monotonic()
batches_per_epoch = 64
num_epochs = 2
max_batches = batches_per_epoch * num_epochs + 1
mock_datetime.monotonic.side_effect = [start_time + seconds_per_batch * i for i in range(max_batches)]
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
min_epochs=num_epochs,
max_epochs=num_epochs,
enable_progress_bar=False,
callbacks=[
ModelCheckpoint(
filename="{epoch}-{step}",
dirpath=tmpdir,
train_time_interval=timedelta(minutes=1),
save_top_k=-1,
save_last=False,
)
],
logger=False,
)
trainer.fit(model)
    # Each batch takes 7 sec and we checkpoint every minute. There are 64 batches per
    # epoch, so the total (mocked) run time is 7 * 64 * 2 = 896 sec, i.e. just under 15 minutes,
    # so we should end up with 14 checkpoints.
assert len(os.listdir(tmpdir)) == 14
def test_model_checkpoint_topk_zero(tmpdir):
"""Test that no checkpoints are saved when save_top_k=0."""
model = LogInTwoMethods()
checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, save_top_k=0, save_last=True)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint_callback], max_epochs=2, logger=False)
trainer.fit(model)
# these should not be set if monitor is None
assert checkpoint_callback.monitor is None
assert checkpoint_callback.best_model_path == ""
assert checkpoint_callback.best_model_score is None
assert checkpoint_callback.best_k_models == {}
assert checkpoint_callback.kth_best_model_path == ""
# check that only the last ckpt was created
assert os.listdir(tmpdir) == ["last.ckpt"]
assert checkpoint_callback.last_model_path == tmpdir / "last.ckpt"
def test_model_checkpoint_topk_all(tmpdir):
"""Test that save_top_k=-1 tracks the best models when monitor key is provided."""
seed_everything(1000)
epochs = 3
model = BoringModel()
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir, filename="{epoch}", monitor="epoch", mode="max", save_top_k=-1
)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[checkpoint_callback],
max_epochs=epochs,
logger=False,
val_check_interval=1.0,
)
trainer.fit(model)
assert checkpoint_callback.monitor == "epoch"
assert checkpoint_callback.best_model_path == tmpdir / "epoch=2.ckpt"
assert checkpoint_callback.best_model_score == epochs - 1
assert len(os.listdir(tmpdir)) == len(checkpoint_callback.best_k_models) == epochs
assert set(checkpoint_callback.best_k_models.keys()) == {str(tmpdir / f"epoch={i}.ckpt") for i in range(epochs)}
assert checkpoint_callback.kth_best_model_path == tmpdir / "epoch=0.ckpt"
def test_ckpt_metric_names(tmpdir):
model = LogInTwoMethods()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
gradient_clip_val=1.0,
overfit_batches=0.20,
enable_progress_bar=False,
limit_train_batches=0.01,
limit_val_batches=0.01,
callbacks=[ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, filename="{val_loss:.2f}")],
)
trainer.fit(model)
# make sure the checkpoint we saved has the metric in the name
ckpts = os.listdir(tmpdir)
ckpts = [x for x in ckpts if "val_loss" in x]
assert len(ckpts) == 1
val = re.sub("[^0-9.]", "", ckpts[0])
assert len(val) > 3
def test_default_checkpoint_behavior(tmpdir):
seed_everything(1234)
model = LogInTwoMethods()
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=3, enable_progress_bar=False, limit_train_batches=5, limit_val_batches=5
)
with patch.object(trainer, "save_checkpoint", wraps=trainer.save_checkpoint) as save_mock:
trainer.fit(model)
results = trainer.test()
assert len(results) == 1
save_dir = tmpdir / "lightning_logs" / "version_0" / "checkpoints"
save_weights_only = trainer.checkpoint_callback.save_weights_only
save_mock.assert_has_calls(
[
call(save_dir / "epoch=0-step=4.ckpt", save_weights_only),
call(save_dir / "epoch=1-step=9.ckpt", save_weights_only),
call(save_dir / "epoch=2-step=14.ckpt", save_weights_only),
]
)
ckpts = os.listdir(save_dir)
assert len(ckpts) == 1
assert ckpts[0] == "epoch=2-step=14.ckpt"
@pytest.mark.parametrize("max_epochs", [1, 2])
@pytest.mark.parametrize("should_validate", [True, False])
@pytest.mark.parametrize("save_last", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
def test_model_checkpoint_save_last_warning(
tmpdir, caplog, max_epochs: int, should_validate: bool, save_last: bool, verbose: bool
):
"""Tests 'Saving latest checkpoint...' log."""
model = LogInTwoMethods()
if not should_validate:
model.validation_step = None
ckpt = ModelCheckpoint(monitor="early_stop_on", dirpath=tmpdir, save_top_k=0, save_last=save_last, verbose=verbose)
trainer = Trainer(
default_root_dir=tmpdir, callbacks=[ckpt], max_epochs=max_epochs, limit_train_batches=1, limit_val_batches=1
)
with caplog.at_level(logging.INFO):
trainer.fit(model)
assert caplog.messages.count("Saving latest checkpoint...") == (verbose and save_last)
def test_model_checkpoint_save_last_checkpoint_contents(tmpdir):
"""Tests that the save_last checkpoint contains the latest information."""
seed_everything(100)
model = LogInTwoMethods()
num_epochs = 3
model_checkpoint = ModelCheckpoint(
monitor="early_stop_on", dirpath=tmpdir, filename="{epoch}", save_top_k=num_epochs, save_last=True
)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[model_checkpoint],
max_epochs=num_epochs,
limit_train_batches=2,
limit_val_batches=2,
)
trainer.fit(model)
path_last_epoch = str(tmpdir / f"epoch={num_epochs - 1}.ckpt")
path_last = str(tmpdir / "last.ckpt")
assert path_last == model_checkpoint.last_model_path
assert os.path.isfile(path_last_epoch)
ckpt_last_epoch = torch.load(path_last_epoch)
ckpt_last = torch.load(path_last)
assert ckpt_last_epoch["epoch"] == ckpt_last["epoch"]
assert ckpt_last_epoch["global_step"] == ckpt_last["global_step"]
ckpt_id = (
"ModelCheckpoint{'monitor': 'early_stop_on', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': True}"
)
assert ckpt_last["callbacks"][ckpt_id] == ckpt_last_epoch["callbacks"][ckpt_id]
# it is easier to load the model objects than to iterate over the raw dict of tensors
model_last_epoch = LogInTwoMethods.load_from_checkpoint(path_last_epoch)
model_last = LogInTwoMethods.load_from_checkpoint(model_checkpoint.last_model_path)
for w0, w1 in zip(model_last_epoch.parameters(), model_last.parameters()):
assert w0.eq(w1).all()
@pytest.mark.parametrize("mode", ["min", "max"])
def test_checkpointing_with_nan_as_first(tmpdir, mode):
monitor = [float("nan")]
monitor += [5, 7, 8] if mode == "max" else [8, 7, 5]
class CurrentModel(LogInTwoMethods):
def validation_epoch_end(self, outputs):
val_loss = monitor[self.current_epoch]
self.log("abc", val_loss)
model = CurrentModel()
callback = ModelCheckpoint(monitor="abc", mode=mode, save_top_k=1, dirpath=tmpdir)
trainer = Trainer(
callbacks=[callback],
default_root_dir=tmpdir,
val_check_interval=1.0,
max_epochs=len(monitor),
)
trainer.save_checkpoint = MagicMock()
trainer.fit(model)
# check that last one is also the best one
assert trainer.save_checkpoint.call_count == len(monitor)
assert mode == "min" and callback.best_model_score == 5 or mode == "max" and callback.best_model_score == 8
def test_checkpoint_repeated_strategy(tmpdir):
"""This test validates checkpoint can be called several times without increasing internally its global step if
nothing run."""
checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=tmpdir, filename="{epoch:02d}")
class ExtendedBoringModel(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("val_loss", loss)
model = ExtendedBoringModel()
model.validation_epoch_end = None
trainer_kwargs = {
"max_epochs": 1,
"limit_train_batches": 2,
"limit_val_batches": 2,
"limit_test_batches": 2,
"enable_progress_bar": False,
"enable_model_summary": False,
}
trainer = Trainer(**trainer_kwargs, callbacks=[checkpoint_callback])
trainer.fit(model)
assert os.listdir(tmpdir) == ["epoch=00.ckpt"]
for idx in range(4):
# load from checkpoint
trainer = pl.Trainer(**trainer_kwargs, default_root_dir=tmpdir)
trainer.fit(model, ckpt_path=checkpoint_callback.best_model_path)
trainer.test(ckpt_path=checkpoint_callback.best_model_path, verbose=False)
assert set(os.listdir(tmpdir)) == {"epoch=00.ckpt", "lightning_logs"}
assert set(os.listdir(tmpdir / "lightning_logs")) == {f"version_{i}" for i in range(4)}
def test_checkpoint_repeated_strategy_extended(tmpdir):
"""This test validates checkpoint can be called several times without increasing internally its global step if
nothing run."""
class ExtendedBoringModel(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"val_loss": loss}
def validation_epoch_end(self, *_):
...
def assert_trainer_init(trainer):
assert trainer.global_step == 0
assert trainer.current_epoch == 0
def get_last_checkpoint(ckpt_dir):
last = ckpt_dir.listdir(sort=True)[-1]
return str(last)
def assert_checkpoint_content(ckpt_dir):
chk = pl_load(get_last_checkpoint(ckpt_dir))
assert chk["epoch"] == epochs
assert chk["global_step"] == 4
def assert_checkpoint_log_dir(idx):
lightning_logs = tmpdir / "lightning_logs"
actual = [d.basename for d in lightning_logs.listdir(sort=True)]
assert actual == [f"version_{i}" for i in range(idx + 1)]
actual = [d.basename for d in ckpt_dir.listdir()]
assert len(actual) == epochs, actual
ckpt_dir = tmpdir / "checkpoints"
checkpoint_cb = ModelCheckpoint(dirpath=ckpt_dir, save_top_k=-1)
epochs = 2
limit_train_batches = 2
trainer_config = dict(
default_root_dir=tmpdir,
max_epochs=epochs,
limit_train_batches=limit_train_batches,
limit_val_batches=3,
limit_test_batches=4,
callbacks=[checkpoint_cb],
)
trainer = pl.Trainer(**trainer_config)
assert_trainer_init(trainer)
model = ExtendedBoringModel()
trainer.fit(model)
assert trainer.global_step == epochs * limit_train_batches
assert trainer.current_epoch == epochs - 1
assert_checkpoint_log_dir(0)
assert_checkpoint_content(ckpt_dir)
trainer.validate(model)
assert trainer.current_epoch == epochs - 1
trainer.test(model)
assert trainer.current_epoch == epochs - 1
for idx in range(1, 5):
chk = get_last_checkpoint(ckpt_dir)
assert_checkpoint_content(ckpt_dir)
# load from checkpoint
trainer_config["callbacks"] = [ModelCheckpoint(dirpath=ckpt_dir, save_top_k=-1)]
trainer = pl.Trainer(**trainer_config)
assert_trainer_init(trainer)
model = ExtendedBoringModel()
trainer.test(model)
assert trainer.global_step == 0
assert trainer.current_epoch == 0
trainer.fit(model, ckpt_path=chk)
assert trainer.global_step == epochs * limit_train_batches
assert trainer.current_epoch == epochs
trainer.validate(model)
assert trainer.global_step == epochs * limit_train_batches
assert trainer.current_epoch == epochs
trainer.fit(model)
assert trainer.global_step == epochs * limit_train_batches
assert trainer.current_epoch == epochs
assert_checkpoint_log_dir(idx)
def test_configure_model_checkpoint(tmpdir):
"""Test all valid and invalid ways a checkpoint callback can be passed to the Trainer."""
kwargs = dict(default_root_dir=tmpdir)
callback1 = ModelCheckpoint()
callback2 = ModelCheckpoint()
# no callbacks
trainer = Trainer(enable_checkpointing=False, callbacks=[], **kwargs)
assert not any(isinstance(c, ModelCheckpoint) for c in trainer.callbacks)
assert trainer.checkpoint_callback is None
# default configuration
trainer = Trainer(callbacks=[], **kwargs)
assert sum(1 for c in trainer.callbacks if isinstance(c, ModelCheckpoint)) == 1
assert isinstance(trainer.checkpoint_callback, ModelCheckpoint)
# custom callback passed to callbacks list, enable_checkpointing=True is ignored
trainer = Trainer(enable_checkpointing=True, callbacks=[callback1], **kwargs)
assert [c for c in trainer.callbacks if isinstance(c, ModelCheckpoint)] == [callback1]
assert trainer.checkpoint_callback == callback1
# multiple checkpoint callbacks
trainer = Trainer(callbacks=[callback1, callback2], **kwargs)
assert trainer.checkpoint_callback == callback1
assert trainer.checkpoint_callbacks == [callback1, callback2]
with pytest.raises(MisconfigurationException, match="`enable_checkpointing=False` but found `ModelCheckpoint`"):
Trainer(enable_checkpointing=False, callbacks=[callback1], **kwargs)
def test_val_check_interval_checkpoint_files(tmpdir):
"""Test correct checkpoint naming when validating/checkpointing multiple times per epoch."""
model = LogInTwoMethods()
model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor="val_acc", mode="max")
trainer = Trainer(
default_root_dir=tmpdir,
val_check_interval=0.2,
max_epochs=1,
limit_train_batches=10,
callbacks=[model_checkpoint],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
files = {p.basename for p in tmpdir.listdir()}
assert files == {f"epoch=0-step={s}.ckpt" for s in [1, 3, 5, 7, 9]}
def test_current_score(tmpdir):
"""Check that the current_score value is correct and was saved."""
class TestModel(BoringModel):
def training_step(self, *args):
self.log("foo", (self.current_epoch + 1) / 10)
return super().training_step(*args)
model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=3, monitor="foo", mode="min")
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
limit_train_batches=1,
limit_val_batches=1,
callbacks=[model_checkpoint],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(TestModel())
assert model_checkpoint.current_score == 0.3
ckpts = [torch.load(str(ckpt)) for ckpt in tmpdir.listdir()]
ckpts = [
ckpt["callbacks"][
"ModelCheckpoint{'monitor': 'foo', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
" 'train_time_interval': None, 'save_on_train_epoch_end': True}"
]
for ckpt in ckpts
]
assert sorted(ckpt["current_score"] for ckpt in ckpts) == [0.1, 0.2, 0.3]
@pytest.mark.parametrize("mode", ["min", "max"])
def test_current_score_when_nan(tmpdir, mode: str):
"""Check that ModelCheckpoint handles NaN values correctly."""
class TestModel(BoringModel):
def training_step(self, *args):
self.log("foo", float("nan"))
return super().training_step(*args)
model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=1, monitor="foo", mode=mode)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=1,
limit_val_batches=1,
callbacks=[model_checkpoint],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(TestModel())
expected = float("inf" if mode == "min" else "-inf")
assert model_checkpoint.best_model_score == expected
assert model_checkpoint.current_score == expected
@pytest.mark.parametrize("use_omegaconf", [False, pytest.param(True, marks=RunIf(omegaconf=True))])
def test_hparams_type(tmpdir, use_omegaconf):
class TestModel(BoringModel):
def __init__(self, hparams):
super().__init__()
self.save_hyperparameters(hparams)
model_checkpoint = ModelCheckpoint(dirpath=tmpdir, save_top_k=1, monitor="foo")
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
callbacks=[model_checkpoint],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
hp = {"test_hp_0": 1, "test_hp_1": 2}
hp = OmegaConf.create(hp) if use_omegaconf else Namespace(**hp)
model = TestModel(hp)
trainer.fit(model)
ckpt = trainer.checkpoint_connector.dump_checkpoint()
if use_omegaconf:
assert isinstance(ckpt[model.CHECKPOINT_HYPER_PARAMS_KEY], Container)
else:
# make sure it's not AttributeDict
assert type(ckpt[model.CHECKPOINT_HYPER_PARAMS_KEY]) is dict
def test_ckpt_version_after_rerun_new_trainer(tmpdir):
"""Check that previous checkpoints are renamed to have the correct version suffix when new trainer instances
are used."""
epochs = 2
for i in range(epochs):
mc = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor="epoch", filename="{epoch}")
trainer = Trainer(
max_epochs=epochs,
limit_train_batches=1,
limit_val_batches=1,
default_root_dir=tmpdir,
callbacks=[mc],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(BoringModel())
# check best_k_models state
expected = {"epoch=0-v1.ckpt", "epoch=1-v1.ckpt"} if i else {"epoch=0.ckpt", "epoch=1.ckpt"}
assert {Path(f).name for f in mc.best_k_models} == expected
# check created ckpts
actual = {f.basename for f in tmpdir.listdir()}
assert actual == {"epoch=0.ckpt", "epoch=1.ckpt", "epoch=0-v1.ckpt", "epoch=1-v1.ckpt"}
def test_ckpt_version_after_rerun_same_trainer(tmpdir):
"""Check that previous checkpoints are renamed to have the correct version suffix when the same trainer
instance is used."""
mc = ModelCheckpoint(dirpath=tmpdir, save_top_k=-1, monitor="epoch", filename="test")
mc.STARTING_VERSION = 9
trainer = Trainer(
max_epochs=2,
limit_train_batches=1,
limit_val_batches=1,
default_root_dir=tmpdir,
callbacks=[mc],
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(BoringModel())
trainer.fit_loop.max_epochs = 4
trainer.fit(BoringModel())
ckpt_range = range(mc.STARTING_VERSION, trainer.max_epochs + mc.STARTING_VERSION)
expected = {"test.ckpt", *(f"test-v{i}.ckpt" for i in ckpt_range)}
# check best_k_models state
assert {Path(f).name for f in mc.best_k_models} == expected
# check created ckpts
assert set(os.listdir(tmpdir)) == expected
def test_model_checkpoint_mode_options():
with pytest.raises(MisconfigurationException, match="`mode` can be .* but got unknown_option"):
ModelCheckpoint(mode="unknown_option")
def test_check_val_every_n_epochs_top_k_integration(tmpdir):
model = BoringModel()
mc = ModelCheckpoint(dirpath=tmpdir, monitor="epoch", save_top_k=-1, filename="{epoch}")
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
num_sanity_val_steps=0,
max_epochs=5,
check_val_every_n_epoch=2,
callbacks=mc,
enable_model_summary=False,
logger=False,
)
trainer.fit(model)
assert set(os.listdir(tmpdir)) == {"epoch=1.ckpt", "epoch=3.ckpt"}
def test_model_checkpoint_saveload_ckpt(tmpdir):
ckpt = {
"monitor": "random_value",
"best_model_path": "epoch=10-step=1436.ckpt",
"best_model_score": torch.tensor(2.246),
"current_score": torch.tensor(1.5),
"dirpath": tmpdir,
"best_k_models": {"epoch=10-step=1436.ckpt": torch.tensor(2.246)},
"kth_best_model_path": "epoch=10-step=1436.ckpt",
"kth_value": torch.tensor(2.246),
"last_model_path": "last2245.ckpt",
}
# test on_save_checkpoint
cb_write = ModelCheckpoint(dirpath=tmpdir, monitor="random_value", save_top_k=-1, save_last=True)
for key, val in ckpt.items():
setattr(cb_write, key, val)
written_ckpt = cb_write.on_save_checkpoint("", "", "")
for state in ckpt:
assert ckpt[state] == written_ckpt[state]
# test on_load_checkpoint
# Note: "current_score", "dirpath" and "monitor" are currently not restored by on_load_checkpoint.
# We therefore set "dirpath" and "monitor" to something different than for ckpt/cb_write so we can assert them.
# "current_score" is left as initialized, i.e. None, and can therefore also be asserted
cb_restore = ModelCheckpoint(dirpath=tmpdir + "restore", monitor=None, save_top_k=-1, save_last=True)
cb_restore.on_load_checkpoint("", "", written_ckpt)
for key, val in written_ckpt.items():
if key not in ("current_score", "dirpath", "monitor"):
assert getattr(cb_restore, key) == val
else:
assert getattr(cb_restore, key) != val
|
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, List, Optional, Union
from lxml import etree
from oadr2.schemas import EventSchema, SignalSchema
def format_duration(duration: Union[timedelta, None]) -> str:
if not duration:
return "PT0M"
hours, remainder = divmod(duration.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return f"P0Y0M0DT{hours}H{minutes}M{seconds}S"
def format_datetime(time: datetime) -> str:
return f"{time.isoformat()}Z"
class AdrEventStatus(Enum):
PENDING = "near"
ACTIVE = "active"
CANCELLED = "cancelled"
COMPLETED = "completed"
@dataclass
class AdrInterval:
index: int
level: float
duration: timedelta
def to_xml(self):
return f"""
<ei:interval>
<ical:duration>
<ical:duration>{format_duration(self.duration)}</ical:duration>
</ical:duration>
<ical:uid>
<ical:text>{self.index}</ical:text>
</ical:uid>
<ei:signalPayload>
<ei:payloadFloat>
<ei:value>{self.level}</ei:value>
</ei:payloadFloat>
</ei:signalPayload>
</ei:interval>"""
class AdrEvent:
def __init__(
self,
id: Union[str, None],
start: datetime,
signals: List[Dict[str, Union[float, int, timedelta]]],
status: AdrEventStatus,
mod_number: Optional[int] = 0,
end: Optional[datetime] = None,
start_before: Optional[timedelta] = None,
start_after: Optional[timedelta] = None,
            original_start: Optional[datetime] = None,
            cancellation_offset: Optional[timedelta] = None,
group_ids: Optional[List[str]] = None,
resource_ids: Optional[List[str]] = None,
party_ids: Optional[List[str]] = None,
ven_ids: Optional[List[str]] = ['VEN_ID'],
vtn_id: Optional[str] = "TH_VTN",
market_context: Optional[str] = "http://market.context",
test_event: bool = False,
priority: int = 1,
response_required: bool = True,
signal_name: str = "simple"
):
self.id = id
self.start = start
self.original_start = original_start or start
self.cancellation_offset = cancellation_offset
self.raw_signals = [signal for signal in signals]
self.signals = signals
self.intervals = [AdrInterval(**signal) for signal in self.signals]
self.duration = timedelta()
for signal in self.signals:
self.duration += signal["duration"]
self.end = end or self.start + self.duration
self.group_ids = group_ids
self.resource_ids = resource_ids
self.party_ids = party_ids
self.ven_ids = ven_ids
self.mod_number = mod_number
self.status = status
self.start_before = start_before
self.start_after = start_after
self.vtn_id = vtn_id
self.market_context = market_context
self.created_date = datetime(2020, 1, 1, 10, 10)
self.test_event = test_event
self.priority = priority
self.response_required = response_required
self.signal_name = signal_name
def to_obj(self):
_signals = [
dict(
index=s["index"],
level=s["level"],
duration=format_duration(s["duration"])
) for s in self.signals
]
return EventSchema(
id=self.id,
vtn_id=self.vtn_id,
mod_number=self.mod_number,
start=self.start,
original_start=self.original_start,
end=self.end,
signals=[SignalSchema(**signal) for signal in _signals],
status=self.status.value,
cancellation_offset=format_duration(self.cancellation_offset) if self.cancellation_offset else None,
ven_ids=self.ven_ids,
market_market_context=self.market_context,
group_ids=self.group_ids,
resource_ids=self.resource_ids,
party_ids=self.party_ids,
test_event=self.test_event,
priority=self.priority
)
def to_xml(self):
intervals_xml = "".join([interval.to_xml() for interval in self.intervals])
start_after = f"""<ical:tolerance>
<ical:tolerate>
<ical:startafter>{format_duration(self.start_after)}</ical:startafter>
</ical:tolerate>
</ical:tolerance>""" if self.start_after else ""
ven_xml = f"<ei:venID>{",".join(self.ven_ids)}</ei:venID>" if self.ven_ids else ""
group_xml = f"<ei:groupID>{",".join(self.group_ids)}</ei:groupID>" if self.group_ids else ""
resource_xml = f"<ei:resourceID>{",".join(self.resource_ids)}</ei:resourceID>" if self.resource_ids else ""
party_xml = f"<ei:partyID>{",".join(self.party_ids)}</ei:partyID>" if self.party_ids else ""
return f"""
<oadrEvent>
<ei:eiEvent>
<ei:eventDescriptor>
<ei:eventID>{self.id}</ei:eventID>
<ei:modificationNumber>{self.mod_number}</ei:modificationNumber>
<ei:priority>{self.priority}</ei:priority>
<ei:eiMarketContext>
<emix:marketContext>{self.market_context}</emix:marketContext>
</ei:eiMarketContext>
<ei:createdDateTime>{format_datetime(self.created_date)}</ei:createdDateTime>
<ei:eventStatus>{self.status.value}</ei:eventStatus>
<ei:testEvent>{self.test_event}</ei:testEvent>
<ei:vtnComment></ei:vtnComment>
</ei:eventDescriptor>
<ei:eiActivePeriod>
<ical:properties>
<ical:dtstart>
<ical:date-time>{format_datetime(self.start)}</ical:date-time>
</ical:dtstart>
<ical:duration>
<ical:duration>{format_duration(self.duration)}</ical:duration>
</ical:duration>
{start_after}
<ei:x-eiNotification>
<ical:duration>P0Y0M0DT0H0M0S</ical:duration>
</ei:x-eiNotification>
</ical:properties>
<ical:components xsi:nil="true"/>
</ei:eiActivePeriod>
<ei:eiEventSignals>
<ei:eiEventSignal>
<strm:intervals>
{intervals_xml}
</strm:intervals>
<ei:signalName>{self.signal_name}</ei:signalName>
<ei:signalType>level</ei:signalType>
<ei:signalID>SignalID</ei:signalID>
<ei:currentValue>
<ei:payloadFloat>
<ei:value>0.0</ei:value>
</ei:payloadFloat>
</ei:currentValue>
</ei:eiEventSignal>
</ei:eiEventSignals>
<ei:eiTarget>
{ven_xml}
{party_xml}
{resource_xml}
{group_xml}
</ei:eiTarget>
</ei:eiEvent>
<oadrResponseRequired>{"always" if self.response_required else "never"}</oadrResponseRequired>
</oadrEvent>
"""
def generate_payload(event_list, vtn_id="TH_VTN"):
evt_xml = "".join([event.to_xml() for event in event_list])
template = f"""
<oadrDistributeEvent
xmlns="http://openadr.org/oadr-2.0a/2012/07"
xmlns:ei="http://docs.oasis-open.org/ns/energyinterop/201110"
xmlns:emix="http://docs.oasis-open.org/ns/emix/2011/06"
xmlns:pyld="http://docs.oasis-open.org/ns/energyinterop/201110/payloads"
xmlns:strm="urn:ietf:params:xml:ns:icalendar-2.0:stream"
xmlns:ical="urn:ietf:params:xml:ns:icalendar-2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
>
<eiResponse>
<responseCode>200</responseCode>
<pyld:requestID/>
</eiResponse>
<pyld:requestID>OadrDisReq092520_152645_178</pyld:requestID>
<ei:vtnID>{vtn_id}</ei:vtnID>
{evt_xml}
</oadrDistributeEvent>
"""
# print(template)
return etree.fromstring(template)
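# Illustrative usage sketch (not executed anywhere in this module): build one event with a
# single 30-minute interval and render the oadrDistributeEvent payload as an lxml Element.
# event = AdrEvent(
#     id="Event1",
#     start=datetime(2020, 1, 1, 12, 0),
#     signals=[{"index": 0, "level": 1.0, "duration": timedelta(minutes=30)}],
#     status=AdrEventStatus.ACTIVE,
# )
# payload = generate_payload([event])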
| from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, List, Optional, Union
from lxml import etree
from oadr2.schemas import EventSchema, SignalSchema
def format_duration(duration: Union[timedelta, None]) -> str:
if not duration:
return "PT0M"
hours, remainder = divmod(duration.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return f"P0Y0M0DT{hours}H{minutes}M{seconds}S"
def format_datetime(time: datetime) -> str:
return f"{time.isoformat()}Z"
class AdrEventStatus(Enum):
PENDING = "near"
ACTIVE = "active"
CANCELLED = "cancelled"
COMPLETED = "completed"
@dataclass
class AdrInterval:
index: int
level: float
duration: timedelta
def to_xml(self):
return f"""
<ei:interval>
<ical:duration>
<ical:duration>{format_duration(self.duration)}</ical:duration>
</ical:duration>
<ical:uid>
<ical:text>{self.index}</ical:text>
</ical:uid>
<ei:signalPayload>
<ei:payloadFloat>
<ei:value>{self.level}</ei:value>
</ei:payloadFloat>
</ei:signalPayload>
</ei:interval>"""
class AdrEvent:
def __init__(
self,
id: Union[str, None],
start: datetime,
signals: List[Dict[str, Union[float, int, timedelta]]],
status: AdrEventStatus,
mod_number: Optional[int] = 0,
end: Optional[datetime] = None,
start_before: Optional[timedelta] = None,
start_after: Optional[timedelta] = None,
original_start: datetime = None,
cancellation_offset: timedelta = None,
group_ids: Optional[List[str]] = None,
resource_ids: Optional[List[str]] = None,
party_ids: Optional[List[str]] = None,
ven_ids: Optional[List[str]] = ['VEN_ID'],
vtn_id: Optional[str] = "TH_VTN",
market_context: Optional[str] = "http://market.context",
test_event: bool = False,
priority: int = 1,
response_required: bool = True,
signal_name: str = "simple"
):
self.id = id
self.start = start
self.original_start = original_start or start
self.cancellation_offset = cancellation_offset
self.raw_signals = [signal for signal in signals]
self.signals = signals
self.intervals = [AdrInterval(**signal) for signal in self.signals]
self.duration = timedelta()
for signal in self.signals:
self.duration += signal["duration"]
self.end = end or self.start + self.duration
self.group_ids = group_ids
self.resource_ids = resource_ids
self.party_ids = party_ids
self.ven_ids = ven_ids
self.mod_number = mod_number
self.status = status
self.start_before = start_before
self.start_after = start_after
self.vtn_id = vtn_id
self.market_context = market_context
self.created_date = datetime(2020, 1, 1, 10, 10)
self.test_event = test_event
self.priority = priority
self.response_required = response_required
self.signal_name = signal_name
def to_obj(self):
_signals = [
dict(
index=s["index"],
level=s["level"],
duration=format_duration(s["duration"])
) for s in self.signals
]
return EventSchema(
id=self.id,
vtn_id=self.vtn_id,
mod_number=self.mod_number,
start=self.start,
original_start=self.original_start,
end=self.end,
signals=[SignalSchema(**signal) for signal in _signals],
status=self.status.value,
cancellation_offset=format_duration(self.cancellation_offset) if self.cancellation_offset else None,
ven_ids=self.ven_ids,
market_market_context=self.market_context,
group_ids=self.group_ids,
resource_ids=self.resource_ids,
party_ids=self.party_ids,
test_event=self.test_event,
priority=self.priority
)
def to_xml(self):
intervals_xml = "".join([interval.to_xml() for interval in self.intervals])
start_after = f"""<ical:tolerance>
<ical:tolerate>
<ical:startafter>{format_duration(self.start_after)}</ical:startafter>
</ical:tolerate>
</ical:tolerance>""" if self.start_after else ""
ven_xml = f"<ei:venID>{','.join(self.ven_ids)}</ei:venID>" if self.ven_ids else ""
group_xml = f"<ei:groupID>{','.join(self.group_ids)}</ei:groupID>" if self.group_ids else ""
resource_xml = f"<ei:resourceID>{','.join(self.resource_ids)}</ei:resourceID>" if self.resource_ids else ""
party_xml = f"<ei:partyID>{','.join(self.party_ids)}</ei:partyID>" if self.party_ids else ""
return f"""
<oadrEvent>
<ei:eiEvent>
<ei:eventDescriptor>
<ei:eventID>{self.id}</ei:eventID>
<ei:modificationNumber>{self.mod_number}</ei:modificationNumber>
<ei:priority>{self.priority}</ei:priority>
<ei:eiMarketContext>
<emix:marketContext>{self.market_context}</emix:marketContext>
</ei:eiMarketContext>
<ei:createdDateTime>{format_datetime(self.created_date)}</ei:createdDateTime>
<ei:eventStatus>{self.status.value}</ei:eventStatus>
<ei:testEvent>{self.test_event}</ei:testEvent>
<ei:vtnComment></ei:vtnComment>
</ei:eventDescriptor>
<ei:eiActivePeriod>
<ical:properties>
<ical:dtstart>
<ical:date-time>{format_datetime(self.start)}</ical:date-time>
</ical:dtstart>
<ical:duration>
<ical:duration>{format_duration(self.duration)}</ical:duration>
</ical:duration>
{start_after}
<ei:x-eiNotification>
<ical:duration>P0Y0M0DT0H0M0S</ical:duration>
</ei:x-eiNotification>
</ical:properties>
<ical:components xsi:nil="true"/>
</ei:eiActivePeriod>
<ei:eiEventSignals>
<ei:eiEventSignal>
<strm:intervals>
{intervals_xml}
</strm:intervals>
<ei:signalName>{self.signal_name}</ei:signalName>
<ei:signalType>level</ei:signalType>
<ei:signalID>SignalID</ei:signalID>
<ei:currentValue>
<ei:payloadFloat>
<ei:value>0.0</ei:value>
</ei:payloadFloat>
</ei:currentValue>
</ei:eiEventSignal>
</ei:eiEventSignals>
<ei:eiTarget>
{ven_xml}
{party_xml}
{resource_xml}
{group_xml}
</ei:eiTarget>
</ei:eiEvent>
<oadrResponseRequired>{"always" if self.response_required else "never"}</oadrResponseRequired>
</oadrEvent>
"""
def generate_payload(event_list, vtn_id="TH_VTN"):
evt_xml = "".join([event.to_xml() for event in event_list])
template = f"""
<oadrDistributeEvent
xmlns="http://openadr.org/oadr-2.0a/2012/07"
xmlns:ei="http://docs.oasis-open.org/ns/energyinterop/201110"
xmlns:emix="http://docs.oasis-open.org/ns/emix/2011/06"
xmlns:pyld="http://docs.oasis-open.org/ns/energyinterop/201110/payloads"
xmlns:strm="urn:ietf:params:xml:ns:icalendar-2.0:stream"
xmlns:ical="urn:ietf:params:xml:ns:icalendar-2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
>
<eiResponse>
<responseCode>200</responseCode>
<pyld:requestID/>
</eiResponse>
<pyld:requestID>OadrDisReq092520_152645_178</pyld:requestID>
<ei:vtnID>{vtn_id}</ei:vtnID>
{evt_xml}
</oadrDistributeEvent>
"""
# print(template)
return etree.fromstring(template)
|
from marshmallow import Schema, fields, post_load, pre_load
from models import Flight
# from parsers import Parser
from tortoise import Tortoise, run_async
import asyncio
import datetime
class SheremetievoCompanySchema(Schema):
code = fields.String(required=False, allow_none=True)
class SheremetievoSchema(Schema):
t_boarding_start = fields.String(required=False, allow_none=True)
t_st = fields.String(required=False)
    t_st_mar = fields.String(required=False)
estimated_chin_finish = fields.String(required=False, allow_none=True)
flt = fields.String(required=False, allow_none=True)
gate_id = fields.String(required=False, allow_none=True)
old_gate_id = fields.String(required=False, allow_none=True)
term = fields.String(required=False, allow_none=True)
old_term = fields.String(required=False, allow_none=True)
co = fields.Nested(SheremetievoCompanySchema)
airport = fields.Method('get_airport')
number = fields.Method('get_number')
gate = fields.Method('get_gate')
departure_time = fields.Method('get_departure_time')
on_board_time = fields.Method('get_on_board_time')
# t_boarding_start = fields.String()
# t_bording_finish =
# estimated_chin_finish = fields.String()
def get_departure_time(self, instance):
return instance['t_st']
def get_on_board_time(self, instance):
if instance['t_boarding_start'] and not(instance['estimated_chin_finish']):
return instance['t_boarding_start']
elif not(instance['t_boarding_start']) and instance['estimated_chin_finish']:
return instance['estimated_chin_finish']
def get_number(self, instance):
return instance['co']['code'] + ' ' + str(instance['flt'])
    def get_gate(self, instance):
        # Use the current gate/terminal when present, otherwise fall back to the old value.
        if instance['gate_id']:
            gate_numbers = instance['gate_id']
        elif instance['old_gate_id']:
            gate_numbers = instance['old_gate_id']
        else:
            gate_numbers = ''
        if instance['term']:
            gate_term = instance['term']
        elif instance['old_term']:
            gate_term = instance['old_term']
        else:
            gate_term = ''
        gate = str(gate_term) + ' ' + str(gate_numbers)
        return gate
def get_airport(self, instance):
return 'SVO'
class SheremetievoOutputSchema(Schema):
t_boarding_start = fields.String(required=False, allow_none=True)
t_st = fields.String(required=False, allow_none=True)
t_st_mar = fields.String(required=False, allow_none=True)
estimated_chin_finish = fields.String(required=False, allow_none=True)
airport = fields.String()
gate_id = fields.String(required=False, allow_none=True)
old_gate_id = fields.String(required=False, allow_none=True)
term = fields.String(required=False, allow_none=True)
old_term = fields.String(required=False, allow_none=True)
number = fields.String()
flt = fields.String(required=False, allow_none=True)
co = fields.Nested(SheremetievoCompanySchema)
gate = fields.String(required=False, allow_none=True)
departure_time = fields.String(required=False, allow_none=True)
on_board_time = fields.String(required=False, allow_none=True)
# estimated_chin_finish = fields.String(required=False, allow_none=True)
def get_departure_time(self, instance):
try:
time = datetime.datetime.strptime(instance['t_st'],
'%Y-%m-%dT%H:%M:%S%z')
except Exception:
time = datetime.datetime.now()
return time
def get_on_board_time(self, instance):
try:
time = datetime.datetime.strptime(instance['t_boarding_start'],
'%Y-%m-%dT%H:%M:%S%z')
except Exception:
time = datetime.datetime.strptime(instance['estimated_chin_finish'],
'%Y-%m-%dT%H:%M:%S%z')
return time
# @pre_load
# def get_gate(self, data, **kwargs):
# if data["gate_id"] != data["old_gate_id"]:
# data.update({"gate": data["term_gate"] + str(data["gate_id"])})
# else:
# data.update({"gate": data["term_gate"] + str(data["gate_id"])})
# return data
@post_load
def make_object(self, data, **kwargs):
try:
try:
departure_time = datetime.datetime.strptime(data['t_st'],
'%Y-%m-%dT%H:%M:%S%z') + datetime.timedelta(hours=3)
except Exception:
departure_time = datetime.datetime.now()
try:
on_board_time = datetime.datetime.strptime(data['t_boarding_start'],
'%Y-%m-%dT%H:%M:%S%z') + datetime.timedelta(hours=3)
except Exception:
on_board_time = datetime.datetime.strptime(data['on_board_time'],
'%Y-%m-%dT%H:%M:%S%z') + datetime.timedelta(hours=3)
flight = Flight(airport=data['airport'],
number=data['number'],
on_board_time=on_board_time,
departure_time=departure_time,
gate=data['gate'],
)
return flight
except Exception:
return None
class SheremetievoMainSchema(Schema):
    items = fields.List(fields.Nested(SheremetievoSchema))
class DomodedovoSchema(Schema):
airport = fields.Method('get_airport')
number = fields.String()
on_board_time = fields.String()
departure_time = fields.String()
updated_number = fields.Method('update_number')
updated_on_board_time = fields.Method('update_on_board')
updated_departure_time = fields.Method('update_dep')
info = fields.String()
gate = fields.Method('get_gate')
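    # Abbreviated Russian month names as shown on the Domodedovo site, mapped to month numbers
    # ('янв' = Jan, 'фев' = Feb, ...).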
monthes = {
'янв': 1,
'фев': 2,
'мар': 3,
'апр': 4,
'май': 5,
'июн': 6,
'июл': 7,
'авг': 8,
'сен': 9,
'окт': 10,
'ноя': 11,
'дек': 12,
}
def get_gate(self, instance):
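        # The info string comes from the Russian-language site: 'Выход изменен на ' means
        # "Gate changed to", 'Выход ' means "Gate ", and 'Неизвестно' means "Unknown".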
if 'Выход изменен на ' in instance['info']:
return instance['info'].split('Выход изменен на ')[1]
elif 'Выход ' in instance['info']:
return instance['info'].split('Выход ')[1]
else:
return 'Неизвестно'
def update_on_board(self, instance):
if instance['on_board_time'] == 'неизвестно':
return ' '
else:
return instance['on_board_time']
    def update_dep(self, instance):
        temp_date = instance['departure_time'].replace('\r', '').replace('\n', '').replace('\xa0', ' ')
        parts = temp_date.split(' ')
        return f'{parts[0]} {self.monthes[parts[1]]} {datetime.datetime.now().year} {parts[2]}'
def update_number(self, instance):
temp_number = instance['number'].replace('\n', '').replace('\r', '')
return temp_number.split(' ')[0] + ' ' + temp_number.split(' ')[1]
def get_airport(self, instance):
return 'DME'
class DomodedovoOutputSchema(Schema):
airport = fields.String()
number = fields.String()
on_board_time = fields.String()
departure_time = fields.String()
updated_number = fields.String()
updated_on_board_time = fields.String(allow_none=True)
updated_departure_time = fields.String(allow_none=True)
info = fields.String()
gate = fields.String(allow_none=True, required=False)
@staticmethod
def get_on_board_time_and_departure_from_string(data):
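        # 'updated_departure_time' is expected as "day month year HH:MM" (numeric month, built by
        # DomodedovoSchema.update_dep); 'updated_on_board_time' is "HH:MM", or " " when unknown.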
departure_time = datetime.datetime(year=int(data['updated_departure_time'].split(' ')[2]),
month=int(data['updated_departure_time'].split(' ')[1]),
day=int(data['updated_departure_time'].split(' ')[0]),
hour=int(data['updated_departure_time'].split(' ')[3].split(':')[0]),
minute=int(data['updated_departure_time'].split(' ')[3].split(':')[1]))
if data['updated_on_board_time'] == ' ':
on_board_time = datetime.datetime.now()
else:
if int(departure_time.hour) > int(data['updated_on_board_time'].split(':')[0]):
on_board_time = datetime.datetime(year=int(data['updated_departure_time'].split(' ')[2]),
month=int(data['updated_departure_time'].split(' ')[1]),
day=int(data['updated_departure_time'].split(' ')[0]),
hour=int(data['updated_on_board_time'].split(':')[0]),
minute=int(data['updated_on_board_time'].split(':')[1])) \
- datetime.timedelta(days=1)
else:
on_board_time = datetime.datetime(year=int(data['updated_departure_time'].split(' ')[2]),
month=int(data['updated_departure_time'].split(' ')[1]),
day=int(data['updated_departure_time'].split(' ')[0]),
hour=int(data['updated_on_board_time'].split(':')[0]),
minute=int(data['updated_on_board_time'].split(':')[1]))
return departure_time, on_board_time
@post_load
def make_object(self, data, **kwargs):
departure_time, on_board_time = self.get_on_board_time_and_departure_from_string(data)
if data['gate'] != 'Неизвестно':
flight = Flight(number=data['updated_number'], on_board_time=on_board_time,
airport=data['airport'],
departure_time=departure_time,
gate=data['gate'])
else:
flight = Flight(number=data['updated_number'], on_board_time=on_board_time,
airport=data['airport'],
departure_time=departure_time)
return flight
class VnukovoSchema(Schema):
airport = fields.Method('get_airport')
number = fields.String()
flight_number = fields.String()
on_board_time = fields.String()
departure_time = fields.String()
gate = fields.String(allow_none=True)
def get_airport(self, instance):
return 'VKO'
class VnukovoOutputSchema(Schema):
airport = fields.String()
number = fields.String()
flight_number = fields.String()
on_board_time = fields.String()
departure_time = fields.String()
gate = fields.String(allow_none=True)
@staticmethod
def get_on_board_time_and_departure_from_string(data):
departure_time = datetime.datetime(year=int(data['departure_time'].split('.')[2]),
month=int(data['departure_time'].split('.')[1]),
day=int(data['departure_time'].split('.')[0][5:7]),
hour=int(data['departure_time'].split(':')[0]),
minute=int(data['departure_time'].split(':')[1][2:4]))
if data['on_board_time'] == ' ':
on_board_time = datetime.datetime.now()
else:
if int(departure_time.hour) < int(data['on_board_time'].split(':')[0]):
on_board_time = datetime.datetime(year=int(data['departure_time'].split('.')[2]),
month=int(data['departure_time'].split('.')[1]),
day=int(data['departure_time'].split('.')[0][5:7])-1,
hour=int(data['on_board_time'].split(':')[0]),
minute=int(data['on_board_time'].split(':')[1]))
else:
on_board_time = datetime.datetime(year=int(data['departure_time'].split('.')[2]),
month=int(data['departure_time'].split('.')[1]),
day=int(data['departure_time'].split('.')[0][5:7]),
hour=int(data['on_board_time'].split(':')[0]),
minute=int(data['on_board_time'].split(':')[1][:2]))
return departure_time, on_board_time
@post_load
def make_object(self, data, **kwargs):
departure_time, on_board_time = self.get_on_board_time_and_departure_from_string(data)
try:
flight = Flight(number=data['number'], airport=data['airport'],
on_board_time=on_board_time, departure_time=departure_time, gate=data['gate'])
except KeyError:
flight = Flight(number=data['number'], airport=data['airport'],
on_board_time=on_board_time, departure_time=departure_time)
return flight
def get_airport(self, instance):
return 'VKO'
class TestNestedSchema(Schema):
name = fields.String()
class TestSchema(Schema):
id = fields.Integer()
company = fields.Nested(TestNestedSchema)
company_id = fields.Method('get_company_id')
def get_company_id(self, instance):
return str(instance['id']) + instance['company']['name']
@post_load
    def make_object(self, data, **kwargs):
        pass
# if __name__ == '__main__':
# data = {'id': 1, 'dasdas':'dasd',
# 'kek': 'lol', 'company': {"dasda": "zaadsda", 'name': 'SU'}}
#
# schema = VnukovoOutputSchema()
# r = schema.load(data)
# print(r) | from marshmallow import Schema, fields, post_load, pre_load
from models import Flight
# from parsers import Parser
from tortoise import Tortoise, run_async
import asyncio
import datetime
class SheremetievoCompanySchema(Schema):
code = fields.String(required=False, allow_none=True)
class SheremetievoSchema(Schema):
t_boarding_start = fields.String(required=False, allow_none=True)
t_st = fields.String(required=False)
t_st_mar = fields.String(required=False, )
estimated_chin_finish = fields.String(required=False, allow_none=True)
flt = fields.String(required=False, allow_none=True)
gate_id = fields.String(required=False, allow_none=True)
old_gate_id = fields.String(required=False, allow_none=True)
term = fields.String(required=False, allow_none=True)
old_term = fields.String(required=False, allow_none=True)
co = fields.Nested(SheremetievoCompanySchema)
airport = fields.Method('get_airport')
number = fields.Method('get_number')
gate = fields.Method('get_gate')
departure_time = fields.Method('get_departure_time')
on_board_time = fields.Method('get_on_board_time')
# t_boarding_start = fields.String()
# t_bording_finish =
# estimated_chin_finish = fields.String()
def get_departure_time(self, instance):
return instance['t_st']
def get_on_board_time(self, instance):
if instance['t_boarding_start'] and not(instance['estimated_chin_finish']):
return instance['t_boarding_start']
elif not(instance['t_boarding_start']) and instance['estimated_chin_finish']:
return instance['estimated_chin_finish']
def get_number(self, instance):
return instance['co']['code'] + ' ' + str(instance['flt'])
def get_gate(self, instance):
if instance['old_gate_id'] and instance['gate_id']:
if instance['gate_id'] != instance['old_gate_id']:
gate_numbers = instance['gate_id']
else:
gate_numbers = instance['gate_id']
elif instance['old_gate_id'] and not(instance['gate_id']):
gate_numbers = instance['old_gate_id']
elif not(instance['old_gate_id']) and instance['gate_id']:
gate_numbers = instance['gate_id']
else:
gate_numbers = ''
if instance['old_term'] and instance['term']:
if instance['term'] != instance['old_term']:
gate_term = instance['term']
else:
gate_term = instance['term']
elif instance['old_term'] and not(instance['term']):
gate_term = instance['old_term']
elif not(instance['old_term']) and instance['term']:
gate_term = instance['term']
else:
gate_term = ''
gate = str(gate_term) + ' ' + str(gate_numbers)
return gate
def get_airport(self, instance):
return 'SVO'
class SheremetievoOutputSchema(Schema):
t_boarding_start = fields.String(required=False, allow_none=True)
t_st = fields.String(required=False, allow_none=True)
t_st_mar = fields.String(required=False, allow_none=True)
estimated_chin_finish = fields.String(required=False, allow_none=True)
airport = fields.String()
gate_id = fields.String(required=False, allow_none=True)
old_gate_id = fields.String(required=False, allow_none=True)
term = fields.String(required=False, allow_none=True)
old_term = fields.String(required=False, allow_none=True)
number = fields.String()
flt = fields.String(required=False, allow_none=True)
co = fields.Nested(SheremetievoCompanySchema)
gate = fields.String(required=False, allow_none=True)
departure_time = fields.String(required=False, allow_none=True)
on_board_time = fields.String(required=False, allow_none=True)
# estimated_chin_finish = fields.String(required=False, allow_none=True)
def get_departure_time(self, instance):
try:
time = datetime.datetime.strptime(instance['t_st'],
'%Y-%m-%dT%H:%M:%S%z')
except Exception:
time = datetime.datetime.now()
return time
def get_on_board_time(self, instance):
try:
time = datetime.datetime.strptime(instance['t_boarding_start'],
'%Y-%m-%dT%H:%M:%S%z')
except Exception:
time = datetime.datetime.strptime(instance['estimated_chin_finish'],
'%Y-%m-%dT%H:%M:%S%z')
return time
# @pre_load
# def get_gate(self, data, **kwargs):
# if data["gate_id"] != data["old_gate_id"]:
# data.update({"gate": data["term_gate"] + str(data["gate_id"])})
# else:
# data.update({"gate": data["term_gate"] + str(data["gate_id"])})
# return data
@post_load
def make_object(self, data, **kwargs):
try:
try:
departure_time = datetime.datetime.strptime(data['t_st'],
'%Y-%m-%dT%H:%M:%S%z') + datetime.timedelta(hours=3)
except Exception:
departure_time = datetime.datetime.now()
try:
on_board_time = datetime.datetime.strptime(data['t_boarding_start'],
'%Y-%m-%dT%H:%M:%S%z') + datetime.timedelta(hours=3)
except Exception:
on_board_time = datetime.datetime.strptime(data['on_board_time'],
'%Y-%m-%dT%H:%M:%S%z') + datetime.timedelta(hours=3)
flight = Flight(airport=data['airport'],
number=data['number'],
on_board_time=on_board_time,
departure_time=departure_time,
gate=data['gate'],
)
return flight
except Exception:
return None
class SheremetievoMainSchema(Schema):
items = fields.List(fields.Nested(SheremetievoSchema, many=True))
class DomodedovoSchema(Schema):
airport = fields.Method('get_airport')
number = fields.String()
on_board_time = fields.String()
departure_time = fields.String()
updated_number = fields.Method('update_number')
updated_on_board_time = fields.Method('update_on_board')
updated_departure_time = fields.Method('update_dep')
info = fields.String()
gate = fields.Method('get_gate')
monthes = {
'янв': 1,
'фев': 2,
'мар': 3,
'апр': 4,
'май': 5,
'июн': 6,
'июл': 7,
'авг': 8,
'сен': 9,
'окт': 10,
'ноя': 11,
'дек': 12,
}
def get_gate(self, instance):
if 'Выход изменен на ' in instance['info']:
return instance['info'].split('Выход изменен на ')[1]
elif 'Выход ' in instance['info']:
return instance['info'].split('Выход ')[1]
else:
return 'Неизвестно'
def update_on_board(self, instance):
if instance['on_board_time'] == 'неизвестно':
return ' '
else:
return instance['on_board_time']
def update_dep(self, instance):
temp_date = instance['departure_time'].replace('\r', '').replace('\n', '').replace('\xa0', ' ')
return f'{temp_date.split(" ")[0]} {self.monthes[temp_date.split(" ")[1]]} {datetime.datetime.now().year} {temp_date.split(" ")[2]}'
def update_number(self, instance):
temp_number = instance['number'].replace('\n', '').replace('\r', '')
return temp_number.split(' ')[0] + ' ' + temp_number.split(' ')[1]
def get_airport(self, instance):
return 'DME'
class DomodedovoOutputSchema(Schema):
airport = fields.String()
number = fields.String()
on_board_time = fields.String()
departure_time = fields.String()
updated_number = fields.String()
updated_on_board_time = fields.String(allow_none=True)
updated_departure_time = fields.String(allow_none=True)
info = fields.String()
gate = fields.String(allow_none=True, required=False)
@staticmethod
def get_on_board_time_and_departure_from_string(data):
departure_time = datetime.datetime(year=int(data['updated_departure_time'].split(' ')[2]),
month=int(data['updated_departure_time'].split(' ')[1]),
day=int(data['updated_departure_time'].split(' ')[0]),
hour=int(data['updated_departure_time'].split(' ')[3].split(':')[0]),
minute=int(data['updated_departure_time'].split(' ')[3].split(':')[1]))
if data['updated_on_board_time'] == ' ':
on_board_time = datetime.datetime.now()
else:
if int(departure_time.hour) > int(data['updated_on_board_time'].split(':')[0]):
on_board_time = datetime.datetime(year=int(data['updated_departure_time'].split(' ')[2]),
month=int(data['updated_departure_time'].split(' ')[1]),
day=int(data['updated_departure_time'].split(' ')[0]),
hour=int(data['updated_on_board_time'].split(':')[0]),
minute=int(data['updated_on_board_time'].split(':')[1])) \
- datetime.timedelta(days=1)
else:
on_board_time = datetime.datetime(year=int(data['updated_departure_time'].split(' ')[2]),
month=int(data['updated_departure_time'].split(' ')[1]),
day=int(data['updated_departure_time'].split(' ')[0]),
hour=int(data['updated_on_board_time'].split(':')[0]),
minute=int(data['updated_on_board_time'].split(':')[1]))
return departure_time, on_board_time
@post_load
def make_object(self, data, **kwargs):
departure_time, on_board_time = self.get_on_board_time_and_departure_from_string(data)
if data['gate'] != 'Неизвестно':
flight = Flight(number=data['updated_number'], on_board_time=on_board_time,
airport=data['airport'],
departure_time=departure_time,
gate=data['gate'])
else:
flight = Flight(number=data['updated_number'], on_board_time=on_board_time,
airport=data['airport'],
departure_time=departure_time)
return flight
class VnukovoSchema(Schema):
airport = fields.Method('get_airport')
number = fields.String()
flight_number = fields.String()
on_board_time = fields.String()
departure_time = fields.String()
gate = fields.String(allow_none=True)
def get_airport(self, instance):
return 'VKO'
class VnukovoOutputSchema(Schema):
airport = fields.String()
number = fields.String()
flight_number = fields.String()
on_board_time = fields.String()
departure_time = fields.String()
gate = fields.String(allow_none=True)
@staticmethod
def get_on_board_time_and_departure_from_string(data):
departure_time = datetime.datetime(year=int(data['departure_time'].split('.')[2]),
month=int(data['departure_time'].split('.')[1]),
day=int(data['departure_time'].split('.')[0][5:7]),
hour=int(data['departure_time'].split(':')[0]),
minute=int(data['departure_time'].split(':')[1][2:4]))
if data['on_board_time'] == ' ':
on_board_time = datetime.datetime.now()
else:
if int(departure_time.hour) < int(data['on_board_time'].split(':')[0]):
on_board_time = datetime.datetime(year=int(data['departure_time'].split('.')[2]),
month=int(data['departure_time'].split('.')[1]),
day=int(data['departure_time'].split('.')[0][5:7])-1,
hour=int(data['on_board_time'].split(':')[0]),
minute=int(data['on_board_time'].split(':')[1]))
else:
on_board_time = datetime.datetime(year=int(data['departure_time'].split('.')[2]),
month=int(data['departure_time'].split('.')[1]),
day=int(data['departure_time'].split('.')[0][5:7]),
hour=int(data['on_board_time'].split(':')[0]),
minute=int(data['on_board_time'].split(':')[1][:2]))
return departure_time, on_board_time
@post_load
def make_object(self, data, **kwargs):
departure_time, on_board_time = self.get_on_board_time_and_departure_from_string(data)
try:
flight = Flight(number=data['number'], airport=data['airport'],
on_board_time=on_board_time, departure_time=departure_time, gate=data['gate'])
except KeyError:
flight = Flight(number=data['number'], airport=data['airport'],
on_board_time=on_board_time, departure_time=departure_time)
return flight
def get_airport(self, instance):
return 'VKO'
class TestNestedSchema(Schema):
name = fields.String()
class TestSchema(Schema):
id = fields.Integer()
company = fields.Nested(TestNestedSchema)
company_id = fields.Method('get_company_id')
def get_company_id(self, instance):
return str(instance['id']) + instance['company']['name']
@post_load
    def make_object(self, data, **kwargs):
pass
# if __name__ == '__main__':
# data = {'id': 1, 'dasdas':'dasd',
# 'kek': 'lol', 'company': {"dasda": "zaadsda", 'name': 'SU'}}
#
# schema = VnukovoOutputSchema()
# r = schema.load(data)
# print(r) |
"""管理用户状态,包括但不限于查看签到状态,执行签到等"""
__all__ = ['apis_account_verify', 'check_display_state', 'stu_twqd']
import json
import requests
from src.BusinessCentralLayer.setting import OSH_STATUS_CODE, logger
from src.BusinessLogicLayer.cluster.osh_runner import runner
def apis_account_verify(user: dict):
"""
    Verify that the 今日校园 (campus portal) account credentials are correct.
:param user: username password
:return:
"""
logging_api = "http://www.zimo.wiki:8080/wisedu-unified-login-api-v1.0/api/login"
apis_ = {
'login-url': 'https://authserver.hainanu.edu.cn/authserver/login?service=https%3A%2F%2Fhainanu.campusphere.net%2Fiap%2FloginSuccess%3FsessionToken%3Df73b49371c0d4669aea95af37347e9fe',
'host': 'hainanu.campusphere.net'
}
params = {
'login_url': apis_['login-url'],
'needcaptcha_url': '',
'captcha_url': '',
'username': user['username'],
'password': user['password']
}
cookies = dict()
try:
res = requests.post(url=logging_api, data=params, timeout=2)
cookie_str = str(res.json()['cookies'])
if cookie_str == 'None':
if "网页中没有找到casLoginForm" in res.json()['msg']:
return None
else:
return False
        # Parse the cookie string into name/value pairs
for line in cookie_str.split(';'):
name, value = line.strip().split('=', 1)
cookies[name] = value
session = requests.session()
session.cookies = requests.utils.cookiejar_from_dict(cookies, cookiejar=None, overwrite=True)
if session:
return {'msg': 'success', 'info': 'Verified successfully'}
else:
return {'info': 'Incorrect username or password'}
    except (json.decoder.JSONDecodeError, requests.exceptions.ProxyError):
        logger.warning("The target may have authentication checks in place; turn off any local network proxy")
def check_display_state(user: dict, debug=False, _date=0) -> dict:
"""
    Query the user's sign-in status.
:param _date:
:param user: only username
:param debug:
:return:
"""
params = runner(debug=debug).get_stu_temp_report_data(
username=user["username"],
only_check_status=True,
_date=_date
)
if isinstance(params, int):
response = {'code': params, 'info': OSH_STATUS_CODE[params]}
return response
def stu_twqd(user: dict, cover=False):
"""
    Perform the temperature sign-in, for use by the external API.
    :param cover:
    :param user: pass only username to act on the user's behalf (privileged); pass both username and password to use that user's own cookie
:return:
"""
params = runner(cover=cover, debug=False).run(user)
response = {'code': params[0], 'info': f"{user["username"]} -- {OSH_STATUS_CODE[params[0]]}"}
return response
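# Illustrative usage sketch (not part of the original module); the credential
# values below are placeholders based on the parameters documented above.
if __name__ == '__main__':
    demo_user = {'username': 'student_id', 'password': 'secret'}
    print(apis_account_verify(demo_user))                   # verify the account
    print(check_display_state({'username': 'student_id'}))  # query today's sign-in status
    print(stu_twqd({'username': 'student_id'}))             # trigger the temperature sign-in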
| """Manage user state: including, but not limited to, checking sign-in status and performing sign-ins."""
__all__ = ['apis_account_verify', 'check_display_state', 'stu_twqd']
import json
import requests
from src.BusinessCentralLayer.setting import OSH_STATUS_CODE, logger
from src.BusinessLogicLayer.cluster.osh_runner import runner
def apis_account_verify(user: dict):
"""
    Verify that the 今日校园 (campus portal) account credentials are correct.
:param user: username password
:return:
"""
logging_api = "http://www.zimo.wiki:8080/wisedu-unified-login-api-v1.0/api/login"
apis_ = {
'login-url': 'https://authserver.hainanu.edu.cn/authserver/login?service=https%3A%2F%2Fhainanu.campusphere.net%2Fiap%2FloginSuccess%3FsessionToken%3Df73b49371c0d4669aea95af37347e9fe',
'host': 'hainanu.campusphere.net'
}
params = {
'login_url': apis_['login-url'],
'needcaptcha_url': '',
'captcha_url': '',
'username': user['username'],
'password': user['password']
}
cookies = dict()
try:
res = requests.post(url=logging_api, data=params, timeout=2)
cookie_str = str(res.json()['cookies'])
if cookie_str == 'None':
if "网页中没有找到casLoginForm" in res.json()['msg']:
return None
else:
return False
        # Parse the cookie string into name/value pairs
for line in cookie_str.split(';'):
name, value = line.strip().split('=', 1)
cookies[name] = value
session = requests.session()
session.cookies = requests.utils.cookiejar_from_dict(cookies, cookiejar=None, overwrite=True)
if session:
return {'msg': 'success', 'info': 'Verified successfully'}
else:
return {'info': 'Incorrect username or password'}
    except (json.decoder.JSONDecodeError, requests.exceptions.ProxyError):
        logger.warning("The target may have authentication checks in place; turn off any local network proxy")
def check_display_state(user: dict, debug=False, _date=0) -> dict:
"""
    Query the user's sign-in status.
:param _date:
:param user: only username
:param debug:
:return:
"""
params = runner(debug=debug).get_stu_temp_report_data(
username=user["username"],
only_check_status=True,
_date=_date
)
if isinstance(params, int):
response = {'code': params, 'info': OSH_STATUS_CODE[params]}
return response
def stu_twqd(user: dict, cover=False):
"""
    Perform the temperature sign-in, for use by the external API.
    :param cover:
    :param user: pass only username to act on the user's behalf (privileged); pass both username and password to use that user's own cookie
:return:
"""
params = runner(cover=cover, debug=False).run(user)
response = {'code': params[0], 'info': f"{user['username']} -- {OSH_STATUS_CODE[params[0]]}"}
return response
|
import csv
import json
import locale
import logging
import pathlib
import sys
import warnings
from typing import Any, Dict, Iterator, Optional, Set, Union
from requests.cookies import cookiejar_from_dict
from .constants import DEFAULT_REQUESTS_TIMEOUT
from .facebook_scraper import FacebookScraper
from .fb_types import Credentials, Post, RawPost, Profile
from .utils import html_element_to_string, parse_cookie_file
from . import exceptions
import traceback
import time
from datetime import datetime, timedelta
import re
import browser_cookie3
_scraper = FacebookScraper()
def set_cookies(cookies):
if isinstance(cookies, str):
if cookies == "from_browser":
cookies = browser_cookie3.load(domain_name='.facebook.com')
else:
try:
cookies = parse_cookie_file(cookies)
except ValueError as e:
raise exceptions.InvalidCookies(f"Cookies are in an invalid format: {e}")
elif isinstance(cookies, dict):
cookies = cookiejar_from_dict(cookies)
if cookies is not None:
cookie_names = [c.name for c in cookies]
missing_cookies = [c for c in ['c_user', 'xs'] if c not in cookie_names]
if missing_cookies:
raise exceptions.InvalidCookies(f"Missing cookies with name(s): {missing_cookies}")
_scraper.session.cookies.update(cookies)
if not _scraper.is_logged_in():
raise exceptions.InvalidCookies(f"Cookies are not valid")
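# Usage sketch (illustrative, not part of the library): set_cookies accepts a
# Netscape-format cookie file path, the literal string "from_browser", or a
# dict/CookieJar that must include the "c_user" and "xs" cookies checked above.
# The file name and values below are placeholders.
#   set_cookies("cookies.txt")
#   set_cookies("from_browser")
#   set_cookies({"c_user": "<user id>", "xs": "<session token>"})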
def unset_cookies():
# Explicitly unset cookies to return to unauthenticated requests
_scraper.session.cookies = cookiejar_from_dict({})
def set_proxy(proxy):
_scraper.set_proxy(proxy)
def set_user_agent(user_agent):
_scraper.set_user_agent(user_agent)
def set_noscript(noscript):
_scraper.set_noscript(noscript)
def get_profile(
account: str,
**kwargs,
) -> Profile:
"""Get a Facebook user's profile information
Args:
account(str): The account of the profile.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_profile(account, **kwargs)
def get_friends(
account: str,
**kwargs,
) -> Iterator[Profile]:
"""Get a Facebook user's friends
Args:
account(str): The account of the profile.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_friends(account, **kwargs)
def get_page_info(account: str, **kwargs) -> Profile:
"""Get a page's information
Args:
account(str): The account of the profile.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_page_info(account, **kwargs)
def get_group_info(group: Union[str, int], **kwargs) -> Profile:
"""Get a group's profile information
Args:
group(str or int): The group name or ID
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_group_info(group, **kwargs)
def get_shop(account: str, **kwargs) -> Iterator[Post]:
"""Get a page's shop listings
Args:
account(str): The account of the profile.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_shop(account, **kwargs)
def get_posts(
account: Optional[str] = None,
group: Union[str, int, None] = None,
post_urls: Optional[Iterator[str]] = None,
hashtag: Optional[str] = None,
credentials: Optional[Credentials] = None,
**kwargs,
) -> Iterator[Post]:
"""Get posts from a Facebook page or group.
Args:
account (str): The account of the page.
group (int): The group id.
post_urls ([str]): List of manually specified post URLs.
credentials (Optional[Tuple[str, str]]): Tuple of email and password to login before scraping.
timeout (int): Timeout for requests.
page_limit (int): How many pages of posts to go through.
Use None to try to get all of them.
extra_info (bool): Set to True to try to get reactions.
youtube_dl (bool): Use Youtube-DL for video extraction.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
Yields:
dict: The post representation in a dictionary.
"""
valid_args = sum(arg is not None for arg in (account, group, post_urls, hashtag))
if valid_args != 1:
raise ValueError("You need to specify either account, group, or post_urls")
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
if cookies is not None and credentials is not None:
raise ValueError("Can't use cookies and credentials arguments at the same time")
set_cookies(cookies)
options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
if isinstance(options, set):
warnings.warn("The options argument should be a dictionary.", stacklevel=2)
options = {k: True for k in options}
options.setdefault('account', account)
# TODO: Add a better throttling mechanism
if 'sleep' in kwargs:
warnings.warn(
"The sleep parameter has been removed, it won't have any effect.", stacklevel=2
)
kwargs.pop('sleep')
# TODO: Deprecate `pages` in favor of `page_limit` since it is less confusing
if 'pages' in kwargs:
kwargs['page_limit'] = kwargs.pop('pages')
# TODO: Deprecate `extra_info` in favor of `options`
if "reactions" not in options:
options['reactions'] = kwargs.pop('extra_info', False)
options['youtube_dl'] = kwargs.pop('youtube_dl', False)
if credentials is not None:
_scraper.login(*credentials)
if account is not None:
return _scraper.get_posts(account, **kwargs)
elif group is not None:
return _scraper.get_group_posts(group, **kwargs)
elif hashtag is not None:
return _scraper.get_posts_by_hashtag(hashtag, **kwargs)
elif post_urls is not None:
return _scraper.get_posts_by_url(post_urls, **kwargs)
raise ValueError('No account nor group')
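# Usage sketch (illustrative, not part of the library): fetch a couple of pages
# of posts with one of the mutually exclusive selectors validated above. The
# page name and cookie file are placeholders.
#   for post in get_posts(account="nintendo", page_limit=2, cookies="cookies.txt"):
#       print(post["post_id"], post["time"])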
def get_photos(
account: str,
credentials: Optional[Credentials] = None,
**kwargs,
) -> Iterator[Post]:
"""Get photo posts from a Facebook page.
Args:
account (str): The account of the page.
credentials (Optional[Tuple[str, str]]): Tuple of email and password to login before scraping.
timeout (int): Timeout for requests.
page_limit (int): How many pages of posts to go through.
Use None to try to get all of them.
extra_info (bool): Set to True to try to get reactions.
youtube_dl (bool): Use Youtube-DL for video extraction.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
Yields:
dict: The post representation in a dictionary.
"""
if account is None:
raise ValueError("You need to specify account")
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
if cookies is not None and credentials is not None:
raise ValueError("Can't use cookies and credentials arguments at the same time")
set_cookies(cookies)
options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
if isinstance(options, set):
warnings.warn("The options argument should be a dictionary.", stacklevel=2)
options = {k: True for k in options}
options.setdefault('account', account)
# TODO: Add a better throttling mechanism
if 'sleep' in kwargs:
warnings.warn(
"The sleep parameter has been removed, it won't have any effect.", stacklevel=2
)
kwargs.pop('sleep')
# TODO: Deprecate `pages` in favor of `page_limit` since it is less confusing
if 'pages' in kwargs:
kwargs['page_limit'] = kwargs.pop('pages')
# TODO: Deprecate `extra_info` in favor of `options`
options['reactions'] = kwargs.pop('extra_info', False)
options['youtube_dl'] = kwargs.pop('youtube_dl', False)
if credentials is not None:
_scraper.login(*credentials)
return _scraper.get_photos(account, **kwargs)
def get_posts_by_search(
word: str,
credentials: Optional[Credentials] = None,
**kwargs,
) -> Iterator[Post]:
"""Get posts from a Facebook page or group.
Args:
word (str): The word for searching posts.
group (int): The group id.
credentials (Optional[Tuple[str, str]]): Tuple of email and password to login before scraping.
timeout (int): Timeout for requests.
page_limit (int): How many pages of posts to go through.
Use None to try to get all of them.
extra_info (bool): Set to True to try to get reactions.
youtube_dl (bool): Use Youtube-DL for video extraction.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
Yields:
dict: The post representation in a dictionary.
"""
if not word:
raise ValueError("You need to specify word")
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
if cookies is not None and credentials is not None:
raise ValueError("Can't use cookies and credentials arguments at the same time")
set_cookies(cookies)
options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
if isinstance(options, set):
warnings.warn("The options argument should be a dictionary.", stacklevel=2)
options = {k: True for k in options}
options.setdefault('word', word)
# TODO: Add a better throttling mechanism
if 'sleep' in kwargs:
warnings.warn(
"The sleep parameter has been removed, it won't have any effect.", stacklevel=2
)
kwargs.pop('sleep')
# TODO: Deprecate `pages` in favor of `page_limit` since it is less confusing
if 'pages' in kwargs:
kwargs['page_limit'] = kwargs.pop('pages')
# TODO: Deprecate `extra_info` in favor of `options`
if "reactions" not in options:
options['reactions'] = kwargs.pop('extra_info', False)
options['youtube_dl'] = kwargs.pop('youtube_dl', False)
if credentials is not None:
_scraper.login(*credentials)
if word is not None:
return _scraper.get_posts_by_search(word, **kwargs)
    raise ValueError('No search word provided')
def write_post_to_disk(post: Post, source: RawPost, location: pathlib.Path):
post_id = post['post_id']
filename = f'{post_id}.html'
logger.debug("Writing post %s", post_id)
with open(location.joinpath(filename), mode='wt') as f:
f.write('<!--\n')
json.dump(post, f, indent=4, default=str)
f.write('\n-->\n')
f.write(html_element_to_string(source, pretty=True))
def write_posts_to_csv(
account: Optional[str] = None,
group: Union[str, int, None] = None,
filename: str = None,
encoding: str = None,
**kwargs,
):
"""Write posts from an account or group to a CSV or JSON file
Args:
account (str): Facebook account name e.g. "nike" or "nintendo"
group (Union[str, int, None]): Facebook group id e.g. 676845025728409
filename (str): Filename, defaults to <account or group>_posts.csv
encoding (str): Encoding for the output file, defaults to locale.getpreferredencoding()
credentials (Optional[Tuple[str, str]]): Tuple of email and password to login before scraping. Defaults to scrape anonymously
timeout (Optional[int]): Timeout for requests.
page_limit (Optional[int]): How many pages of posts to go through.
Use None to try to get all of them.
extra_info (Optional[bool]): Set to True to try to get reactions.
dump_location (Optional[pathlib.Path]): Location where to write the HTML source of the posts.
"""
dump_location = kwargs.pop('dump_location', None) # For dumping HTML to disk, for debugging
if dump_location is not None:
dump_location.mkdir(exist_ok=True)
# Set a default filename, based on the account name with the appropriate extension
if filename is None:
filename = str(account or group) + "_posts." + kwargs.get("format")
if encoding is None:
encoding = locale.getpreferredencoding()
if filename == "-":
output_file = sys.stdout
else:
output_file = open(filename, 'w', newline='', encoding=encoding)
first_post = True
sleep = kwargs.pop("sleep", 0)
days_limit = kwargs.get("days_limit", 3650)
max_post_time = datetime.now() - timedelta(days=days_limit)
start_url = None
resume_file = kwargs.get("resume_file")
if resume_file:
try:
with open(resume_file, "r") as f:
existing_url = f.readline().strip()
logger.debug("Existing URL:" + existing_url)
if existing_url:
start_url = existing_url
except FileNotFoundError:
pass
def handle_pagination_url(url):
if resume_file:
with open(resume_file, "w") as f:
f.write(url + "\n")
keys = kwargs.get("keys")
try:
for post in get_posts(
account=account,
group=group,
start_url=start_url,
request_url_callback=handle_pagination_url,
remove_source=not bool(dump_location),
**kwargs,
):
if dump_location is not None:
source = post.pop('source')
try:
write_post_to_disk(post, source, dump_location)
except Exception:
logger.exception("Error writing post to disk")
if first_post:
if kwargs.get("format") == "json":
output_file.write("[\n")
else:
if not keys:
keys = list(post.keys())
dict_writer = csv.DictWriter(output_file, keys, extrasaction='ignore')
dict_writer.writeheader()
else:
if kwargs.get("format") == "json":
output_file.write(",")
match = None
if post["text"]:
match = re.search(kwargs.get("matching"), post["text"], flags=re.IGNORECASE)
if kwargs.get("not_matching") and re.search(
kwargs.get("not_matching"), post["text"], flags=re.IGNORECASE
):
match = None
if match:
if kwargs.get("format") == "json":
if keys:
post = {k: v for k, v in post.items() if k in keys}
json.dump(post, output_file, default=str, indent=4)
else:
dict_writer.writerow(post)
if not first_post and post["time"] and post["time"] < max_post_time:
logger.debug(
f"Reached days_limit - {post["time"]} is more than {days_limit} days old (older than {max_post_time})"
)
break
first_post = False
time.sleep(sleep)
except KeyboardInterrupt:
pass
except Exception as e:
traceback.print_exc()
if kwargs.get("format") == "json":
output_file.write("\n]")
if first_post:
print("Couldn't get any posts.", file=sys.stderr)
output_file.close()
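# Usage sketch (illustrative, not part of the library): dump two pages of posts
# to CSV. "matching" is applied above whenever a post has text, so a permissive
# pattern is passed explicitly; the account name and file name are placeholders.
#   write_posts_to_csv(account="nintendo", format="csv", matching=".+",
#                      page_limit=2, filename="nintendo_posts.csv")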
def enable_logging(level=logging.DEBUG):
handler = logging.StreamHandler()
handler.setLevel(level)
logger.addHandler(handler)
logger.setLevel(level)
# Disable logging by default
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler()) | import csv
import json
import locale
import logging
import pathlib
import sys
import warnings
from typing import Any, Dict, Iterator, Optional, Set, Union
from requests.cookies import cookiejar_from_dict
from .constants import DEFAULT_REQUESTS_TIMEOUT
from .facebook_scraper import FacebookScraper
from .fb_types import Credentials, Post, RawPost, Profile
from .utils import html_element_to_string, parse_cookie_file
from . import exceptions
import traceback
import time
from datetime import datetime, timedelta
import re
import browser_cookie3
_scraper = FacebookScraper()
def set_cookies(cookies):
if isinstance(cookies, str):
if cookies == "from_browser":
cookies = browser_cookie3.load(domain_name='.facebook.com')
else:
try:
cookies = parse_cookie_file(cookies)
except ValueError as e:
raise exceptions.InvalidCookies(f"Cookies are in an invalid format: {e}")
elif isinstance(cookies, dict):
cookies = cookiejar_from_dict(cookies)
if cookies is not None:
cookie_names = [c.name for c in cookies]
missing_cookies = [c for c in ['c_user', 'xs'] if c not in cookie_names]
if missing_cookies:
raise exceptions.InvalidCookies(f"Missing cookies with name(s): {missing_cookies}")
_scraper.session.cookies.update(cookies)
if not _scraper.is_logged_in():
raise exceptions.InvalidCookies(f"Cookies are not valid")
def unset_cookies():
# Explicitly unset cookies to return to unauthenticated requests
_scraper.session.cookies = cookiejar_from_dict({})
def set_proxy(proxy):
_scraper.set_proxy(proxy)
def set_user_agent(user_agent):
_scraper.set_user_agent(user_agent)
def set_noscript(noscript):
_scraper.set_noscript(noscript)
def get_profile(
account: str,
**kwargs,
) -> Profile:
"""Get a Facebook user's profile information
Args:
account(str): The account of the profile.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_profile(account, **kwargs)
def get_friends(
account: str,
**kwargs,
) -> Iterator[Profile]:
"""Get a Facebook user's friends
Args:
account(str): The account of the profile.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_friends(account, **kwargs)
def get_page_info(account: str, **kwargs) -> Profile:
"""Get a page's information
Args:
account(str): The account of the profile.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_page_info(account, **kwargs)
def get_group_info(group: Union[str, int], **kwargs) -> Profile:
"""Get a group's profile information
Args:
group(str or int): The group name or ID
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_group_info(group, **kwargs)
def get_shop(account: str, **kwargs) -> Iterator[Post]:
"""Get a page's shop listings
Args:
account(str): The account of the profile.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
"""
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
set_cookies(cookies)
return _scraper.get_shop(account, **kwargs)
def get_posts(
account: Optional[str] = None,
group: Union[str, int, None] = None,
post_urls: Optional[Iterator[str]] = None,
hashtag: Optional[str] = None,
credentials: Optional[Credentials] = None,
**kwargs,
) -> Iterator[Post]:
"""Get posts from a Facebook page or group.
Args:
account (str): The account of the page.
group (int): The group id.
post_urls ([str]): List of manually specified post URLs.
credentials (Optional[Tuple[str, str]]): Tuple of email and password to login before scraping.
timeout (int): Timeout for requests.
page_limit (int): How many pages of posts to go through.
Use None to try to get all of them.
extra_info (bool): Set to True to try to get reactions.
youtube_dl (bool): Use Youtube-DL for video extraction.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
Yields:
dict: The post representation in a dictionary.
"""
valid_args = sum(arg is not None for arg in (account, group, post_urls, hashtag))
if valid_args != 1:
raise ValueError("You need to specify either account, group, or post_urls")
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
if cookies is not None and credentials is not None:
raise ValueError("Can't use cookies and credentials arguments at the same time")
set_cookies(cookies)
options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
if isinstance(options, set):
warnings.warn("The options argument should be a dictionary.", stacklevel=2)
options = {k: True for k in options}
options.setdefault('account', account)
# TODO: Add a better throttling mechanism
if 'sleep' in kwargs:
warnings.warn(
"The sleep parameter has been removed, it won't have any effect.", stacklevel=2
)
kwargs.pop('sleep')
# TODO: Deprecate `pages` in favor of `page_limit` since it is less confusing
if 'pages' in kwargs:
kwargs['page_limit'] = kwargs.pop('pages')
# TODO: Deprecate `extra_info` in favor of `options`
if "reactions" not in options:
options['reactions'] = kwargs.pop('extra_info', False)
options['youtube_dl'] = kwargs.pop('youtube_dl', False)
if credentials is not None:
_scraper.login(*credentials)
if account is not None:
return _scraper.get_posts(account, **kwargs)
elif group is not None:
return _scraper.get_group_posts(group, **kwargs)
elif hashtag is not None:
return _scraper.get_posts_by_hashtag(hashtag, **kwargs)
elif post_urls is not None:
return _scraper.get_posts_by_url(post_urls, **kwargs)
raise ValueError('No account nor group')
def get_photos(
account: str,
credentials: Optional[Credentials] = None,
**kwargs,
) -> Iterator[Post]:
"""Get photo posts from a Facebook page.
Args:
account (str): The account of the page.
credentials (Optional[Tuple[str, str]]): Tuple of email and password to login before scraping.
timeout (int): Timeout for requests.
page_limit (int): How many pages of posts to go through.
Use None to try to get all of them.
extra_info (bool): Set to True to try to get reactions.
youtube_dl (bool): Use Youtube-DL for video extraction.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
Yields:
dict: The post representation in a dictionary.
"""
if account is None:
raise ValueError("You need to specify account")
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
if cookies is not None and credentials is not None:
raise ValueError("Can't use cookies and credentials arguments at the same time")
set_cookies(cookies)
options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
if isinstance(options, set):
warnings.warn("The options argument should be a dictionary.", stacklevel=2)
options = {k: True for k in options}
options.setdefault('account', account)
# TODO: Add a better throttling mechanism
if 'sleep' in kwargs:
warnings.warn(
"The sleep parameter has been removed, it won't have any effect.", stacklevel=2
)
kwargs.pop('sleep')
# TODO: Deprecate `pages` in favor of `page_limit` since it is less confusing
if 'pages' in kwargs:
kwargs['page_limit'] = kwargs.pop('pages')
# TODO: Deprecate `extra_info` in favor of `options`
options['reactions'] = kwargs.pop('extra_info', False)
options['youtube_dl'] = kwargs.pop('youtube_dl', False)
if credentials is not None:
_scraper.login(*credentials)
return _scraper.get_photos(account, **kwargs)
def get_posts_by_search(
word: str,
credentials: Optional[Credentials] = None,
**kwargs,
) -> Iterator[Post]:
"""Get posts from a Facebook page or group.
Args:
word (str): The word for searching posts.
group (int): The group id.
credentials (Optional[Tuple[str, str]]): Tuple of email and password to login before scraping.
timeout (int): Timeout for requests.
page_limit (int): How many pages of posts to go through.
Use None to try to get all of them.
extra_info (bool): Set to True to try to get reactions.
youtube_dl (bool): Use Youtube-DL for video extraction.
cookies (Union[dict, CookieJar, str]): Cookie jar to use.
Can also be a filename to load the cookies from a file (Netscape format).
Yields:
dict: The post representation in a dictionary.
"""
if not word:
raise ValueError("You need to specify word")
_scraper.requests_kwargs['timeout'] = kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT)
cookies = kwargs.pop('cookies', None)
if cookies is not None and credentials is not None:
raise ValueError("Can't use cookies and credentials arguments at the same time")
set_cookies(cookies)
options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
if isinstance(options, set):
warnings.warn("The options argument should be a dictionary.", stacklevel=2)
options = {k: True for k in options}
options.setdefault('word', word)
# TODO: Add a better throttling mechanism
if 'sleep' in kwargs:
warnings.warn(
"The sleep parameter has been removed, it won't have any effect.", stacklevel=2
)
kwargs.pop('sleep')
# TODO: Deprecate `pages` in favor of `page_limit` since it is less confusing
if 'pages' in kwargs:
kwargs['page_limit'] = kwargs.pop('pages')
# TODO: Deprecate `extra_info` in favor of `options`
if "reactions" not in options:
options['reactions'] = kwargs.pop('extra_info', False)
options['youtube_dl'] = kwargs.pop('youtube_dl', False)
if credentials is not None:
_scraper.login(*credentials)
if word is not None:
return _scraper.get_posts_by_search(word, **kwargs)
    raise ValueError('No search word provided')
def write_post_to_disk(post: Post, source: RawPost, location: pathlib.Path):
post_id = post['post_id']
filename = f'{post_id}.html'
logger.debug("Writing post %s", post_id)
with open(location.joinpath(filename), mode='wt') as f:
f.write('<!--\n')
json.dump(post, f, indent=4, default=str)
f.write('\n-->\n')
f.write(html_element_to_string(source, pretty=True))
def write_posts_to_csv(
account: Optional[str] = None,
group: Union[str, int, None] = None,
filename: str = None,
encoding: str = None,
**kwargs,
):
"""Write posts from an account or group to a CSV or JSON file
Args:
account (str): Facebook account name e.g. "nike" or "nintendo"
group (Union[str, int, None]): Facebook group id e.g. 676845025728409
filename (str): Filename, defaults to <account or group>_posts.csv
encoding (str): Encoding for the output file, defaults to locale.getpreferredencoding()
credentials (Optional[Tuple[str, str]]): Tuple of email and password to login before scraping. Defaults to scrape anonymously
timeout (Optional[int]): Timeout for requests.
page_limit (Optional[int]): How many pages of posts to go through.
Use None to try to get all of them.
extra_info (Optional[bool]): Set to True to try to get reactions.
dump_location (Optional[pathlib.Path]): Location where to write the HTML source of the posts.
"""
dump_location = kwargs.pop('dump_location', None) # For dumping HTML to disk, for debugging
if dump_location is not None:
dump_location.mkdir(exist_ok=True)
# Set a default filename, based on the account name with the appropriate extension
if filename is None:
filename = str(account or group) + "_posts." + kwargs.get("format")
if encoding is None:
encoding = locale.getpreferredencoding()
if filename == "-":
output_file = sys.stdout
else:
output_file = open(filename, 'w', newline='', encoding=encoding)
first_post = True
sleep = kwargs.pop("sleep", 0)
days_limit = kwargs.get("days_limit", 3650)
max_post_time = datetime.now() - timedelta(days=days_limit)
start_url = None
resume_file = kwargs.get("resume_file")
if resume_file:
try:
with open(resume_file, "r") as f:
existing_url = f.readline().strip()
logger.debug("Existing URL:" + existing_url)
if existing_url:
start_url = existing_url
except FileNotFoundError:
pass
def handle_pagination_url(url):
if resume_file:
with open(resume_file, "w") as f:
f.write(url + "\n")
keys = kwargs.get("keys")
try:
for post in get_posts(
account=account,
group=group,
start_url=start_url,
request_url_callback=handle_pagination_url,
remove_source=not bool(dump_location),
**kwargs,
):
if dump_location is not None:
source = post.pop('source')
try:
write_post_to_disk(post, source, dump_location)
except Exception:
logger.exception("Error writing post to disk")
if first_post:
if kwargs.get("format") == "json":
output_file.write("[\n")
else:
if not keys:
keys = list(post.keys())
dict_writer = csv.DictWriter(output_file, keys, extrasaction='ignore')
dict_writer.writeheader()
else:
if kwargs.get("format") == "json":
output_file.write(",")
match = None
if post["text"]:
match = re.search(kwargs.get("matching"), post["text"], flags=re.IGNORECASE)
if kwargs.get("not_matching") and re.search(
kwargs.get("not_matching"), post["text"], flags=re.IGNORECASE
):
match = None
if match:
if kwargs.get("format") == "json":
if keys:
post = {k: v for k, v in post.items() if k in keys}
json.dump(post, output_file, default=str, indent=4)
else:
dict_writer.writerow(post)
if not first_post and post["time"] and post["time"] < max_post_time:
logger.debug(
f"Reached days_limit - {post['time']} is more than {days_limit} days old (older than {max_post_time})"
)
break
first_post = False
time.sleep(sleep)
except KeyboardInterrupt:
pass
except Exception as e:
traceback.print_exc()
if kwargs.get("format") == "json":
output_file.write("\n]")
if first_post:
print("Couldn't get any posts.", file=sys.stderr)
output_file.close()
def enable_logging(level=logging.DEBUG):
handler = logging.StreamHandler()
handler.setLevel(level)
logger.addHandler(handler)
logger.setLevel(level)
# Disable logging by default
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler()) |
from nbconvert import RSTExporter
from prose.io import get_files
import base64
import os
from os import path
import shutil
from traitlets.config import Config
c = Config()
c.RegexRemovePreprocessor.patterns = ["# hidden"]
rst = RSTExporter(config=c)
def save_image(destination, imstring):
    with open(destination, 'wb') as f:
        f.write(imstring)
def convert_ipynb(filename, destination):
body, resources = rst.from_filename(filename)
basename = path.basename(filename)[:-6]
destination = path.join(destination, basename)
if path.exists(destination):
shutil.rmtree(destination)
os.mkdir(destination)
for imname, imstring in resources['outputs'].items():
save_image(path.join(destination, imname), imstring)
    with open(path.join(destination, f"{basename}.rst"), "w") as f:
        f.write(body.replace("../_static", "../../_static"))
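# Illustrative usage (assumed paths, not from the original script): convert one
# notebook to RST, writing the page and its extracted images next to each other.
#   convert_ipynb("notebooks/example.ipynb", "docs/notebooks")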
import inspect
from os import path
from prose import blocks, Block
rst_docs = get_files(".rst", "blocks")
rst_docs_names = [f.split("/")[-1][0:-4] for f in rst_docs]
for name, obj in inspect.getmembers(blocks):
if inspect.isclass(obj):
if issubclass(obj, Block):
if name not in rst_docs_names:
filename = path.join("./blocks/others", "{}.rst".format(name))
with open(filename, "w") as f:
f.write(":orphan:\n")
f.write(f"{name}\n{"-"*len(name)}")
f.write(f"\n\n.. autoclass:: prose.blocks.{name}\n\t:members:")
print(name)
| from nbconvert import RSTExporter
from prose.io import get_files
import base64
import os
from os import path
import shutil
from traitlets.config import Config
c = Config()
c.RegexRemovePreprocessor.patterns = ["# hidden"]
rst = RSTExporter(config=c)
def save_image(destination, imstring):
    with open(destination, 'wb') as f:
        f.write(imstring)
def convert_ipynb(filename, destination):
body, resources = rst.from_filename(filename)
basename = path.basename(filename)[:-6]
destination = path.join(destination, basename)
if path.exists(destination):
shutil.rmtree(destination)
os.mkdir(destination)
for imname, imstring in resources['outputs'].items():
save_image(path.join(destination, imname), imstring)
    with open(path.join(destination, f"{basename}.rst"), "w") as f:
        f.write(body.replace("../_static", "../../_static"))
import inspect
from os import path
from prose import blocks, Block
rst_docs = get_files(".rst", "blocks")
rst_docs_names = [f.split("/")[-1][0:-4] for f in rst_docs]
for name, obj in inspect.getmembers(blocks):
if inspect.isclass(obj):
if issubclass(obj, Block):
if name not in rst_docs_names:
filename = path.join("./blocks/others", "{}.rst".format(name))
with open(filename, "w") as f:
f.write(":orphan:\n")
f.write(f"{name}\n{'-'*len(name)}")
f.write(f"\n\n.. autoclass:: prose.blocks.{name}\n\t:members:")
print(name)
|
# stdlib
import json
# lib
from jnpr.junos import Device
from lxml import etree
import xmltodict
# local
from bin import Hosts
from bin.utils import (
colour_print,
error,
line_break,
)
import settings
class Learner(Hosts):
"""
Command: learner
Desc: inherits hosts from Hosts and learns about its information
"""
def __init__(self):
Hosts.__init__(self)
def learner(self, password: str):
self.prompt()
confirm = input(' ').lower()
if confirm != 'y':
return None
line_break()
for host in self.juniper_hosts:
print(
host['host_name'],
host['ip'],
host['username'],
)
dev = Device(
host=host['ip'],
user=host['username'],
password=password,
port=22,
)
try:
dev.open()
interfaces = dev.rpc.get_interface_information(
terse=True,
)
dev.close()
interfaces = etree.tostring(interfaces)
parsed_dict = dict(xmltodict.parse(interfaces))
parsed_dict = parsed_dict['interface-information']
parsed_list = parsed_dict['physical-interface']
with open(f'{settings.PORTS_PATH}{host['host_name']}.json', 'w+') as file:
for item in parsed_list:
try:
del item['logical-interface']
except KeyError:
pass
file.write(f'{json.dumps(item)}\n')
print(f'{settings.PORTS_PATH}{host['host_name']}.json saved')
line_break()
except Exception as err:
error(f'Unable to find the Host {err}')
line_break()
line_break()
for host in self.rocky_hosts:
print(
host['host_name'],
host['ip'],
host['username'],
)
# call devices
dev = Device(
host=host['ip'],
user=host['username'],
password=password,
port=22,
)
try:
dev.open()
interfaces = dev.rpc.get_interface_information(
terse=True,
)
dev.close()
interfaces = etree.tostring(interfaces)
parsed_dict = dict(xmltodict.parse(interfaces))
# Strip out only 'interface-information'
parsed_dict = parsed_dict['interface-information']
# Strip out only 'physical-interface'
parsed_list = parsed_dict['physical-interface']
# Storing port properties
with open(f'{settings.PORTS_PATH}{host['host_name']}.json', 'w+') as file:
for item in parsed_list:
# If logical interfaces, delete them.
try:
del item['logical-interface']
except KeyError:
pass
# format for storing each port-configuration falls in
# each column
file.write(f'{json.dumps(item)}\n')
print(f'{settings.PORTS_PATH}{host['host_name']}.json saved')
line_break()
except Exception as err:
print(f'Unable to find Host {err}')
line_break()
def run(self, password: str):
self.learner(password)
def prompt(self):
print()
colour_print('(colour_cmd)\u250D' + ('\u2501' * 50) + '\u2511')
colour_print('\u2502' + (' ' * 50) + '\u2502')
colour_print(
'\u2502' + (' ' * 21) + '(colour_warning)WARNING:(colour_cmd)' + (' ' * 21) + '\u2502')
colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)Do not run Port Learner '
'until you(colour_cmd)' + (' ' * 9) + '\u2502')
colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)have run Port Checker and '
'you have(colour_cmd)' + (' ' * 8) + '\u2502')
colour_print(
'\u2502' + (' ' * 12) + '(colour_clear)verified any changed '
'ports.(colour_cmd)' + (' ' * 11) + '\u2502')
colour_print(
'\u2502' + (' ' * 50) + '\u2502')
colour_print(
'\u2502' + (' ' * 6) + '(colour_clear)Once the new port statuses are '
'learned(colour_cmd)' + (' ' * 6) + '\u2502')
colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)the new values will be the '
'norm and(colour_cmd)' + (' ' * 7) + '\u2502')
colour_print(
'\u2502' + (' ' * 13) + '(colour_clear)problems may be '
'masked.(colour_cmd)' + (' ' * 14) + '\u2502')
colour_print(
'\u2502' + (' ' * 50) + '\u2502')
colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)Press (colour_cmd)y (colour_clear)to continue'
' or press (colour_cmd)ENTER(colour_cmd)' + (' ' * 8) + '\u2502')
colour_print(
'\u2502' + (' ' * 12) + '(colour_clear)to return to the (colour_rocky)Rocky '
'(colour_clear)CLI.(colour_cmd)' + (' ' * 11) + '\u2502')
colour_print(
'\u2502' + (' ' * 50) + '\u2502')
colour_print(
'\u2515' + ('\u2501' * 50) + '\u2519(colour_clear)')
print()
| # stdlib
import json
# lib
from jnpr.junos import Device
from lxml import etree
import xmltodict
# local
from bin import Hosts
from bin.utils import (
colour_print,
error,
line_break,
)
import settings
class Learner(Hosts):
"""
Command: learner
Desc: inherits hosts from Hosts and learns about its information
"""
def __init__(self):
Hosts.__init__(self)
def learner(self, password: str):
self.prompt()
confirm = input(' ').lower()
if confirm != 'y':
return None
line_break()
for host in self.juniper_hosts:
print(
host['host_name'],
host['ip'],
host['username'],
)
dev = Device(
host=host['ip'],
user=host['username'],
password=password,
port=22,
)
try:
dev.open()
interfaces = dev.rpc.get_interface_information(
terse=True,
)
dev.close()
interfaces = etree.tostring(interfaces)
parsed_dict = dict(xmltodict.parse(interfaces))
parsed_dict = parsed_dict['interface-information']
parsed_list = parsed_dict['physical-interface']
with open(f'{settings.PORTS_PATH}{host["host_name"]}.json', 'w+') as file:
for item in parsed_list:
try:
del item['logical-interface']
except KeyError:
pass
file.write(f'{json.dumps(item)}\n')
print(f'{settings.PORTS_PATH}{host["host_name"]}.json saved')
line_break()
except Exception as err:
error(f'Unable to find the Host {err}')
line_break()
line_break()
for host in self.rocky_hosts:
print(
host['host_name'],
host['ip'],
host['username'],
)
# call devices
dev = Device(
host=host['ip'],
user=host['username'],
password=password,
port=22,
)
try:
dev.open()
interfaces = dev.rpc.get_interface_information(
terse=True,
)
dev.close()
interfaces = etree.tostring(interfaces)
parsed_dict = dict(xmltodict.parse(interfaces))
# Strip out only 'interface-information'
parsed_dict = parsed_dict['interface-information']
# Strip out only 'physical-interface'
parsed_list = parsed_dict['physical-interface']
# Storing port properties
with open(f'{settings.PORTS_PATH}{host["host_name"]}.json', 'w+') as file:
for item in parsed_list:
# If logical interfaces, delete them.
try:
del item['logical-interface']
except KeyError:
pass
# format for storing each port-configuration falls in
# each column
file.write(f'{json.dumps(item)}\n')
print(f'{settings.PORTS_PATH}{host["host_name"]}.json saved')
line_break()
except Exception as err:
print(f'Unable to find Host {err}')
line_break()
def run(self, password: str):
self.learner(password)
def prompt(self):
print()
colour_print('(colour_cmd)\u250D' + ('\u2501' * 50) + '\u2511')
colour_print('\u2502' + (' ' * 50) + '\u2502')
colour_print(
'\u2502' + (' ' * 21) + '(colour_warning)WARNING:(colour_cmd)' + (' ' * 21) + '\u2502')
colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)Do not run Port Learner '
'until you(colour_cmd)' + (' ' * 9) + '\u2502')
colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)have run Port Checker and '
'you have(colour_cmd)' + (' ' * 8) + '\u2502')
colour_print(
'\u2502' + (' ' * 12) + '(colour_clear)verified any changed '
'ports.(colour_cmd)' + (' ' * 11) + '\u2502')
colour_print(
'\u2502' + (' ' * 50) + '\u2502')
colour_print(
'\u2502' + (' ' * 6) + '(colour_clear)Once the new port statuses are '
'learned(colour_cmd)' + (' ' * 6) + '\u2502')
colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)the new values will be the '
'norm and(colour_cmd)' + (' ' * 7) + '\u2502')
colour_print(
'\u2502' + (' ' * 13) + '(colour_clear)problems may be '
'masked.(colour_cmd)' + (' ' * 14) + '\u2502')
colour_print(
'\u2502' + (' ' * 50) + '\u2502')
colour_print(
'\u2502' + (' ' * 8) + '(colour_clear)Press (colour_cmd)y (colour_clear)to continue'
' or press (colour_cmd)ENTER(colour_cmd)' + (' ' * 8) + '\u2502')
colour_print(
'\u2502' + (' ' * 12) + '(colour_clear)to return to the (colour_rocky)Rocky '
'(colour_clear)CLI.(colour_cmd)' + (' ' * 11) + '\u2502')
colour_print(
'\u2502' + (' ' * 50) + '\u2502')
colour_print(
'\u2515' + ('\u2501' * 50) + '\u2519(colour_clear)')
print()
|
import datetime
import tempfile
import copy
import boto3
import botocore
from rixtribute.helper import get_boto_session, generate_tags, get_external_ip, get_uuid_part_str
from rixtribute import aws_helper
import base64
import os
from typing import List, Optional, Tuple
from rixtribute import ssh
from rixtribute.configuration import config
from rixtribute.ecr import ECR
# from rixtribute.ssh import (
# ssh as _ssh,
# scp as _scp,
# ssh_command as _ssh,
import json
from enum import Enum
# For typing and auto-completion
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from .configuration import ProfileParser
class IpProtocol(Enum):
TCP = 'tcp'
UDP = 'udp'
ICMP = 'icmp'
class EC2Instance(object):
def __init__(self, boto_instance_dict :dict):
self.instance_name = ''
try:
for tag in boto_instance_dict["Tags"]:
key = tag["Key"]
value = tag["Value"]
if key == 'Name':
self.instance_name = value
except KeyError as e:
pass
self.instance_id :str = boto_instance_dict.get("InstanceId", '')
self.image_id :str = boto_instance_dict.get("ImageId", '')
self.instance_type :str = boto_instance_dict.get("InstanceType", '')
self.status :str = boto_instance_dict.get("State", {}).get("Name", '')
self.ssh_port :int = 22
self.workdir = "/workdir"
self.uptime = datetime.datetime.utcnow()-boto_instance_dict["LaunchTime"].replace(tzinfo=None)
if self.status != "running": self.uptime = ''
self.spot_request_id :str = boto_instance_dict.get("SpotInstanceRequestId", '')
# Spot or not
self.instance_lifecycle :str = boto_instance_dict.get("InstanceLifecycle", '')
self.public_dns :str = boto_instance_dict.get("PublicDnsName", '')
def get_printable_dict(self):
keys = [
"instance_name",
"instance_id",
"status",
"uptime",
"instance_lifecycle",
"instance_type",
"public_dns"
]
        return {k: getattr(self, k) for k in keys}
def cancel_spot_request(self):
EC2.cancel_spot_instance_request(self.spot_request_id)
def stop(self):
session = get_boto_session()
client = session.client("ec2")
print(f"Stopping name/id: {self.instance_name}/{self.instance_id}")
# Handle spot requests seperately
if self.spot_request_id:
print(f"this is a spot instance, cancelling spot request: {self.spot_request_id}")
self.cancel_spot_request()
res = client.stop_instances(
InstanceIds=[self.instance_id]
)
if res and res["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to stop {self.instance_id}")
print("Wait for instance to stop..")
waiter = client.get_waiter('instance_stopped')
waiter.wait(
            InstanceIds=[self.instance_id],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100}
)
def terminate(self):
session = get_boto_session()
client = session.client("ec2")
print(f"Terminating name/id: {self.instance_name}/{self.instance_id}")
if self.spot_request_id:
print(f"this is a spot instance, cancelling spot request: {self.spot_request_id}")
self.cancel_spot_request()
res = client.terminate_instances(
InstanceIds=[self.instance_id]
)
if res and res["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to terminate {self.instance_id}")
print("Wait for instance to terminate..")
waiter = client.get_waiter('instance_terminated')
waiter.wait(
InstanceIds=[self.instance_id],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100}
)
def get_username(self):
user = EC2.determine_ec2_user_from_image_id(self.image_id)
return user
def ssh(self):
# key = None
# key = "/home/jri/.ssh/jesper_ssh.pem"
key = config.get_ssh_key()["private_key"]
user = self.get_username()
ssh.ssh(host=self.public_dns, user=user, port=self.ssh_port, key_str=key)
def docker_run(self, cmd :str=None):
key = config.get_ssh_key()["private_key"]
user = self.get_username()
docker_gpu = '$(nvidia-smi --list-gpus > /dev/null && echo "--gpus=all")'
        if cmd is not None:
f = tempfile.NamedTemporaryFile(suffix='_temp', prefix='rxtb_', delete=True)
f.write(f"#!/bin/sh\n".encode("utf8"))
f.write(cmd.encode("utf8"))
f.flush()
cmd_file_abs_path = f.name
cmd_file_name = os.path.basename(cmd_file_abs_path)
# SCP the command file
self.copy_files_to_tmp([cmd_file_abs_path])
cmd_str = (
f'docker run --rm -v /tmp/{cmd_file_name}:/cmd.sh --entrypoint="" {docker_gpu} $DOCKER_IMAGE bash '
f"/cmd.sh"
)
else:
cmd_str = "docker run --rm "+docker_gpu+" $DOCKER_IMAGE"
# cmd = "docker run --gpus=all $DOCKER_IMAGE"
ssh.ssh_command_tmux(host=self.public_dns, command=cmd_str, user=user, port=self.ssh_port, key_str=key)
def copy_files_to_tmp(self, files :List[str], recursive :bool=False):
key = config.get_ssh_key()["private_key"]
user = self.get_username()
dest = f"{user}@{self.public_dns}:/tmp/"
success = ssh.scp(source=files,
dest=dest,
recursive=recursive,
port=self.ssh_port,
key_str=key)
return success
def copy_files_to_workdir(self, files :List[str], recursive :bool=False):
key = config.get_ssh_key()["private_key"]
user = self.get_username()
dest = f"{user}@{self.public_dns}:{self.workdir}"
success = ssh.scp(source=files,
dest=dest,
recursive=recursive,
port=self.ssh_port,
key_str=key)
return success
def copy_files_from_workdir(self, source :List[str], recursive :bool, dest :str='.'):
""" Copy files from the instance workdir """
        key = config.get_ssh_key()["private_key"]
user = self.get_username()
source_prefix = f"{user}@{self.public_dns}:"
for i, path in enumerate(source):
source[i] = source_prefix + os.path.join(self.workdir, path)
success = ssh.scp(source=source,
dest=dest,
recursive=recursive,
port=self.ssh_port,
key_str=key)
return success
def list_files(self):
user = self.get_username()
host = self.public_dns
key = None
ssh.ssh_command(host=host,
user=user,
command=f"ls -1a {self.workdir}/",
port=self.ssh_port,
key=key,
print_output=True)
class EC2(object):
    """STATIC class used as API."""
    def __init__(self, boto_object):
        pass
@staticmethod
def encode_userdata(data :str):
return base64.b64encode(data.encode('utf-8')).decode('utf-8')
@staticmethod
def determine_ec2_user_from_image_id(image_id :str):
session = get_boto_session()
client = session.client("ec2")
res = client.describe_images(
Filters = [
{'Name': 'image-id', 'Values': [image_id,]},
])
if res and res["ResponseMetadata"]["HTTPStatusCode"] != 200:
            raise Exception(f"Error in ami lookup '{image_id}'")
image_name = res['Images'][0]['Name']
#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/connection-prereqs.html
#####################################
# Distro # username
#####################################
# Amazon Linux # ec2-user
# Debian # admin
# Fedora # ec2-user or fedora
# RHEL # ec2-user or root
# SUSE # ec2-user or root
# Ubuntu # ubuntu
# CentOS # centos
#####################################
if 'amazon linux' in image_name.lower() or 'amzn2-ami' in image_name.lower():
return "ec2-user"
if 'centos' in image_name.lower():
return "centos"
elif 'debian' in image_name.lower():
return "admin"
elif 'fedora' in image_name.lower():
return "fedora"
elif 'rhel' in image_name.lower() or 'red hat' in image_name.lower():
return "ec2-user"
elif 'suse' in image_name.lower():
return "ec2-user"
elif 'ubuntu' in image_name.lower():
return "ubuntu"
else:
raise Exception(f"Unknown ami, {image_id}")
@classmethod
def _get_session(cls, region_name :str=None):
session = get_boto_session(region_name=region_name)
return session
@classmethod
def _get_ec2_boto_client(cls, region_name :str=None):
session = cls._get_session(region_name=region_name)
client = session.client("ec2")
return client
#########################
# BOTO3 API functions #
#########################
@classmethod
def list_instances(cls, profile, all=False) -> List[EC2Instance]:
client = cls._get_ec2_boto_client()
if all is True:
response = client.describe_instances()
else:
response = client.describe_instances(
Filters = [
{'Name': 'tag:origin', 'Values': ['rixtribute',]},
{'Name': 'tag:origin-email', 'Values': [profile.email,]},
{'Name': 'tag:origin-name', 'Values': [profile.name,]},
]
)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
instances = []
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
instances.append(EC2Instance(instance))
return instances
@classmethod
def get_instance_from_dns_name(cls, dns_name :str) -> Optional[EC2Instance]:
client = cls._get_ec2_boto_client()
response = client.describe_instances(
Filters = [
{'Name': 'dns-name', 'Values': [dns_name]},
]
)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
instances = []
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
instances.append(EC2Instance(instance))
        try:
            return instances[0]
        except IndexError:
            return None
@classmethod
def list_regions(cls) -> List[str]:
ec2 = EC2._get_ec2_boto_client()
response = ec2.describe_regions()
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
return [row['RegionName'] for row in response['Regions']]
@classmethod
def list_spot_prices(cls,
instance_types: List[str],
region_names :List[str],
start_dt :datetime.datetime=None,
end_dt :datetime.datetime=None,
range_dt :datetime.timedelta=None
) -> List[dict]:
""" Get spot pricing for instance_types in regions
valid usage:
start_dt
start_dt and end_dt
range_dt
(No dt or range) latest 3 hours
        Args:
            instance_types  list of instance types, e.g. ['m5.xlarge']
            region_names    list of region names
            start_dt        (optional) start datetime for spot pricing window
            end_dt          (optional) end datetime for spot pricing window
            range_dt        (optional) timedelta back from now for the spot pricing window
        Returns:
            list of dicts with pricing info
"""
params :dict = {
"InstanceTypes": instance_types,
"ProductDescriptions": ['Linux/UNIX'],
}
if range_dt is not None:
now = datetime.datetime.today()
params["StartTime"] = now - range_dt
params["EndTime"] = now
elif start_dt is not None or end_dt is not None:
if start_dt is not None: params["StartTime"] = start_dt
if end_dt is not None: params["EndTime"] = end_dt
else:
# 3 hours window
params["StartTime"] = datetime.datetime.today() - datetime.timedelta(hours=3)
# if VERBOSE:
# print(params)
prices_l :List[dict] = list()
for region_name in region_names:
ec2 = cls._get_ec2_boto_client(region_name=region_name)
response = ec2.describe_spot_price_history(**params)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
for item in response['SpotPriceHistory']:
prices_l.append({
"zone": item['AvailabilityZone'],
"price": float(item['SpotPrice']),
"instance-type": item['InstanceType'],
})
return prices_l
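    # Example usage (an illustrative sketch, not part of the original code; it
    # assumes AWS credentials and a default region are already configured for
    # get_boto_session):
    #
    #   EC2.list_spot_prices(["m5.xlarge"], ["eu-west-1"],
    #                        range_dt=datetime.timedelta(hours=6))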
@classmethod
def list_prices(cls, instance_types :List[str], region_names :List[str]) -> List[dict]:
"""Return list of prices for instance_type
Args:
instance_types : types of ec2 instances e.g. ['p3.8xlarge']
region_names : list of region codes e.g ['us-east-1', 'eu-west-1']
Returns:
list of dicts:
[{'region_name': 'eu-west-1',
'instance_type': 'p3.8xlarge',
'price': 13.22},]
"""
# pricing = cls._get_session(region_name="us-east-1").client("pricing")
pricing = EC2._get_session(region_name="us-east-1").client("pricing")
region_names = [aws_helper.region_name_to_region_code(x) for x in region_names]
# region = aws_helper.region_code_to_region_name(region_name)
# region1 = aws_helper.region_code_to_region_name("eu-west-2")
# locations
# locations = pricing.get_attribute_values(ServiceCode="AmazonEC2",AttributeName="location")
# locations["AttributeValues"]
prices_l :list = []
for instance_type in instance_types:
response = pricing.get_products(
ServiceCode='AmazonEC2',
Filters=[
{'Type': 'TERM_MATCH', 'Field': 'operatingSystem', 'Value': 'Linux'},
# {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': region},
{'Type': 'TERM_MATCH', 'Field': 'capacitystatus', 'Value': 'UnusedCapacityReservation'},
{'Type': 'TERM_MATCH', 'Field': 'instanceType', 'Value': instance_type},
{'Type': 'TERM_MATCH', 'Field': 'tenancy', 'Value': 'Shared'},
{'Type': 'TERM_MATCH', 'Field': 'preInstalledSw', 'Value': 'NA'}
],
)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
# Run through price list and return filter locations
for price_elm in response['PriceList']:
elm = json.loads(price_elm)
location = elm['product']['attributes']['location']
instance_type = elm['product']['attributes']['instanceType']
if location in region_names:
on_demand = json.loads(price_elm)['terms']['OnDemand']
# Example of price element:
# {'F78KERDK968ABWNU.JRTCKXETXF': {
# 'priceDimensions': {
# 'F78KERDK968ABWNU.JRTCKXETXF.6YS6EN2CT7': {
# 'unit': 'Hrs',
# 'endRange': 'Inf',
# 'description': '$13.22 per Unused Reservation Linux p3.8xlarge Instance Hour',
# 'appliesTo': [],
# 'rateCode': 'F78KERDK968ABWNU.JRTCKXETXF.6YS6EN2CT7',
# 'beginRange': '0',
# 'pricePerUnit': {'USD': '13.2200000000'}
# }
# },
# 'sku': 'F78KERDK968ABWNU',
# 'effectiveDate': '2020-10-01T00:00:00Z',
# 'offerTermCode': 'JRTCKXETXF',
# 'termAttributes': {}
# }}
# NOTE: hack to get into pricePerUnit since keys are obscure
key1 = list(on_demand.keys())[0]
key2 = list(on_demand[key1]['priceDimensions'].keys())[0]
price = on_demand[key1]['priceDimensions'][key2]['pricePerUnit']['USD']
prices_l.append({"region-name": aws_helper.region_code_to_region_name(location),
"instance-type": instance_type,
"price": float(price)})
return prices_l
# @staticmethod
# def lookup_ami(image_id :str):
# session = get_boto_session()
# client = session.client("ec2")
# res = client.describe_images(
# Filters = [
# {'Name': 'image-id', 'Values': ['ami-0d30ddc9d3cc48bac',]},
# ])
# client.describe_images(
# Filters = [
# {'Name': 'image-id', 'Values': ['ami-036f665a5b53a18ce',]},
# ])
@staticmethod
def create_spot_instance(instance_cfg :dict):
session = get_boto_session()
client = session.client("ec2")
cfg = instance_cfg["config"]
region_name = aws_helper.strip_to_region(cfg['region'])
instance_username = EC2.determine_ec2_user_from_image_id(cfg["ami"])
# TODO: if no key then create a key and use it
# TODO start and test env variables
aws_access_key = session.get_credentials().access_key
aws_secret_key = session.get_credentials().secret_key
aws_default_region = session.region_name
user_data = (
f"#!/usr/bin/env bash\n"
f'su - {instance_username} <<AAA\n'
f'echo "export TERM=xterm-256color" >> ~/.bashrc\n'
f'echo "source /home/{instance_username}/.profile" >> ~/.bashrc\n'
f'echo "export AWS_ACCESS_KEY_ID={aws_access_key}" >> ~/.profile\n'
f'echo "export AWS_SECRET_ACCESS_KEY={aws_secret_key}" >> ~/.profile\n'
f'echo "export AWS_DEFAULT_REGION={aws_default_region}" >> ~/.profile\n'
f'echo "set-option -g allow-rename off" > ~/.tmux.conf\n'
f'mkdir ~/workdir\n'
f'AAA\n'
f'source /home/{instance_username}/.profile\n'
f'ln -s /home/{instance_username}/workdir /workdir\n'
)
# IF container then add it to userdata
container_name = instance_cfg.get('container', None)
if container_name:
container_cfg = config.get_container(container_name)
repo = ECR.get_repository(container_cfg['tag'], region_name=region_name)
if repo is not None:
user_data += (
f'su - {instance_username} <<AAA\n'
f'echo "DOCKER_IMAGE=\"{repo.repository_uri}:latest\"" >> ~/.profile\n'
f'$(aws ecr get-login --no-include-email --region {region_name})\n'
f'docker pull {repo.repository_uri}:latest\n'
f'source /home/{instance_username}/.profile\n'
f'AAA\n'
)
encoded_user_data = EC2.encode_userdata(user_data)
security_group_id = EC2.get_or_create_security_group(instance_cfg["name"])
EC2.update_ingress_rules(security_group_id, cfg["ports"])
ssh_key_name = EC2.get_valid_key_pair_name()
name = instance_cfg["name"] + "-" + get_uuid_part_str()
response = client.request_spot_instances(
InstanceCount=1,
LaunchSpecification={
'ImageId': cfg['ami'],
'InstanceType': cfg['type'],
'KeyName': ssh_key_name,
'Placement': {
'AvailabilityZone': cfg['region'],
},
# 'Monitoring': {
# 'Enabled': True,
# },
'SecurityGroupIds': [
security_group_id,
],
'BlockDeviceMappings': [
{'DeviceName': x['devname'],
'Ebs': {
'DeleteOnTermination': True,
'VolumeSize': x['size']
}
} for x in cfg['volumes']
],
'UserData': encoded_user_data,
},
TagSpecifications=[
{'ResourceType': 'spot-instances-request',
'Tags': generate_tags(name),
},
],
Type='persistent',
InstanceInterruptionBehavior='stop',
)
request_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']
print("Wait for request to be fulfilled...")
waiter = client.get_waiter('spot_instance_request_fulfilled')
waiter.wait(
SpotInstanceRequestIds=[request_id,],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100,
}
)
response = client.describe_spot_instance_requests(SpotInstanceRequestIds=[request_id,])
instance_id = response['SpotInstanceRequests'][0]['InstanceId']
response = client.create_tags(
Resources=[instance_id,],
Tags=generate_tags(name),
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when adding tags to instance_id: {instance_id}")
print("Wait for instance to start..")
waiter = client.get_waiter('instance_status_ok')
waiter.wait(
InstanceIds=[instance_id,],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100}
)
@staticmethod
def cancel_spot_instance_request(spot_request_id: str):
session = get_boto_session()
client = session.client("ec2")
response = client.cancel_spot_instance_requests(
SpotInstanceRequestIds=[spot_request_id]
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
            raise Exception(f"Error when trying to cancel spot request {spot_request_id}")
print("Wait for spot request(s) to cancel..")
waiter = client.get_waiter('spot_instance_request_fulfilled')
waiter.wait(
SpotInstanceRequestIds=[spot_request_id,],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100,
}
)
@staticmethod
def create_security_group(instance_name :str) -> str:
session = get_boto_session()
client = session.client("ec2")
# NOTE groupNames are prefixed with: "rxtb-"
response = client.create_security_group(
Description="Created from rxtb",
GroupName=f"rxtb-{instance_name}",
TagSpecifications=[
{'ResourceType': 'security-group',
'Tags': generate_tags(instance_name),
},
],
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to create a security_group")
group_id = response["GroupId"]
return group_id
@staticmethod
def get_or_create_security_group(instance_name :str) -> str:
session = get_boto_session()
client = session.client("ec2")
# NOTE groupNames are prefixed with: "rxtb-"
response = client.describe_security_groups(
Filters=[
{
'Name': 'group-name',
'Values': [
f"rxtb-{instance_name}",
]
},
]
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
            raise Exception("Error when trying to describe security groups")
if len(response["SecurityGroups"]) <= 0:
print("Creating sg")
group_id = EC2.create_security_group(instance_name)
else:
group_id = response["SecurityGroups"][0]["GroupId"]
return group_id
@staticmethod
def update_ingress_rules(security_group_id :str, ports :List[dict]):
"""Update ingress rules
Args:
security_group_id : security group id
ports : list of ports in format: {"port": 22, "protocol": "tcp"}
Returns:
None
"""
session = get_boto_session()
security_group = session.resource("ec2").SecurityGroup(security_group_id)
ports = copy.deepcopy(ports)
public_ip_range = get_external_ip() + "/32"
ip_permissions = security_group.ip_permissions
        for port in ports:
            match = False
            for rule in ip_permissions:
                for ip_range in rule["IpRanges"]:
                    if (ip_range["CidrIp"] == public_ip_range
                            and rule.get("FromPort") == port["port"]
                            and rule["IpProtocol"] == port["protocol"]):
                        match = True
            if not match:
                print(f"adding ingress rule to security_group, port={port['port']}, protocol={port['protocol']}")
                security_group.authorize_ingress(
                    CidrIp=public_ip_range,
                    ToPort=port["port"],
                    FromPort=port["port"],
                    IpProtocol=port["protocol"],
                )
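    # Example call (illustrative only; the port numbers are hypothetical):
    #   EC2.update_ingress_rules(group_id, [{"port": 22, "protocol": "tcp"},
    #                                        {"port": 8888, "protocol": "tcp"}])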
@staticmethod
def create_key_pair() -> Tuple[str, str]:
"""Create an SSH key-pair
Args:
None
Returns:
            (key name, private key material)
"""
session = get_boto_session()
client = session.client("ec2")
project_name = config.project_name
response = client.create_key_pair(
KeyName=f"rxtb-{get_uuid_part_str()}",
TagSpecifications=[
{'ResourceType': 'key-pair',
'Tags': generate_tags(project_name),
},
],
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
            raise Exception("Error when trying to create a key pair")
return (response["KeyName"], response["KeyMaterial"])
@staticmethod
def verify_key_pair_name(key_name :str) -> bool:
session = get_boto_session()
client = session.client("ec2")
try:
response = client.describe_key_pairs(
KeyNames=[key_name],
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to describe_key_pairs")
except botocore.exceptions.ClientError as e:
return False
return True
@staticmethod
def get_valid_key_pair_name() -> str:
# load existing key if any
ssh_key_name = config.get_ssh_key().get("key_name", None)
        if ssh_key_name is None or not EC2.verify_key_pair_name(ssh_key_name):
key_name, private_key = EC2.create_key_pair()
config.add_ssh_key(key_name, private_key)
ssh_key_name = key_name
return ssh_key_name
"""
ssh ec2-user@ec2-18-203-155-39.eu-west-1.compute.amazonaws.com
BOTO OBJECT EXAMPLE
{'Groups': [],
'Instances': [{'AmiLaunchIndex': 0,
'ImageId': 'ami-003a0987ccad642ec',
'InstanceId': 'i-092ecefa5e0781cfd',
'InstanceType': 't2.micro',
'KeyName': 'steffen-ssh-key',
'LaunchTime': datetime.datetime(2019, 9, 5, 6, 25, 2, tzinfo=tzutc()),
'Monitoring': {'State': 'disabled'},
'Placement': {'AvailabilityZone': 'eu-west-1c',
'GroupName': '',
'Tenancy': 'default'},
'PrivateDnsName': 'ip-172-31-5-209.eu-west-1.compute.internal',
'PrivateIpAddress': '172.31.5.209',
'ProductCodes': [],
'PublicDnsName': '',
'State': {'Code': 80, 'Name': 'stopped'},
'StateTransitionReason': 'User initiated (2019-09-05 06:53:58 GMT)',
'SubnetId': 'subnet-9b6255fe',
'VpcId': 'vpc-c6c486a3',
'Architecture': 'x86_64',
'BlockDeviceMappings': [{'DeviceName': '/dev/sda1',
'Ebs': {'AttachTime': datetime.datetime(2019, 9, 5, 6, 25, 3, tzinfo=tzutc()),
'DeleteOnTermination': True,
'Status': 'attached',
'VolumeId': 'vol-0331b158407c04ebb'}}],
'ClientToken': '',
'EbsOptimized': False,
'EnaSupport': True,
'Hypervisor': 'xen',
'NetworkInterfaces': [{'Attachment': {'AttachTime': datetime.datetime(2019, 9, 5, 6, 25, 2, tzinfo=tzutc()),
'AttachmentId': 'eni-attach-0d55e48d960945264',
'DeleteOnTermination': True,
'DeviceIndex': 0,
'Status': 'attached'},
'Description': '',
'Groups': [{'GroupName': 'launch-wizard-7',
'GroupId': 'sg-09e76ae1c227db804'}],
'Ipv6Addresses': [],
'MacAddress': '02:36:28:a3:8f:30',
'NetworkInterfaceId': 'eni-0a6a4fb64050fc8a4',
'OwnerId': '293331033030',
'PrivateDnsName': 'ip-172-31-5-209.eu-west-1.compute.internal',
'PrivateIpAddress': '172.31.5.209',
'PrivateIpAddresses': [{'Primary': True,
'PrivateDnsName': 'ip-172-31-5-209.eu-west-1.compute.internal',
'PrivateIpAddress': '172.31.5.209'}],
'SourceDestCheck': True,
'Status': 'in-use',
'SubnetId': 'subnet-9b6255fe',
'VpcId': 'vpc-c6c486a3',
'InterfaceType': 'interface'}],
'RootDeviceName': '/dev/sda1',
'RootDeviceType': 'ebs',
'SecurityGroups': [{'GroupName': 'launch-wizard-7',
'GroupId': 'sg-09e76ae1c227db804'}],
'SourceDestCheck': True,
'StateReason': {'Code': 'Client.UserInitiatedShutdown',
'Message': 'Client.UserInitiatedShutdown: User initiated shutdown'},
'Tags': [{'Key': 'Name', 'Value': 'steffen-rstudio-test'}],
'VirtualizationType': 'hvm',
'CpuOptions': {'CoreCount': 1, 'ThreadsPerCore': 1},
'CapacityReservationSpecification': {'CapacityReservationPreference': 'open'},
'HibernationOptions': {'Configured': False},
'MetadataOptions': {'State': 'applied',
'HttpTokens': 'optional',
'HttpPutResponseHopLimit': 1,
'HttpEndpoint': 'enabled'}}],
'OwnerId': '293331033030',
'ReservationId': 'r-0dd4d193f8704f1f8'}
"""
# To test
#rxtb start
| import datetime
import tempfile
import copy
import boto3
import botocore
from rixtribute.helper import get_boto_session, generate_tags, get_external_ip, get_uuid_part_str
from rixtribute import aws_helper
import base64
import os
from typing import List, Optional, Tuple
from rixtribute import ssh
from rixtribute.configuration import config
from rixtribute.ecr import ECR
# from rixtribute.ssh import (
# ssh as _ssh,
# scp as _scp,
# ssh_command as _ssh,
import json
from enum import Enum
# For typing and auto-completion
try:
if TYPE_CHECKING is not None:
from .configuration import ProfileParser
except NameError as e:
pass
class IpProtocol(Enum):
TCP = 'tcp'
UDP = 'udp'
ICMP = 'icmp'
class EC2Instance(object):
def __init__(self, boto_instance_dict :dict):
self.instance_name = ''
try:
for tag in boto_instance_dict["Tags"]:
key = tag["Key"]
value = tag["Value"]
if key == 'Name':
self.instance_name = value
except KeyError as e:
pass
self.instance_id :str = boto_instance_dict.get("InstanceId", '')
self.image_id :str = boto_instance_dict.get("ImageId", '')
self.instance_type :str = boto_instance_dict.get("InstanceType", '')
self.status :str = boto_instance_dict.get("State", {}).get("Name", '')
self.ssh_port :int = 22
self.workdir = "/workdir"
self.uptime = datetime.datetime.utcnow()-boto_instance_dict["LaunchTime"].replace(tzinfo=None)
if self.status != "running": self.uptime = ''
self.spot_request_id :str = boto_instance_dict.get("SpotInstanceRequestId", '')
# Spot or not
self.instance_lifecycle :str = boto_instance_dict.get("InstanceLifecycle", '')
self.public_dns :str = boto_instance_dict.get("PublicDnsName", '')
def get_printable_dict(self):
keys = [
"instance_name",
"instance_id",
"status",
"uptime",
"instance_lifecycle",
"instance_type",
"public_dns"
]
return {k: getattr(self, k) for k in keys if k in keys}
def cancel_spot_request(self):
EC2.cancel_spot_instance_request(self.spot_request_id)
def stop(self):
session = get_boto_session()
client = session.client("ec2")
print(f"Stopping name/id: {self.instance_name}/{self.instance_id}")
# Handle spot requests seperately
if self.spot_request_id:
print(f"this is a spot instance, cancelling spot request: {self.spot_request_id}")
self.cancel_spot_request()
res = client.stop_instances(
InstanceIds=[self.instance_id]
)
if res and res["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to stop {self.instance_id}")
print("Wait for instance to stop..")
waiter = client.get_waiter('instance_stopped')
waiter.wait(
InstanceIds=instance_ids,
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100}
)
def terminate(self):
session = get_boto_session()
client = session.client("ec2")
print(f"Terminating name/id: {self.instance_name}/{self.instance_id}")
if self.spot_request_id:
print(f"this is a spot instance, cancelling spot request: {self.spot_request_id}")
self.cancel_spot_request()
res = client.terminate_instances(
InstanceIds=[self.instance_id]
)
if res and res["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to terminate {self.instance_id}")
print("Wait for instance to terminate..")
waiter = client.get_waiter('instance_terminated')
waiter.wait(
InstanceIds=[self.instance_id],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100}
)
def get_username(self):
user = EC2.determine_ec2_user_from_image_id(self.image_id)
return user
def ssh(self):
# key = None
# key = "/home/jri/.ssh/jesper_ssh.pem"
key = config.get_ssh_key()["private_key"]
user = self.get_username()
ssh.ssh(host=self.public_dns, user=user, port=self.ssh_port, key_str=key)
def docker_run(self, cmd :str=None):
key = config.get_ssh_key()["private_key"]
user = self.get_username()
docker_gpu = '$(nvidia-smi --list-gpus > /dev/null && echo "--gpus=all")'
if cmd != None:
f = tempfile.NamedTemporaryFile(suffix='_temp', prefix='rxtb_', delete=True)
f.write(f"#!/bin/sh\n".encode("utf8"))
f.write(cmd.encode("utf8"))
f.flush()
cmd_file_abs_path = f.name
cmd_file_name = os.path.basename(cmd_file_abs_path)
# SCP the command file
self.copy_files_to_tmp([cmd_file_abs_path])
cmd_str = (
f'docker run --rm -v /tmp/{cmd_file_name}:/cmd.sh --entrypoint="" {docker_gpu} $DOCKER_IMAGE bash '
f"/cmd.sh"
)
else:
cmd_str = "docker run --rm "+docker_gpu+" $DOCKER_IMAGE"
# cmd = "docker run --gpus=all $DOCKER_IMAGE"
ssh.ssh_command_tmux(host=self.public_dns, command=cmd_str, user=user, port=self.ssh_port, key_str=key)
def copy_files_to_tmp(self, files :List[str], recursive :bool=False):
key = config.get_ssh_key()["private_key"]
user = self.get_username()
dest = f"{user}@{self.public_dns}:/tmp/"
success = ssh.scp(source=files,
dest=dest,
recursive=recursive,
port=self.ssh_port,
key_str=key)
return success
def copy_files_to_workdir(self, files :List[str], recursive :bool=False):
key = config.get_ssh_key()["private_key"]
user = self.get_username()
dest = f"{user}@{self.public_dns}:{self.workdir}"
success = ssh.scp(source=files,
dest=dest,
recursive=recursive,
port=self.ssh_port,
key_str=key)
return success
def copy_files_from_workdir(self, source :List[str], recursive :bool, dest :str='.'):
""" Copy files from the instance workdir """
key = config.get_ssh_key()["private_key"]
key = None
user = self.get_username()
source_prefix = f"{user}@{self.public_dns}:"
for i, path in enumerate(source):
source[i] = source_prefix + os.path.join(self.workdir, path)
success = ssh.scp(source=source,
dest=dest,
recursive=recursive,
port=self.ssh_port,
key_str=key)
return success
def list_files(self):
user = self.get_username()
host = self.public_dns
key = None
ssh.ssh_command(host=host,
user=user,
command=f"ls -1a {self.workdir}/",
port=self.ssh_port,
key=key,
print_output=True)
class EC2(object):
def __init__(self, boto_object):
""" STATIC class used as API """
pass
@staticmethod
def encode_userdata(data :str):
return base64.b64encode(data.encode('utf-8')).decode('utf-8')
@staticmethod
def determine_ec2_user_from_image_id(image_id :str):
session = get_boto_session()
client = session.client("ec2")
res = client.describe_images(
Filters = [
{'Name': 'image-id', 'Values': [image_id,]},
])
if res and res["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error in ami lookup '{image_id}")
image_name = res['Images'][0]['Name']
#see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/connection-prereqs.html
#####################################
# Distro # username
#####################################
# Amazon Linux # ec2-user
# Debian # admin
# Fedora # ec2-user or fedora
# RHEL # ec2-user or root
# SUSE # ec2-user or root
# Ubuntu # ubuntu
# CentOS # centos
#####################################
if 'amazon linux' in image_name.lower() or 'amzn2-ami' in image_name.lower():
return "ec2-user"
if 'centos' in image_name.lower():
return "centos"
elif 'debian' in image_name.lower():
return "admin"
elif 'fedora' in image_name.lower():
return "fedora"
elif 'rhel' in image_name.lower() or 'red hat' in image_name.lower():
return "ec2-user"
elif 'suse' in image_name.lower():
return "ec2-user"
elif 'ubuntu' in image_name.lower():
return "ubuntu"
else:
raise Exception(f"Unknown ami, {image_id}")
@classmethod
def _get_session(cls, region_name :str=None):
session = get_boto_session(region_name=region_name)
return session
@classmethod
def _get_ec2_boto_client(cls, region_name :str=None):
session = cls._get_session(region_name=region_name)
client = session.client("ec2")
return client
#########################
# BOTO3 API functions #
#########################
@classmethod
def list_instances(cls, profile, all=False) -> List[EC2Instance]:
client = cls._get_ec2_boto_client()
if all is True:
response = client.describe_instances()
else:
response = client.describe_instances(
Filters = [
{'Name': 'tag:origin', 'Values': ['rixtribute',]},
{'Name': 'tag:origin-email', 'Values': [profile.email,]},
{'Name': 'tag:origin-name', 'Values': [profile.name,]},
]
)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
instances = []
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
instances.append(EC2Instance(instance))
return instances
@classmethod
def get_instance_from_dns_name(cls, dns_name :str) -> Optional[EC2Instance]:
client = cls._get_ec2_boto_client()
response = client.describe_instances(
Filters = [
{'Name': 'dns-name', 'Values': [dns_name]},
]
)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
instances = []
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
instances.append(EC2Instance(instance))
try:
return instances[0]
except IndexError as e:
return None
return instances[0]
@classmethod
def list_regions(cls) -> List[str]:
ec2 = EC2._get_ec2_boto_client()
response = ec2.describe_regions()
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
return [row['RegionName'] for row in response['Regions']]
@classmethod
def list_spot_prices(cls,
instance_types: List[str],
region_names :List[str],
start_dt :datetime.datetime=None,
end_dt :datetime.datetime=None,
range_dt :datetime.timedelta=None
) -> List[dict]:
""" Get spot pricing for instance_types in regions
valid usage:
start_dt
start_dt and end_dt
range_dt
(No dt or range) latest 3 hours
Args:
None
instance_types list of instance type e.g. m5.xlarge
region_names list of regions
start_dt (optional) start datetime for spot pricing window
end_dt (optional) end datetime for spot pricing window
range_dt (optional) range timedelta from now for spot pricing window
Returns:
dict with pricing info
"""
params :dict = {
"InstanceTypes": instance_types,
"ProductDescriptions": ['Linux/UNIX'],
}
if range_dt is not None:
now = datetime.datetime.today()
params["StartTime"] = now - range_dt
params["EndTime"] = now
elif start_dt is not None or end_dt is not None:
if start_dt is not None: params["StartTime"] = start_dt
if end_dt is not None: params["EndTime"] = end_dt
else:
# 3 hours window
params["StartTime"] = datetime.datetime.today() - datetime.timedelta(hours=3)
# if VERBOSE:
# print(params)
prices_l :List[dict] = list()
for region_name in region_names:
ec2 = cls._get_ec2_boto_client(region_name=region_name)
response = ec2.describe_spot_price_history(**params)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
for item in response['SpotPriceHistory']:
prices_l.append({
"zone": item['AvailabilityZone'],
"price": float(item['SpotPrice']),
"instance-type": item['InstanceType'],
})
return prices_l
@classmethod
def list_prices(cls, instance_types :List[str], region_names :List[str]) -> List[dict]:
"""Return list of prices for instance_type
Args:
instance_types : types of ec2 instances e.g. ['p3.8xlarge']
region_names : list of region codes e.g ['us-east-1', 'eu-west-1']
Returns:
list of dicts:
[{'region_name': 'eu-west-1',
'instance_type': 'p3.8xlarge',
'price': 13.22},]
"""
# pricing = cls._get_session(region_name="us-east-1").client("pricing")
pricing = EC2._get_session(region_name="us-east-1").client("pricing")
region_names = [aws_helper.region_name_to_region_code(x) for x in region_names]
# region = aws_helper.region_code_to_region_name(region_name)
# region1 = aws_helper.region_code_to_region_name("eu-west-2")
# locations
# locations = pricing.get_attribute_values(ServiceCode="AmazonEC2",AttributeName="location")
# locations["AttributeValues"]
prices_l :list = []
for instance_type in instance_types:
response = pricing.get_products(
ServiceCode='AmazonEC2',
Filters=[
{'Type': 'TERM_MATCH', 'Field': 'operatingSystem', 'Value': 'Linux'},
# {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': region},
{'Type': 'TERM_MATCH', 'Field': 'capacitystatus', 'Value': 'UnusedCapacityReservation'},
{'Type': 'TERM_MATCH', 'Field': 'instanceType', 'Value': instance_type},
{'Type': 'TERM_MATCH', 'Field': 'tenancy', 'Value': 'Shared'},
{'Type': 'TERM_MATCH', 'Field': 'preInstalledSw', 'Value': 'NA'}
],
)
assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
# Run through price list and return filter locations
for price_elm in response['PriceList']:
elm = json.loads(price_elm)
location = elm['product']['attributes']['location']
instance_type = elm['product']['attributes']['instanceType']
if location in region_names:
on_demand = json.loads(price_elm)['terms']['OnDemand']
# Example of price element:
# {'F78KERDK968ABWNU.JRTCKXETXF': {
# 'priceDimensions': {
# 'F78KERDK968ABWNU.JRTCKXETXF.6YS6EN2CT7': {
# 'unit': 'Hrs',
# 'endRange': 'Inf',
# 'description': '$13.22 per Unused Reservation Linux p3.8xlarge Instance Hour',
# 'appliesTo': [],
# 'rateCode': 'F78KERDK968ABWNU.JRTCKXETXF.6YS6EN2CT7',
# 'beginRange': '0',
# 'pricePerUnit': {'USD': '13.2200000000'}
# }
# },
# 'sku': 'F78KERDK968ABWNU',
# 'effectiveDate': '2020-10-01T00:00:00Z',
# 'offerTermCode': 'JRTCKXETXF',
# 'termAttributes': {}
# }}
# NOTE: hack to get into pricePerUnit since keys are obscure
key1 = list(on_demand.keys())[0]
key2 = list(on_demand[key1]['priceDimensions'].keys())[0]
price = on_demand[key1]['priceDimensions'][key2]['pricePerUnit']['USD']
prices_l.append({"region-name": aws_helper.region_code_to_region_name(location),
"instance-type": instance_type,
"price": float(price)})
return prices_l
# @staticmethod
# def lookup_ami(image_id :str):
# session = get_boto_session()
# client = session.client("ec2")
# res = client.describe_images(
# Filters = [
# {'Name': 'image-id', 'Values': ['ami-0d30ddc9d3cc48bac',]},
# ])
# client.describe_images(
# Filters = [
# {'Name': 'image-id', 'Values': ['ami-036f665a5b53a18ce',]},
# ])
@staticmethod
def create_spot_instance(instance_cfg :dict):
session = get_boto_session()
client = session.client("ec2")
cfg = instance_cfg["config"]
region_name = aws_helper.strip_to_region(cfg['region'])
instance_username = EC2.determine_ec2_user_from_image_id(cfg["ami"])
# TODO: if no key then create a key and use it
# TODO start and test env variables
aws_access_key = session.get_credentials().access_key
aws_secret_key = session.get_credentials().secret_key
aws_default_region = session.region_name
user_data = (
f"#!/usr/bin/env bash\n"
f'su - {instance_username} <<AAA\n'
f'echo "export TERM=xterm-256color" >> ~/.bashrc\n'
f'echo "source /home/{instance_username}/.profile" >> ~/.bashrc\n'
f'echo "export AWS_ACCESS_KEY_ID={aws_access_key}" >> ~/.profile\n'
f'echo "export AWS_SECRET_ACCESS_KEY={aws_secret_key}" >> ~/.profile\n'
f'echo "export AWS_DEFAULT_REGION={aws_default_region}" >> ~/.profile\n'
f'echo "set-option -g allow-rename off" > ~/.tmux.conf\n'
f'mkdir ~/workdir\n'
f'AAA\n'
f'source /home/{instance_username}/.profile\n'
f'ln -s /home/{instance_username}/workdir /workdir\n'
)
# IF container then add it to userdata
container_name = instance_cfg.get('container', None)
if container_name:
container_cfg = config.get_container(container_name)
repo = ECR.get_repository(container_cfg['tag'], region_name=region_name)
if repo is not None:
user_data += (
f'su - {instance_username} <<AAA\n'
f'echo "DOCKER_IMAGE=\"{repo.repository_uri}:latest\"" >> ~/.profile\n'
f'$(aws ecr get-login --no-include-email --region {region_name})\n'
f'docker pull {repo.repository_uri}:latest\n'
f'source /home/{instance_username}/.profile\n'
f'AAA\n'
)
encoded_user_data = EC2.encode_userdata(user_data)
security_group_id = EC2.get_or_create_security_group(instance_cfg["name"])
EC2.update_ingress_rules(security_group_id, cfg["ports"])
ssh_key_name = EC2.get_valid_key_pair_name()
name = instance_cfg["name"] + "-" + get_uuid_part_str()
response = client.request_spot_instances(
InstanceCount=1,
LaunchSpecification={
'ImageId': cfg['ami'],
'InstanceType': cfg['type'],
'KeyName': ssh_key_name,
'Placement': {
'AvailabilityZone': cfg['region'],
},
# 'Monitoring': {
# 'Enabled': True,
# },
'SecurityGroupIds': [
security_group_id,
],
'BlockDeviceMappings': [
{'DeviceName': x['devname'],
'Ebs': {
'DeleteOnTermination': True,
'VolumeSize': x['size']
}
} for x in cfg['volumes']
],
'UserData': encoded_user_data,
},
TagSpecifications=[
{'ResourceType': 'spot-instances-request',
'Tags': generate_tags(name),
},
],
Type='persistent',
InstanceInterruptionBehavior='stop',
)
request_id = response['SpotInstanceRequests'][0]['SpotInstanceRequestId']
print("Wait for request to be fulfilled...")
waiter = client.get_waiter('spot_instance_request_fulfilled')
waiter.wait(
SpotInstanceRequestIds=[request_id,],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100,
}
)
response = client.describe_spot_instance_requests(SpotInstanceRequestIds=[request_id,])
instance_id = response['SpotInstanceRequests'][0]['InstanceId']
response = client.create_tags(
Resources=[instance_id,],
Tags=generate_tags(name),
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when adding tags to instance_id: {instance_id}")
print("Wait for instance to start..")
waiter = client.get_waiter('instance_status_ok')
waiter.wait(
InstanceIds=[instance_id,],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100}
)
@staticmethod
def cancel_spot_instance_request(spot_request_id: str):
session = get_boto_session()
client = session.client("ec2")
response = client.cancel_spot_instance_requests(
SpotInstanceRequestIds=[spot_request_id]
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to stop {spot_request_id}")
print("Wait for spot request(s) to cancel..")
waiter = client.get_waiter('spot_instance_request_fulfilled')
waiter.wait(
SpotInstanceRequestIds=[spot_request_id,],
WaiterConfig={
'Delay': 5,
'MaxAttempts': 100,
}
)
@staticmethod
def create_security_group(instance_name :str) -> str:
session = get_boto_session()
client = session.client("ec2")
# NOTE groupNames are prefixed with: "rxtb-"
response = client.create_security_group(
Description="Created from rxtb",
GroupName=f"rxtb-{instance_name}",
TagSpecifications=[
{'ResourceType': 'security-group',
'Tags': generate_tags(instance_name),
},
],
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to create a security_group")
group_id = response["GroupId"]
return group_id
@staticmethod
def get_or_create_security_group(instance_name :str) -> str:
session = get_boto_session()
client = session.client("ec2")
# NOTE groupNames are prefixed with: "rxtb-"
response = client.describe_security_groups(
Filters=[
{
'Name': 'group-name',
'Values': [
f"rxtb-{instance_name}",
]
},
]
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to create a security_group")
if len(response["SecurityGroups"]) <= 0:
print("Creating sg")
group_id = EC2.create_security_group(instance_name)
else:
group_id = response["SecurityGroups"][0]["GroupId"]
return group_id
@staticmethod
def update_ingress_rules(security_group_id :str, ports :List[dict]):
"""Update ingress rules
Args:
security_group_id : security group id
ports : list of ports in format: {"port": 22, "protocol": "tcp"}
Returns:
None
"""
session = get_boto_session()
security_group = session.resource("ec2").SecurityGroup(security_group_id)
ports = copy.deepcopy(ports)
public_ip_range = get_external_ip() + "/32"
ip_permissions = security_group.ip_permissions
for port in ports:
match = False
for rule in ip_permissions:
for ip_range in rule["IpRanges"]:
if ip_range["CidrIp"] == public_ip_range:
ip_match = True
if port["port"] == rule["FromPort"] and rule["IpProtocol"] == port["protocol"]:
match = True
else:
match = False
if match == False:
print(f"adding ingress rule to security_group, port={port['port']}, protocol={port['protocol']}")
security_group.authorize_ingress(
CidrIp=public_ip_range,
ToPort=port["port"],
FromPort=port["port"],
IpProtocol=port["protocol"],
)
@staticmethod
def create_key_pair() -> Tuple[str, str]:
"""Create an SSH key-pair
Args:
None
Returns:
(key name, fingerprint)
"""
session = get_boto_session()
client = session.client("ec2")
project_name = config.project_name
response = client.create_key_pair(
KeyName=f"rxtb-{get_uuid_part_str()}",
TagSpecifications=[
{'ResourceType': 'key-pair',
'Tags': generate_tags(project_name),
},
],
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to create a security_group")
return (response["KeyName"], response["KeyMaterial"])
@staticmethod
def verify_key_pair_name(key_name :str) -> bool:
session = get_boto_session()
client = session.client("ec2")
try:
response = client.describe_key_pairs(
KeyNames=[key_name],
)
if response and response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise Exception(f"Error when trying to describe_key_pairs")
except botocore.exceptions.ClientError as e:
return False
return True
@staticmethod
def get_valid_key_pair_name() -> str:
# load existing key if any
ssh_key_name = config.get_ssh_key().get("key_name", None)
if ssh_key_name == None or EC2.verify_key_pair_name(ssh_key_name) == False:
key_name, private_key = EC2.create_key_pair()
config.add_ssh_key(key_name, private_key)
ssh_key_name = key_name
return ssh_key_name
"""
ssh ec2-user@ec2-18-203-155-39.eu-west-1.compute.amazonaws.com
BOTO OBJECT EXAMPLE
{'Groups': [],
'Instances': [{'AmiLaunchIndex': 0,
'ImageId': 'ami-003a0987ccad642ec',
'InstanceId': 'i-092ecefa5e0781cfd',
'InstanceType': 't2.micro',
'KeyName': 'steffen-ssh-key',
'LaunchTime': datetime.datetime(2019, 9, 5, 6, 25, 2, tzinfo=tzutc()),
'Monitoring': {'State': 'disabled'},
'Placement': {'AvailabilityZone': 'eu-west-1c',
'GroupName': '',
'Tenancy': 'default'},
'PrivateDnsName': 'ip-172-31-5-209.eu-west-1.compute.internal',
'PrivateIpAddress': '172.31.5.209',
'ProductCodes': [],
'PublicDnsName': '',
'State': {'Code': 80, 'Name': 'stopped'},
'StateTransitionReason': 'User initiated (2019-09-05 06:53:58 GMT)',
'SubnetId': 'subnet-9b6255fe',
'VpcId': 'vpc-c6c486a3',
'Architecture': 'x86_64',
'BlockDeviceMappings': [{'DeviceName': '/dev/sda1',
'Ebs': {'AttachTime': datetime.datetime(2019, 9, 5, 6, 25, 3, tzinfo=tzutc()),
'DeleteOnTermination': True,
'Status': 'attached',
'VolumeId': 'vol-0331b158407c04ebb'}}],
'ClientToken': '',
'EbsOptimized': False,
'EnaSupport': True,
'Hypervisor': 'xen',
'NetworkInterfaces': [{'Attachment': {'AttachTime': datetime.datetime(2019, 9, 5, 6, 25, 2, tzinfo=tzutc()),
'AttachmentId': 'eni-attach-0d55e48d960945264',
'DeleteOnTermination': True,
'DeviceIndex': 0,
'Status': 'attached'},
'Description': '',
'Groups': [{'GroupName': 'launch-wizard-7',
'GroupId': 'sg-09e76ae1c227db804'}],
'Ipv6Addresses': [],
'MacAddress': '02:36:28:a3:8f:30',
'NetworkInterfaceId': 'eni-0a6a4fb64050fc8a4',
'OwnerId': '293331033030',
'PrivateDnsName': 'ip-172-31-5-209.eu-west-1.compute.internal',
'PrivateIpAddress': '172.31.5.209',
'PrivateIpAddresses': [{'Primary': True,
'PrivateDnsName': 'ip-172-31-5-209.eu-west-1.compute.internal',
'PrivateIpAddress': '172.31.5.209'}],
'SourceDestCheck': True,
'Status': 'in-use',
'SubnetId': 'subnet-9b6255fe',
'VpcId': 'vpc-c6c486a3',
'InterfaceType': 'interface'}],
'RootDeviceName': '/dev/sda1',
'RootDeviceType': 'ebs',
'SecurityGroups': [{'GroupName': 'launch-wizard-7',
'GroupId': 'sg-09e76ae1c227db804'}],
'SourceDestCheck': True,
'StateReason': {'Code': 'Client.UserInitiatedShutdown',
'Message': 'Client.UserInitiatedShutdown: User initiated shutdown'},
'Tags': [{'Key': 'Name', 'Value': 'steffen-rstudio-test'}],
'VirtualizationType': 'hvm',
'CpuOptions': {'CoreCount': 1, 'ThreadsPerCore': 1},
'CapacityReservationSpecification': {'CapacityReservationPreference': 'open'},
'HibernationOptions': {'Configured': False},
'MetadataOptions': {'State': 'applied',
'HttpTokens': 'optional',
'HttpPutResponseHopLimit': 1,
'HttpEndpoint': 'enabled'}}],
'OwnerId': '293331033030',
'ReservationId': 'r-0dd4d193f8704f1f8'}
"""
# To test
#rxtb start
|
from functools import wraps, partial
from itertools import product, chain
import itertools
import collections
import copy
from enum import Enum
import operator
import random
import unittest
import math
import torch
import numpy as np
from torch._six import inf
import collections.abc
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from torch.testing import make_non_contiguous, make_tensor
from torch.testing._internal.common_dtype import (
_dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types,
floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and,
all_types, double_types, empty_types
)
from torch.testing._internal.common_device_type import \
(onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, precisionOverride,
toleranceOverride, tol, has_cusolver)
from torch.testing._internal.common_cuda import CUDA11OrLater, SM53OrLater, SM60OrLater
from torch.testing._internal.common_utils import \
(is_iterable_of_tensors,
random_symmetric_matrix, random_symmetric_psd_matrix,
make_fullrank_matrices_with_distinct_singular_values,
random_symmetric_pd_matrix, make_symmetric_matrices,
make_symmetric_pd_matrices, random_square_matrix_of_rank,
random_fullrank_matrix_distinct_singular_value,
TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY,
torch_to_numpy_dtype_dict, TEST_WITH_ASAN,
GRADCHECK_NONDET_TOL, slowTest, noncontiguous_like)
import torch.testing._internal.opinfo_helper as opinfo_helper
from setuptools import distutils
has_scipy_fft = False
if TEST_SCIPY:
import scipy.special
try:
import scipy.fft
has_scipy_fft = True
except ModuleNotFoundError:
pass
# Reasonable testing sizes for dimensions
L = 20
M = 10
S = 5
# Unique value to distinguish default from anything else
_NOTHING = object()
class DecorateInfo(object):
"""Describes which test, or type of tests, should be wrapped in the given
decorators when testing an operator. Any test that matches all provided
arguments will be decorated. The decorators will only be applied if the
active_if argument is True."""
__slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']
def __init__(self, decorators, cls_name=None, test_name=None, *,
device_type=None, dtypes=None, active_if=True):
self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]
self.cls_name = cls_name
self.test_name = test_name
self.device_type = device_type
self.dtypes = dtypes
self.active_if = active_if
def is_active(self, cls_name, test_name, device_type, dtype):
return (
self.active_if and
(self.cls_name is None or self.cls_name == cls_name) and
(self.test_name is None or self.test_name == test_name) and
(self.device_type is None or self.device_type == device_type) and
(self.dtypes is None or dtype in self.dtypes)
)
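# An illustrative sketch (not part of the original file; the class, test and
# dtype values below are hypothetical). It shows how a DecorateInfo is typically
# constructed before being attached to an OpInfo's decorators/skips:
_example_decorate_info = DecorateInfo(
    unittest.skip("example skip"),         # decorator(s) to apply
    cls_name='TestCommon',                 # only tests in this test class
    test_name='test_example_case',         # only this test template
    device_type='cuda',                    # only on CUDA devices
    dtypes=(torch.float16,))               # only for float16 inputs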
class SampleInput(object):
"""Represents sample inputs to a function."""
__slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']
def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=""):
# input is the first input to the op and must be either a Tensor or TensorList (Sequence[Tensor]).
# This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
# op with TensorList inputs do not support method or inplace variants.
assert isinstance(input, torch.Tensor) or is_iterable_of_tensors(input)
self.input: Union[torch.Tensor, Sequence[torch.Tensor]] = input
self.args = args
self.kwargs = kwargs if kwargs is not None else {}
self.output_process_fn_grad = output_process_fn_grad
self.name = name
# Specifies if `self.input` is broadcasted or not,
# given that the operator supports broadcasting.
# This field is used to verify the behavior for inplace variant.
#
# If a SampleInput is marked with `broadcasts_input=True`,
        # it is verified that we get a `RuntimeError` with this sample
        # and the inplace variant. Also inplace grad{grad} tests are skipped
        # for such inputs (as they will error out otherwise).
self.broadcasts_input = broadcasts_input
def _repr_helper(self, formatter):
# Helper function to return the details of the SampleInput as `str`
# It consolidates all the fields of SampleInput and allows,
# formatting the fields like `input`, `args`, etc with `formatter`
# callable to customize the representation.
# Look at `summary` method for example.
arguments = [
f'input={formatter(self.input)}',
f'args={formatter(self.args)}',
f'kwargs={formatter(self.kwargs)}',
f'output_process_fn_grad={self.output_process_fn_grad}',
f'broadcasts_input={self.broadcasts_input}',
f'name={repr(self.name)}']
        return f'SampleInput({", ".join(a for a in arguments if a is not None)})'
def __repr__(self):
return self._repr_helper(lambda x: x)
def summary(self):
# Returns the SampleInput details in a more
# friendly format.
# It formats `Tensor` and `TensorList`
# in a more condensed representation.
def formatter(arg):
# Format any instance of `Tensor` (standalone, in list, or in dict)
# by Tensor[TensorShape]
# Eg. Tensor with shape (3, 4) is formatted as Tensor[3, 4]
if isinstance(arg, torch.Tensor):
shape = str(tuple(arg.shape)).replace('(', '').replace(')', '')
return f"Tensor[{shape}]"
elif isinstance(arg, dict):
return {k: formatter(v) for k, v in arg.items()}
elif is_iterable_of_tensors(arg):
return "TensorList[" + ", ".join(map(formatter, arg)) + "]"
elif isinstance(arg, (list, tuple)): # Handle list, tuple
return "(" + ",".join(map(formatter, arg)) + ")"
return repr(arg)
return self._repr_helper(formatter)
# Applies the transform f(t) -> t to each tensor and dtype in the SampleInput
def transform(self, f):
def tt(t):
def _tt(t):
return f(t)
if isinstance(t, torch.Tensor):
return _tt(t)
elif isinstance(t, torch.dtype):
return _tt(t)
elif isinstance(t, list):
return list(map(tt, t))
elif isinstance(t, tuple):
return tuple(map(tt, t))
elif isinstance(t, dict):
return {k: tt(v) for k, v in t.items()}
else:
return t
sample_tt_input, tt_args, tt_kwargs = tt(self.input), tt(self.args), tt(self.kwargs)
return (sample_tt_input, tt_args, tt_kwargs)
# Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)
# Converts tensors to ndarrays by calling .detach().cpu().numpy() on them
# Converts dtypes by remapping them using torch_to_numpy_dtype_dict
def numpy(self):
def to_numpy(t):
if isinstance(t, torch.Tensor):
return t.detach().cpu().numpy()
elif isinstance(t, torch.dtype):
return torch_to_numpy_dtype_dict[t]
return self.transform(to_numpy)
def noncontiguous(self):
def to_noncontiguous(t):
if isinstance(t, torch.Tensor):
return noncontiguous_like(t)
if isinstance(t, torch.dtype):
return t
return self.transform(to_noncontiguous)
class ErrorInput(object):
"""
A SampleInput that will cause the operation to throw an error plus information
about the resulting error.
"""
__slots__ = ['sample_input', 'error_type', 'error_regex']
def __init__(self, sample_input, *, error_type, error_regex):
self.sample_input = sample_input
self.error_type = error_type
self.error_regex = error_regex
class AliasInfo(object):
"""Class holds alias information. For example, torch.abs ->
torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
"""
def __init__(self, alias_name):
self.name = alias_name
self.op = _getattr_qual(torch, alias_name)
self.method_variant = getattr(torch.Tensor, alias_name, None)
self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)
def __call__(self, *args, **kwargs):
return self.op(*args, **kwargs)
# Extension of getattr to support qualified names
# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
try:
for path in name.split('.'):
obj = getattr(obj, path)
return obj
except AttributeError:
if default is not _NOTHING:
return default
else:
raise
# test if a tensor is close to an integer
def close_to_int(x, eps=0.1):
if x.is_complex():
y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x))))
else:
y = torch.abs(torch.frac(x))
return (y < eps) | (y > (1 - eps))
NumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_val'])
# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# The majority of this note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
# See also: PyTorch's GitHub wiki on running and writing tests
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do three things:
#
# 1) to allow systematic testing over all PyTorch's operators
# 2) to simplify operator testing by autogenerating many tests
# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# All these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests that could be automatically generated
# still have to be written manually.
#
# It's helpful to understand that OpInfos are both about test simplification and
# modularity. PyTorch is a complicated framework with many interrelated systems,
# too many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo to understand how to test its forward
# mode AD or NNC support, that testing is typically handled automatically just
# by defining an OpInfo.
#
# It's often surprising to OpInfo writers that just implementing an OpInfo
# typically can't verify an operator is actually implemented correctly:
#
# "If an OpInfo doesn't validate my op works as expected, what's the point
# of it?"
#
# But that is exactly the point. OpInfos are intended to let you focus on testing
# the operator logic you're familiar with instead of having to write tests for
# how the operator interacts with each of PyTorch's many systems.
#
# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
# validate your op works as expected, but that's only in special
# cases. See below for details.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return a list of SampleInputs (see the class description above).
# Each SampleInput defines an "input", "args", "kwargs",
# an "output_process_fn_grad" function, the "broadcasts_input" bool and
# a "name".
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
#
# "broadcasts_input" is a bool indicating whether the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
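#
# For example (shapes are illustrative; make_arg stands in for a partial of
# make_tensor, as used throughout this file), a sample whose first argument
# the op broadcasts would be constructed as:
#
#   SampleInput(make_arg((S, 1)), args=(make_arg((S, S)),), broadcasts_input=True)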
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
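#
# For illustration, a minimal sample inputs function for a hypothetical
# operator (the shapes, args, and kwargs below are made up) might look like:
#
#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#       make_arg = partial(make_tensor, device=device, dtype=dtype,
#                          requires_grad=requires_grad)
#       return [SampleInput(make_arg((S, S))),
#               SampleInput(make_arg((S,)), args=(1,), kwargs=dict(keepdim=True))]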
#
# THE (OPTIONAL) ERROR INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# OpInfos may optionally specify "error inputs" through an error function. If
# specified, test_errors in test_ops.py will call the op with these inputs
# and validate that the desired error is thrown.
#
# Error inputs automate a common testing pattern where multiple inputs are
# passed to an operation and the errors they throw are reviewed. Tests
# written in this style should be ported to the new OpInfo pattern.
#
# Error inputs are specified using the ErrorInputs class, which contains
# a SampleInput (see above) and data about the expected error.
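#
# For illustration, an error inputs function has the same shape as a sample
# inputs function (the ErrorInputs constructor arguments are elided below;
# see the class definition for the exact fields describing the expected error):
#
#   def error_inputs_foo(op_info, device, **kwargs):
#       si = SampleInput(make_tensor((0,), device=device, dtype=torch.float32))
#       return [ErrorInputs(si, ...)]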
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
# in test_ops.py, but some system-specific tests are defined in those
# systems' test files, and subclass-specific tests are defined in the test
# file that corresponds to that subclass (see below).
# Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
# test_ops.py:
#
# - that its supported dtypes are specified correctly
# - that the operation produces the same results when called with noncontiguous inputs
# - that it supports the out= argument properly (if it allows out=),
# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# - that it works with the conjugate view bit properly
# - that its function, method, and inplace variants perform the same operation
# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
# do the same thing).
# - that its inplace variant preserves the input's storage
# - that its gradient formula is implemented correctly, and that it supports
# gradgrad, complex grad and gradgrad, and forward mode AD properly for
# the op's function and inplace variants (method variants are skipped
# to reduce test time).
# - that the operation performs the same operation when traced or scripted
# using the jit
# - that the operation is autodifferentiated by the jit as expected
# - that the operator's aliases, if any, perform the same operation and that
# the jit understands the alias
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
# and test_fx.py. These tests validate that operators work with NNC and FX
# as expected.
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# narrow class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
# - torch
# - torch.fft
# - torch.linalg
# - torch.special
# - torch.nn.functional
# then you should typically add an OpInfo for it.
#
# As mentioned a couple times above, implementing an OpInfo is not
# usually sufficient testing (unless the operator is a unary elementwise
# operator). The OpInfo will only test the properties described in the
# "WHAT'S TESTED" section. It DOES NOT verify that the operator is
# implemented correctly.
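#
# For a concrete sense of the shape of an entry, a (hypothetical) OpInfo in
# the op_db sequence might look like the following; real entries typically
# also set per-device dtypes, skips, and other metadata:
#
#   OpInfo('foo',
#          dtypes=floating_types(),
#          sample_inputs_func=sample_inputs_foo,
#          supports_out=False),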
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
# be consumed by a variety of systems, it can be hard to understand how to
# deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
# function must be defined, and the operator's dtypes must be specified.
# Once that's done you should run the operator's tests in test_ops.py
# (these can be filtered using the "-k" argument in pytest). Tests that
# fail should provide an error message that describes what to change about
# your OpInfo. You don't need to worry about changing an OpInfo's default
# values unless a test yells at you.
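#
# For example, to run just the test_ops.py tests for a hypothetical operator
# named "foo" (the exact path and generated test ids depend on your checkout):
#
#   pytest test/test_ops.py -k foo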
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
# your test provides a clear error message describing what to do when it
# fails. You should not assume the OpInfo implementer is familiar with your
# system.
#
# If you see a confusing error message while developing an OpInfo then please
# file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
# but it's probably necessary as long as OpInfos don't require
# learning about all the systems that consume them. One thing that can help
# is the get_supported_dtypes() function defined in opinfo_helper.py. This
# function can be used to programmatically specify the dtypes an operator
# supports, and is especially useful if writing an OpInfo on a machine
# without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
# the great majority of PyTorch's (public) operators.
#
# Classes and methods for the operator database
class OpInfo(object):
"""Operator information and helper functions for acquiring it."""
def __init__(self,
name, # the string name of the function
*,
ref=None, # An optional reference function that accepts ndarrays (AKA "NumPy arrays").
# If given, the op will be compared with its reference on each of its sample inputs.
# the following metadata describes the operator, its variants,
# and its aliases, if any
aliases=None, # iterable of aliases, e.g. ("absolute",) for torch.abs
variant_test_name='', # additional string to include in the test name
# this is useful when an op needs multiple OpInfos,
# like divide does, often because it's really several
# different ops behind the scenes
op=None, # the function variant of the operation, populated as torch.<name> if None
method_variant=_NOTHING, # explicitly specifies the method variant of the operator
# if _NOTHING (default), the method variant will be autopopulated
# if None, then the OpInfo specifies no method variant
inplace_variant=_NOTHING, # explicitly specifies the inplace variant of the operator
# if _NOTHING (default), the inplace variant will be autopopulated
# if None, then the OpInfo specifies no inplace variant
# the following metadata are test directives for skipping or
# modifying tests
skips=tuple(), # information about which tests to skip
decorators=tuple(), # decorators to apply to generated tests
# the following are pointers to functions to generate certain classes
# of inputs
sample_inputs_func=None, # function to generate sample inputs
error_inputs_func=None, # function to generate inputs that will throw errors
# the following metadata relates to dtype support and is tested for correctness in test_ops.py
dtypes, # dtypes this function works with on the CPU,
# inherited by other device types that don't specify their own dtypes
# the following dtypesIf... options override the dtypes value
# on their respective device types
dtypesIfCPU=None, # dtypes this function is expected to work with on the CPU,
# typically unnecessary since it's (now) redundant with the dtypes kwarg above
dtypesIfCUDA=None, # dtypes this function is expected to work with on CUDA
dtypesIfROCM=None, # dtypes this function is expected to work with on ROCM
backward_dtypes=None, # backward dtypes this function is expected to work with
backward_dtypesIfCPU=None, # backward dtypes this function is expected to work with on CPU
backward_dtypesIfCUDA=None, # backward dtypes this function is expected to work with on CUDA
backward_dtypesIfROCM=None, # backward dtypes this function is expected to work with on ROCM
default_test_dtypes=None, # dtypes to test with by default. Tests are instantiated with
# these dtypes for the op unless otherwise specified.
# This is helpful in reducing the test matrix.
# the following metadata describes the operator's out= support
supports_out=True, # whether the op supports the out kwarg
# defaults to True, if the op does not allow the out kwarg or
# supports it incorrectly then test_out in test_ops.py should fail
safe_casts_outputs=False, # whether op allows safe casting when writing to out arguments
# the following metadata relates to autograd support
supports_autograd=True, # whether the operation supports backward mode AD
# if true, gradient correctness is tested in test_ops.py
# using the op's sample inputs
supports_gradgrad=None, # whether the op supports second order gradients
# if true, gradgrad correctness is tested in test_ops.py
# defaults to supports_autograd's value
supports_inplace_autograd=None, # whether the operation supports inplace autograd
# if true, tested in test_ops.py
# defaults to supports_autograd's value
supports_forward_ad=False, # Whether the operation supports forward mode AD
# If the value is True, we check that the gradients are correct
# If the value is False, we test that forward grad is not implemented
gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs), # wrapper function for gradcheck
check_batched_grad=None, # whether to check batched grad when doing gradcheck
# defaults to supports_autograd's value
check_batched_gradgrad=None, # whether to check batched grad grad when doing gradgradcheck
# defaults to supports_gradgrad's value
check_batched_forward_grad=None, # whether to check batched forward grad when doing gradcheck
# defaults to the value of `supports_forward_ad and check_batched_grad`
gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck
gradcheck_fast_mode=None, # Whether to use the fast implementation for gradcheck/gradgradcheck.
# When set to None, defers to the default value provided by the wrapper
# function around gradcheck (testing._internal.common_utils.gradcheck)
# the following metadata relates to JIT support and is tested for correctness in test_ops.py
aten_name=None, # name of the corresponding aten:: operator
assert_autodiffed=False, # if an op's aten::node is expected to be symbolically autodiffed
autodiff_nonfusible_nodes=None, # a list of strings with node names that are expected to be in a
# DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
# default is populated to be ['aten::(name of Python operator)']
autodiff_fusible_nodes=None, # a list of strings with node names that are expected to be in FusionGroups
# inside of DifferentiableGraphs when this operation is autodiffed.
# Ex: ['aten::add', 'aten::mm'], defaults to an empty list
# Note: currently no ops use fusible nodes
# the following metadata relates to sparse support and is used in test_sparse.py
supports_sparse=False, # whether the op supports sparse inputs
supports_scripting=True, # whether the op supports scripting; if False, only tracing tests are run
# the following metadata relates to sparse csr support and is used in test_sparse_csr.py
supports_sparse_csr=False, # whether the op supports sparse csr inputs
# the following metadata relates to complex support and is checked in test_ops.py
test_conjugated_samples=True, # whether to test the op with conjugated sample inputs
test_neg_view=True, # whether to test the op with the negative view bit set
assert_jit_shape_analysis=False, # assert that jit shape analysis fully propagates shape
):
dtypes_args = (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM)
# Validates the dtypes are generated from the dispatch-related functions
for dtype_list in dtypes_args:
assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))
self.name = name
self.ref = ref
self.aten_name = aten_name if aten_name is not None else name
self.variant_test_name = variant_test_name
# Attribute to verify dynamic_dtypes are used.
self.dynamic_dtypes = any(map(lambda dtypes: isinstance(
dtypes, opinfo_helper._dynamic_dispatch_dtypes), dtypes_args))
if self.dynamic_dtypes:
# Make sure `dtypesIfCUDA` is dynamic, if dynamic dispatch is used for CPU
# This is because, below we set dtypesIfCUDA to dtypes if they are None.
assert isinstance(dtypesIfCUDA, opinfo_helper._dynamic_dispatch_dtypes), \
(f"To use dynamic dypes for operator {name}, "
"acquire the dtypes dynamically for argument `dtypesIfCUDA`."
"This is to ensure that CUDA dtypes are acquired correctly as they"
"differ from CPU dtypes occasionally")
self.dtypes = set(dtypes)
# NOTE: backward dtypes must be acquired before forward dtypes
# since they fall back to explicit (not implicit!) specifications of
# forward dtypes
self.backward_dtypes = set(backward_dtypes) if backward_dtypes is not None else self.dtypes
self.backward_dtypesIfCPU = set(backward_dtypesIfCPU) if backward_dtypesIfCPU is not None else (
backward_dtypes if backward_dtypes is not None
else dtypesIfCPU if dtypesIfCPU is not None
else dtypes)
self.backward_dtypesIfCUDA = set(backward_dtypesIfCUDA) if backward_dtypesIfCUDA is not None else (
backward_dtypes if backward_dtypes is not None
else dtypesIfCUDA if dtypesIfCUDA is not None
else dtypes)
self.backward_dtypesIfROCM = set(backward_dtypesIfROCM) if backward_dtypesIfROCM is not None else (
backward_dtypesIfCUDA if backward_dtypesIfCUDA is not None
else backward_dtypes if backward_dtypes is not None
else dtypesIfROCM if dtypesIfROCM is not None
else dtypesIfCUDA if dtypesIfCUDA is not None
else dtypes)
self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes
self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes
self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypesIfCUDA
self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None
# NOTE: if the op is unspecified it is assumed to be under the torch namespace
self.op = op if op else _getattr_qual(torch, self.name)
method_variant = getattr(torch.Tensor, name, None) if method_variant is _NOTHING else method_variant
# attributes like real, imag are not callable
self.method_variant = method_variant if callable(method_variant) else None
inplace_name = name + "_"
self.inplace_variant = getattr(torch.Tensor, inplace_name, None) \
if inplace_variant is _NOTHING else inplace_variant
self.operator_variant = getattr(operator, name, None)
self.supports_out = supports_out
self.safe_casts_outputs = safe_casts_outputs
self.decorators = (*decorators, *skips)
self.sample_inputs_func = sample_inputs_func
self.error_inputs_func = error_inputs_func
self.assert_autodiffed = assert_autodiffed
self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []
if autodiff_nonfusible_nodes is None:
self.autodiff_nonfusible_nodes = ['aten::' + self.name]
else:
self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes
# Autograd support
# Autograd flags that don't depend on backward AD
self.supports_autograd = supports_autograd
self.supports_forward_ad = supports_forward_ad
self.gradcheck_fast_mode = gradcheck_fast_mode
self.gradcheck_wrapper = gradcheck_wrapper
self.gradcheck_nondet_tol = gradcheck_nondet_tol
# Autograd flags that depend on backward AD only
# - If setting has been explicitly set, raise error if inconsistent
if supports_gradgrad is None:
supports_gradgrad = supports_autograd
else:
assert not (supports_gradgrad and not supports_autograd), (
"supports_gradgrad refines the part of autograd is supported, so it should "
"not be set if supports_autograd is False")
if check_batched_grad is None:
check_batched_grad = supports_autograd or supports_forward_ad
else:
assert not (check_batched_grad and not (supports_autograd or supports_forward_ad)), (
"check_batched_grad refines the part of autograd that will be checked (by gradcheck), so "
"it should not be set if supports_autograd is False")
if check_batched_gradgrad is None:
check_batched_gradgrad = supports_gradgrad
else:
assert not (check_batched_gradgrad and not supports_gradgrad), (
"check_batched_gradgrad refines the part of autograd that will be checked (by "
"gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd "
"is False.")
if check_batched_forward_grad is None:
check_batched_forward_grad = supports_forward_ad
else:
assert not (check_batched_forward_grad and not supports_forward_ad), (
"check_batched_forward_grad should only be used when supports_forward_ad "
"is True. It is used to disable the test in the specific cases "
"where the op supports both forward ad but fails to compute "
"batched forward grad.")
self.supports_gradgrad = supports_gradgrad
self.check_batched_grad = check_batched_grad
self.check_batched_gradgrad = check_batched_gradgrad
self.check_batched_forward_grad = check_batched_forward_grad
# Autograd flags that depend on both forward AD and backward AD
if supports_inplace_autograd is None:
supports_inplace_autograd = supports_autograd or supports_forward_ad
else:
assert not (supports_inplace_autograd and not supports_autograd and not supports_forward_ad), (
"supports_inplace_autograd refines the part of autograd that is supported, so "
"it should not be set if both supports_autograd and supports_forward_ad are False")
self.supports_inplace_autograd = supports_inplace_autograd
self.supports_sparse = supports_sparse
self.supports_sparse_csr = supports_sparse_csr
self.aliases = ()
if aliases is not None:
self.aliases = tuple(AliasInfo(a) for a in aliases) # type: ignore[assignment]
self.supports_scripting = supports_scripting
self.assert_jit_shape_analysis = assert_jit_shape_analysis
self.test_conjugated_samples = test_conjugated_samples
self.test_neg_view = test_neg_view
def __call__(self, *args, **kwargs):
"""Calls the function variant of the operator."""
return self.op(*args, **kwargs)
def get_op(self):
"""Returns the function variant of the operator, torch.<op_name>."""
return self.op
def get_method(self):
"""Returns the method variant of the operator, torch.Tensor.<op_name>.
Returns None if the operator has no method variant.
"""
return self.method_variant
def get_inplace(self):
"""Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
Returns None if the operator has no inplace variant.
"""
return self.inplace_variant
def get_operator_variant(self):
"""Returns operator variant of the operator, e.g. operator.neg
Returns None if the operator has no operator variant.
"""
return self.operator_variant
def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs but with the tensor input or first
tensor in a sequence input conjugated.
"""
# TODO: Remove the try/except once all operators have sample_inputs_func with
# **kwargs in their signature.
try:
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
except TypeError:
samples = self.sample_inputs_func(self, device, dtype, requires_grad)
conj_samples = list(samples)
def conjugate(tensor):
_requires_grad = tensor.requires_grad
with torch.no_grad():
tensor = tensor.conj()
return tensor.requires_grad_(_requires_grad)
for i in range(len(samples)):
sample = conj_samples[i]
# Note: it is assumed that the input here is either a tensor or tensorlist
if isinstance(sample.input, torch.Tensor):
sample.input = conjugate(sample.input)
else:
with torch.no_grad():
sample.input[0] = conjugate(sample.input[0])
return tuple(conj_samples)
def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs.
These samples should be sufficient to test the function works correctly
with autograd, TorchScript, etc.
"""
# TODO: Remove the try/except once all operators have sample_inputs_func with
# **kwargs in their signature.
try:
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
except TypeError:
samples = self.sample_inputs_func(self, device, dtype, requires_grad)
if 'include_conjugated_inputs' in kwargs and kwargs.get('include_conjugated_inputs'):
conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)
samples_list = list(samples)
samples_list.extend(conj_samples)
samples = tuple(samples_list)
return samples
def error_inputs(self, device, **kwargs):
"""
Returns an iterable of ErrorInputs.
"""
return self.error_inputs_func(self, device, **kwargs)
def get_decorators(self, test_class, test_name, device, dtype):
'''Returns the decorators targeting the given test.'''
result = []
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(test_class, test_name, device, dtype):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
def supported_dtypes(self, device_type):
if device_type == 'cpu':
return self.dtypesIfCPU
if device_type == 'cuda':
return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA
else:
return self.dtypes
def supported_backward_dtypes(self, device_type):
if not self.supports_autograd:
return set()
backward_dtypes = None
if device_type == 'cpu':
backward_dtypes = self.backward_dtypesIfCPU
elif device_type == 'cuda':
backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA
else:
backward_dtypes = self.backward_dtypes
allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16)
return set(allowed_backward_dtypes).intersection(backward_dtypes)
def supports_complex_autograd(self, device_type):
if device_type == 'cpu':
return any(dtype.is_complex for dtype in self.backward_dtypesIfCPU)
if device_type == 'cuda':
if TEST_WITH_ROCM:
return any(dtype.is_complex for dtype in self.backward_dtypesIfROCM)
else:
return any(dtype.is_complex for dtype in self.backward_dtypesIfCUDA)
else:
return any(dtype.is_complex for dtype in self.backward_dtypes)
def supports_dtype(self, dtype, device_type):
return dtype in self.supported_dtypes(device_type)
def default_test_dtypes(self, device_type):
"""Returns the default dtypes used to test this operator on the device.
Equal to the operator's default_test_dtypes filtered to remove dtypes
not supported by the device.
"""
supported = self.supported_dtypes(device_type)
return (supported if self._default_test_dtypes is None
else supported.intersection(self._default_test_dtypes))
@property
def formatted_name(self):
"""Returns a formatted full name for this OpInfo that can be used in test names."""
variant = '_' + self.variant_test_name.replace('.', '_') if self.variant_test_name else ''
return '{}{}'.format(self.name.replace('.', '_'), variant)
def _generate_reduction_inputs(device, dtype, requires_grad):
"""Generates input tensors for testing reduction operators"""
yield make_tensor([], device, dtype, requires_grad=requires_grad)
yield make_tensor([2], device, dtype, requires_grad=requires_grad)
yield make_tensor([3, 5], device, dtype, requires_grad=requires_grad)
yield make_tensor([3, 2, 1, 2], device, dtype, requires_grad=requires_grad)
def _generate_reduction_kwargs(ndim, supports_multiple_dims=True):
"""Generates a subset of all valid dim and keepdim kwargs given ndim that
is appropriate for testing reduction operators.
"""
# Test default dim and keepdim
yield {}
# Test reducing the outermost and innermost dimensions
yield {'dim': 0, 'keepdim': True}
yield {'dim': -1, 'keepdim': False}
# Test reducing middle dimension
if ndim > 2:
yield {'dim': ndim // 2, 'keepdim': True}
if supports_multiple_dims:
# Test reducing all dimensions
yield {'dim': tuple(range(ndim)), 'keepdim': False}
# Test reducing both first and last dimensions
if ndim > 1:
yield {'dim': (0, -1), 'keepdim': True}
# Test reducing every other dimension starting with the second
if ndim > 3:
yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}
def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for reduction operators."""
# TODO(@heitorschueroff) Once all reduction operators are using
# ReductionOpInfo use op_info.supports_multiple_dims directly.
supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True)
# TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
# use op_info.generate_args_kwargs directly.
generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {}))
inputs: List[SampleInput] = []
for t in _generate_reduction_inputs(device, dtype, requires_grad):
for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):
kwargs.update(reduction_kwargs)
inputs.append(SampleInput(
t.detach().clone().requires_grad_(requires_grad),
args=args,
kwargs=kwargs))
return inputs
def _generate_masked_op_mask(input_shape, device, **kwargs):
yield None
yield make_tensor(input_shape, device, torch.bool, requires_grad=False)
if len(input_shape) > 2:
# broadcast last mask dimension:
yield make_tensor(input_shape[:-1] + (1,), device, torch.bool, requires_grad=False)
# broadcast middle mask dimension:
yield make_tensor(input_shape[:1] + (1,) + input_shape[2:], device, torch.bool, requires_grad=False)
# broadcast first mask dimension:
yield make_tensor((1,) + input_shape[1:], device, torch.bool, requires_grad=False)
# mask.ndim < input.ndim
yield make_tensor(input_shape[1:], device, torch.bool, requires_grad=False)
# mask.ndim == 1
yield make_tensor(input_shape[-1:], device, torch.bool, requires_grad=False)
# masks that require broadcasting of inputs (mask.ndim >
# input.ndim) are not supported; however, we may reconsider
# this if there is demand for this kind of degenerate case.
def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked reduction operators.
A masked reduction operator is a reduction operator with a trailing
optional mask argument. A mask is a bool tensor with the same
shape as input or a shape that is broadcastable to input shape.
"""
inputs: List[SampleInput] = []
kwargs['supports_multiple_dims'] = op_info.supports_multiple_dims
for sample_input in sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):
sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
if(not requires_grad and dtype.is_floating_point and
sample_input.input.ndim == 2 and mask is not None and
mask.shape == sample_input.input.shape):
for v in [torch.inf, -torch.inf, torch.nan]:
t = sample_input.input.clone()
t.diagonal()[:] = v
inputs.append(SampleInput(t.detach().requires_grad_(requires_grad),
args=sample_input_args,
kwargs=sample_input_kwargs))
return inputs
def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked norm.
"""
inputs: List[SampleInput] = []
for ord in [2.0, 1, float('inf'), float('-inf'), 0]:
for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
def sample_inputs_masked_var(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked var.
"""
inputs: List[SampleInput] = []
for unbiased in [False, True]:
for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
if sample_input.args:
dim = sample_input.args[0]
sample_input_args = sample_input.args[:1] + (unbiased,) + sample_input.args[1:]
sample_input_kwargs = sample_input.kwargs.copy()
else:
dim = sample_input.kwargs.get('dim')
sample_input_args = sample_input.args
sample_input_kwargs = dict(sample_input.kwargs, unbiased=unbiased)
if requires_grad:
inmask = torch._masked._input_mask(sample_input.input, *sample_input_args, **sample_input_kwargs)
orig_count = torch._masked.sum(inmask.new_ones(sample_input.input.shape, dtype=torch.int64),
dim, keepdim=True, mask=inmask)
if orig_count.min() <= int(unbiased):
# Skip samples that lead to singularities in var
# computation, resulting in nan values both in var and
# autograd output that test_grad_fn cannot handle
# correctly.
continue
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
class ReductionOpInfo(OpInfo):
"""Reduction operator information.
An operator is a reduction operator if it reduces one or more dimensions of
the input tensor to a single value. Reduction operators must implement the
following signature:
- `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`
ReductionOpInfo tests that reduction operators implement a consistent API.
Optional features such as reducing over multiple dimensions are captured in
the optional keyword parameters of the ReductionOpInfo constructor.
If a reduction operator does not yet implement the full required API of
reduction operators, this should be documented by skipping the failing
tests rather than adding optional parameters to ReductionOpInfo.
NOTE
The API for reduction operators has not yet been finalized and some
requirements may change.
See tests in test/test_reductions.py
"""
def __init__(
self, name, *,
# The identity value for the operator if it has one.
identity: Optional[Any] = None,
# The nan policy for the operator if it implements one.
# - propagate: NaN values are propagated to the output
# - omit: NaN values are discarded during the reduction
nan_policy: Optional[str] = None,
# Whether the operator supports reducing multiple dimensions.
supports_multiple_dims: bool = True,
# Whether the operator promotes integral to floating point dtypes.
promotes_int_to_float: bool = False,
# Whether the operator promotes all integral dtypes to int64.
promotes_int_to_int64: bool = False,
# If a specific dtype is given, then the operator always returns that
# dtype irrespective of the input dtype. If None, the operator returns
# the dtype according to the type promotion rules above.
result_dtype: Optional[torch.dtype] = None,
# ReductionOpInfo tests generate their own input, dim and keepdim
# arguments and call this function to generate tuples of extra args and
# kwargs to use when calling the op. This is required for operators that
# have other required parameters besides the input tensor.
generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}),
# Options from the OpInfo base class
**kwargs,
):
assert nan_policy in (None, 'propagate', 'omit')
# These are mutually exclusive options
assert not (result_dtype and promotes_int_to_float)
assert not (result_dtype and promotes_int_to_int64)
assert not (promotes_int_to_float and promotes_int_to_int64)
# Default sample_inputs_func for ReductionOpInfo which augments sample
# inputs from sample_inputs_reduction with the args and kwargs from
# generate_args_kwargs. This is only used if sample_inputs_func is None.
def sample_inputs_func(*args, **kwargs):
kwargs['supports_multiple_dims'] = supports_multiple_dims
kwargs['generate_args_kwargs'] = generate_args_kwargs
return sample_inputs_reduction(*args, **kwargs)
# Override OpInfo defaults and call base class __init__
kwargs.setdefault('inplace_variant', None)
kwargs.setdefault('sample_inputs_func', sample_inputs_func)
kwargs.setdefault('default_test_dtypes', (
torch.uint8, torch.int64, torch.float16, torch.bfloat16, torch.float32, torch.complex64))
super(ReductionOpInfo, self).__init__(name, **kwargs)
self.identity = identity
self.nan_policy = nan_policy
self.supports_multiple_dims = supports_multiple_dims
self.promotes_int_to_float = promotes_int_to_float
self.promotes_int_to_int64 = promotes_int_to_int64
self.result_dtype = result_dtype
self.generate_args_kwargs = generate_args_kwargs
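# A rough illustration (hypothetical operator and settings) of registering a
# reduction operator with ReductionOpInfo; real op_db entries also set dtypes,
# skips, and other OpInfo metadata as needed:
#
#   ReductionOpInfo(
#       'foo_sum',                     # hypothetical name
#       identity=0,
#       nan_policy='propagate',
#       supports_multiple_dims=True,
#       promotes_int_to_int64=True),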
def sample_inputs_unary(op_info, device, dtype, requires_grad, **kwargs):
low, high = op_info.domain
low = low if low is None else low + op_info._domain_eps
high = high if high is None else high - op_info._domain_eps
if op_info.supports_sparse_csr:
# Tensors with dim=2 for sparse CSR testing
return (SampleInput(make_tensor((L, L), device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)),)
else:
return (SampleInput(make_tensor((L,), device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)))
# Metadata class for unary "universal functions (ufuncs)" that accept a single
# tensor and have common properties like:
class UnaryUfuncInfo(OpInfo):
"""Operator information for 'universal unary functions (unary ufuncs).'
These are functions of a single tensor with common properties like:
- they are elementwise functions
- the input shape is the output shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCUDA=None,
dtypesIfROCM=None,
default_test_dtypes=(
torch.uint8, torch.long, torch.half, torch.bfloat16,
torch.float32, torch.cfloat), # dtypes which tests check by default
domain=(None, None), # the [low, high) domain of the function
handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)
handles_extremals=True, # whether the op correctly handles extremal values (like inf)
handles_complex_extremals=True, # whether the op correctly handles complex extremals (like inf -infj)
supports_complex_to_float=False, # op supports casting from complex input to real output safely, e.g. angle
sample_inputs_func=sample_inputs_unary,
sample_kwargs=lambda device, dtype, input: ({}, {}),
supports_sparse=False,
reference_numerics_filter=None, # Filter for singular input values for test_reference_numerics_normal
**kwargs):
super(UnaryUfuncInfo, self).__init__(name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
default_test_dtypes=default_test_dtypes,
sample_inputs_func=sample_inputs_func,
supports_sparse=supports_sparse,
**kwargs)
self.ref = ref
self.domain = domain
self.handles_large_floats = handles_large_floats
self.handles_extremals = handles_extremals
self.handles_complex_extremals = handles_complex_extremals
self.supports_complex_to_float = supports_complex_to_float
self.reference_numerics_filter = reference_numerics_filter
# test_unary_ufuncs.py generates its own inputs to test the consistency
# of the operator on sliced tensors, non-contig tensors, etc.
# `sample_kwargs` is a utility function to provide kwargs
# along with those inputs if required (e.g. clamp).
# It should return two dictionaries, first holding kwarg for
# torch operator and second one for reference NumPy operator.
self.sample_kwargs = sample_kwargs
# Epsilon to ensure grad and gradgrad checks don't test values
# outside a function's domain.
self._domain_eps = 1e-5
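# As an illustration of `sample_kwargs` above, an op with extra required kwargs
# such as clamp could return different kwargs for the torch op and for the
# NumPy reference (the bounds below are made up; NumPy's clip uses a_min/a_max):
#
#   sample_kwargs=lambda device, dtype, input: ({'min': -1, 'max': 1},
#                                               {'a_min': -1, 'a_max': 1})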
def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
args_cases = (
# Cases with tensor indices.
(torch.tensor([1, 2, 3]),),
(torch.tensor(1),),
(torch.tensor([1, 2, 3]), 1),
(torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1),
# Cases with list of indices.
((2, 4),),
((2, 4), 1),
((2, 4), -1),
# Cases with integer section.
(3,),
(3, 1),
(3, -1),
)
def generator():
for args in args_cases:
yield SampleInput(make_input((S, S, S)), args=args)
return list(generator())
def sample_inputs_linalg_det(op_info, device, dtype, requires_grad):
kw = dict(device=device, dtype=dtype)
inputs = [
make_tensor((S, S), **kw),
make_tensor((1, 1), **kw), # 1x1
random_symmetric_matrix(S, **kw), # symmetric
random_symmetric_psd_matrix(S, **kw), # symmetric_psd
random_symmetric_pd_matrix(S, **kw), # symmetric_pd
random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null
random_square_matrix_of_rank(S, 1, **kw), # rank1
random_square_matrix_of_rank(S, 2, **kw), # rank2
random_fullrank_matrix_distinct_singular_value(S, **kw), # distinct_singular_value
make_tensor((3, 3, S, S), **kw), # batched
make_tensor((3, 3, 1, 1), **kw), # batched_1x1
random_symmetric_matrix(S, 3, **kw), # batched_symmetric
random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd
random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd
random_fullrank_matrix_distinct_singular_value(S, 3, 3, **kw), # batched_distinct_singular_values
make_tensor((0, 0), **kw),
make_tensor((0, S, S), **kw),
]
for t in inputs:
t.requires_grad = requires_grad
return [SampleInput(t) for t in inputs]
def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_singular_matrix_batch_base(size, rank):
assert size[-1] == size[-2]
assert rank > 0 and rank <= size[-1]
with torch.no_grad():
n = size[-1]
a = make_arg(size[:-2] + (n, rank)) / 10
b = make_arg(size[:-2] + (rank, n)) / 10
x = a @ b
lu, pivs = x.lu()
p, l, u = torch.lu_unpack(lu, pivs)
u_diag_abs = u.diagonal(0, -2, -1).abs()
u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values
u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices
u.diagonal(0, -2, -1).div_(u_diag_abs_largest)
u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps
matrix = p @ l @ u
assert (matrix.det().abs() < torch.finfo(dtype).eps * torch.linalg.matrix_norm(matrix)).all().item()
matrix.requires_grad_(requires_grad)
return matrix
def sample_generator():
for batch, size in product(((), (2,), (2, 2)), range(6)):
shape = batch + (size, size)
for rank in range(1, size):
yield make_singular_matrix_batch_base(shape, rank)
return [SampleInput(t) for t in sample_generator()]
def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad):
# (<matrix_size>, (<batch_sizes, ...>))
test_sizes = [
(1, ()),
(2, (0,)),
(2, (2,)),
]
inputs = []
for matrix_size, batch_sizes in test_sizes:
size = batch_sizes + (matrix_size, matrix_size)
for n in (0, 3, 5):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(t, args=(n,)))
for n in [-4, -2, -1]:
t = random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_sizes, device=device, dtype=dtype)
t.requires_grad = requires_grad
inputs.append(SampleInput(t, args=(n,)))
return inputs
def sample_inputs_hsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((6,), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),
SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),)
def sample_inputs_vsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((6, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),
SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),)
def sample_inputs_dsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),
SampleInput(make_tensor((S, S, 6), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),)
def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad):
# Each test case consists of the sizes in the chain of multiplications
# e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)
test_cases = [
[1, 2, 1],
[2, 0, 2],
[0, 2, 2],
[2, 2, 2, 2],
[2, 3, 4, 5],
[5, 4, 0, 2],
[2, 4, 3, 5, 3, 2]
]
result = []
for sizes in test_cases:
tensors = []
for size in zip(sizes[:-1], sizes[1:]):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
tensors.append(t)
result.append(SampleInput(tensors))
return result
def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):
sizes = ((2, 2), (2, 3, 2))
ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)
dims = ((-2, -1), (-1, 0))
inputs: List[SampleInput] = []
for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(t, args=(ord, dim, keepdim)))
return inputs
def sample_inputs_linalg_norm(op_info, device, dtype, requires_grad):
test_sizes = [
(S,),
(0,),
(S, S),
(0, 0),
(S, 0),
(0, S),
(S, S, S),
(0, S, S),
(S, 0, S),
(0, 0, 0),
]
vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)
matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)
inputs = []
for test_size in test_sizes:
is_vector_norm = len(test_size) == 1
is_matrix_norm = len(test_size) == 2
for keepdim in [False, True]:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype, low=None, high=None,
requires_grad=requires_grad),
kwargs=dict(
keepdim=keepdim)))
if not (is_vector_norm or is_matrix_norm):
continue
ords = vector_ords if is_vector_norm else matrix_ords
for ord in ords:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(ord,),
kwargs=dict(
keepdim=keepdim)))
if ord in ['nuc', 'fro']:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
kwargs=dict(
ord=ord,
keepdim=keepdim,
dim=(0, 1))))
return inputs
def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# input shape, output shape, output stride, output storage offset
test_cases = [
((1,), (1,), (1,), 0),
((3, 3), (2, 2), (1, 2), 0),
((3, 3), (2, 2), (1, 2), 1),
((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0),
((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0),
]
samples = []
for input_shape, output_shape, stride, storage_offset in test_cases:
input_t = make_arg(input_shape)
kwargs = dict(storage_offset=storage_offset)
samples.append(SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs))
return samples
def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs):
inputs = (
(0,),
(0, 1),
(0, 1, 2, 3),
)
rvals = [1, 2, 4]
products = product(inputs, rvals, [False, True])
samples = []
for input_data, r, with_replacement in products:
input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad)
kwargs = dict(r=r, with_replacement=with_replacement)
samples.append(SampleInput(input_t, kwargs=kwargs))
return tuple(samples)
def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# constructs 1-D tensors with varying number of elements
a = make_arg((0,))
b = make_arg((0, 1))
c = make_arg((0, 1, 2, 3))
samples = []
# sample with only 1 tensor
samples.append(SampleInput(
a
))
# sample with 2 tensors
samples.append(SampleInput(
a,
args=(b,)
))
# sample with 3 tensors
samples.append(SampleInput(
a,
args=(b, c)
))
return tuple(samples)
def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input_shape, dict of dim and eps
cases: Tuple[tuple, dict] = ( # type: ignore[assignment]
((S, S), {'dim': 1}),
((S, 2), {'dim': -1}),
((S,), {'dim': 0, 'eps': 0.5}),
((), {'dim': 0}),
((S, S, M), {'dim': 2}),
((S, S), {})
)
def generator():
for input_shape, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs)
# Test for Broadcasting
yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})
yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2})
yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})
return list(generator())
def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
# Ordered as: input shape, kwargs for training, momentum, eps
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}),
((3, 2, 4), {'training': False, 'momentum': -1.2}),
((3, 1), {'training': True, 'momentum': 0.0}),
((0,), {'training': True}),
((0,), {'training': False}),
((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}),
((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}),
((2, 1), {}),
)
def generator():
for input_shape, kwargs in cases:
# args: running mean, running var, weight and bias should necessarily be of shape: (channels,)
channels = input_shape[1] if len(input_shape) > 1 else 0
weight = make_arg(channels) if channels > 0 else None
bias = make_arg(channels) if channels > 0 else None
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
yield SampleInput(
make_arg(input_shape),
args=(
running_mean,
running_var,
weight,
bias
),
kwargs=kwargs
)
# Checking for permutations of weights and biases as `None`
weights = [channels, None, None]
biases = [None, channels, None]
is_training = [True, False, False]
for weight, bias, training in zip(weights, biases, is_training):
yield SampleInput(
make_arg(input_shape),
args=(
running_mean,
running_var,
make_arg(channels),
make_arg(channels)
),
kwargs={'training': training}
)
# Test case for no optional kwargs
# running_mean and running_var are required in evaluation mode (training: False) but not in training mode
yield SampleInput(make_arg((1, 2, 3)), args=(None, None), kwargs={'training': True})
return list(generator())
def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
(()),
((S, )),
((S, S)),
((S, M, S))
)
def generator():
for shape in cases:
yield SampleInput(make_arg(shape))
return list(generator())
def sample_inputs_nn_functional_prelu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
(()),
((S, )),
((S, S)),
((S, M, S))
)
def generator():
for shape in cases:
for weight in [-1., 0., 0.8, 1.]:
weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg(shape), kwargs=dict(weight=weight_tensor))
if len(shape) >= 2:
channel_size = shape[1]
yield SampleInput(make_arg(shape), kwargs=dict(weight=make_arg((channel_size,))))
return list(generator())
def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (2,), '2'),
((S, S), (0,), '0'),
((S, S), (0.5,), '0_5'),
((S, S), (1,), '1'),
((S, S), (3,), '3'),
((S, S), (-1,), 'neg_1'),
((S, S), (-2,), 'neg_2'),
((S, S), (-0.5,), 'neg_0_5'),
((S, S), (-1.5,), 'neg_1_5'),
)
cases_nonzero_input = (
((S, S, S), (1.5,), '1_5_default'),
((S, S, S), (1.5, 1), '1_5_dim'),
((S, S, S), (1.5, -1), '1_5_neg_dim'),
((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'),
((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'),
)
cases_negdim_base = (
((S, S), (-2, 1,), 'neg_2_2_dim'),
((S, S), (-1, 1,), 'neg_1_2_dim'),
((S, S), (0, 1,), '0_2_dim'),
((S, S), (1, 1,), '1_2_dim'),
((S, S), (2, 1,), '2_2_dim'),
((S, S), (3, 1,), '3_2_dim'),
((S, S, S), (2, 1), '2_dim'),
((S, S, S), (3, 1), '3_dim'),
((S, S, S), (2, 1, True), 'keepdim_2_dim'),
((S, S, S), (3, 1, True), 'keepdim_3_dim'),
((), (2, 0), '2_dim_scalar'),
((), (3, 0), '3_dim_scalar'),
((), (2, 0, True), 'keepdim_2_dim_scalar'),
((), (3, 0, True), 'keepdim_3_dim_scalar'),
)
cases_negdim = []
for case in cases_negdim_base:
cases_negdim.append(case)
shape, args, name = case
new_args = copy.deepcopy(list(args))
new_args[1] *= -1
cases_negdim.append((shape, tuple(new_args), name.replace("_dim", "_neg_dim")))
def generator():
for shape, args, name in itertools.chain(cases, cases_negdim):
yield SampleInput(make_arg(shape), args=args, name=name)
for shape, args, name in cases_nonzero_input:
yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name)
return list(generator())
def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (), 'default'),
((S, S), ('fro',), 'fro_default'),
((S, S), ('fro', [0, 1],), 'fro'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), ('nuc',), 'nuc'),
((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (-inf,), '-inf'),
((S, S), (inf,), 'inf'),
((S, S), (inf, 1,), 'inf_2_dim'),
((S, S), (inf, -1,), 'inf_2_neg_dim'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_linalg_vector_norm(op_info, device, dtype, requires_grad, **kwargs):
size_1D = (S,)
size_2D = (2, 2)
test_cases = [
# input size, ord, dim args
(size_1D, 2, None),
(size_1D, 2, (0,)),
(size_1D, 0, None),
(size_1D, 0, (0,)),
(size_1D, 0.9, None),
(size_1D, 0.9, (0,)),
(size_1D, 1, None),
(size_1D, 1, (0,)),
(size_1D, -2.1, None),
(size_1D, -2.1, (0,)),
(size_1D, inf, None),
(size_1D, inf, (0,)),
(size_1D, -inf, None),
(size_1D, -inf, (0,)),
(size_2D, 2, None),
(size_2D, 2, (0,)),
(size_2D, 2, (-1, 0)),
(size_2D, 0, None),
(size_2D, 0, (0,)),
(size_2D, 0, (-1, 0)),
(size_2D, 0.9, None),
(size_2D, 0.9, (0,)),
(size_2D, 0.9, (-1, 0)),
(size_2D, 1, None),
(size_2D, 1, (0,)),
(size_2D, 1, (-1, 0)),
(size_2D, -2.1, None),
(size_2D, -2.1, (0,)),
(size_2D, -2.1, (-1, 0)),
(size_2D, inf, None),
(size_2D, inf, (0,)),
(size_2D, inf, (-1, 0)),
(size_2D, -inf, None),
(size_2D, -inf, (0,)),
(size_2D, -inf, (-1, 0)),
]
inputs = []
for test_size, ord, dim in test_cases:
for keepdim in [False, True]:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(ord,),
kwargs=dict(
keepdim=keepdim,
dim=dim)))
return inputs
# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensors and have common properties
class BinaryUfuncInfo(OpInfo):
"""Operator information for 'universal binary functions (binary ufuncs).'
These are functions of two tensors with common properties like:
- they are elementwise functions
- the output shape is determined by broadcasting the input shapes
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/stable/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(self, name, *,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
promotes_int_to_float=False, # Set to true if the op promotes integer inputs to float
always_returns_bool=False, # Set to true if the op always returns bool tensors
**kwargs):
super().__init__(name, **kwargs)
# [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = {}
self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = {}
self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs
self.promotes_int_to_float = promotes_int_to_float
self.always_returns_bool = always_returns_bool
def _resolve_binary_pwise_kwargs(
op_info, *, op_kwargs=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None
):
"""Resolves default values for :func:`sample_inputs_binary_pwise`.
By default :attr:`op_kwargs`, :attr:`lhs_make_tensor_kwargs`, and :attr:`rhs_make_tensor_kwargs` are just empty
dictionaries. In case :attr:`op_info` is a :class:`BinaryUfuncInfo`, :attr:`BinaryUfuncInfo.lhs_make_tensor_kwargs`
and :attr:`BinaryUfuncInfo.rhs_make_tensor_kwargs` will be used as defaults.
"""
if op_kwargs is None:
op_kwargs = {}
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = op_info.lhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = op_info.rhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}
return op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs
def sample_inputs_binary_pwise(
op_info,
device,
dtype,
requires_grad,
*,
python_scalars=False,
op_kwargs=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
**kwargs,
):
op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(
op_info,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
)
scalar = make_tensor((), device=device, dtype=dtype, **rhs_make_tensor_kwargs)
if python_scalars:
scalar = scalar.item() # type: ignore[assignment]
shapes = [
((), scalar),
((S,), scalar),
((S, 1), (S,)),
((M, S), scalar),
((S, M, S), (M, S)),
((S, M, S), (S, M, S)),
((M, 1, S), (M, S)),
((M, 1, S), (1, M, S)),
]
sample_inputs = []
for shape_lhs, shape_rhs_or_scalar in shapes:
lhs = make_tensor(
shape_lhs,
device=device,
dtype=dtype,
requires_grad=requires_grad,
**lhs_make_tensor_kwargs,
)
if isinstance(shape_rhs_or_scalar, tuple):
# shape
rhs = make_tensor(
shape_rhs_or_scalar,
device=device,
dtype=dtype,
requires_grad=requires_grad,
**rhs_make_tensor_kwargs,
)
broadcasts_input = torch.broadcast_shapes(shape_lhs, shape_rhs_or_scalar) != shape_lhs
else:
# scalar
rhs = shape_rhs_or_scalar # type: ignore[assignment]
broadcasts_input = False
sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=op_kwargs, broadcasts_input=broadcasts_input))
return sample_inputs
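# Extends the generic binary elementwise samples with an explicit `alpha` keyword argument,
# as accepted by torch.add and torch.sub.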
def sample_inputs_add_sub(
op_info,
device,
dtype,
requires_grad,
python_scalars=False,
alpha=1,
op_kwargs=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
**kwargs,
):
op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(
op_info,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
)
sample_inputs = sample_inputs_binary_pwise(
op_info,
device,
dtype,
requires_grad,
python_scalars=python_scalars,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
**kwargs,
)
lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)
rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)
sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=dict(op_kwargs, alpha=alpha), broadcasts_input=False))
return sample_inputs
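# Samples for torch.isclose: the generic binary elementwise cases plus a sweep over
# (rtol, atol, equal_nan) keyword combinations.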
def sample_inputs_isclose(
op_info,
device,
dtype,
requires_grad,
python_scalars=False,
op_kwargs=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
**kwargs,
):
op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(
op_info,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
)
sample_inputs = sample_inputs_binary_pwise(
op_info,
device,
dtype,
requires_grad,
python_scalars=python_scalars,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
**kwargs,
)
rtols = [0., 1e-7]
atols = [0., 1e-7]
equal_nans = [False, True]
products = product(rtols, atols, equal_nans)
for rtol, atol, equal_nan in products:
lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)
rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)
sample_inputs.append(SampleInput(lhs, args=(rhs,),
kwargs=dict(op_kwargs, rtol=rtol, atol=atol, equal_nan=equal_nan)))
return sample_inputs
def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return (SampleInput(make_arg((1, 2))),
SampleInput(make_arg((2,))),
SampleInput(make_arg(())))
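# Samples for torch.mm: a plain (S, M) x (M, S) pair and, for complex dtypes, variants with
# conjugated and transposed operands.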
def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype)
first_shape, second_shape = (S, M), (M, S)
sample_inputs = []
sample_inputs.append(
SampleInput(make_arg(first_shape, requires_grad=requires_grad),
args=(make_arg(second_shape, requires_grad=requires_grad),)))
if dtype.is_complex:
sample_inputs.append(
SampleInput(make_arg(first_shape, requires_grad=requires_grad),
args=(make_arg(second_shape).conj().requires_grad_(requires_grad),)))
sample_inputs.append(
SampleInput(
make_arg(first_shape).transpose(0, 1).requires_grad_(requires_grad),
args=(make_arg(second_shape).transpose(0, 1).conj().requires_grad_(requires_grad),)))
return sample_inputs
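# Samples for torch.addmm, which computes beta * input + alpha * (mat1 @ mat2). Includes cases
# where `input` broadcasts against the matrix product and, for complex dtypes, cases with
# conjugated transposed operands.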
def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):
alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)
beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)
tests_list = [
((2, 3), (2, 2), (2, 3), False)
]
tests_with_lhs_broadcasting = [
((1,), (2, 2), (2, 3), True),
((), (2, 2), (2, 3), True)
]
test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]
sample_inputs = []
for shape_a, shape_b, shape_c, broadcasts_input in test_cases:
sample_inputs.append(
SampleInput(
make_tensor(shape_a, device, dtype, requires_grad=requires_grad),
args=(
make_tensor(shape_b, device, dtype,
requires_grad=requires_grad),
make_tensor(shape_c, device, dtype,
requires_grad=requires_grad)),
kwargs={'alpha': alpha_val, 'beta': beta_val},
broadcasts_input=broadcasts_input))
if dtype.is_complex:
shape = (3, 3)
sample_inputs.append(
SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),
args=(
make_tensor(shape, device, dtype,
requires_grad=requires_grad).t().conj(),
make_tensor(shape, device, dtype,
requires_grad=requires_grad)),
kwargs={'alpha': alpha_val, 'beta': beta_val},))
sample_inputs.append(
SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),
args=(
make_tensor(shape, device, dtype,
requires_grad=requires_grad),
make_tensor(shape, device, dtype,
requires_grad=requires_grad).t().conj()),
kwargs={'alpha': alpha_val, 'beta': beta_val},))
return sample_inputs
def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((M, S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((M, M, S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):
sample_inputs = []
sample_inputs.append(SampleInput(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
))
if dtype.is_complex:
# dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor)
# is tested in test_conj_view (which tests operations with only conjugated input tensor
# -- not conjugated arg tensors)
sample_inputs.append(SampleInput(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
torch.conj(make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
)
))
return sample_inputs
def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases = (((S,), (S, M), (M,), 1, 1, False),
((S,), (S, M), (M,), 0.2, 0.6, False),
)
test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),
((1,), (S, M), (M,), 0.2, 0.6, True),
((), (S, M), (M,), 1, 1, True),
((), (S, M), (M,), 0.2, 0.6, True),
)
cases = test_cases + test_cases_with_broadcast
def generator():
# addmv performs: beta * M + alpha * (mat @ vec)
for M, mat, vec, beta, alpha, broadcasts_input in cases:
yield SampleInput(make_arg(M), args=(make_arg(mat), make_arg(vec)),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting
test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False),
((1,), (S, S, S), (S, S, M), 1, 1, True),
((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
((), (S, S, S), (S, S, M), 1, 1, True),
((), (S, S, S), (S, S, M), 0.6, 0.2, True),
]
def generator():
for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases:
if dtype.is_complex:
beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j)
yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting)
yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting)
return list(generator())
def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):
test_cases = [(((S, S), (S, S), (S, S)), False),
(((S, S), (S, 1), (1, S)), False),
(((1,), (S, S, 1), (1, S)), True),
(((), (), ()), False),
(((S, S), (), ()), True),
(((), (S, S, 1), (1, S)), True)
]
sample_inputs = []
for input_args, broadcasts_input in test_cases:
args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg
for arg in input_args)
sample_inputs.append(SampleInput(
args[0],
args=args[1:],
broadcasts_input=broadcasts_input))
args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg
for arg in input_args)
sample_inputs.append(SampleInput(
args[0],
args=args[1:],
kwargs=dict(value=3.14), broadcasts_input=broadcasts_input))
return tuple(sample_inputs)
def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):
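    # Ordered as (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input)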
test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),
((1,), (S, S, S), (S, S, M), 1, 1, True),
((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
((), (S, S, S), (S, S, M), 1, 1, True),
((), (S, S, S), (S, S, M), 0.6, 0.2, True),
]
sample_inputs = []
for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:
args = (make_tensor(input_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(batch1_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(batch2_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad))
sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))
if dtype.is_complex:
sample_inputs.append(SampleInput(
args[0].detach().clone().requires_grad_(requires_grad),
args=(args[1].detach().clone().requires_grad_(requires_grad),
args[2].detach().clone().requires_grad_(requires_grad)),
kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),
broadcasts_input=broadcasts_input))
if dtype.is_complex:
shapes = [(S, S, S), (S, M, S), (S, S, M)]
args = (make_tensor(shapes[0], device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(shapes[1], device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(shapes[2], device, dtype,
low=None, high=None,
requires_grad=requires_grad))
sample_inputs.append(
SampleInput(
args[0].transpose(-1, 1).detach().requires_grad_(requires_grad),
args=(args[1].transpose(-1, 1).conj().detach().requires_grad_(requires_grad),
args[2].transpose(-1, 1).conj().detach().requires_grad_(requires_grad)),
kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),))
return tuple(sample_inputs)
def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):
input1 = SampleInput(
make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))
input2 = SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
broadcasts_input=True)
if dtype.is_complex:
alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j
elif dtype.is_floating_point:
alpha, beta = 0.2, 0.6
else:
alpha, beta = 2, 3
input3 = SampleInput(
make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
kwargs=dict(beta=beta, alpha=alpha))
input4 = SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
kwargs=dict(beta=beta, alpha=alpha),
broadcasts_input=True)
return (input1, input2, input3, input4)
def sample_inputs_xlogy(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, S), device, dtype, low=0, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_xlog1py(self, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def generator():
# same shape
yield SampleInput(make_arg((S, S)), args=(make_arg((S, S), low=-1),))
# rhs broadcast
yield SampleInput(make_arg((S, S)), args=(make_arg((S,), low=-1),))
# all zero `x`
with torch.no_grad():
x = make_arg((S, S))
x.fill_(0)
yield SampleInput(x, args=(make_arg((S, S), low=-1),))
# randomly zero-masked `x`
x = make_arg((S, S))
y = make_arg((S, S), low=-1)
with torch.no_grad():
x[torch.rand(x.shape) > 0.5] = 0
yield SampleInput(x, args=(y,))
# Scalar x
# `input` has to be a tensor
# yield SampleInput(0, args=(make_arg((S, S), low=-1),))
# yield SampleInput(2.1, args=(make_arg((S, S), low=-1),))
# Scalar y
yield SampleInput(make_arg((S, S)), args=(-0.5,))
yield SampleInput(make_arg((S, S)), args=(1.2,))
return list(generator())
def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = ((), (S, S, S), (S,))
def generator():
for shape in cases:
            yield SampleInput(make_arg(shape))
return list(generator())
def sample_inputs_logsumexp(self, device, dtype, requires_grad):
inputs = (
((), (0,), True),
((S, S), (1,), True),
((S, S), (1,), False)
)
samples = []
for shape, dim, keepdim in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(dim, keepdim)))
return tuple(samples)
def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):
inputs = [
((), {}),
((S, S), {}),
((0, S, 0), {}),
((S,), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), {'dtype': torch.double}),
((S,), {'device': 'cpu'}),
((S,), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), {'device': 'cuda'}))
samples = []
for shape, kwargs in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, kwargs=kwargs))
return tuple(samples)
def get_independent_tensor(tensor):
return tensor.detach().clone().requires_grad_(tensor.requires_grad)
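# Samples for torch.randint_like: reuses the *_like samples above, prepending either `high`
# alone or the (`low`, `high`) pair as positional args.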
def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs):
samples = []
low = 2
high = 10
for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):
# With high
samples.append(SampleInput(
sample.input,
args=(high,) + sample.args,
kwargs=sample.kwargs))
# With low and high
samples.append(SampleInput(
get_independent_tensor(sample.input),
args=(low, high,) + sample.args,
kwargs=sample.kwargs))
return tuple(samples)
def sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):
inputs = [
((), (), {}),
((S, S), (2, 0), {}),
((0, S, 0), (3, 2, 2), {}),
((S,), (2, 3), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), (10,), {'dtype': torch.double}),
((S,), (1, 1, 12), {'device': 'cpu'}),
((S,), (2, 2, 2), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), (7, 2), {'device': 'cuda'}))
samples = []
for input_shape, output_shape, kwargs in inputs:
t = make_tensor(input_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(output_shape,), kwargs=kwargs))
return tuple(samples)
def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs):
def get_val(dtype):
return make_tensor([], 'cpu', dtype).item()
samples = []
for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):
# The scalar we are passing to new_full must be the same dtype
# as the one of the resulting tensor
use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype
samples.append(SampleInput(
sample.input, args=sample.args + (get_val(use_dtype),), kwargs=sample.kwargs))
return tuple(samples)
def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs):
def get_val(dtype):
return make_tensor([], 'cpu', dtype).item()
inputs = [
((), get_val(dtype), {}),
((S, S), get_val(dtype), {}),
((0, S, 0), get_val(dtype), {}),
((S,), get_val(dtype), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), get_val(torch.double), {'dtype': torch.double}),
((S,), get_val(dtype), {'device': 'cpu'}),
((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), get_val(dtype), {'device': 'cuda'}))
samples = []
for shape, fill_value, kwargs in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(fill_value,), kwargs=kwargs))
return tuple(samples)
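# Samples for torch.logcumsumexp: for each (shape, dim) case a variant with a large leading
# entry (10000) is also generated to exercise numerical stability.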
def sample_inputs_logcumsumexp(self, device, dtype, requires_grad):
inputs = (
((S, S, S), 0),
((S, S, S), 1),
((), 0),
)
samples = []
for large_number in (True, False):
for shape, dim in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
if large_number and t.dim() > 0:
with torch.no_grad():
t[0] = 10000
samples.append(SampleInput(t, args=(dim,)))
return tuple(samples)
def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):
return (SampleInput((make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad))),)
def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (2, 1, 0.5)),
((S, S, S), (2, -1, 0.5)),
((S, S, S), (1, 2, 3)),
((S, S, S), (float('inf'), 2, 0.5)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((1, 2, 3), (-1, -2)),
((1, 2, 3), (-1, 2)),
((1, 2, 3), (1, -2)),
((1, 2, 3), (1, 2)),
((), (0, 0)),
((1, ), (0, 0)),
((M, M), (0, 1)),
((S, S, S), (2, 0)), )
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((1, 2, 3), (), (M, M), (S, S, S), (S, M, S), (M, S, M, S))
return list(SampleInput(make_arg(shape)) for shape in shapes)
def sample_inputs_T(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((), (M, M))
return list(SampleInput(make_arg(shape)) for shape in shapes)
def sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):
"""
    This function generates always-invertible inputs for linear algebra ops using
    random_fullrank_matrix_distinct_singular_value.
    The inputs are generated as the itertools.product of 'batches' and 'ns'.
    In total this function generates 8 SampleInputs:
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 0]
out = []
for batch, n in product(batches, ns):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
out.append(SampleInput(a))
return out
def sample_inputs_linalg_pinv_singular(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to
test the backward method of `linalg_pinv`. That way we always preserve the rank of the
input no matter the perturbations applied to it by the gradcheck.
Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood.
"""
batches = [(), (0, ), (2, ), (1, 1)]
    # a size of at least 30 is required to trigger failures in the previous implicit implementation
    # of pinv's backward method, although this makes the test slow
size = [0, 3, 50]
def generate_samples():
for batch, m, n in product(batches, size, size):
for k in range(min(3, min(m, n))):
# Note that by making the columns of `a` and `b` orthonormal we make sure that
# the product matrix `a @ b.t()` has condition number 1 when restricted to its image
a = torch.rand(*batch, m, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)
b = torch.rand(*batch, n, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)
yield SampleInput(a, args=(b,))
return list(generate_samples())
def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# autograd is not supported for inputs with zero number of elements
shapes = ((S, S),
(2, S, S),
(2, 1, S, S), )
def generator():
for shape in shapes:
yield SampleInput(make_arg(shape))
return list(generator())
def np_sinc_with_fp16_as_fp32(x):
# Wraps numpy's sinc function so that fp16 values are promoted to fp32
# before sinc is invoked. Context: numpy's sinc returns NaN when evaluated
# at 0 for fp16.
if x.dtype == np.float16:
return np.sinc(x.astype(np.float32))
else:
return np.sinc(x)
def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((S, 1, 1), (S, S, S)),
((S, 1, S), (S, S, S)),
((S, 1), (S, S, S)),
((1,), (S, S, S)),
((1, S), (1, 1, S)),
((), ()),
((), (1, 3, 2)),
)
return tuple(
SampleInput(
make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(shape,)) for size, shape in test_cases)
def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),)
samples: List[SampleInput] = []
for shape, *other_shapes in test_cases:
samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))
return samples
def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases: Tuple[tuple] = (((1, S), (2, S), (3, S),),)
samples: List[SampleInput] = []
for shape, *other_shapes in test_cases:
samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))
return samples
def sample_inputs_bitwise_shift(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
(S, S, S),
(S,),
(),
)
sample_inputs = []
for size in test_cases:
tensor1 = make_tensor(size, device, dtype, low=-32, high=32, requires_grad=requires_grad)
tensor2 = make_tensor(size, device, dtype, low=0, high=5, requires_grad=requires_grad)
sample_inputs.append(SampleInput(tensor1, args=(tensor2,)))
sample_inputs.append(SampleInput(tensor1, args=(2,)))
return tuple(sample_inputs)
def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):
small_S = 2
test_cases = (
((S, S, 2), (S, S + 1, 2)),
((S, S), (S, S)),
((S, S, S), (S, S, S)),
((3, 5), (3, 5)),
((2, 3, 5), (2, 3, 5)),
((1, 2, 3), (1, 2, 3)),
((1, 1), (S, 1)),
((0, 5), (4, 5)),
((4, 5), (0, 5)),
((0, 4, 5), (3, 5)),
((4, 5), (0, 3, 5)),
((0, 4, 5), (1, 3, 5)),
((1, 4, 5), (0, 3, 5)),
# Using S here would make this one test take 9s
((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),
((small_S, 1, 1, small_S), (1, small_S, small_S)),
((1, 1, small_S), (small_S, 1, small_S, small_S)),
)
samples = []
for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
# FIXME add an override for JIT and revert 0. back to 0
# since it's accepted by eager
for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]:
for t1_size, t2_size in test_cases:
# The args should never be non-contiguous as this is not supported in the backward
samples.append(SampleInput(
make_tensor(t1_size, device, dtype, requires_grad=requires_grad),
args=(make_tensor(t2_size, device, dtype, requires_grad=requires_grad), p, cm)))
return samples
def sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
cases = (((S, S, S), (1,)),
((), (1,)),
# For requires_grad=False below,
# check https://github.com/pytorch/pytorch/issues/59137
((S, S, S), (make_arg((), requires_grad=False),)))
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_comparison_ops(self, device, dtype, requires_grad, **kwargs):
test_cases = (
((S, S, S), (S, S, S), False),
((S, S, S), (), False),
((S, S, S), (1,), False),
((S,), (1,), False),
((), (), False),
)
test_cases_lhs_broadcasting = (
((S, 1, S), (S, S, S), True),
((1,), (S, S, S), True),
((1, S), (1, 1, S), True),
((), (0,), True),
((), (S, S, S), True),
)
cases = test_cases + test_cases_lhs_broadcasting
sample_inputs = list(SampleInput(make_tensor(first_shape, device, dtype,
requires_grad=requires_grad),
args=(make_tensor(second_shape, device, dtype,
requires_grad=requires_grad),),
broadcasts_input=broadcasts_input)
for first_shape, second_shape, broadcasts_input in cases)
equal_tensors_non_bool = (
([[[-8, 6], [9, 0]], [[0, 5], [5, 7]]]),
([[[6, 5]], [[1, -5]]]),
([[2], [-1]]),
([0, -6]),
([3],),
)
equal_tensors_bool = (
([[[1, 0], [0, 0]], [[0, 1], [1, 0]]]),
([[[1, 1]], [[1, 0]]]),
([[1], [0]]),
([0, 1]),
([1],),
)
more_cases = equal_tensors_bool if dtype is torch.bool else equal_tensors_non_bool
more_inputs = list(SampleInput(torch.tensor(elements, device=device, dtype=dtype,
requires_grad=requires_grad),
args=(torch.tensor(elements, device=device, dtype=dtype,
requires_grad=requires_grad),))
for elements in more_cases)
sample_inputs = [*sample_inputs, *more_inputs]
return tuple(sample_inputs)
def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):
tensors = [
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
]
return (SampleInput(tensors, args=(0,)),)
def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment]
((S, S), (S, S), {'dim': -1}),
((S, S), (S, S), {'dim': 1}),
((M, S), (S, S), {'dim': 0}), # different shapes
((1, 2, 3), (1, 2, 3), {'dim': -2}),
((0,), (0,), {'dim': 0}), # empty tensor
((0, S), (S, S), {'dim': 0}),
((1,), (1,), {}) # dim not passed, fallback to default
)
def generator():
for input_shape1, input_shape2, kwargs in cases:
yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs)
return list(generator())
def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):
tensors = [
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
]
return (SampleInput(tensors),)
def sample_inputs_hypot(op_info, device, dtype, requires_grad):
input = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
args = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
return (
SampleInput(input, args=(args,)),
)
def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, gather_variable((S, S), 1, M, True, device=device))),
SampleInput(
make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
# Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006
SampleInput(
make_tensor((S,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([], dtype=torch.uint8, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
)
def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):
return (SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S), 1, S, True, device=device), 0)),
# `indices` broadcast
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),
# `self` broadcast
SampleInput(make_tensor((1, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),
# without `dim` arg
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device), )),
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device),)),
)
def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):
test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment]
((S, S, S), {}),
((S, S, S), {'dim': 1}),
((S, S, S), {'dim': 1, 'keepdim': True}),
((), {'dim': 0}),
((), {}),
((), {'dim': 0, 'keepdim': True}),
)
samples: List[SampleInput] = []
for shape, kwargs in test_cases:
samples.append(SampleInput(
make_tensor(shape, device, dtype, requires_grad=requires_grad),
kwargs=kwargs))
return samples
def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases = (
((1,), 0, None, None),
((S,), 0, None, None),
((S, 1), 0, None, None),
((S, 1), 1, None, None),
((S, S), 0, None, None),
((S, S), 1, None, None),
((S, S), 0, (1, S), (2, S)),
((S, S), 0, None, (2, S)),
((S, S, S), 1, None, None),
((S, S, S), 2, None, None),
((S, S, S), 1, (S, 1, S), (S, 1, S)),
((S, S, S), 2, (S, S, 1), (S, S, 1)),
((S, S, S), 2, (S, S, S), (S, S, S)),)
sample_inputs = []
for size, dim, size_prepend, size_append in test_cases:
prepend_size = 0 if (size_prepend is None) else size_prepend[dim]
append_size = 0 if (size_append is None) else size_append[dim]
dim_size = size[dim] + prepend_size + append_size
for n in range(dim_size):
input_tensor = make_arg(size)
prepend = make_arg(size_prepend) if size_prepend else None
append = make_arg(size_append) if size_append else None
sample_inputs.append(SampleInput(input_tensor, args=(n, dim, prepend, append,)))
# add some samples with n > dim_size
sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S + 1, 1,)))
sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S * 3 + 2, 2, make_arg((S, S, S)), make_arg((S, S, S)),)))
return sample_inputs
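# Samples for torch.histogram: sweeps input sizes, bin counts, and the weight/density kwargs,
# testing both an integer bin count and an explicit bin-edges tensor for every combination.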
def sample_inputs_histogram(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):
input_tensor = make_arg(size)
weight_tensor = make_arg(size) if weighted else None
sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
kwargs=dict(weight=weight_tensor, density=density)))
bins_tensor = make_arg((bin_ct + 1,))
sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
kwargs=dict(weight=weight_tensor, density=density)))
return sample_inputs
def sample_inputs_histogramdd(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S))
bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3))
sample_inputs = []
for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]):
input_tensor = make_arg(size)
bin_ct = bin_ct_pattern[:size[-1]]
weight_tensor = make_arg(size[:-1]) if weighted else None
sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
kwargs=dict(weight=weight_tensor, density=density)))
bins_tensor = [make_arg(ct + 1) for ct in bin_ct]
sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
kwargs=dict(weight=weight_tensor, density=density)))
return sample_inputs
def sample_inputs_histc(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, min, max in product(sizes, [0, -10], [0, 10]):
# construct sample input omitting bins arg
sample_inputs.append(SampleInput(make_arg(size),
kwargs=dict(min=min, max=max)))
# construct sample inputs with a few different bins values
for bins in [1, 3, 10]:
sample_inputs.append(SampleInput(make_arg(size),
kwargs=dict(bins=bins, min=min, max=max)))
return sample_inputs
def sample_inputs_bincount(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs = []
for size, weighted in product((S, M), [False, True]):
input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device)
weight_tensor = make_arg((size,)) if weighted else None
max_val = int(input_tensor.max().item())
for minlength in [0, max_val // 2, max_val, 2 * max_val]:
sample_inputs.append(SampleInput(input_tensor,
kwargs=dict(weights=weight_tensor, minlength=minlength)))
return sample_inputs
def sample_inputs_bucketize(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, out_int32, right in product(sizes, [False, True], [False, True]):
input_tensor = make_arg(size)
boundaries = make_arg((S,)).msort()
sample_inputs.append(SampleInput(input_tensor, args=(boundaries, ),
kwargs=dict(out_int32=out_int32, right=right)))
return sample_inputs
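# Samples for torch.searchsorted: covers contiguous and non-contiguous tensors, the boolean
# `right` flag as well as the string `side` kwarg, `out_int32`, and unsorted boundaries paired
# with an explicit `sorter` tensor.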
def sample_inputs_searchsorted(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((0,), (M,), (0, 0), (M, M), (0, 0, 0), (M, M, M))
inputs = []
for size, noncontiguous, out_int32, right in product(sizes, [False, True], [False, True], [False, True]):
unsorted_tensor = make_arg(size, noncontiguous=noncontiguous)
input_tensor = make_arg(size, noncontiguous=noncontiguous)
        if np.prod(size) == 0:
boundary_tensor = unsorted_tensor
sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous)
else:
boundary_tensor, sorter = torch.sort(unsorted_tensor)
side = "right" if right else "left"
inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right)))
inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side)))
inputs.append(
SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right, sorter=sorter)))
inputs.append(
SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side, sorter=sorter)))
return inputs
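# Samples for torch.gradient: covers the default spacing, scalar spacing, per-dimension spacing
# lists, and explicit coordinate tensors, each with edge_order 1 or 2.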
def sample_inputs_gradient(op_info, device, dtype, requires_grad):
sample_inputs = []
test_cases_float = (
((S,), None, None, 1),
((S,), 2., None, 1),
((S, S), None, None, 2),
((S, S), [2.0, 2.1], None, 1),
((S, S), [2.0, 2.1], (0, 1), 1),
((4, 4, 4), [2., 1.], (0, 1), 2),
)
for size, spacing, dim, edge_order in test_cases_float:
t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))
test_cases_tensor = (
((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),
((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),
)
for size, coordinates, dim, edge_order in test_cases_tensor:
t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
coordinates_tensor_list = []
for coords in coordinates:
# `coords` will always contain floating point values and Python 3.10 does not support this
# implicit conversion to an integer using `__int__`
# TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed
a = torch.tensor(coords, device=device)
coordinates_tensor_list.append(a.to(dtype))
sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order)))
return tuple(sample_inputs)
def sample_inputs_index_select(op_info, device, dtype, requires_grad):
return (
SampleInput(
make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, index_variable(2, S, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
)
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
test_args = [
([1, 2],),
(slice(0, 3),),
([slice(0, 3), 1],),
([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),
([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),
([slice(None), slice(None), [0, 3]],),
([slice(None), [0, 3], slice(None)],),
([[0, 3], slice(None), slice(None)],),
([[0, 3], [1, 2], slice(None)],),
([[0, 3], ],),
([[0, 3], slice(None)],),
([[0, 3], Ellipsis],),
([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),
(index_variable(2, S, device=device),),
(mask_not_all_zeros((S,)),),
]
return tuple(SampleInput(
make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=args)
for args in test_args)
def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
for accumulate in [False, True]:
# Test with indices arg
inputs.append(SampleInput(
make_tensor((S, S,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
(index_variable(2, S, device=device), ),
make_tensor((2, S), device, dtype, low=None, high=None)),
kwargs=dict(accumulate=accumulate)))
# Test with mask arg
mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))
inputs.append(SampleInput(
make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
(mask, ),
make_tensor((S,), device, dtype, low=None, high=None),),
kwargs=dict(accumulate=accumulate)))
return inputs
# TODO: the nondeterminism of this operation is not yet tested
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index_add(op_info, device, dtype, requires_grad, **kwargs):
    # These tests are pretty much the same as those for index_copy.
    # Perhaps merge?
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
t = make_arg((S, S))
s = make_arg((S, S))
idx = make_arg((S,), dtype=torch.int64, low=0, high=S, requires_grad=False)
samples = [SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(1,
idx.detach().clone(),
s.detach().clone().requires_grad_(requires_grad)))]
for alpha in (-1, 0, 2):
samples.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(1,
idx.detach().clone(),
s.detach().clone().requires_grad_(requires_grad)),
kwargs=dict(alpha=alpha)))
# Add scalar cases
scalar_sizes = [(), (1,)]
ts = (make_arg(size) for size in scalar_sizes)
idxs = (make_arg(size, dtype=torch.int64, low=0, high=1, requires_grad=False) for size in scalar_sizes)
ss = (make_arg(size) for size in scalar_sizes)
samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(0, idx.detach().clone(), s.detach().clone())) for t, idx, s in product(ts, idxs, ss))
samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(0, idx.detach().clone(), s.detach().clone()),
kwargs=dict(alpha=a)) for t, idx, s, a in product(ts, idxs, ss, [-1, 0, 2]))
return samples
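# Samples for torch.sort: the helpers below build tensors of unique values (via randperm) so
# that sort results are deterministic; stable-sort kwargs are only added for CPU devices (see
# the 'no CUDA support yet' note below).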
def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):
def small_3d_unique():
res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)
res = res.to(dtype).requires_grad_(requires_grad)
return res
def large_1d_unique():
res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
res = res.to(dtype).requires_grad_(requires_grad)
return res
samples = []
# Test case for large tensor.
samples.append(SampleInput(large_1d_unique()))
# Test cases for small 3d tensors.
# Imitates legacy tests from test/test_torch.py
dims = range(-3, 3)
flag = [True, False]
for dim, descending, stable in product(dims, flag, flag):
# default schema without stable sort
samples.append(SampleInput(small_3d_unique(),
args=(dim, descending)))
# schema with stable sort, no CUDA support yet
if torch.device(device).type == 'cpu':
samples.append(
SampleInput(small_3d_unique(),
kwargs=dict(dim=dim, descending=descending, stable=stable))
)
# Test cases for scalar tensor
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad)))
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),
args=(0,)))
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),
args=(0, True)))
# Test cases for stable sort
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(stable=True)))
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(dim=0, stable=True)))
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(dim=0, descending=True, stable=True)))
return samples
def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S))
samples = []
for x_size in sizes:
# threshold and values args must be numbers
samples.append(SampleInput(make_arg(x_size), args=(make_arg(()).item(), make_arg(()).item())))
return samples
def sample_inputs_argsort(*args, **kwargs):
return [sample_input for sample_input in sample_inputs_sort(*args, **kwargs) if "stable" not in sample_input.kwargs]
def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs):
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for shape, sorted, return_inverse, return_counts, dim in \
product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]):
        # torch.unique cannot be called if the input tensor has a zero-sized dimension that isn't the selected dim
        if 0 in shape and shape.index(0) != dim:
continue
# skip invalid dim args
if dim is not None and (dim < -len(shape) or dim >= len(shape)):
continue
kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
# construct a test case with only one distinct value
input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
# construct a test case with mixed 0s and 1s
input_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\
.to(dtype).requires_grad_(requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
# construct a test case with many different values
input_t = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
return sample_inputs
def sample_inputs_unique_consecutive(*args, **kwargs):
def generator():
for sample_input in sample_inputs_unique(*args, **kwargs):
if not sample_input.kwargs["sorted"]:
sample_input.kwargs.pop("sorted")
yield sample_input
return list(generator())
def sample_inputs_index_fill(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
index_tensor = partial(torch.tensor, device=device, dtype=torch.long)
samples = []
fill_val = torch.tensor(-1 + 1j if dtype.is_complex else -1)
idx = index_variable(1, S, device=device)
ndim = 3
for d in range(ndim):
samples.append(SampleInput(make_arg((S,) * ndim), args=(d, idx, fill_val)))
samples.append(SampleInput(make_arg((S,) * ndim), args=(d, -idx - 1, fill_val)))
def unique_idx(numel, max_idx):
# Generate unique random indices vector of `numel`
# elements in range [0, max_idx).
indices = random.sample(range(max_idx), numel)
return index_tensor(indices)
samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), 2)))
samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), make_arg(()))))
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor(0), 2)))
samples.append(SampleInput(make_arg(()), args=(0, index_tensor([0]), 2)))
samples.append(SampleInput(make_arg(()), args=(0, index_tensor(0), 2)))
# Duplicate indices
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0]), 2)))
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0, 2]), make_arg(()))))
return samples
def sample_inputs_max_min_binary(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
args_for_binary_op = (
((S, S, S), (S, S, S),),
((S, S, S), (S,),),
((S,), (S, S, S),),
((S, 1, S), (S, S),),
((S, S), (S, S),),
((), (),),
((S, S, S), (),),
((), (S, S, S),),
)
inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(make_tensor(other_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),),))
for input_tensor, other_tensor in args_for_binary_op)
return inputs
def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((0, 8, 8), (5,)),
((3, 8, 8), 5),
((3, 8, 8), 1)
)
def generator():
for input_shape, output_size in cases:
yield SampleInput(make_arg(input_shape), args=(output_size,))
return list(generator())
def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((1, 8, 8, 8), (5, 7)),
((2, 8, 8, 8), (None, 7)),
((1, 8, 4, 3), (5, None)),
((1, 8, 4, 3), (None, None)),
((1, 8, 4, 3), (5)),
)
def generator():
for input_shape, output_size in cases:
yield SampleInput(make_arg(input_shape), args=(output_size,))
return list(generator())
def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((0, 8, 8, 8, 8), (5, 7, 4)),
((1, 8, 4, 3, 7), (None, None, None)),
((1, 8, 4, 3, 7), (1, 1, 1)),
((3, 3, 8, 8, 6), (5, 7, None)),
((1, 3, 8, 8, 6), (5, None, 2)),
((3, 3, 8, 8, 6), (None, 3, 2)),
)
def generator():
for input_shape, output_size in cases:
yield SampleInput(make_arg(input_shape), args=(output_size,))
return list(generator())
def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8), (5,)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((3, 4, 4), 3),
((3, 4, 4), 1)
)
def generator():
for shapes, return_idx in product(cases, (True, False)):
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
return list(generator())
def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8, 8), (5, 7)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((1, 4, 4, 4), (2, 3)),
((2, 4, 4, 4), (None, 3)),
((2, 4, 4, 4), (1, 1)),
((1, 4, 4, 3), (3, None)),
((1, 4, 4, 3), (None, None)),
((1, 4, 4, 3), (3)),
)
def generator():
for shapes, return_idx in product(cases, (True, False)):
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
return list(generator())
def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8, 8, 8), (5, 7, 4)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((1, 4, 4, 3, 5), (None, None, None)),
((1, 4, 4, 3, 5), (1, 1, 1)),
((3, 3, 4, 4, 6), (2, 3, None)),
((1, 3, 4, 4, 6), (3, None, 2)),
((3, 3, 4, 4, 6), (None, 3, 2)),
)
def generator():
for shapes, return_idx in product(cases, (True, False)):
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
return list(generator())
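# Helper hierarchy for the max_pool sample inputs below: the base class enumerates combinations
# of kernel_size/stride/padding/dilation/ceil_mode/return_indices together with input shapes
# (including a shape without a batch dimension), and the 1d/2d/3d subclasses add tuple-valued
# variants of those kwargs.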
class _TestParamsMaxPoolBase(object):
def __init__(self):
self.kwargs = {
'kernel_size': [3],
'stride': [2, None],
'ceil_mode': [True, False],
'padding': [0, 1],
'dilation': [1],
'return_indices': [True, False]
}
self.shapes = [
[1, 2, None], # batch
[2], # channels
[3, 6] # signal
]
def _gen_shape(self):
for shape in product(*self.shapes):
            # shape[0] being None indicates a missing batch dimension
if shape[0] is None:
shape = shape[1:]
yield shape, torch.contiguous_format
            # only 2d pooling inputs (N, C, H, W), i.e. rank-4 tensors, support the channels_last memory format
if len(self.shapes) == 4 and len(shape) == 4:
yield shape, torch.channels_last
def _gen_kwargs(self):
keys = self.kwargs.keys()
for values in product(*self.kwargs.values()):
yield dict(zip(keys, values))
def gen_input_params(self):
yield from product(self._gen_shape(), self._gen_kwargs())
class _TestParamsMaxPool1d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3,)]
self.kwargs['stride'] += [(2,)]
self.kwargs['padding'] += [(1,)]
self.kwargs['dilation'] += [(1,)]
class _TestParamsMaxPool2d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3, 2)]
self.kwargs['stride'] += [(2, 1)]
self.kwargs['padding'] += [(1, 1)]
self.kwargs['dilation'] += [(1, 2)]
self.shapes.append([6])
class _TestParamsMaxPool3d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3, 2, 3)]
self.kwargs['stride'] += [(2, 1, 2)]
self.kwargs['dilation'] += [(1, 2, 1)]
self.shapes.append([6])
self.shapes.append([5])
def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
params_generator_type_dict = {
'nn.functional.max_pool1d': _TestParamsMaxPool1d,
'nn.functional.max_pool2d': _TestParamsMaxPool2d,
'nn.functional.max_pool3d': _TestParamsMaxPool3d,
}
def generator():
params_generator = params_generator_type_dict[op_info.name]()
for (shape, memory_format), kwargs in params_generator.gen_input_params():
arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad)
yield SampleInput(arg, kwargs=kwargs)
return list(generator())
def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((2, 1, 4, 5), {'p': 1., 'dim': 2}),
((2, 3, 4, 5), {'p': 2., 'dim': 1}),
((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),
((1, 3, 4, 5), {'p': -1., 'dim': 1}),
((1, 3, 4, 5), {'p': 0., 'dim': -1}),
((), {'p': 1.2, 'dim': 0}),
((2, 3, 4, 5), {}),
((2, 3, 4, 5), {'eps': 1e-4}))
def generator():
for input_shape, kwargs in cases:
yield SampleInput(make_arg(input_shape), kwargs=kwargs)
return list(generator())
def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4), (3, 3, 3), (3,),
{'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}),
((2, 2, 4), (2, 2, 4), (4,),
{'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}),
((1, 1, 4), (1, 1, 4), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}),
((1, 1, 4), (1, 2, 3), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5), (4, 8, 3), None,
{})
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4, 4), (3, 3, 3, 3), (3,),
{'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),
((2, 2, 4, 4), (2, 2, 4, 5), (4,),
{'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),
((1, 1, 4, 5), (1, 1, 4, 3), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 1, 4, 3), (1, 2, 3, 4), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5, 5), (4, 8, 3, 3), None,
{})
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,),
{'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}),
((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,),
{'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}),
((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}),
((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None,
{})
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias,
# and a dict of values of (stride, padding, dilation, groups)
cases: Tuple = (
((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}),
((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}),
((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}),
((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}),
# With defaults
((1, 4, 5), (3, 4, 3), None, {}),
)
# TODO: (@krshrimali), add error_inputs_func once https://github.com/pytorch/pytorch/pull/67354 is merged
# Should replace test_conv_modules_raise_error_on_incorrect_input_size and test_conv_shapecheck
# in test/test_nn.py
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, groups, dilation)
cases: Tuple = (
((1, 3, 4, 4), (3, 3, 3, 3), (3,),
{'stride': (2, 2), 'padding': 2, 'groups': 1}),
((2, 4, 8, 8), (2, 2, 3, 3), (2,),
{'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 2, 4, 3), (4, 2, 3, 4), None,
{'stride': 2, 'padding': 1, 'groups': 1}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': "valid"}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 1, 'padding': "same", 'dilation': 3}),
# Below are the group related samples from common_nn.py
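        # (for grouped conv the weight has shape (out_channels, in_channels // groups, kH, kW)
        #  and out_channels must be divisible by groups)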
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}),
((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}),
((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}),
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}),
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}),
((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}),
((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}),
# With defaults
((1, 4, 5, 5), (3, 4, 3, 3), None, {}),
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, num groups, and eps
cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment]
((1, 6, 3), 2, 0.5),
((2, 6, 3), 2, -0.5),
((1, 2), 1, None),
((0, 2), 1, None),
)
def generator():
for input_shape, num_groups, eps in cases:
# Shape of weight and bias should be the same as num_channels
weight = make_arg(input_shape[1])
bias = make_arg(input_shape[1])
kwargs = {'weight': weight, 'bias': bias} if eps is None else {'weight': weight, 'bias': bias, 'eps': eps}
yield SampleInput(
make_arg(input_shape),
args=(num_groups,),
kwargs=kwargs
)
# Without any optional args
yield SampleInput(make_arg((1, 2)), args=(1,))
return list(generator())
def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
# Ordered as: input shape, kwargs for momentum, eps
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((S, S, S), {'momentum': 0.5, 'eps': 0.6}),
((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}),
((3, 2, 4), {'momentum': -1.2}),
((3, 2, 4), {'momentum': 0.0}),
((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
)
def generator():
for input_shape, kwargs in cases:
            # args: running_mean, running_var, weight and bias must all have shape (channels,)
channels = input_shape[1]
weight = make_arg(channels)
bias = make_arg(channels)
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
new_kwargs = {
'running_mean': running_mean,
'running_var': running_var,
'weight': weight,
'bias': bias,
**kwargs
}
yield SampleInput(
make_arg(input_shape),
args=(),
kwargs=new_kwargs
)
# Checking for permutations of weights and biases as `None`
# instance_norm assumes that if there's a bias, there's a weight
weights = [channels, None]
biases = [None, None]
for weight_channels, bias_channels in zip(weights, biases):
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
yield SampleInput(
make_arg(input_shape),
args=(),
kwargs={
'running_mean': running_mean,
'running_var': running_var,
'weight': make_arg(weight_channels) if weight_channels is not None else None,
'bias': make_arg(bias_channels) if bias_channels is not None else None
}
)
# Test case for no optional kwargs
yield SampleInput(make_arg((1, 2, 3)), kwargs={})
return list(generator())
def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, normalized_shape and a kwarg dict for eps
cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 2, 3), (1, 2, 3), {'eps': 0.5}),
((2, 2, 3), (2, 3), {'eps': -0.5}),
((1,), (1,), {}),
((1, 2), (2,), {}),
((0, 1), (1,), {}),
)
def generator():
for input_shape, normalized_shape, kwargs in cases:
# Shape of weight and bias should be the same as normalized_shape
weight = make_arg(normalized_shape)
bias = make_arg(normalized_shape)
yield SampleInput(
make_arg(input_shape),
args=(normalized_shape, weight, bias),
kwargs=kwargs
)
# Without any optional args
yield SampleInput(make_arg((1, 2)), args=((2,),))
# TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs,
# enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400
# With weight and a `None` bias
# yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None))
# With `None` weight and bias (tests failing for this, see the link above)
# yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,))))
return list(generator())
def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, size and a kwarg dict for alpha, beta, and k
cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}),
((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}),
((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}),
((1, 6, 3), 2, {'alpha': 3e-05}),
((1, 6, 3), 2, {'beta': 0.5}),
((1, 6, 3), 2, {'k': 1.25}),
((1, 6, 3), 2, {}),
((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
)
def generator():
for input_shape, size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs)
return list(generator())
def sample_inputs_hardswish(self, device, dtype, requires_grad):
N = 5
    # make sure we are testing the -3 -> 3 range; the default is -10 -> 10, so this may be unnecessary
tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]
return tensors
def sample_inputs_linear(self, device, dtype, requires_grad):
features_options = [[3, 4], [8, 8]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for has_bias, (in_feat, out_feat), batch_shape in \
itertools.product([True, False], features_options, batch_options):
input_tensor = create_tensor(batch_shape + [in_feat])
weight = create_tensor([out_feat, in_feat])
if not has_bias:
sample_inputs.append(SampleInput(input_tensor, args=(weight,)))
continue
bias = create_tensor([out_feat])
sample_inputs.append(SampleInput(input_tensor, args=(weight, bias)))
return sample_inputs
def sample_inputs_bilinear(self, device, dtype, requires_grad):
features_options = [[3, 4, 5], [8, 8, 8]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \
itertools.product([True, False], features_options, batch_options):
input_tensor1 = create_tensor(batch_shape + [in_feat1])
input_tensor2 = create_tensor(batch_shape + [in_feat2])
weight = create_tensor([out_feat, in_feat1, in_feat2])
if not has_bias:
sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight,)))
continue
bias = create_tensor([out_feat])
sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight, bias)))
return sample_inputs
def sample_inputs_glu(self, device, dtype, requires_grad):
features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for features, batch_shape in itertools.product(features_options, batch_options):
ndim = len(features) + len(batch_shape)
for dim in range(ndim):
input_tensor = create_tensor(batch_shape + features)
dim_size = input_tensor.size(dim)
if dim_size > 0 and dim_size % 2 == 0:
sample_inputs.append(SampleInput(input_tensor, args=(dim,)))
return sample_inputs
def sample_inputs_interpolate(mode, self, device, dtype, requires_grad):
N, C = 2, 3
D = 4
S = 3
L = 5
align_corners_options: Tuple[Any, ...] = (None,)
if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):
align_corners_options = (True, False, None)
ranks_for_mode = {
'nearest': [1, 2, 3],
'linear': [1],
'bilinear': [2],
'bicubic': [2],
'trilinear': [3],
'area': [1, 2, 3]
}
def shape(size, rank, with_batch_channel=True):
if with_batch_channel:
return tuple([N, C] + ([size] * rank))
return tuple([size] * rank)
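    # e.g. with N=2, C=3: shape(4, 2) -> (2, 3, 4, 4) and shape(4, 2, False) -> (4, 4)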
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
sample_inputs = []
for align_corners in align_corners_options:
for rank in ranks_for_mode[mode]:
sample_inputs.extend([
SampleInput(make_arg(shape(D, rank)),
args=(shape(S, rank, False), None, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(shape(L, rank, False), None, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(None, 1.7, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(None, 0.6, mode, align_corners)),
])
return sample_inputs
def sample_inputs_upsample(mode, self, device, dtype, requires_grad):
N, C = 2, 3
D = 4
S = 3
L = 5
ranks_for_mode = {
'nearest': [1, 2, 3],
'bilinear': [2],
}
def shape(size, rank, with_batch_channel=True):
if with_batch_channel:
return tuple([N, C] + ([size] * rank))
return tuple([size] * rank)
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
sample_inputs = []
for rank in ranks_for_mode[mode]:
sample_inputs.extend([
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(size=shape(S, rank, False))),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(size=shape(L, rank, False))),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(scale_factor=1.7)),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(scale_factor=0.6)),
])
return sample_inputs
def sample_inputs_gelu(self, device, dtype, requires_grad):
N = 5
tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-3, high=3)) for _ in range(1, N)]
return tensors
def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
args_for_reduction_with_dim = (
((S, S, S), (1,),),
((S, S, S), (1, True, ),),
((), (0,),),
((), (0, True,),),
)
    inputs = [SampleInput(make_tensor(input_tensor, device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          args=args)
              for input_tensor, args in args_for_reduction_with_dim]
return inputs
def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
inputs.append(SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),))
inputs.append(SampleInput(make_tensor((), device, dtype,
low=None, high=None,
requires_grad=requires_grad),))
return inputs
def _generate_nan_reduction_inputs(device, dtype, requires_grad):
yield from _generate_reduction_inputs(device, dtype, requires_grad)
yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad)
yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad)
def sample_inputs_nan_reduction(supports_multiple_dims):
# Generates sample inputs for reduction ops that contain the input tensor
# and dim and keepdim kwargs. If a reduction op needs to test additional
# args/kwargs then create a separate sample_inputs function
def fn(op_info, device, dtype, requires_grad):
inputs = []
for t in _generate_nan_reduction_inputs(device, dtype, requires_grad):
# Add case without dim and keepdim kwargs
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad)))
for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
kwargs=kwargs))
return inputs
return fn
def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad):
test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1))
test_interpolations = ['linear', 'midpoint']
inputs = []
for quantiles in test_quantiles:
for t in _generate_reduction_inputs(device, dtype, requires_grad):
# Add case without dim and keepdim kwargs
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(quantiles,)))
for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):
# Interpolation kwarg for now is only supported when providing both dim and keepdim
kwargs.setdefault('dim', 0)
kwargs.setdefault('keepdim', False)
for interpolation in test_interpolations:
kwargs['interpolation'] = interpolation
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(quantiles,), kwargs=kwargs))
return inputs
def sample_inputs_reduction_count_nonzero(*args, **kwargs):
"""Sample inputs for count_nonzero"""
samples: List[SampleInput] = sample_inputs_reduction(*args, **kwargs)
# count_nonzero does not support keepdim yet
for sample in samples:
sample.kwargs.pop('keepdim', None)
return samples
def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad):
N = 10
tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
requires_grad=requires_grad)) for _ in range(1, N)]
return tensors
def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size
cases = (((1, 3, 9, 9), 3),
((1, 3, 9, 9), (4, 4)),
((1, 3, 9, 9), (6, 6)),
((2, 3, 9, 9), (3, 3)),
((1, 1, 4, 4), (2, 2)),
((1, 2, 6, 6), (4, 4)))
samples = []
for input_shape, kernel_size in cases:
for return_indices in [False, True]:
# test case passing a single output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2), return_indices=return_indices)
))
# test case passing a tuple output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2, 3), return_indices=return_indices)
))
# test case passing an output ratio
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_ratio=(0.5, 0.5), return_indices=return_indices)
))
return samples
def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size
cases = (((2, 3, 5, 5, 5), (2, 2, 2)),
((1, 2, 6, 5, 4), 2),
((1, 2, 5, 6, 5), (2, 3, 2)),
((1, 2, 6, 6, 6), (2, 3, 2)),
((1, 1, 7, 6, 7), (2, 3, 4)),
((1, 1, 4, 5, 4), (2, 2, 1)),
((1, 1, 8, 7, 6), (4, 3, 2)),
((0, 1, 4, 5, 4), (2, 2, 1)))
samples = []
for input_shape, kernel_size in cases:
for return_indices in [False, True]:
# test case passing a single output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2), return_indices=return_indices)
))
# test case passing a tuple output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2, 3, 2), return_indices=return_indices)
))
# test case passing an output ratio
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_ratio=(0.5, 0.5, 0.5), return_indices=return_indices)
))
return samples
def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),
((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),
((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),
((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),
((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),
((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))
def generator():
for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:
yield SampleInput(make_arg(input_shape),
args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))
# Case with just input_shape and kernel_size
        yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))
return list(generator())
def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, kwargs
cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
((2, 3, 9), (3,), dict()),
((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)),
((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)),
((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)),
((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)),
((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)),
((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)),
((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)),
((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)),
]
def generator():
for input_shape, kernel_size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)
return list(generator())
def sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
((2, 3, 3, 4, 4), (2, 2, 2), dict()),
((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True,
count_include_pad=False, divisor_override=2)),
((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True,
count_include_pad=True, divisor_override=2)),
((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)),
((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False,
count_include_pad=False, divisor_override=2)),
((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=-2)),
((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True,
count_include_pad=True, divisor_override=None)),
((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None)),
]
def generator():
for input_shape, kernel_size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)
return list(generator())
def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):
def get_tensor_input(size):
return make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs = []
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1,)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))
return inputs
def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
arg_a = make_tensor((S,), device, dtype, requires_grad=requires_grad)
arg_b = make_tensor((M,), device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(arg_a, args=(arg_b,)))
return inputs
def sample_inputs_igamma_igammac(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, low=1e-3)
cases = (((S, S), (S, S), False),
((S, S), (S, ), False),
((S, ), (S, S), True),
((), (), False))
def generator():
for shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape, requires_grad=requires_grad),
args=(make_arg(other_shape, requires_grad=False),),
broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_dist(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))
ps = (2, 4)
def generate_samples():
for size_x, size_y, p in product(sizes, sizes, ps):
yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))
return list(generate_samples())
# Still missing: tests for the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index_copy(op_info, device, dtype, requires_grad, **kwargs):
def make_arg(shape, low=None, high=None, dtype=dtype, requires_grad=requires_grad):
return make_tensor(shape, device=device, dtype=dtype,
low=low, high=high, requires_grad=requires_grad)
t = make_arg((S, S))
s = make_arg((S, S))
# idx is a permutation of 0...S-1 for this function to be deterministic
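    # (with duplicate indices the result of index_copy depends on which copy happens last,
    #  which is not specified)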
idx = torch.randperm(S, device=device, dtype=torch.int64)
samples = [SampleInput(t, args=(1, idx, s))]
# Add scalar cases
scalar_sizes = [(), (1,)]
ts = (make_arg(size) for size in scalar_sizes)
idxs = (make_arg(size, dtype=torch.int64, low=0, high=1, requires_grad=False) for size in scalar_sizes)
ss = (make_arg(size) for size in scalar_sizes)
samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))
return samples
def sample_inputs_mode(op_info, device, dtype, requires_grad):
inputs = []
args = (
((S, S, S), (),),
((S, S, S), (1, ),),
((S, S, S), (1, True, ),),
((), (),),
((), (0,),),
((), (0, True,),),
)
    inputs = [SampleInput(make_tensor(input_tensor, device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          args=case_args)
              for input_tensor, case_args in args]
return inputs
# Still missing: tests for the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_put(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)
S = 3
def gen_inputs():
# Generic inputs
idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]
idx_list = [idx, -idx - 1]
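        # -idx - 1 produces negative indices, exercising wrap-around indexing into the flattened tensor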
for idx, acc in product(idx_list, (True, False)):
yield SampleInput(input=make_arg((S, S)),
args=(idx.detach().clone(),
make_arg((S,)),
acc))
# Scalar cases
scalar_sizes = [(), (1,)]
tgt_gen = (make_arg(size) for size in scalar_sizes)
idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
src_gen = (make_arg(size) for size in scalar_sizes)
for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):
yield SampleInput(input=tgt.detach().clone().requires_grad_(requires_grad),
args=(idx.detach().clone(),
src.detach().clone().requires_grad_(requires_grad),
acc))
# Empty cases
tgt_sizes = [(0,), (), (1,), (3, 2)]
tgt_gen = (make_arg(size) for size in tgt_sizes)
idx = make_idx((0,), high=1)
src = make_arg((0,))
        for tgt, acc in product(tgt_gen, (True, False)):
yield SampleInput(input=tgt.detach().clone().requires_grad_(requires_grad),
args=(idx.detach().clone(),
src.detach().clone().requires_grad_(requires_grad),
acc))
return list(gen_inputs())
def sample_inputs_take(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)
S = 3
def gen_inputs():
# Generic inputs: take S elements out of S * S
index = make_idx((S,), high=(S * S))
for idx in (index, -index - 1):
yield SampleInput(input=make_arg((S, S)), args=(idx,))
# Scalar cases
scalar_sizes = [(), (1,)]
src_gen = (make_arg(size) for size in scalar_sizes)
idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
for src, idx in product(src_gen, idx_gen):
yield SampleInput(input=src.detach().clone().requires_grad_(requires_grad),
args=(idx.detach().clone(),))
# Empty cases
src_sizes = [(0,), (), (1,), (3, 2)]
src_gen = (make_arg(size) for size in src_sizes)
idx = make_idx((0,), high=1)
for src in src_gen:
yield SampleInput(input=src.detach().clone().requires_grad_(requires_grad),
args=(idx.detach().clone(),))
return list(gen_inputs())
def sample_movedim_moveaxis(op_info, device, dtype, requires_grad):
return (
SampleInput(
make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=([0, 1, 2, 3], [3, 2, 1, 0])),
SampleInput(
make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=([0, -1, -2, -3], [-3, -2, -1, -0]))
)
def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))
if requires_grad:
# Tests for variant_consistency_jit, grad, gradgrad
# are slower. Use smaller bags of `rep_dims` and `shapes`
# in this case.
rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment]
shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment]
samples = []
for rep_dim, shape in product(rep_dims, shapes):
# `torch.repeat` errors for `len(rep_dims) < t.dim()`,
# so we filter such combinations.
if op_info.name == 'repeat' and len(rep_dim) < len(shape):
continue
samples.append(SampleInput(make_arg(shape), args=(rep_dim,),))
return samples
def sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_args = (
((S, S, S), (1, 2, 2)),
((S, S, S), (-1, 2, 2)),
((S, S, S), (1, 0, 0)),
((S, S, S), (-1, 0, 0)),
)
def generator():
for shape, args in shapes_and_args:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
yield SampleInput(tensor, args=args)
return list(generator())
def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
y_shape_x_shape_and_kwargs = [
((2, 3), (2, 3), {}),
((2, 3), (2, 3), {'dim': 1}),
((6,), (6,), {}),
((6,), None, {}),
# When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #61619
# ((6,0), (6,0), {}),
((2, 3), (1, 3), {}),
((3, 3), (3, 3), {}),
((3, 3), (3, 3), {'dim': -2}),
((5,), None, {'dx': 2.0}),
((2, 2), None, {'dx': 3.0})
]
samples = []
for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
if x_shape is not None:
x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
else:
samples.append(SampleInput(y_tensor, kwargs=kwarg))
return samples
def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
y_shape_x_shape_and_kwargs = [
((2, 3), (2, 3), {}),
((2, 3), (2, 3), {'dim': 1}),
((6,), (6,), {}),
((6,), None, {}),
# When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #61619
# ((6,0), (6,0), {}),
((2, 3), (1, 3), {}),
((3, 3), (3, 3), {}),
((3, 3), (3, 3), {'dim': -2}),
((5,), None, {'dx': 2.0}),
((2, 2), None, {'dx': 3.0})
]
samples = []
for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
if x_shape is not None:
x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
else:
samples.append(SampleInput(y_tensor, kwargs=kwarg))
return samples
def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_axes = [
((3, 4, 5), 0),
((3, 4, 5), 1),
((3, 4, 5), 3),
((3, 4, 5), -1),
((3, 4, 5), -3),
((), 0)
]
samples = []
for shape, axis in shapes_and_axes:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(tensor, args=(axis,),))
return samples
def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):
shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5))
kernel_sizes = (2, (2, 2), (3, 3))
dilations = (1, 2, (1, 2))
paddings = (0, 1, (1, 1))
strides = (1, 2, (1, 2))
def generator():
cases = product(shapes, kernel_sizes, dilations, paddings, strides)
for shape, kernel_size, dilation, padding, stride in cases:
tensor = make_tensor(shape, device, dtype, requires_grad=requires_grad)
yield SampleInput(tensor, args=(kernel_size, dilation, padding, stride))
# With default args
yield SampleInput(make_tensor((1, 1, 5, 5), device, dtype, requires_grad=requires_grad),
args=((3, 3),))
return list(generator())
def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_args = (
((S, 1, S, 1), ()),
((1, 1, 1, 1), ()),
((S, 1, S, 1), (1,)),
((S, 1, S, 1), (-1,)),
((S, 1, S, 1), (2,)),
((S, 1, S, 1), (-2,)),
((), (0, )),
)
def generator():
for shape, args in shapes_and_args:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
yield SampleInput(tensor, args=args)
return list(generator())
def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):
assert mode in ('constant', 'reflect', 'replicate', 'circular')
if mode in ['reflect', 'replicate']:
cases: tuple = ( # ignore
((1, 3), (1, 2)),
((1, 3), (0, 1)),
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((1, 3, 3), (0, 2, 0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
elif mode == 'constant':
cases = (
((1, 3), (1, 2)),
((1, 3), (0, 1)),
((1, 3), (0, 2, 0, 1)),
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((0, 3, 3), (0, 2, 0, 1)),
((0, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((1, 3, 3), (0, 2, 0, 1)),
((1, 3, 3), (1, 1, 1, 1, 1, 1)),
((0, 3, 3, 3), (1, 2)),
((0, 3, 3, 3), (0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((3, 3, 5, 5), (1, 2)),
((3, 3, 5, 5), (0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
((1, 3, 3, 3, 3), (1, 2)),
((1, 3, 3, 3, 3), (0, 1)),
((1, 3, 3, 3, 3), (0, 2, 0, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
else: # mode == 'circular'
if dtype == torch.bool:
            # test_dtypes fails on ASAN for this case with the runtime error:
            # "load of value 190, which is not a valid value for type 'bool'"
# Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562
# Reference Issue: https://github.com/pytorch/pytorch/issues/63034
cases = (
((2, 3, 3), (1, 2)),
((1, 3, 3), (1, 2)),
)
else:
cases = (
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def generator():
if mode == 'constant':
# Default args
yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))
if mode in ['reflect', 'replicate', 'circular']:
for shape, pad in cases:
yield SampleInput(make_inp(shape), args=(pad, mode))
else: # mode == 'constant'
for pad_value in (1., 2.):
for shape, pad in cases:
yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))
return list(generator())
# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet
# Creates matrices with a positive nonzero determinant
def sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):
def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):
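        # Rebuild A from its SVD with the singular values clamped away from zero so that
        # det(A) != 0; then, if a sign is requested, negate the first row of every matrix
        # whose determinant has the wrong sign (negating a single row flips the determinant's sign).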
u, s, vh = torch.linalg.svd(A, full_matrices=False)
s.clamp_(min=min_singular_value)
A = (u * s.unsqueeze(-2)) @ vh
det = A.det()
if sign is not None:
if A.dim() == 2:
if (det < 0) ^ (sign < 0):
A[0, :].neg_()
else:
cond = ((det < 0) ^ (sign < 0)).nonzero()
if cond.size(0) > 0:
for i in range(cond.size(0)):
A[list(cond[i])][0, :].neg_()
return A
samples = []
# cases constructed using make_tensor()
tensor_shapes = (
(S, S),
(1, 1),
(3, 3, S, S),
(3, 3, 1, 1)
)
for shape in tensor_shapes:
t = make_tensor(shape, device=device, dtype=dtype)
d = make_nonzero_det(t).requires_grad_(requires_grad)
samples.append(SampleInput(d))
# cases constructed using:
# 1) make_symmetric_matrices
# 2) make_symmetric_pd_matrices
# 3) make_fullrank_matrices_with_distinct_singular_values
symmetric_shapes = (
(S, S),
(3, S, S),
)
def _helper(constructor, *shape, **kwargs):
t = constructor(*shape, device=device, dtype=dtype)
d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)
samples.append(SampleInput(d))
for shape in symmetric_shapes:
_helper(make_symmetric_matrices, *shape)
_helper(make_symmetric_pd_matrices, *shape)
_helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)
return tuple(samples)
def np_unary_ufunc_integer_promotion_wrapper(fn):
# Wrapper that passes PyTorch's default scalar
# type as an argument to the wrapped NumPy
# unary ufunc when given an integer input.
    # This mimics PyTorch's integer->floating point
# type promotion.
#
# This is necessary when NumPy promotes
# integer types to double, since PyTorch promotes
# integer types to the default scalar type.
# Helper to determine if promotion is needed
def is_integral(dtype):
return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]
@wraps(fn)
def wrapped_fn(x):
# As the default dtype can change, acquire it when function is called.
# NOTE: Promotion in PyTorch is from integer types to the default dtype
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
if is_integral(x.dtype):
return fn(x.astype(np_dtype))
return fn(x)
return wrapped_fn
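# Illustrative (hypothetical) usage of the wrapper above: NumPy evaluates np.sin on an
# int64 array in float64, while PyTorch promotes integer inputs to its default dtype
# (float32 unless changed), so wrapping the reference keeps both in agreement:
#
#   np_sin_ref = np_unary_ufunc_integer_promotion_wrapper(np.sin)
#   np_sin_ref(np.array([1, 2, 3]))  # evaluated in the current default dtype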
def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
nd_tensor = partial(make_tensor, (S, S + 1, S + 2), device=device,
dtype=dtype, requires_grad=requires_grad)
oned_tensor = partial(make_tensor, (31,), device=device,
dtype=dtype, requires_grad=requires_grad)
if self.ndimensional == SpectralFuncType.ND:
return [
SampleInput(nd_tensor(),
kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(s=(8,))),
SampleInput(oned_tensor()),
*(SampleInput(nd_tensor(),
kwargs=dict(dim=dim))
for dim in [-1, -2, -3, (0, -1)]),
]
elif self.ndimensional == SpectralFuncType.TwoD:
return [
SampleInput(nd_tensor(),
kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(s=(6, 8))),
SampleInput(nd_tensor(),
kwargs=dict(dim=0)),
SampleInput(nd_tensor(),
kwargs=dict(dim=(0, -1))),
SampleInput(nd_tensor(),
kwargs=dict(dim=(-3, -2, -1))),
]
else:
return [
SampleInput(nd_tensor(),
kwargs=dict(n=10, dim=1, norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(n=7)),
SampleInput(oned_tensor()),
*(SampleInput(nd_tensor(),
kwargs=dict(dim=dim))
for dim in [-1, -2, -3]),
]
def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_input(()), kwargs=dict(repeats=2)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2, dim=1)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=torch.arange(3, device=device), dim=1))
]
SpectralFuncType = Enum('SpectralFuncType', ('OneD', 'TwoD', 'ND'))
# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
"""Operator information for torch.fft transforms. """
def __init__(self,
name, # the string name of the function
*,
ref=None, # Reference implementation (probably in np.fft namespace)
dtypes=floating_and_complex_types(),
ndimensional: SpectralFuncType,
sample_inputs_func=sample_inputs_spectral_ops,
decorators=None,
**kwargs):
decorators = list(decorators) if decorators is not None else []
decorators += [
skipCPUIfNoFFT,
skipCUDAIfRocm,
]
super().__init__(name=name,
dtypes=dtypes,
decorators=decorators,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
self.ndimensional = ndimensional
def sample_inputs_stft(op_info, device, dtype, requires_grad):
def mt(shape, **kwargs):
return make_tensor(shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt(100), kwargs=dict(n_fft=10))
for center in [False, True]:
yield SampleInput(mt(10), kwargs=dict(n_fft=7, center=center))
yield SampleInput(mt((10, 100)), kwargs=dict(n_fft=16, hop_length=4, center=center))
window = make_tensor(16, low=.5, high=2.0, dtype=dtype, device=device)
yield SampleInput(
mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
yield SampleInput(
mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
if not dtype.is_complex:
yield SampleInput(
mt((10, 100)), kwargs=dict(n_fft=16, window=window, onesided=False))
def sample_inputs_istft(op_info, device, dtype, requires_grad):
def mt(shape, **kwargs):
real_shape = shape if dtype.is_complex else shape + (2,)
return make_tensor(real_shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10))
yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False))
yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True))
for center in [False, True]:
yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center))
yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center))
window = make_tensor(10, low=.5, high=2.0, dtype=dtype, device=device)
yield SampleInput(mt((10, 10, 6)), kwargs=dict(
n_fft=10, window=window, center=center, return_complex=dtype.is_complex))
yield SampleInput(mt((10, 10, 10)), kwargs=dict(
n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True))
real_window = window if not dtype.is_complex else window.real
yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center))
def sample_inputs_fftshift(op_info, device, dtype, requires_grad):
def mt(shape, **kwargs):
return make_tensor(shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt((9, 10)))
yield SampleInput(mt((50,)), kwargs=dict(dim=0))
yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,)))
yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1)))
yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2)))
class ShapeFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""
def __init__(self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCUDA=None,
dtypesIfROCM=None,
sample_inputs_func=None,
**kwargs):
super(ShapeFuncInfo, self).__init__(name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
def sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False):
if same_size:
return [make_tensor((N, N), device, dtype, noncontiguous=noncontiguous) for _ in range(N)]
else:
return [make_tensor((N - i, N - i), device, dtype, noncontiguous=noncontiguous) for i in range(N)]
def get_foreach_method_names(name):
# get torch inplace reference function
op_name = "_foreach_" + name
inplace_op_name = "_foreach_" + name + "_"
op = getattr(torch, op_name, None)
inplace_op = getattr(torch, inplace_op_name, None)
ref = getattr(torch, name, None)
ref_inplace = getattr(torch.Tensor, name + "_", None)
return op, inplace_op, ref, ref_inplace
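# For example, get_foreach_method_names("add") resolves to
# (torch._foreach_add, torch._foreach_add_, torch.add, torch.Tensor.add_);
# any attribute that does not exist is returned as None.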
class ForeachFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for foreach functions"""
def __init__(self,
name,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
dtypesIfROCM=None,
safe_casts_outputs=True,
supports_alpha_param=False,
sample_inputs_func=sample_inputs_foreach,
**kwargs):
super().__init__(
"_foreach_" + name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
safe_casts_outputs=safe_casts_outputs,
sample_inputs_func=sample_inputs_func,
**kwargs
)
foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name)
self.method_variant = foreach_method
self.inplace_variant = foreach_method_inplace
self.ref = torch_ref_method
self.ref_inplace = torch_ref_inplace
self.supports_alpha_param = supports_alpha_param
if name == "norm":
self.ref = torch.linalg.vector_norm
def sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False):
# Generate Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
inputs = (
torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix
torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices
random_hermitian_pd_matrix(S, dtype=dtype, device=device), # single matrix
random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), # batch of matrices
)
test_cases = (torch.linalg.cholesky(a) for a in inputs)
out = []
for a in test_cases:
a.requires_grad = requires_grad
out.append(SampleInput(a))
out.append(SampleInput(a.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=True)))
return out
def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
device = torch.device(device)
drivers: Tuple[str, ...]
if device.type == 'cuda':
drivers = ('gels',)
else:
drivers = ('gels', 'gelsy', 'gelss', 'gelsd')
# we generate matrices of shape (..., n + delta, n)
deltas: Tuple[int, ...]
if device.type == 'cpu' or has_cusolver():
deltas = (-1, 0, +1)
    # only square systems if Cusolver is not available
    # because the backward pass solves an lstsq problem with a transposed matrix
else:
deltas = (0,)
out = []
for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas):
shape = batch + (3 + delta, 3)
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
a.requires_grad_(requires_grad)
b = make_tensor(shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
out.append(SampleInput(a, args=(b,), kwargs=dict(driver=driver)))
return out
def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
"""
This function generates input for torch.linalg.householder_product (torch.orgqr).
    The first argument should be a square matrix or a batch of square matrices; the second argument is a vector or a batch of vectors.
Empty, square, rectangular, batched square and batched rectangular input is generated.
"""
# Each column of the matrix is getting multiplied many times leading to very large values for
# the Jacobian matrix entries and making the finite-difference result of grad check less accurate.
# That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.
samples = (
SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((2, 1, S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((2, 1, S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((0, 0), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
# m = n = S, k = S - 2
SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S - 2,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
# m = S, n = S -1, k = S - 2
SampleInput(make_tensor((S, S - 1), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S - 2,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
)
return samples
def sample_inputs_ormqr(op_info, device, dtype, requires_grad):
# create a helper function wrapping `make_tensor`
make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def gen_inputs():
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
tf = [True, False]
for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):
reflectors = make_input((*batch, m, n))
tau = make_input((*batch, min(m, n)))
other_matrix_shape = (m, n) if left else (n, m)
other = make_input((*batch, *other_matrix_shape))
kwargs = {"left": left, "transpose": transpose}
yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)
return tuple(gen_inputs())
def sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):
"""
    This function always generates positive-definite input for torch.linalg.cholesky using
random_hermitian_pd_matrix.
The input is generated as the itertools.product of 'batches' and 'ns'.
    In total this function generates 16 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 0]
out = []
for batch, n, upper in product(batches, ns, [True, False]):
a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
out.append(SampleInput(a, kwargs={"upper": upper}))
return out
def sample_inputs_symeig(op_info, device, dtype, requires_grad=False):
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for o in out:
o.kwargs = {"upper": bool(np.random.choice([True, False])),
"eigenvectors": True}
# A gauge-invariant function
o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))
return out
def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False):
"""
    This function generates input for torch.linalg.eig.
"""
def out_fn(output):
return output[0], abs(output[1])
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument.
"""
def out_fn(output):
if isinstance(output, tuple):
# eigh function
return output[0], abs(output[1])
else:
# eigvalsh function
return output
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.kwargs = {"UPLO": np.random.choice(["L", "U"])}
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False):
def out_fn(output):
return output[1]
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs):
"""
    This function generates input for torch.linalg.pinv with different values of the rcond keyword argument.
"""
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)
real_dtype = out[0].input.real.dtype if dtype.is_complex else dtype
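    # rcond is compared against the (real) singular values, so a tensor-valued rcond
    # uses the matching real dtype even when the input matrix is complex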
for o in out:
# requires_grad path for rcond tensor is not implemented
for rcond in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)):
o.kwargs = {"rcond": rcond}
return out
def sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.pinv with hermitian=True keyword argument.
"""
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)
for o in out:
o.kwargs = {"hermitian": True}
return out
def sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):
"""
    This function always generates solvable input for torch.linalg.solve.
    Using random_fullrank_matrix_distinct_singular_value gives non-singular (=invertible, =solvable) matrices 'a'.
The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.
The second input is generated as the product of 'batches', 'ns' and 'nrhs'.
In total this function generates 18 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices.
'ns' gives 0x0 and 5x5 matrices.
and 'nrhs' controls the number of vectors to solve for:
() - using 1 as the number of vectors implicitly
(1,) - same as () but explicit
(3,) - solve for 3 vectors.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.
torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow
1D tensors (vectors) as the right-hand-side.
Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,
'vector_rhs_allowed' may be removed here as well.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, )]
ns = [5, 0]
if vector_rhs_allowed:
nrhs = [(), (1,), (3,)]
else:
nrhs = [(1,), (3,)]
out = []
for n, batch, rhs in product(ns, batches, nrhs):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
b = torch.randn(*batch, n, *rhs, dtype=dtype, device=device)
b.requires_grad = requires_grad
out.append(SampleInput(a, args=(b,)))
return out
def sample_inputs_linalg_solve_triangular(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
bs = (1, 2, 0)
ns = (3, 0)
ks = (1, 3, 0)
def gen_inputs():
for b, n, k, (left, upper, uni) in product(bs, ns, ks, product((True, False), repeat=3)):
with torch.no_grad():
if b == 1:
A = make_arg((n, n)) if left else make_arg((k, k))
B = make_arg((n, k))
else:
A = make_arg((b, n, n)) if left else make_arg((b, k, k))
B = make_arg((b, n, k))
if uni:
# Not really necessary, but writing it for consistency
A.diagonal(0, -2, -1).fill_(1.)
else:
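                    # push near-zero diagonal entries away from zero so the triangular
                    # matrix stays invertible and the system well conditioned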
d = A.diagonal(0, -2, -1)
d[d.abs() < 1e-6] = 1.
if upper:
A.triu_()
else:
A.tril_()
kwargs = {"upper": upper, "left": left, "unitriangular": uni}
if requires_grad:
for grad_A, grad_B in product((True, False), repeat=2):
# Either A or B needs to have a gradient
if not grad_A and not grad_B:
continue
A.requires_grad_(grad_A)
B.requires_grad_(grad_B)
yield SampleInput(A, args=(B,), kwargs=kwargs)
else:
yield SampleInput(A, args=(B,), kwargs=kwargs)
return list(gen_inputs())
def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):
"""
    This function always generates solvable input for the legacy solve functions
(the ones that are not in torch.linalg module).
    The difference from sample_inputs_linalg_solve is that here the right-hand side of the A x = b equation
    must have b.ndim >= 2; vectors are not allowed.
    Also, the argument order is swapped.
"""
out = sample_inputs_linalg_solve(
op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False
)
# Reverses tensor order
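    # (the legacy functions take the right-hand side first, e.g. torch.solve(B, A))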
for sample in out:
sample.input, sample.args = sample.args[0], (sample.input,)
return out
def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs):
out = sample_inputs_linalg_cholesky_inverse(
op_info, device, dtype, requires_grad=False
)
for sample in out:
psd_matrix = sample.input
sample.input = make_tensor(psd_matrix.shape, device, dtype, requires_grad=requires_grad, low=None, high=None)
sample.args = (psd_matrix.requires_grad_(requires_grad),)
return out
def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):
# not needed once OpInfo tests support Iterables
def generate_samples():
batch_shapes = ((), (3,), (3, 3))
for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)):
shape = batch_shape + (S + size_delta, S)
input = make_tensor(shape, device, dtype, requires_grad=requires_grad, low=None, high=None)
yield SampleInput(input, args=(True, get_infos))
return list(generate_samples())
def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, )]
ns = [5, 3, 0]
nrhs = [0, 1, 6]
def generate_samples():
for n, batch, rhs in product(ns, batches, nrhs):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
requires_grad_options = (False,) if not requires_grad else (True, False)
# we try all possible combinations of requires_grad for each input
for lu_requires_grad, b_requires_grad in product(requires_grad_options, requires_grad_options):
# when requires_grad == True, at least one input has to have requires_grad enabled
if requires_grad and not lu_requires_grad and not b_requires_grad:
continue
# we run LU several times to guarantee that the produced SampleInputs are independent
                # this is especially important when setting different requires_grad for the same tensors!
lu, pivs = a.lu()
lu.requires_grad = lu_requires_grad
b = torch.randn(*batch, n, rhs, dtype=dtype, device=device)
b.requires_grad = b_requires_grad
yield SampleInput(b, args=(lu, pivs))
return list(generate_samples())
def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):
# not needed once OpInfo tests support Iterables
def generate_samples():
for lu_sample in sample_inputs_lu(op_info, device, dtype, requires_grad, **kwargs):
lu_data, pivots = lu_sample.input.lu()
yield SampleInput(lu_data, args=(pivots,))
# generate rectangular inputs
lu_data_shape = lu_data.shape
batch_shape = lu_data_shape[:-2]
n = lu_data_shape[-2]
for shape_inc in ((1, 0), (0, 1)):
lu_data, pivots = make_tensor(
batch_shape + (n + shape_inc[0], n + shape_inc[1]),
device, dtype,
requires_grad=False,
low=None, high=None
).lu()
lu_data.requires_grad_(requires_grad)
yield SampleInput(lu_data, args=(pivots,))
return list(generate_samples())
def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))
def generator():
for arg in args:
yield SampleInput(make_arg((S, S, S)), args=arg)
return list(generator())
def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
args = ((1, (0, 1),),
(1, (1, 2),),
(1, (1, -1),),
())
def generator():
for arg in args:
yield SampleInput(make_arg((S, S, S)), args=arg)
return list(generator())
def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):
tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype,
requires_grad=requires_grad)
tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype,
requires_grad=requires_grad)
return [
SampleInput(tensor_nd()),
SampleInput(tensor_nd(), kwargs=dict(dim=1)),
SampleInput(tensor_nd(), kwargs=dict(dim=1, unbiased=True, keepdim=True)),
SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=True, keepdim=True)),
SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=False, keepdim=False)),
SampleInput(tensor_nd(), kwargs=dict(dim=(1,), correction=S // 2)),
SampleInput(tensor_nd(), kwargs=dict(dim=None, correction=0, keepdim=True)),
]
def _generate_correlation_inputs(device, dtype, requires_grad):
shapes = [(2,), (1, 2), (3, 2), (2, 3)]
for shape in shapes:
yield make_tensor(shape, device, dtype, requires_grad=requires_grad)
def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs):
return [SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)]
def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
for t in _generate_correlation_inputs(device, dtype, requires_grad):
inputs.append(SampleInput(t))
num_observations = t.numel() if t.ndimension() < 2 else t.size(1)
fweights = make_tensor((num_observations,), device, torch.int, low=1, high=10)
aweights = make_tensor((num_observations,), device, torch.float, low=0, high=1, requires_grad=requires_grad)
for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]):
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
kwargs={'correction': correction, 'fweights': fw, 'aweights': aw}))
return inputs
def _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):
"""
This function generates input for torch.svd with distinct singular values so that autograd is always stable.
Matrices of different size:
square matrix - S x S size
        tall matrix - S x (S-2)
wide matrix - (S-2) x S
and batched variants of above are generated.
    Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied to the output of torch.svd.
    It is needed for autograd checks, because the backward of svd doesn't work for an arbitrary loss function.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
    # svd and linalg.svd return V and V.conj().T, respectively. So we need to slice
# along different dimensions when needed (this is used by
# test_cases2:wide_all and wide_all_batched below)
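    # Rough shape illustration (assuming a wide (S - 2) x S input with some=False /
    # full_matrices=True): torch.svd returns V of shape (S, S), so we keep its first
    # S - 2 columns, while torch.linalg.svd returns Vh = V.conj().T of shape (S, S),
    # so we keep its first S - 2 rows instead.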
if is_linalg_svd:
def slice_V(v):
return v[..., :(S - 2), :]
def uv_loss(usv):
u00 = usv[0][0, 0]
v00_conj = usv[2][0, 0]
return u00 * v00_conj
else:
def slice_V(v):
return v[..., :, :(S - 2)]
def uv_loss(usv):
u00 = usv[0][0, 0]
v00_conj = usv[2][0, 0].conj()
return u00 * v00_conj
test_cases1 = ( # some=True (default)
        # loss functions for complex-valued svd have to be "gauge invariant",
        # i.e. loss functions shouldn't change when the sign of the singular vectors changes.
        # The simplest choice to satisfy this requirement is to apply 'abs'.
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),
lambda usv: usv[1]), # 'check_grad_s'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),
lambda usv: abs(usv[0])), # 'check_grad_u'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),
lambda usv: abs(usv[2])), # 'check_grad_v'
# this test is important as it checks the additional term that is non-zero only for complex-valued inputs
# and when the loss function depends both on 'u' and 'v'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),
uv_loss), # 'check_grad_uv'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))), # 'wide'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:, :(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'tall'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device),
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :(S - 2), :],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'wide_batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :, :(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'tall_batched'
)
test_cases2 = ( # some=False
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))), # 'wide_all'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:, :(S - 2)],
lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))), # 'tall_all'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :(S - 2), :],
lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))), # 'wide_all_batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :, :(S - 2)],
lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))), # 'tall_all_batched'
)
out = []
for a, out_fn in test_cases1:
a.requires_grad = requires_grad
if is_linalg_svd:
kwargs = {'full_matrices': False}
else:
kwargs = {'some': True}
out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))
for a, out_fn in test_cases2:
a.requires_grad = requires_grad
if is_linalg_svd:
kwargs = {'full_matrices': True}
else:
kwargs = {'some': False}
out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))
return out
def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = [((1, 2, 3, 4), (0, 2, 3, 1)),
((1, 2, 3, 4), (0, -2, -1, 1)),
((), ()),
((1, 2, 3, 4), (2, 1, 3, 0))]
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=(args,))
return list(generator())
# Based on erstwhile method_tests tests & some tensor_op_tests for pow
def sample_inputs_pow(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype)
samples = []
if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:
test_cases = (
((2, 2), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, False),
((2, 2), 0, 5, 1e-3, requires_grad, (1,), 0, 1, 0.1, requires_grad, False),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (), 0.1, 1.1, 0, False, False),
((2, 2), 0, 5, 1e-3, requires_grad, (), 0.1, 1.1, 1, False, False),
)
tests_require_resizing = (
((1,), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, requires_grad),
((2, 1, 2), 0, 5, 1e-3, requires_grad, (1, 2, 1), 0, 1, 0.1, requires_grad, requires_grad),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (1, S, 1), 0, 1, 0.1, requires_grad, requires_grad),
)
cases = test_cases + tests_require_resizing
samples = []
for (shape_b, low_b, high_b, additive_b, b_grad, shape_e, low_e,
high_e, additive_e, e_grad, broadcasts_input) in cases:
si = SampleInput((make_arg(shape_b, low=low_b, high=high_b) + additive_b).requires_grad_(b_grad),
args=((make_arg(shape_e, low=low_e, high=high_e) + additive_e).requires_grad_(e_grad),),
broadcasts_input=broadcasts_input)
samples.append(si)
tensor_scalar_inputs = (
((2, 2), 0, 5, 1e-3, requires_grad, (3.14,)),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (3.14,))
)
more_samples = list(SampleInput(
(make_arg(shape, high=high, low=low) + additive).requires_grad_(b_grad),
args=exp)
for shape, low, high, additive, b_grad, exp in tensor_scalar_inputs)
samples = [*samples, *more_samples]
elif dtype in [torch.complex64, torch.complex128]:
args_tuple = (
((2, 2), 0, 5, requires_grad, (3.14,)),
((), 0, 1, requires_grad, (3.14,)),
((), 0, 1, requires_grad, (3.14j,))
)
samples = list(SampleInput(
(make_arg(shape, high=high, low=low) + 1e-3 * (1 + 1j)).requires_grad_(b_grad),
args=arg)
for shape, low, high, b_grad, arg in args_tuple)
else: # integral dtype
exp_tuple = (1, 2, 3)
samples = list(SampleInput(
make_arg((2, 2), requires_grad=requires_grad),
args=(arg,))
for arg in exp_tuple)
samples.append(SampleInput(
make_arg((2, 2), requires_grad=requires_grad),
args=(make_arg((2, 2), requires_grad=requires_grad),)))
return tuple(samples)
def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):
return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=False)
def sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False, **kwargs):
return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=True)
def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 2, 0]
samples = []
for batch, (m, n) in product(batches, product(ns, ns)):
a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)
samples.append(SampleInput(a))
return samples
def sample_inputs_softshrink_hardshrink_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):
N = 10
tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
requires_grad=requires_grad)) for _ in range(1, N)]
return tensors
def sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):
eigvecs = make_tensor((S, S), device=device, dtype=dtype,
low=None, high=None)
eigvals = make_tensor((S,), device=device, dtype=dtype,
low=None, high=None)
    # we produce only diagonalizable inputs, and for real inputs we avoid
    # complex eigenvalues, as there is no backward implementation
    # for real inputs with complex eigenvalues yet.
input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()
input.requires_grad_(requires_grad)
def process_output(eigpair):
eigvals, eigvecs = eigpair
if dtype.is_complex:
            # eig produces eigenvectors which are normalized to unit norm.
# Note that if v is an eigenvector, so is v * e^{i \phi},
# and |v| = |v * e^{i \phi}| = 1.
# This, however, makes the eigenvector backward computation process
# rather unstable unless the objective function is gauge-invariant,
# that is if f(z) == f(|z|), for example.
# Hence for complex inputs we ignore the phases and return only
# the absolute values.
return eigvals, eigvecs.abs()
else:
return eigvals, eigvecs
return [
SampleInput(
input,
kwargs=dict(eigenvectors=True),
output_process_fn_grad=process_output
),
]
def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):
def c(t):
return t.detach().clone().requires_grad_(requires_grad)
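    # The clone-per-sample helper above presumably ensures every SampleInput owns an
    # independent leaf tensor, so gradient checks on one sample cannot interfere with
    # another even though the base tensors x, y, A, ... below are reused.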
x = make_tensor((3,), device, dtype, requires_grad=requires_grad)
y = make_tensor((4,), device, dtype, requires_grad=requires_grad)
A = make_tensor((2, 3,), device, dtype, requires_grad=requires_grad)
B = make_tensor((1, 3,), device, dtype, requires_grad=requires_grad)
C = make_tensor((1, 2, 3,), device, dtype, requires_grad=requires_grad)
D = make_tensor((1, 3, 4,), device, dtype, requires_grad=requires_grad)
E = make_tensor((4, 4,), device, dtype, requires_grad=requires_grad)
H = make_tensor((3, 3,), device, dtype, requires_grad=requires_grad)
I = make_tensor((1, 3, 1,), device, dtype, requires_grad=requires_grad)
inputs = []
# Vector operations
inputs.append(SampleInput([c(x)], args=('i->',))) # sum
inputs.append(SampleInput([c(x), c(y)], args=('i,j->ij',))) # outer
# Matrix operations
inputs.append(SampleInput([c(A)], args=("ij->i",))) # col sum
inputs.append(SampleInput([c(A), c(B)], args=("ij,kj->ik",))) # matmul
inputs.append(SampleInput([c(A), c(E)], args=("ij,Ab->ijAb",))) # matrix outer product
# Tensor operations
inputs.append(SampleInput([c(C), c(D)], args=("aij,ajk->aik",))) # batch matmul
inputs.append(SampleInput([c(D), c(E)], args=("aij,jk->aik",))) # tensor matrix contraction
inputs.append(SampleInput([c(C), c(B)], args=("ijk,ik->j",))) # non contiguous
# Test diagonals
inputs.append(SampleInput([c(I)], args=('iji->j',))) # non-contiguous trace
# Test ellipsis
inputs.append(SampleInput([c(H)], args=("i...->...",)))
inputs.append(SampleInput([c(C), c(x)], args=('...ik, ...j -> ij',)))
return inputs
def sample_inputs_linalg_qr(op_info, device, dtype, requires_grad=False, **kwargs):
"""
    This function generates input for torch.linalg.qr.
The input is generated as the itertools.product of 'batches' and 'ns'.
"""
batches = [(), (0,), (2, ), (1, 1)]
ns = [5, 2, 0]
out = []
for batch, (m, n) in product(batches, product(ns, ns)):
a = torch.randn(*batch, m, n, dtype=dtype, device=device, requires_grad=requires_grad)
out.append(SampleInput(a))
return out
def sample_inputs_geqrf(op_info, device, dtype, requires_grad=False):
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 2, 0]
samples = []
for batch, (m, n) in product(batches, product(ns, ns)):
# TODO: CUDA path doesn't work with batched or empty inputs
if torch.device(device).type == 'cuda' and (batch != () or m == 0 or n == 0):
continue
a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)
samples.append(SampleInput(a))
return samples
def sample_inputs_flip(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((S, M, S), (S, 0, M))
all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())
def gen_samples():
for size, dims in product(sizes, all_dims):
yield SampleInput(make_arg(size), kwargs={"dims": dims})
return list(gen_samples())
def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):
tensors = (
make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)
)
return [SampleInput(tensor) for tensor in tensors]
def sample_inputs_fmod_remainder(op_info, device, dtype, requires_grad, *, autodiffed=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
if autodiffed:
samples = (
((S, S, S), 1.5, False),
((), 1.5, False),
)
else:
cases = (
((S, S, S), (), False),
((S, S, S), (S, S, S), False),
((S, S, S), (S,), False),
)
# Sample inputs with scalars as torch tensors
cases_with_tensor_scalar = (
((), torch.tensor(1, dtype=dtype, device=device, requires_grad=False), False),
)
# Sample inputs with broadcasting
cases_with_broadcasting = (
((S,), (S, S, S), True),
((S, 1, S), (S, S, S), True),
((), (S, S, S), True),
)
samples = cases + cases_with_tensor_scalar + cases_with_broadcasting # type: ignore[assignment]
def generator():
for shape, arg_other, broadcasts_input in samples:
if isinstance(arg_other, tuple):
arg = make_arg(arg_other, requires_grad=False, exclude_zero=True)
else:
                # arg_other is a scalar or a torch.Tensor
arg = arg_other
yield(SampleInput(make_arg(shape), args=(arg,), broadcasts_input=broadcasts_input))
return list(generator())
# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!
def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):
x = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
lb = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
ub = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
def detach(tensor):
return tensor.clone().detach_().requires_grad_(requires_grad)
return [
SampleInput(detach(x), args=(lb, ub)),
SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),
SampleInput(detach(x), args=(detach(lb[:, :1]),)),
]
def sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad):
tensors = (
make_tensor((2, 3, 2), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((2, 0, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
if dtype is torch.uint8:
min_max_vals = ((2, 5), (3, 7))
else:
min_max_vals = ((0, 1), (-1, 1))
output = [SampleInput(
tensor.detach().clone().requires_grad_(requires_grad),
args=vals) for tensor, vals in product(tensors, min_max_vals)]
output += [
SampleInput(tensors[0].detach().clone().requires_grad_(requires_grad),
args=(0.5, None)),
SampleInput(tensors[0].detach().clone().requires_grad_(requires_grad),
args=(None, 0.5))]
    scalar_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)
    output.append(SampleInput(scalar_tensor, args=(0.0, 1.0)))
return output
def sample_kwargs_clamp_scalar(device, dtype, input):
if dtype is torch.uint8:
min_val, max_val = (random.randint(1, 3), random.randint(4, 8))
elif dtype.is_floating_point:
min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8)) # type: ignore[assignment]
else:
min_val, max_val = (random.randint(-8, 0), random.randint(1, 8))
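    # The first dict is passed to torch.clamp as kwargs; the second (a_min/a_max) is
    # presumably forwarded to the reference implementation (e.g. np.clip), which uses
    # NumPy-style argument names.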
return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val}
def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs):
sample0 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),))
sample1 = SampleInput(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),),
kwargs={'dim': 1})
sample2 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),),
kwargs={'dim': -1})
return (sample0, sample1, sample2)
def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):
def make_arg(shape):
# shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)
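    # prod_zeros below plants zeros at a few fixed positions along the selected dims;
    # this presumably exercises the special-cased backward of cumprod/prod when the
    # running product hits zero.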
def prod_zeros(dim_select):
assert len(dim_select) == 2
result = make_arg(3 * (S,))
with torch.no_grad():
result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()
result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()
result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()
return result
    # will not be needed once OpInfo tests support Iterables
def sample_generator():
for dim in range(3):
yield SampleInput(make_arg((S, S, S)), args=(dim,))
# Scalar tensors and empty tensor
for size in [(), (1,), (0,)]:
yield SampleInput(make_arg(size), args=(0,))
yield SampleInput(prod_zeros([0, 1]), args=(1,))
yield SampleInput(prod_zeros([0, 2]), args=(1,))
yield SampleInput(prod_zeros([1, 2]), args=(1,))
# test dtype kwarg
yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})
return list(sample_generator())
def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):
return [SampleInput(make_tensor((S, 2), device, dtype, requires_grad=requires_grad),)]
def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):
tensors = (
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((), device, dtype, requires_grad=requires_grad)
)
return [SampleInput(tensor) for tensor in tensors]
def sample_inputs_copysign(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor(*shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
cases = [
# no broadcast
((S, S, S), (S, S, S), False),
# broadcast rhs
((S, S, S), (S, S), False),
# scalar
((S, S), 3.14, False),
# scalar positive zero
((S, S), 0.0, False),
# scalar negative zero
((S, S), -0.0, False),
]
# broadcast lhs
cases.append(((S, S), (S, S, S), True))
# broadcast all
cases.append(((S, 1, S), (M, S), True))
def generator():
for input_shape, arg_val, broadcasts_input in cases:
if isinstance(arg_val, tuple):
arg = _make_tensor(*arg_val)
else:
# arg_val is scalar
arg = arg_val
yield SampleInput(_make_tensor(*input_shape), args=(arg, ), broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_prod(op_info, device, dtype, requires_grad):
def make_arg(shape):
# shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)
def prod_single_zero():
result = make_arg(2 * (S,))
with torch.no_grad():
result[0, 1] = 0
return result
# will not be needed once OpInfo tests support Iterables
def sample_generator():
for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):
# only Tensor, ignore other inputs
yield SampleInput(sample.input.detach().clone().requires_grad_(requires_grad))
yield sample
# Generates samples with keepdim = True
for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):
sample.kwargs['keepdim'] = True
yield sample
yield SampleInput(prod_single_zero())
yield SampleInput(make_arg((3, 3, 3)), args=(1,))
yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})
# test zero scalar tensor
zero = make_arg(())
with torch.no_grad():
zero.zero_()
yield SampleInput(zero.detach().clone().requires_grad_(requires_grad))
yield SampleInput(zero.detach().clone().requires_grad_(requires_grad), args=(0,))
yield SampleInput(zero.detach().clone().requires_grad_(requires_grad),
args=(0,),
kwargs={'keepdim': True})
return list(sample_generator())
def error_inputs_neg(op_info, device, **kwargs):
si = SampleInput(torch.tensor((False, True), device=device))
msg = ("Negation, the `\\-` operator, on a bool tensor is not supported."
" If you are trying to invert a mask, use the `\\~` or"
" `logical_not\\(\\)` operator instead.")
return (ErrorInput(si, error_type=RuntimeError, error_regex=msg),)
def sample_inputs_nextafter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (
((S, S), (S, S), False),
((S, S), (S,), False),
((S, ), (S, S), True)
)
def generator():
for shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape), args=(make_arg(other_shape),), broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):
vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))
tensors = (
make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((3, 5), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
args = ((), (2,), (-2,), (1,), (2,))
samples = []
for tensor, arg in product(tensors, args):
samples.append(SampleInput(tensor.detach().clone().requires_grad_(requires_grad), args=arg))
return samples + [vec_sample]
def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# Shapes for 2D Tensors
shapes_2d = ((M, M), (3, 5), (5, 3))
# Shapes for 3D Tensors
shapes_3d = ((M, M, M),)
args_2d = ((), (2,), (-2,), (1,))
args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))
def generator():
for shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):
yield SampleInput(make_arg(shape), args=arg)
return list(generator())
def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# Shapes for 2D Tensors
shapes_2d = ((M, M), (3, 5), (5, 3))
# Shapes for 3D Tensors
shapes_3d = ((M, M, M),)
args_2d = ((), (2,), (-2,), (1,))
args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))
def generator():
for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):
input_ = make_arg(input_shape)
            # We can programmatically figure out the right shape for src:
# It should be the same size as input.diagonal(other_args...)
if not isinstance(arg, tuple):
arg_tuple = (arg,)
else:
arg_tuple = arg
src_shape = input_.diagonal(*arg_tuple).size()
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *arg_tuple))
return list(generator())
def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=lambda x: x.to_dense()),
SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=lambda x: x.to_dense()),)
def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs):
batch_size, num_classes = shape = (2, 3)
reductions = ("mean", "sum", "none")
input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [
(shape, dict()),
((*shape, 1), dict()),
((*shape, 1, 2), dict()),
((*shape, 1, 2, 3), dict()),
*[(shape, dict(reduction=reduction)) for reduction in reductions],
*[
(
shape,
dict(
weight=make_tensor((num_classes,), device=device, dtype=dtype),
reduction=reduction,
),
)
for reduction in reductions
],
(shape, dict(ignore_index=1)),
]
sample_inputs = []
for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)):
input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad)
if probabilities_target:
            # ignore_index is not supported for probability targets
if "ignore_index" in kwargs:
continue
target = make_tensor(
input_shape,
low=0,
high=1,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
else:
target = make_tensor(
(batch_size, *input_shape[2:]),
low=0,
high=num_classes,
device=device,
dtype=torch.long,
)
if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]):
# make sure at least one item in target is not ignored
target[0] = random.sample(set(range(num_classes)) - {kwargs["ignore_index"]}, 1)[0]
sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs))
return sample_inputs
# Used for log_softmax, softmax, softmin
def sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = [
((S, ), (0, )),
((S, S), (0, )),
((S, S), (1, )),
((S, S), (-1, )),
((S, M, S), (2, )),
]
    # PyTorch on XLA throws an error when the dim argument is passed for a 0d tensor.
# See https://github.com/pytorch/xla/issues/3061 for more details.
if torch.device(device).type != 'xla':
cases.append(((), (0, )))
return [
SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None)
for shape, dim in cases
]
def sample_inputs_masked_softmax(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):
"""Sample inputs for masked softmax, log_softmax, and softmin.
    A masked normalization operator is a reduction operator with an optional
    trailing mask argument. The mask is a bool tensor with the
same shape as input or a shape that is broadcastable to input
shape.
"""
inputs: List[SampleInput] = []
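    # Each base softmax sample below is paired with every candidate mask; per the
    # docstring, for an (S, S) input the mask is an (S, S) bool tensor or any shape
    # broadcastable to it (e.g. (S, 1) -- the concrete shapes here are illustrative).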
for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs):
for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):
sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked normalize.
"""
inputs: List[SampleInput] = []
for ord in [2.0, 1, float('inf'), float('-inf'), 0]:
for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs):
sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):
low, high = op_info.domain
    # Note: The operator is very sensitive at points near the
    # start and end of its domain and produces NaN for float16
    # if domain_eps is 1e-5.
domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2
low = low + domain_eps
high = high - domain_eps
samples = (
SampleInput(make_tensor((S, S, S), device, dtype, low=low, high=high, requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), device, dtype, low=low,
high=high, requires_grad=requires_grad), args=(0.2,)),
SampleInput(make_tensor((), device, dtype, low=low, high=high, requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype, low=low,
high=high, requires_grad=requires_grad), args=(0.2,)),
)
return samples
def sample_inputs_isin(op_info, device, dtype, requires_grad):
element = make_tensor((L,), device, dtype, low=None, high=None, requires_grad=requires_grad)
indices = torch.randint(0, L, size=[S])
test_elements = element[indices].clone()
return [
SampleInput(element, args=(test_elements,))
]
def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def samples_generator():
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))
yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))
yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),
broadcasts_input=True)
samples = tuple(samples_generator())
return samples
def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def sample_generator():
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))
yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, make_arg(())),
broadcasts_input=True)
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, 10),
broadcasts_input=True)
samples = tuple(sample_generator())
return samples
def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):
samples = (
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn(M, M, device=device) > 0,)),
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M,), device=device) > 0,)),
SampleInput(make_tensor((M,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
SampleInput(make_tensor((M, 1, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.tensor(1, device=device, dtype=torch.bool),)),
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.tensor(1, device=device, dtype=torch.bool),)),
SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
)
return samples
def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):
samples = (
SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), device, dtype, requires_grad=requires_grad)),
)
return samples
def sample_inputs_matmul(op_info, device, dtype, requires_grad):
test_cases = (((L,), (L,)),
((S, M), (M,)),
((M,), (M, S)),
((S, M), (M, S)),
((S, 0), (0, M)),
((S, S, M), (M,)),
((S, S, M), (M, S)),
((S, S, 0), (0, S)),
((M,), (S, M, S)),
((S, M), (S, M, S)),
((0, 0), (S, 0, 0)),
((S, S, M, M), (S, S, M, S)),
((S, S, M, M), (M,)),
((M,), (S, S, M, S)))
sample_inputs = []
for lhs_shape, rhs_shape in test_cases:
lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
if op_info.name == 'matmul':
sample_inputs.append(SampleInput(lhs, args=(rhs,)))
elif op_info.name == '__rmatmul__':
sample_inputs.append(SampleInput(rhs, args=(lhs,)))
else:
raise RuntimeError("`op_info.name` must be 'matmul' or '__rmatmul__'")
return tuple(sample_inputs)
def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype,
requires_grad: bool,
*, variant: str) -> List[SampleInput]:
if variant == 'variadic':
def make_inputs(
tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
List[torch.Tensor]],
Tuple[torch.Tensor, ...]]:
return tensors[0], tuple(tensors[1:])
elif variant == 'list':
def make_inputs(
tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
List[torch.Tensor]],
Tuple[torch.Tensor, ...]]:
return tensors, ()
else:
raise ValueError(
'Unsupported variant, must be one of {"variadic", "list"}. '
f'Got "{variant}".')
SCALAR = torch.Size([])
VECTOR = torch.Size([3])
test_cases: List[List[torch.Size]] = [
[SCALAR],
[VECTOR],
[VECTOR, SCALAR],
[VECTOR, SCALAR, VECTOR],
[VECTOR, SCALAR, VECTOR, SCALAR],
]
sample_inputs = []
for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}):
input, args = make_inputs(
[make_tensor(shape, device, dtype, requires_grad=requires_grad)
for shape in shapes])
sample_inputs.append(SampleInput(input=input, args=args,
kwargs=dict(indexing=indexing)))
return sample_inputs
def sample_inputs_polar(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
samples = (
SampleInput(_make_tensor_helper((S, S), low=0), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper((), low=0), args=(_make_tensor_helper(()),)),
)
return samples
def sample_inputs_complex(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor_helper(shape):
return make_tensor(shape, device, dtype, requires_grad=requires_grad)
samples = (
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),
)
return samples
def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
def generator():
for shape, n in product(tensor_shapes, ns):
yield SampleInput(make_arg(shape), args=(n,))
return list(generator())
def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
# Since the accepted lower bound for input
    # to mvlgamma depends on the `p` argument,
# the following function computes the lower bound
# which we pass to `make_tensor`.
def compute_min_val(p):
return (p - 1.) / 2
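    # Worked example (from the formula above): for p = 5 the lower bound is
    # (5 - 1) / 2 = 2.0, and for integral dtypes it is rounded up to 3 below.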
def generator():
for shape, n in product(tensor_shapes, ns):
min_val = compute_min_val(n)
if not dtype.is_floating_point:
# Round-up minimum value for integral dtypes
min_val += 1
yield SampleInput(make_arg(shape, low=min_val), args=(n,))
return list(generator())
# Since `mvlgamma` has multiple entries,
# there are multiple common skips for the additional
# entries. The following function is a helper to that end.
def skips_mvlgamma(skip_redundant=False):
skips = (
# outside domain values are hard error for mvlgamma op.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'),
)
if skip_redundant:
# Redundant tests
skips = skips + ( # type: ignore[assignment]
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
)
return skips
# To test reference numerics against multiple values of argument `p`,
# we make multiple OpInfo entries with each entry corresponding to different value of p.
# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.
# Class `MvlGammaInfo` already contains the basic information related to the operator,
# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which
# differ between the entries.
class MvlGammaInfo(UnaryUfuncInfo):
def __init__(self, variant_test_name, domain, skips, sample_kwargs):
super(MvlGammaInfo, self).__init__(
'mvlgamma',
ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,
aliases=('special.multigammaln',),
variant_test_name=variant_test_name,
domain=domain,
decorators=(precisionOverride({torch.float16: 5e-2}),),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half),
sample_inputs_func=sample_inputs_mvlgamma,
safe_casts_outputs=True,
supports_forward_ad=True,
skips=skips,
sample_kwargs=sample_kwargs)
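# A hypothetical entry (names and values are illustrative only) might look like:
#   MvlGammaInfo(variant_test_name='mvlgamma_p_1',
#                domain=(1, None),
#                skips=skips_mvlgamma(),
#                sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1}))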
def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):
low, _ = op_info.domain
if requires_grad:
low = 0 + op_info._domain_eps
return (SampleInput(make_tensor((L,), device, dtype,
low=low,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
low=low,
requires_grad=requires_grad)))
def sample_inputs_zeta(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = (SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),
args=(make_arg((S,), low=2, requires_grad=False),)),
SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),
args=(3.,)),
)
return samples
# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`,
# supports `exclude` argument.
# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617
def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):
samples = (SampleInput(make_tensor((S,), device, dtype,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
requires_grad=requires_grad)))
if requires_grad and op_info.op == torch.special.i0e:
        # NOTE: `i0e`'s first-order gradient is not continuous
# at `0`, hence we don't test `i0e` with any input being `0`.
# TODO: Remove this when `make_tensor` supports excluding `0`.
with torch.no_grad():
for sample in samples:
t = sample.input
t[t == 0] = torch.finfo(dtype).eps # type: ignore[index]
elif requires_grad and op_info.op != torch.special.i0e:
# Special Case for gradient
# Sample with `0` in the input
t = make_tensor((S,), device, dtype,
requires_grad=requires_grad)
with torch.no_grad():
t[0] = 0
samples += (SampleInput(t),) # type: ignore[assignment]
return samples
def sample_inputs_rsub(op_info, device, dtype, requires_grad, variant='tensor', **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _samples_with_alpha_helper(args, alphas, filter_fn=lambda arg_alpha: True):
filtered_product = filter(filter_fn, product(args, alphas)) # type: ignore[var-annotated]
return (SampleInput(input.detach().clone().requires_grad_(requires_grad),
args=(arg,), kwargs=dict(alpha=alpha))
for (input, arg), alpha in filtered_product)
int_alpha, float_alpha, complex_alpha = 2, 0.1, 1 + 0.6j
if variant == 'tensor':
samples = (
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S,)),)),
SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper((S,)),)),
SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper(()),)),
)
if dtype.is_complex:
alphas = [int_alpha, float_alpha, complex_alpha]
elif dtype.is_floating_point:
alphas = [int_alpha, float_alpha]
else:
alphas = [int_alpha]
args = ((_make_tensor_helper((S, S)), _make_tensor_helper((S, S))),
(_make_tensor_helper((S, S)), _make_tensor_helper((S,))),
(_make_tensor_helper(()), _make_tensor_helper(())))
samples += tuple(_samples_with_alpha_helper(args, alphas)) # type: ignore[assignment]
elif variant == 'scalar':
# Scalar Other
samples = (SampleInput(_make_tensor_helper((S, S)), args=(0.5,)),
SampleInput(_make_tensor_helper(()), args=(0.5,)),
SampleInput(_make_tensor_helper((S, S)), args=(1.5j,)),
SampleInput(_make_tensor_helper(()), args=(1.5j,)),
SampleInput(_make_tensor_helper((S, S)), args=(0.4 + 1.2j,)),
SampleInput(_make_tensor_helper(()), args=(1.2 + 1.76j,)))
scalar_args = [(_make_tensor_helper((S, S)), 0.5), (_make_tensor_helper(()), 0.5),
(_make_tensor_helper((S, S)), 2.7j), (_make_tensor_helper(()), 2.7j),
(_make_tensor_helper((S, S)), 1 - 2.7j), (_make_tensor_helper(()), 1 + 2.7j)]
alphas = [int_alpha, float_alpha, complex_alpha]
def filter_fn(arg_alpha):
arg, alpha = arg_alpha
if isinstance(alpha, complex):
if dtype.is_complex or isinstance(arg[1], complex):
return True
else:
# complex alpha is valid only if either `self` or `other` is complex
return False
# Non-Complex Alpha
return True
        # Samples with alpha (scalar version) cover the following cases
# self | other | alpha
# -----------------------------------------
# real | real | real (int and float)
# real | complex | real and complex
# complex | real | real and complex
# complex | complex | real and complex
#
# It does not cover
# real | real | complex
# x = torch.randn(2, requires_grad=True, dtype=torch.float64)
# torch.rsub(x, 1, alpha=1. + 1.6j)
# RuntimeError: value cannot be converted to type double without overflow: (-1,-1.6)
samples += tuple(_samples_with_alpha_helper(scalar_args, alphas, filter_fn=filter_fn)) # type: ignore[assignment]
else:
raise Exception("Invalid variant!")
return samples
def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
samples = [
SampleInput(_make_tensor_helper((S, S, S)), args=(0,)),
SampleInput(_make_tensor_helper((S, S, S)), args=(1,)),
SampleInput(_make_tensor_helper(()), args=(0,)),
]
if supports_dtype_kwargs:
        # NOTE: if `dtype` is not the same as the input's dtype, then inplace variants fail with
# `provided dtype must match the dtype of self tensor in cumsum`
samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype}))
return samples
def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((), (0, 1, 1)),
((S, S, S, S), (0, 3, 1)),
((S, S, S, S), (1, 3, 1)),
((S, S, S, S), (2, 3, 1)),
((S, S, S, S), (3, 3, 1)),
((S, S, S, S), (0, 3, 2)),
((S, S, S, S), (1, 3, 2)),
((S, S, S, S), (2, 3, 2)),
((S, S, S, S), (3, 3, 2)),
((S, S, S, S), (0, 4, 1)),
((S, S, S, S), (1, 4, 1)),
((S, S, S, S), (2, 4, 1)),
((S, S, S, S), (3, 4, 1)),
((M,), (0, 3, 1)),
((M,), (0, 3, 2)),
((M,), (0, 3, 3)),
((1000,), (0, 3, 11)),
((1000,), (0, 2, 27)),
((10, 10), (0, 1, 2)),
((10, 10), (1, 2, 3)),
((10, 10), (1, 2, 2)),
((S, S, S), (2, 3, 2)),
)
sample_inputs = []
for shape, arguments in test_cases:
sample_inputs += [SampleInput(make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=arguments)]
return sample_inputs
def sample_inputs_atan2(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S, S), (S, S, S), False),
((), (), False),
((S, S, S), (S,), False),
((S,), (S, S, S), True),
((S, 1, S), (S, S), True),
)
def generator():
for x_shape, y_shape, broadcasts_input in cases:
yield SampleInput(make_arg(x_shape), args=(make_arg(y_shape),),
broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
if list_args:
cases = (
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),),
((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),)
)
else:
cases = ( # type: ignore[assignment]
((S, S, S), (2,)),
((S, S, S), (S, 1)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
((S, S, S), ([int(S / 3), S - int(S / 3), 0],)),
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], 2)),
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], -2)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_msort(op_info, device, dtype, requires_grad):
def apply_grad(t):
if dtype in floating_types_and(torch.float16, torch.bfloat16):
t.requires_grad_(requires_grad)
def large_1d_unique(dtype, device):
res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
res = res.to(dtype)
apply_grad(res)
return res
samples = []
# Test case for large tensor.
largesample = SampleInput(large_1d_unique(dtype, device))
sample = SampleInput(make_tensor((S, M, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad))
return [largesample, sample]
def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
samples = (
# no broadcast
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),
# broadcast rhs
SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),
# scalar tensor
SampleInput(make_arg(()), args=(make_arg(()), 0.4)),
# broadcast rhs scalar-tensor
SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),
# broadcast rhs with weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),
# broadcast rhs and weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),
# broadcast lhs
SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# scalar broadcast_lhs
SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# broadcast all
SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# tensor broadcast all
SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),
broadcasts_input=True),
# no broadcast with weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), make_arg((S, S)))),
# broadcast lhs with weight tensor
SampleInput(make_arg((S,)), args=(make_arg((S, S)), make_arg((S, S))), broadcasts_input=True),
# broadcast lhs and weight tensor
SampleInput(make_arg((S,)), args=(make_arg((S, S, S)), make_arg((S, S))), broadcasts_input=True),
# broadcast lhs and weight tensor variant
SampleInput(make_arg((S, S)), args=(make_arg((S, S, S)), make_arg((S,))), broadcasts_input=True),
)
if dtype.is_complex:
samples = samples + ( # type: ignore[assignment]
# no broadcast
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),
# broadcast rhs
SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),
# scalar tensor
SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),
SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),
# broadcast rhs scalar-tensor
SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),
)
return samples
def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):
cases = (
((2, 2, 2), (2, 2, 2), (2)),
((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])),
)
samples = []
for first_shape, second_shape, dims in cases:
samples.append(SampleInput(make_tensor(first_shape, device, dtype,
requires_grad=requires_grad),
args=(make_tensor(second_shape, device, dtype,
requires_grad=requires_grad),),
kwargs=dict(dims=dims,)))
return tuple(samples)
def sample_inputs_kron(op_info, device, dtype, requires_grad):
test_cases = (
((S, S), (M, L)),
)
sample_inputs = []
for input_shape, other_shape in test_cases:
input = make_tensor(input_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
other = make_tensor(other_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
sample = SampleInput(input, args=(other,))
sample_inputs.append(sample)
return tuple(sample_inputs)
def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, ), device, dtype, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, requires_grad=requires_grad),
)
),
SampleInput(
make_tensor((), device, dtype, requires_grad=requires_grad),
args=(
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
)
),
)
def sample_inputs_scatter(op_info, device, dtype, requires_grad):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
(_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
(_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor(()), (0, zero.clone().detach(), _tensor(()))),
(_tensor(()), (0, zero.clone().detach(), 2.5)),
)
samples = []
for tensor, args in test_cases:
samples.append(SampleInput(tensor, args=args))
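        # The 'reduce' variants are only sampled when gradients are not required,
        # presumably because scatter with a reduction did not support autograd at
        # the time this was written.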
if not requires_grad:
samples.append(SampleInput(
tensor.clone().detach(),
args=args, kwargs={'reduce': 'add'}
))
if dtype.is_floating_point:
samples.append(SampleInput(
tensor.clone().detach(),
args=args, kwargs={'reduce': 'multiply'}
))
return samples
def sample_inputs_scatter_add(op_info, device, dtype, requires_grad):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
(_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
(_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor(()), (0, zero.clone().detach(), _tensor(()))),
)
return [SampleInput(tensor, args=args) for tensor, args in test_cases]
def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):
samples = (SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
low=None, high=None,
requires_grad=requires_grad)),)
return samples
def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((M, M), ()),
((M, M), (2,),),
((S, M, M), ()),
((S, M, M), (2,)),
((3, 3, S, S), ()),)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def generator():
yield SampleInput(make_arg((S, M, S)))
yield SampleInput(make_arg(()))
return list(generator())
def sample_inputs_contiguous(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def generator():
yield SampleInput(make_arg((S, S)))
return list(generator())
def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# list of tuples (shape, shape) defining the shapes of the input and output tensors
sample_shapes = [
((), ()),
((S), (1)),
((S, S), (1, 1)),
((S, S), (1, S)),
((S, S), (S, S)),
((S, S, S), (S, 1, S)),
]
samples = []
for input_shape, output_shape in sample_shapes:
input_t = make_arg(input_shape)
samples.append(SampleInput(input_t, args=(output_shape,)))
return samples
def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (S * S, S)),
((), ()),
((), (1, 1, 1)),
)
def generator():
for shape, args_or_shape in cases:
# Update `args` based on operator
if op_info.name == 'resize_':
# resize_ takes shape/tuple of ints,
args = (args_or_shape, )
elif op_info.name == 'resize_as_':
# resize_as_ takes another tensor
args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment]
else:
raise ValueError("sample_inputs_resize_ops is being used with incorrect operator")
yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))
return list(generator())
def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (S * S, S)),
((S * S, S), (S, S, S)),
((S * S, S), (S, -1, S)),
((S * S * 2, S), (S, -1)),
((S,), (S,)),
((), ()),
((), (1,)))
def generator():
for case in cases:
shape, args = case
inp = make_arg(shape, requires_grad=requires_grad)
yield(SampleInput(inp, args=(args, )))
if op_info.name != "view" and len(shape) >= 2:
yield(SampleInput(
inp.detach().clone().transpose(0, 1).requires_grad_(requires_grad),
args=(args, )))
return list(generator())
def sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (S * S, S)),
((), ()),
((), (1, 1)),
)
def generator():
for case in cases:
shape, shape_other = case
inp = make_arg(shape, requires_grad=requires_grad)
yield(SampleInput(inp, args=(make_arg(shape_other, requires_grad=False),)))
if op_info.name != "view_as" and len(shape) >= 2:
yield(SampleInput(
inp.detach().clone().transpose(0, 1).requires_grad_(requires_grad),
args=(make_arg(shape_other, requires_grad=False),)))
return list(generator())
def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs):
input_list = []
shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),)
make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
samples = []
for shape in shapes:
input_list.append(make_tensor_partial(shape))
samples.append(SampleInput(make_tensor_partial(shape)))
samples.append(SampleInput(input_list, ))
return samples
def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (1, 2)),
((S, S, S), (-1, 2)),
((S, S, S), (-1, -1)),
((S, S, S), (1, -1)),
((S,), (0, 2))
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (S, S), (1, 2)),
((S, S, S), (S, S), (-1, 2)),
((S, S, S), (S, S), (-1, -1)),
((S, S, S), (S, S), (1, -1)),
((S,), (), (0, 2))
)
def generator():
for input_shape, src_shape, args in cases:
input_ = make_arg(input_shape)
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *args))
return list(generator())
def sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)),
((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)),
((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)),
((L, L, L), (L, L, L,), (1, 0, L, 1)),
((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)),
((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)),
((L, L, L), (L, L, L,), (2, 0, L, 1)),
((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)),
((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)),
)
def generator():
for input_shape, src_shape, args in cases:
input_ = make_arg(input_shape)
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *args))
return list(generator())
def sample_inputs_rbinops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
scalar: Union[int, float, complex] = 3
if dtype.is_floating_point:
scalar = 3.14
elif dtype.is_complex:
scalar = 3.14j
samples = [
SampleInput(_make_tensor_helper((S, S, S)), args=(scalar,)),
SampleInput(_make_tensor_helper(()), args=(scalar,)),
]
return samples
def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, 1, 1), (S, S, S)),
((S, 1, S), (S, S, S)),
((S, 1, S), (-1, S, -1)),
((S, 1, S), (-1, S, S)),
((S, 1), (S, S, S)),
((1,), (S, S, S)),
((1, S), (1, 1, S)),
((), ()),
((), (1, 3, 2)),
)
def generator():
for case in cases:
shape, args = case
yield(SampleInput(make_arg(shape), args=(args, )))
return list(generator())
def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((),
(2, 3))
memory_format_options = [None, torch.contiguous_format]
def generator():
for shape, memory_format in itertools.product(shapes, memory_format_options):
yield SampleInput(make_arg(shape),
kwargs={'memory_format': memory_format} if memory_format else {})
return list(generator())
def sample_inputs_conversion_channels_last(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
return [
# Channels last case: input must be 4d
SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})
]
def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, 1, 1), (S, S, S)),
((), ()),
((), (1, 1)),
)
def generator():
for shape, shape_other in cases:
yield(SampleInput(make_arg(shape, requires_grad=requires_grad),
args=(make_arg(shape_other, requires_grad=False), )))
return list(generator())
def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def make_bool_mask(shape):
# Make sure at least one element is nonzero,
# except for empty tensors
mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
if mask_t.numel() == 0:
return mask_t
elif mask_t.numel() == 1:
mask_t.fill_(True)
return mask_t
if mask_t.sum() == 0:
def random_index(shape):
return tuple(map(lambda max_idx: random.randint(0, max_idx), shape))
mask_t[random_index(mask_t.shape)] = True
return mask_t
return mask_t
cases = (((M, M), (M, M), (M, M), False),
((M, 1, M), (M, M), (M, M, 1), True),
((), (), (), False),
((M, 1, M), (), (M, M, 1), True),
((), (M, M), (), True),)
def generator():
for shape, mask_shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape),
args=(make_bool_mask(mask_shape), make_arg(other_shape)),
broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
inputs = []
for shape in sizes:
# construct input without any non-zero elements
zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
inputs.append(zeros)
# construct input with mixed zero and non-zero elements
mixed = make_arg(shape).requires_grad_(False)
mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
mixed[mask_t] = 0
inputs.append(mixed)
def generator():
for input_t, as_tuple in product(inputs, [False, True]):
yield(SampleInput(input_t.detach().clone().requires_grad_(requires_grad),
kwargs=dict(as_tuple=as_tuple)))
return list(generator())
def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (2,)),
((S, S, S), (S, 1)),
((S, S, S), (S, -1)))
def generator():
for case in cases:
shape, args = case
yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))
return list(generator())
def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
test_cases = [
(_tensor((S, S, S)), (2,)),
(_tensor((S, S, S)), (2, 1,)),
(_tensor((S, S, S)), (2, -1,)),
(_tensor((S, S, S)), (2, 1, True,)),
(_tensor((S, S, S)), (2, -1, True,)),
(_tensor((S,)), (2, 0,)),
(_tensor((S,)), (2, 0, True,)),
(_tensor(()), (1,)),
(_tensor(()), (1, 0,)),
(_tensor(()), (1, 0, True))
]
return [SampleInput(tensor, args=args) for tensor, args in test_cases]
def error_inputs_kthvalue(op_info, device, **kwargs):
# tests that an out= tensor overlapping the input fails
t = make_tensor(10, dtype=torch.float32, device=device)
indices = torch.empty((), device=device, dtype=torch.long)
si = SampleInput(t, args=(5,), kwargs={'out': (t, indices)})
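# `t` is used both as the input and as the values `out=` tensor, so kthvalue hits its
# memory-overlap check and raises the "unsupported operation" error asserted below.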
k_out_of_range_err = "selected number k out of range for dimension"
return (ErrorInput(si, error_type=RuntimeError, error_regex="unsupported operation"),
ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3, 0)),
error_type=RuntimeError, error_regex=k_out_of_range_err),
ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3,)),
error_type=RuntimeError, error_regex=k_out_of_range_err),
ErrorInput(SampleInput(torch.tensor(2, device=device), args=(3,)),
error_type=RuntimeError, error_regex=k_out_of_range_err),)
def sample_inputs_dropout(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_arg()),
SampleInput(make_arg(), kwargs=dict(p=0.0)),
SampleInput(make_arg(), kwargs=dict(p=1.0)),
SampleInput(make_arg(), kwargs=dict(training=False)),
]
def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape):
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def make_long_input(shape, *, low, high, noncontiguous=False):
return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high,
noncontiguous=noncontiguous)
def make_per_sample_weight(flag, idx):
# a tensor of float / double weights, or None
# to indicate all weights should be taken to be 1
if flag:
return make_input(idx.shape)
return None
def generator():
offsets = torch.tensor([0, 3], device=device, dtype=torch.long)
for generate_per_sample_weight in (True, False):
for mode in ('sum', 'mean', 'max'):
# per_sample_weights is only supported for mode='sum' (got mode='****')
if generate_per_sample_weight and mode in ('mean', 'max'):
continue
# 1-D index tensor
idx = make_long_input((S,), low=0, high=M)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': offsets, 'mode': mode,
'per_sample_weights': per_sample_weights})
idx = make_long_input((S,), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': offsets, 'mode': mode,
'per_sample_weights': per_sample_weights})
# bag with zero length
idx = make_long_input((S,), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long),
'mode': mode,
'per_sample_weights': per_sample_weights})
# 2-D index tensor
idx = make_long_input((S, S), low=0, high=M)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})
idx = make_long_input((S, S), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})
# The gradient vector at `padding_idx` is not updated.
# Negative padding_idx
idx = make_long_input((6,), low=0, high=S)
idx[0] = 4
idx[4] = 4
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((S, S)), args=(idx,),
kwargs={'padding_idx': -1, 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights},)
idx = make_long_input((3, 3), low=0, high=S)
# Positive padding_idx
idx[0, 0] = 2
idx[1, 1] = 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((S, S)), args=(idx,),
kwargs={'padding_idx': 2, 'mode': mode,
'per_sample_weights': per_sample_weights},)
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S))
offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},)
if not requires_grad:
# Following inputs return different gradient from the numerical gradient.
# This is expected and relevant tests are present in `test_nn.py`.
# Due to inplace renorming of weight, the numerical gradient doesn't match the
# analytical gradient.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'max_norm': 1., 'mode': mode,
'per_sample_weights': per_sample_weights},)
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'max_norm': 1., 'norm_type': 1.0,
'mode': mode, 'offsets': offsets,
'per_sample_weights': per_sample_weights},)
if mode != 'max':
# Scale the gradient based on the inverse frequency of a particular index.
# Note : max mode does not support sparse weights
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 1
idx[0, 1] = 1
weights = make_input((S, S))
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'scale_grad_by_freq': True, 'mode': mode,
'per_sample_weights': per_sample_weights},)
# gradcheck not implemented for sparse tensors.
# Note : max mode does not support sparse weights
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S))
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights})
idx = make_long_input((6, ), low=0, high=S)
idx[0] = 1 # freq more than 1
idx[1] = 1 # freq more than 1
idx[3] = 0 # padding_idx
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0,
'max_norm': 1., 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights})
return list(generator())
def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape):
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def make_long_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)
def generator():
# 0-D index tensor
idx = make_long_input((), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
# 1-D index tensor
idx = make_long_input((S,), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
# 2-D index tensor
idx = make_long_input((S, S), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
if not requires_grad:
# Following inputs return different gradient from the numerical gradient.
# This is expected and relevant tests are present in `test_nn.py`.
# The gradient vector at `padding_idx` is not updated.
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 2
idx[1, 1] = 2
yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},)
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 4
idx[1, 1] = 4
yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)
# Due to inplace renorming of weight, the numerical gradient doesn't match the
# analytical gradient.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},)
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},)
# Scale the gradient based on the inverse frequency of a particular index.
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 1
idx[0, 1] = 1
weights = make_input((S, S))
yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)
# gradcheck not implemented for sparse tensors.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S))
yield SampleInput(weights, args=(idx,), kwargs={'sparse': True})
idx = make_long_input((3, 3), low=0, high=S)
idx[0, 0] = 1 # freq more than 1
idx[0, 1] = 1 # freq more than 1
idx[1, 0] = 0 # padding_idx
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'scale_grad_by_freq': True,
'padding_idx': 0, 'max_norm': 1.})
return list(generator())
def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad)
shapes = ((), (S,), (L, M, S))
num_classess = (-1, 10)
return [
SampleInput(
make_input(
shape,
low=0,
high=10 if num_classes == -1 else num_classes // 2,
),
kwargs=dict(num_classes=num_classes),
)
for shape, num_classes in itertools.product(shapes, num_classess)
]
def sample_inputs_softplus(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_input()),
SampleInput(make_input(), kwargs=dict(beta=3)),
SampleInput(make_input(low=1), kwargs=dict(threshold=1)),
]
def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs):
def make_input():
return make_fullrank_matrices_with_distinct_singular_values(12, 12, device=device, dtype=dtype)
# lhs / rhs shape can have any number of dimensions as long as their product equals 12
shapes = [
((2, 2, 3), (12, 1)),
((4, 3), (6, 1, 2)),
]
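# For example, ((2, 2, 3), (12, 1)) reshapes the 12x12 full-rank matrix to (2, 2, 3, 12, 1)
# and passes ind=3, so the first three dims (product 12) form one side of the square operator
# and the remaining dims (product 12) form the other.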
samples = []
for shape_lhs, shape_rhs in shapes:
inp = make_input().reshape(*shape_lhs, *shape_rhs).detach()
inp.requires_grad_(requires_grad)
samples.append(SampleInput(inp, kwargs=dict(ind=len(shape_lhs))))
return samples
def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs):
a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
# Zero-dim tensors are not supported in NumPy, so we skip them for now.
# NumPy is used in reference check tests.
# See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix.
# a_shapes += [(0, 0, 1, 2, 3, 0)]
dimss = [None, (0, 2)]
def gen_inputs():
for a_shape, dims in itertools.product(a_shapes, dimss):
a = make_tensor(a_shape, dtype=dtype, device=device, requires_grad=requires_grad)
b = make_tensor(a_shape[:2], dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(a, args=(b,), kwargs=dict(dims=dims))
return list(gen_inputs())
def sample_inputs_mse_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shapes_and_kwargs = [
((), None),
((S,), dict(reduction="mean")),
((S,), dict(reduction="sum")),
((S,), dict(reduction="none")),
((S, S), None),
((S, S, S), None),
]
return [
SampleInput(_make_tensor(shape), args=(_make_tensor(shape),), kwargs=kwargs)
for shape, kwargs in shapes_and_kwargs
]
def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
batch_size = 2
num_channels = 3
modes = ("bilinear", "nearest")
align_cornerss = (False, True)
padding_modes = ("zeros", "border", "reflection")
sample_inputs = []
for dim in (2, 3):
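# bicubic interpolation is only supported for 4-D inputs (two spatial dims),
# so it is only added to the list of modes when dim == 2.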
modes_ = (*modes, "bicubic") if dim == 2 else modes
for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss):
sample_inputs.append(
SampleInput(
_make_tensor((batch_size, num_channels, *[S] * dim)),
args=(_make_tensor((batch_size, *[S] * dim, dim)),),
kwargs=dict(
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
)
)
)
return sample_inputs
def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_target(shape):
shape = () if len(shape) == 1 else (shape[0], )
t = torch.randint(0, 2, shape, device=device, dtype=torch.long)
# Label with -1 or 1
t = t * 2 - 1
target = t.to(dtype=dtype).detach()
return target
def gen_inputs():
shapes = ((S, S), (S,))
reductions = ('none', 'mean', 'sum')
for s, r in product(shapes, reductions):
yield SampleInput(
make_input(s),
args=(make_input(s), make_target(s)),
kwargs=dict(reduction=r, margin=random.uniform(-1, 1))
)
return list(gen_inputs())
def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs):
input_length = 50
batch = 16
num_char = 20
target_length = 30
def make_log_probs(s):
t = make_tensor(s, device=device, dtype=dtype)
log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad)
return log_probs
def gen_inputs():
reductions = ('none', 'mean', 'sum')
zero_inf = (True, False)
for r, z in product(reductions, zero_inf):
log_probs = make_log_probs((input_length, batch, num_char))
targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device)
input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device)
target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device)
yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z))
return list(gen_inputs())
def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
shape = (2, 3)
num_classes = shape[1]
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_weight = partial(make_tensor, shape=(num_classes,), device=device, dtype=dtype)
def make_target(shape, zeros=False):
s = (shape[0], *shape[2:]) if len(shape) > 1 else ()
if zeros:
return torch.zeros(s, device=device, dtype=torch.long)
else:
return make_tensor(s,
low=0,
high=shape[1] if len(shape) > 1 else shape[0],
device=device,
dtype=torch.long)
def gen_shape_kwargs():
# Batched, non-batched and 2d
shapes = (shape, (num_classes,), shape + (2, 2))
reductions = ('none', 'mean', 'sum')
for reduction, s in product(reductions, shapes):
yield make_input(s), make_target(s), dict(reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction)
t = make_target(s)
ignore = num_classes // 2
# If "mean", nll returns NaN, so it's not differentiable at those points
if t.eq(ignore).all() and reduction == "mean":
t.fill_(0)
yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction)
# Test ignoring all the targets
# If "mean", nll returns NaN, so it's not differentiable at those points
if reduction != "mean":
yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction)
def gen_inputs():
for input, target, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target,), kwargs=kwargs)
return list(gen_inputs())
def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs):
def generator():
yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad))
mask = torch.tensor([[0, 1, 0, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 1, 0],
[1, 0, 1, 1, 0],
[1, 0, 0, 1, 0]], dtype=torch.bool, device=device)
t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)
with torch.no_grad():
t[mask] = 0
yield SampleInput(t)
t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)
with torch.no_grad():
t[mask] = 0
yield SampleInput(t)
t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(t)
yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad))
yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad))
return list(generator())
def _generate_sample_shape_reduction():
shapes = ((S,), (S, S), (S, S, S))
reductions = ('none', 'mean', 'sum')
for s, r in product(shapes, reductions):
yield s, r
def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_var = partial(make_tensor, low=0, device=device, dtype=dtype, requires_grad=requires_grad)
def gen_shape(shape):
yield shape
# Broadcast
yield (*shape[:-1], 1)
yield shape[:-1]
def gen_shape_kwargs():
for s, r in _generate_sample_shape_reduction():
for t_s, v_s in product(gen_shape(s), gen_shape(s)):
yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(full=True, reduction=r)
)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(eps=random.uniform(1e-6, 1e-3), reduction=r)
)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r)
)
def gen_inputs():
for input, target, var, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target, var, ), kwargs=kwargs)
return list(gen_inputs())
def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
for s, r in _generate_sample_shape_reduction():
yield _make_tensor(s), _make_tensor(s), dict(reduction=r)
def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
def gen_inputs():
for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
d['margin'] = random.uniform(-9, 9)
yield SampleInput(input, args=(target, ), kwargs=d)
return list(gen_inputs())
def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs):
def gen_inputs():
for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
d['delta'] = random.uniform(1e-3, 9)
yield SampleInput(input, args=(target, ), kwargs=d)
return list(gen_inputs())
def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def gen_shape_kwargs():
for s, r in _generate_sample_shape_reduction():
for li in (True, False):
for f in (True, False):
yield (
_make_tensor(s), _make_tensor(s),
dict(log_input=li, full=f, reduction=r)
)
yield (
_make_tensor(s), _make_tensor(s),
dict(log_input=li, full=f,
eps=random.uniform(1e-8, 1e-3),
reduction=r)
)
def gen_inputs():
for input, target, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target, ), kwargs=kwargs)
return list(gen_inputs())
def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs):
make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shape = (3,)
batched_shape = (2, *shape)
shapes_and_kwargs = [
(shape, None),
(batched_shape, None),
(shape, dict(keepdim=True)),
(batched_shape, dict(keepdim=True)),
(shape, dict(p=5.0)),
(shape, dict(p=-1.0)),
(shape, dict(eps=1.0)),
]
return [
SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs
]
def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs):
return [
SampleInput(
make_tensor((1, 9, 2, 2), device=device, dtype=dtype, requires_grad=requires_grad),
kwargs=dict(upscale_factor=upscale_factor),
)
for upscale_factor in (1, 3)
]
def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs):
return [
SampleInput(
make_tensor((1, 1, 6, 6), device=device, dtype=dtype, requires_grad=requires_grad),
kwargs=dict(downscale_factor=downscale_factor),
)
for downscale_factor in (1, 3)
]
def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs):
samples = []
sample_shapes = [(), (S,), (S, S, S)]
atols = [1e-2, 1e-16]
rtols = [1e-1, 0.5]
eps = 1e-8
for s, rtol, atol in product(sample_shapes, rtols, atols):
# close sample
t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
close = (t + atol).detach().requires_grad_(requires_grad)
close_sample = SampleInput(t, args=(close,), kwargs=dict(rtol=rtol, atol=atol))
samples.append(close_sample)
# random sample
a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
r_sample = SampleInput(a, args=(b,), kwargs=dict(rtol=rtol, atol=atol))
samples.append(r_sample)
return samples
foreach_unary_op_db: List[OpInfo] = [
ForeachFuncInfo('exp'),
ForeachFuncInfo('acos'),
ForeachFuncInfo('asin'),
ForeachFuncInfo('atan'),
ForeachFuncInfo('cos'),
ForeachFuncInfo('cosh'),
ForeachFuncInfo('log'),
ForeachFuncInfo('log10'),
ForeachFuncInfo('log2'),
ForeachFuncInfo('tan'),
ForeachFuncInfo('tanh'),
ForeachFuncInfo('sin'),
ForeachFuncInfo('sinh'),
ForeachFuncInfo(
'neg',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex(),
sample_inputs_func=sample_inputs_foreach,
safe_casts_outputs=False,
),
ForeachFuncInfo(
'sqrt',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
),
ForeachFuncInfo(
'ceil',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'erf',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'erfc',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'expm1',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'floor',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'log1p',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'round',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'frac',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'reciprocal',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'sigmoid',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'trunc',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'abs',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
safe_casts_outputs=False,
supports_forward_ad=True,
),
]
foreach_binary_op_db: List[OpInfo] = [
ForeachFuncInfo(
"add",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
),
ForeachFuncInfo(
"sub",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
),
ForeachFuncInfo(
"mul",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
),
ForeachFuncInfo(
"div",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
),
]
foreach_pointwise_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"addcmul",
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
"addcdiv",
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
),
]
foreach_minmax_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"maximum",
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
),
ForeachFuncInfo(
"minimum",
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
),
]
foreach_reduce_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"norm",
dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
),
]
def reference_sign(x):
if x.dtype == np.bool_:
# `np.sign` doesn't support `bool`.
# >>> np.sign(True)
# ufunc 'sign' did not contain a loop
# with signature matching types dtype('bool') -> dtype('bool')
return np.sign(x, dtype=np.uint8).astype(np.bool_)
return np.sign(x)
def reference_sgn(x):
# NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex.
# For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j,
# while `torch.sgn` returns 0 if abs(input) == 0 else input / abs(input).
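# For example, np.sign(3 + 4j) gives (1 + 0j) because the real part is positive, whereas
# torch.sgn(3 + 4j) gives (0.6 + 0.8j) == (3 + 4j) / 5.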
if x.dtype not in [np.complex64, np.complex128]:
return reference_sign(x)
out = (x / np.abs(x))
if out.ndim == 0:
# Handle x == 0 case
if (x == 0):
# Can't assign to np.complex object
# So make a new one.
return np.array(complex(0, 0), dtype=x.dtype)
return out
# Handle x == 0 case
mask = (x == 0)
out[mask] = complex(0, 0)
return out
def reference_sigmoid(x):
# 'scipy.special.expit' not supported for the input types
if x.dtype in [np.complex64, np.complex128]:
return (1 / (1 + np.exp(-x)))
return scipy.special.expit(x)
def reference_logsigmoid(x):
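# Numerically stable log(sigmoid(x)): both branches equal -log(1 + exp(-x)), but the branch
# selected per element keeps the exponent non-positive, so exp() never overflows in the value
# that is actually used.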
return np.where(
x < 0,
x - np.log1p(np.exp(x)),
-np.log1p(np.exp(-x)))
def reference_hardsigmoid(x):
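# hardsigmoid(x) = clamp(x / 6 + 1 / 2, 0, 1); clip handles the lower bound and the where()
# below handles the upper bound while keeping the input dtype.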
intermediate = x / 6 + 0.5
y = np.clip(intermediate, 0, None)
return np.where(y > 1, 1, y).astype(x.dtype)
def reference_lgamma(x):
# scipy.special.gammaln returns `-inf` when the input is `-inf`,
# while PyTorch, C, and C++ all return `inf` for that input.
# Reference:
# https://en.cppreference.com/w/cpp/numeric/math/lgamma
# https://en.cppreference.com/w/c/numeric/math/lgamma
# To handle the above discrepancy,
# we replace -inf with inf so values
# that were originally -inf map to inf as expected
if x.dtype.kind == 'f':
x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)
out = scipy.special.gammaln(x)
if x.dtype == np.float16:
# `scipy.special.gammaln` returns float32 output when the input is float16,
# while `torch.lgamma` preserves `float16`. Due to the smaller range of float16,
# the PyTorch version outputs `inf` where SciPy still returns finite values.
out = out.astype(np.float16)
return out
def reference_polygamma(x, n):
# WEIRD `scipy.special.polygamma` behavior
# >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype
# dtype('float64')
# >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype
# dtype('float32')
#
# Thus we cast output to the default torch dtype.
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
return scipy.special.polygamma(n, x).astype(np_dtype)
def reference_mvlgamma(x, d):
if x.dtype == np.float16:
return scipy.special.multigammaln(x, d).astype(np.float16)
return scipy.special.multigammaln(x, d)
def reference_softplus(input, beta=1, threshold=20):
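# Mirrors torch.nn.functional.softplus: log(1 + exp(beta * x)) / beta is applied only where
# beta * x <= threshold; elsewhere the input is returned unchanged (the linear regime).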
non_linear = input * beta <= threshold
output = input.copy()
output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta
return output
def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray:
if num_classes == -1:
num_classes = int(np.amax(a) + 1)
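# Row-major flat-index trick: element (i, a[i]) of the (a.size, num_classes) matrix sits at
# flat index i * num_classes + a[i], which is exactly what np.put fills with 1 below.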
idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes
one_hot = np.zeros((a.size, num_classes), dtype=a.dtype)
np.put(one_hot, idcs, 1)
return one_hot.reshape(*a.shape, -1)
def reference_mse_loss(input, target, reduction="mean"):
se = (input - target) ** 2
if reduction == "mean":
return np.mean(se)
elif reduction == "sum":
return np.sum(se)
else: # reduction == "none"
return se
def wrapper_set_seed(op, input, *args, **kwargs):
"""Wrapper to set seed manually for some functions like dropout
See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details.
"""
torch.manual_seed(42)
return op(input, *args, **kwargs)
def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5):
feature_size = np.prod(normalized_shape)
inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload]
mean = inp_view.mean(axis=-1, keepdims=True)
var = inp_view.var(axis=-1, ddof=0, keepdims=True)
Y = (inp_view - mean) / np.sqrt(var + eps)
if weight is None and bias is not None:
Y = Y + bias.reshape(-1)
elif weight is not None and bias is None:
Y = Y * weight.reshape(-1)
elif weight is not None and bias is not None:
Y = Y * weight.reshape(-1) + bias.reshape(-1)
return Y.reshape(*inp.shape)
def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5):
inp_view = inp
if np.prod(inp.shape) != 0:
inp_view = inp.reshape((inp.shape[0], num_groups, -1))
mean = inp_view.mean(axis=-1, keepdims=True)
var = inp_view.var(axis=-1, ddof=0, keepdims=True)
Y = (inp_view - mean) / np.sqrt(var + eps)
Y = Y.reshape(inp.shape)
if weight is not None:
# weight is a vector of length equal to the channel
if len(Y.shape) > 2:
weight = np.tile(np.expand_dims(weight, 1), [1] + list(inp.shape[2:]))
Y = Y * weight
if bias is not None:
# bias is a vector of length equal to the channel
if len(Y.shape) > 2:
bias = np.tile(np.expand_dims(bias, 1), [1] + list(inp.shape[2:]))
Y = Y + bias
return Y
# Using a custom reference function since NumPy only has a string `side` arg (instead of separate
# `right` and `side` args) and doesn't have an `out_int32` arg. Additionally, NumPy doesn't support
# searchsorted with ND arrays, so this splits those into stacked 1-D cases.
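# For example, with sorted_sequence of shape (2, 3, 5) and boundary of shape (2, 3, 4), the ND
# branch below performs 2 * 3 = 6 independent 1-D searches and stacks the results back into (2, 3, 4).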
def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None):
side = 'right' if (right or side == 'right') else 'left'
if len(sorted_sequence.shape) == 1 :
ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter)
return ret.astype(np.int32) if out_int32 else ret
elif sorted_sequence.shape[0] == 0:
if sorter is not None:
sorter = sorter.flatten()
ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter)
ret = ret.astype(np.int32) if out_int32 else ret
return ret.reshape(boundary.shape)
else:
# numpy searchsorted only supports 1D inputs so we split up ND inputs
orig_shape = boundary.shape
num_splits = np.prod(sorted_sequence.shape[:-1])
splits = range(0, num_splits)
sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)
if sorter is not None:
sorter = sorter.reshape(num_splits, -1)
split_sequence = [sorted_sequence[i] for i in splits]
split_boundary = [boundary[i] for i in splits]
split_sorter = [sorter[i] if (sorter is not None) else None for i in splits]
split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort)
for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)]
split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret
return np.stack(split_ret).reshape(orig_shape)
def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
"""Gradcheck wrapper for functions that take Hermitian matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the Hermitian property of the input.
"""
return op(input + input.mH, *args, **kwargs)
def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):
"""Gradcheck wrpper for functions that take lower or upper triangular matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the triangular property of the input.
`idx` is used to specify which `args[idx]` is to be triangularized.
"""
triangular_arg = args[idx].triu() if upper else args[idx].tril()
return op(*args[:idx], triangular_arg, *args[idx + 1:], upper, **kwargs)
def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked operations.
When mask is specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
mask = kwargs.get('mask')
if mask is not None:
output_mask = torch._masked._output_mask(op, input, *args, **kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output
def reference_reduction_numpy(f, supports_keepdims=True):
"""Wraps a NumPy reduction operator.
The wrapper function will forward dim, keepdim, mask, and identity
kwargs to the wrapped function as the NumPy equivalent axis,
keepdims, where, and initial kwargs, respectively.
Args:
f: NumPy reduction operator to wrap
supports_keepdims (bool, optional): Whether the NumPy operator accepts
keepdims parameter. If it does not, the wrapper will manually unsqueeze
the reduced dimensions if it was called with keepdim=True. Defaults to True.
Returns:
Wrapped function
"""
@wraps(f)
def wrapper(x: np.ndarray, *args, **kwargs):
# Copy keys into a set
keys = set(kwargs.keys())
dim = kwargs.pop('dim', None)
keepdim = kwargs.pop('keepdim', False)
if 'dim' in keys:
dim = tuple(dim) if isinstance(dim, Sequence) else dim
# NumPy reductions don't accept dim=0 for scalar inputs
# so we convert it to None if and only if dim is equivalent
if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}:
kwargs['axis'] = None
else:
kwargs['axis'] = dim
if 'keepdim' in keys and supports_keepdims:
kwargs['keepdims'] = keepdim
if 'mask' in keys:
mask = kwargs.pop('mask')
if mask is not None:
kwargs['where'] = mask.cpu().numpy()
if 'identity' in keys:
identity = kwargs.pop('identity')
if identity is not None:
if identity.dtype is torch.bfloat16:
identity = identity.cpu().to(torch.float32)
else:
identity = identity.cpu()
kwargs['initial'] = identity.numpy()
if 'unbiased' in keys:
unbiased = kwargs.pop('unbiased')
if unbiased is not None:
kwargs['ddof'] = int(unbiased)
result = f(x, *args, **kwargs)
# Unsqueeze reduced dimensions if NumPy does not support keepdims
if keepdim and not supports_keepdims and x.ndim > 0:
dim = list(range(x.ndim)) if dim is None else dim
result = np.expand_dims(result, dim)
return result
return wrapper
def reference_std_var(f):
"""Forwards unbiased/correction kwargs as NumPy's equivalent ddof"""
g = reference_reduction_numpy(f)
@wraps(g)
def wrapper(x: np.ndarray, *args, **kwargs):
assert not ('unbiased' in kwargs and 'correction' in kwargs)
if 'unbiased' in kwargs:
kwargs['ddof'] = int(kwargs.pop('unbiased'))
elif 'correction' in kwargs:
kwargs['ddof'] = kwargs.pop('correction')
return g(x, *args, **kwargs)
return wrapper
def generate_std_var_kwargs(t: torch.Tensor, **kwargs):
"""Generates unbiased/correction kwargs for std/var operators"""
yield ((), {'unbiased': True})
yield ((), {'unbiased': False})
# Currently, calling std with correction is only enabled when
# both dim and keepdim are provided.
if 'dim' in kwargs and 'keepdim' in kwargs:
yield ((), {'correction': 0})
yield ((), {'correction': 1})
numel = torch.tensor(t.shape)[kwargs.get('dim')].prod()
yield ((), {'correction': numel // 2})
def ref_pairwise_distance(input1, input2):
pass
# Operator database (sorted alphabetically)
op_db: List[OpInfo] = [
UnaryUfuncInfo('abs',
aliases=('absolute', ),
ref=np.abs,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat]),
# Reference: https://github.com/pytorch/pytorch/issues/49224
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
# TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
# We can break the logic of the loop over all possible types but it is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes',
dtypes=[torch.cfloat, torch.cdouble]),
# The complex formula might be wrong
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=complex_types()),
),
supports_inplace_autograd=False,
assert_autodiffed=True,
supports_sparse_csr=True,
supports_forward_ad=True),
# NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
UnaryUfuncInfo('acos',
aliases=('arccos', ),
ref=np.arccos,
domain=(-1, 1),
handles_complex_extremals=False,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "rsqrt_cpu" not implemented for 'BFloat16'
backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-1,
torch.complex64: 1e-2}),),
safe_casts_outputs=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
aliases=('arccosh', ),
ref=np.arccosh,
domain=(1, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "rsqrt_cuda" not implemented for 'BFloat16'
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Reference: https://github.com/pytorch/pytorch/issues/50692
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
),
# acosh is not defined at x < 1 (real) or |z| < 1 (complex)
reference_numerics_filter=NumericsFilter(
condition=lambda x: (torch.abs(x) < 1 if x.is_complex() else x < 1),
safe_val=2)),
BinaryUfuncInfo('add',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \
else np.add(input, np.multiply(alpha, other)),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_add_sub, alpha=2),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values',
dtypes=(torch.complex64, torch.complex128)),
)),
BinaryUfuncInfo('mul',
aliases=('multiply',),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_binary_pwise, python_scalars=True)),
BinaryUfuncInfo('sub',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)),
aliases=('subtract',),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_add_sub, alpha=2, python_scalars=True),
supports_inplace_autograd=False,
decorators=(
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
),
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.uint8,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
)),
OpInfo('addmm',
# This addmm OpInfo is for when alpha and beta are not both equal to 1.
# alpha=beta=1 is tested in the following opinfo, because that special case will
# trigger addmm being decomposed by a jit pass.
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_addmm),
OpInfo('addmm',
# When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.
variant_test_name='decomposed',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),
OpInfo('addmv',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_addmv),
OpInfo('addbmm',
ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M),
np.multiply(np.asarray(alpha, dtype=batch1.dtype),
np.sum(np.matmul(batch1, batch2), axis=0))),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
backward_dtypesIfROCM=floating_types_and(torch.half),
supports_forward_ad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05),
torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_reference_testing')],
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# addbmm does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# https://github.com/pytorch/pytorch/issues/55907
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addbmm),
OpInfo('baddbmm',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if SM53OrLater else [],
torch.complex64, torch.complex128),
supports_forward_ad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view', device_type='cuda')],
sample_inputs_func=sample_inputs_baddbmm),
OpInfo('dot',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
),
OpInfo('vdot',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
),
OpInfo('bmm',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
),
sample_inputs_func=sample_inputs_bmm),
OpInfo('mv',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_mv),
OpInfo('addr',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
# Reference: https://github.com/pytorch/pytorch/issues/50747
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/50747
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
),
sample_inputs_func=sample_inputs_addr,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('addcmul',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_inplace_autograd=False,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
OpInfo('addcdiv',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
DecorateInfo(unittest.skip("Skipped!"),
'TestCommon',
'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
ref=np.arcsin,
domain=(-1, 1),
supports_sparse=True,
supports_sparse_csr=True,
supports_forward_ad=True,
safe_casts_outputs=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),
'TestUnaryUfuncs', device_type='cuda'),
precisionOverride({torch.bfloat16: 1e-2}),
],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
aliases=('arcsinh', ),
ref=np.arcsinh,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('atan',
aliases=('arctan', ),
ref=np.arctan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('atan2',
aliases=('arctan2',),
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_atan2,
),
UnaryUfuncInfo('atanh',
aliases=('arctanh', ),
ref=np.arctanh,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cfloat],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('allclose',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
ref=np.allclose,
supports_autograd=False,
supports_forward_ad=False,
sample_inputs_func=sample_inputs_allclose,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('broadcast_to',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_broadcast_to),
OpInfo('broadcast_tensors',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_broadcast_tensors),
OpInfo('block_diag',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_block_diag),
OpInfo('bitwise_and',
dtypes=integral_types_and(torch.bool),
supports_autograd=False,
sample_inputs_func=sample_inputs_binary_pwise),
UnaryUfuncInfo('bitwise_not',
ref=np.bitwise_not,
dtypes=integral_types_and(torch.bool),
supports_autograd=False),
OpInfo('bitwise_left_shift',
op=torch.bitwise_left_shift,
dtypes=all_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_bitwise_shift),
OpInfo('bitwise_right_shift',
op=torch.bitwise_right_shift,
dtypes=all_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_bitwise_shift),
OpInfo('combinations',
op=torch.combinations,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_autograd=False,
supports_out=False,
sample_inputs_func=sample_inputs_combinations),
OpInfo('cartesian_prod',
op=torch.cartesian_prod,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_autograd=False,
supports_out=False,
sample_inputs_func=sample_inputs_cartesian_prod,
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.skip("Skipped!"),
'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)),
OpInfo('cdist',
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_cdist,
skips=(
# RuntimeError: _cdist_backward requires X1 to be contiguous
DecorateInfo(unittest.skip("_cdist_backward requires X1 to be contiguous"),
'TestCommon', 'test_noncontiguous_samples'),
)
),
UnaryUfuncInfo('ceil',
ref=np.ceil,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True),
OpInfo('cholesky',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
OpInfo('cholesky_inverse',
dtypes=floating_and_complex_types(),
backward_dtypes=floating_types(),
# TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs
# with complex dtype.
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky_inverse,
gradcheck_wrapper=gradcheck_wrapper_triangular_input,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# TODO: FIXME: cholesky_inverse throws an error in forward when requires_grad=True
# for complex tensors
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# cholesky_inverse does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),)),
OpInfo('cholesky_solve',
op=torch.cholesky_solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_cholesky_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# cholesky_solve does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),),),
OpInfo('chunk',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_chunk,
supports_forward_ad=True,
supports_out=False),
OpInfo('clone',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_clone,
supports_forward_ad=True,
supports_out=False),
OpInfo('contiguous',
op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_contiguous,
supports_forward_ad=True,
autodiff_fusible_nodes=['aten::contiguous'],
assert_jit_shape_analysis=True,
supports_out=False),
OpInfo('sum_to_size',
op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs),
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_sum_to_size,
supports_forward_ad=True,
supports_out=False,
skips=(
# RuntimeError: inputSet && outputSet
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),),),
OpInfo('symeig',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_symeig,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
    # NOTE: clamp has separate OpInfos for scalar min/max (unary op) vs. tensor min/max
OpInfo('clamp',
aliases=('clip',),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_clamp),
UnaryUfuncInfo('clamp',
variant_test_name='scalar',
aliases=('clip', ),
decorators=(precisionOverride({torch.bfloat16: 7e-2, torch.float16: 1e-2}),),
ref=np.clip,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/54841
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
),
sample_kwargs=sample_kwargs_clamp_scalar,
sample_inputs_func=sample_inputs_clamp_scalar),
UnaryUfuncInfo('positive',
ref=np.positive,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
),
UnaryUfuncInfo('conj',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool,
torch.bfloat16, torch.half),
supports_sparse=True,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('conj_physical',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool,
torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
# RuntimeError: inputSet && outputSet
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),
DecorateInfo(unittest.skip("Skipped! conj_physical_ not implemented for sparse"),
'TestSparseUnaryUfuncs', 'test_inplace'),
)),
OpInfo('resolve_conj',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_out=False,
),
OpInfo('resolve_neg',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_out=False,
),
OpInfo('view_as_real',
dtypes=complex_types(),
supports_forward_ad=True,
supports_out=False,
sample_inputs_func=sample_inputs_view_as_real,
test_conjugated_samples=False,
),
OpInfo('view_as_complex',
dtypes=floating_types_and(torch.half),
supports_out=False,
supports_forward_ad=True,
test_neg_view=False,
sample_inputs_func=sample_inputs_view_as_complex,
skips=(
# RuntimeError: Tensor must have a last dimension with stride 1
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"),
)),
OpInfo('complex',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_complex,
supports_forward_ad=True,
),
OpInfo('copysign',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_copysign,
supports_inplace_autograd=False,
supports_forward_ad=True,
),
OpInfo('corrcoef',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_corrcoef,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('cos',
ref=np.cos,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
OpInfo('cov',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_cov,
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0):
# return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950
# ~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('cross',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
sample_inputs_func=sample_inputs_cross,
supports_forward_ad=True),
OpInfo('linalg.cross',
ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim),
op=torch.linalg.cross,
dtypes=all_types_and_complex(),
dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
aten_name='linalg_cross',
sample_inputs_func=sample_inputs_cross,
supports_forward_ad=True),
OpInfo('cumsum',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
               # cumsum does not correctly handle out= dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_cumulative_ops),
OpInfo('cumprod',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
skips=(
               # cumprod does not correctly handle out= dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
# gradgradcheck fails in fast_mode=True: #56275
sample_inputs_func=sample_inputs_cumprod,
gradcheck_fast_mode=False),
OpInfo('cummax',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('cummin',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
UnaryUfuncInfo('deg2rad',
ref=np.radians,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
OpInfo('diff',
op=torch.diff,
           # np.diff has np._NoValue as the default value for prepend and append; compare_with_reference
           # breaks if prepend/append are passed as None when converting to numpy
ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: (
np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append)
),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diff),
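    # NOTE: the three 'div' entries below exercise rounding_mode=None, 'trunc' and 'floor';
    # rhs_make_tensor_kwargs=dict(exclude_zero=True) keeps zeros out of the divisor samples.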
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='no_rounding_mode',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, python_scalars=True),
supports_forward_ad=True,
promotes_int_to_float=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='trunc_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode="trunc", python_scalars=True),
supports_forward_ad=True,
promotes_int_to_float=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='floor_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode="floor", python_scalars=True),
supports_forward_ad=True,
promotes_int_to_float=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('true_divide',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
promotes_int_to_float=True,
sample_inputs_func=sample_inputs_binary_pwise,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
UnaryUfuncInfo('exp',
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
),
assert_autodiffed=True,
supports_forward_ad=True,
safe_casts_outputs=True),
OpInfo('expand',
op=lambda self, shape: self.expand(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_expand,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
supports_out=False),
OpInfo('expand_as',
op=lambda self, other: self.expand_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_expand_as,
supports_out=False),
OpInfo('diag',
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diag),
OpInfo('diag_embed',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('diagonal',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('diagonal_scatter',
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diagonal_scatter),
OpInfo('eq',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('fmax',
op=torch.fmax,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('fmin',
op=torch.fmin,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('fmod',
ref=np.fmod,
dtypes=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_fmod_remainder),
OpInfo('fmod',
ref=np.fmod,
variant_test_name='autodiffed',
dtypes=all_types_and(torch.float16, torch.bool),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True)),
OpInfo('remainder',
ref=np.remainder,
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_fmod_remainder),
OpInfo('remainder',
ref=np.remainder,
variant_test_name='autodiffed',
dtypes=all_types_and(torch.float16, torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True),
decorators=(
# Fails on XLA
# False is not true : Tensors failed to compare as equal!
# Attempted to compare equality of tensors with different dtypes
DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
)),
UnaryUfuncInfo('frac',
ref=lambda x: np.modf(x)[0],
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
# Reference for disabling extremals
# https://github.com/pytorch/pytorch/issues/51948
handles_extremals=False),
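    # NOTE: the SpectralFuncInfo entries below compare torch.fft.* against NumPy references where
    # available; the Hermitian 2D/ND variants fall back to scipy.fft, guarded by has_scipy_fft.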
SpectralFuncInfo('fft.fft',
aten_name='fft_fft',
ref=np.fft.fft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
),
SpectralFuncInfo('fft.fft2',
aten_name='fft_fft2',
ref=np.fft.fft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})],
),
SpectralFuncInfo('fft.fftn',
aten_name='fft_fftn',
ref=np.fft.fftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})],
),
SpectralFuncInfo('fft.hfft',
aten_name='fft_hfft',
ref=np.fft.hfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.hfft2',
aten_name='fft_hfft2',
ref=scipy.fft.hfft2 if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.hfftn',
aten_name='fft_hfftn',
ref=scipy.fft.hfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.rfft',
aten_name='fft_rfft',
ref=np.fft.rfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False),
SpectralFuncInfo('fft.rfft2',
aten_name='fft_rfft2',
ref=np.fft.rfft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[precisionOverride({torch.float: 1e-4})],),
SpectralFuncInfo('fft.rfftn',
aten_name='fft_rfftn',
ref=np.fft.rfftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[precisionOverride({torch.float: 1e-4})],),
SpectralFuncInfo('fft.ifft',
aten_name='fft_ifft',
ref=np.fft.ifft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types()),
SpectralFuncInfo('fft.ifft2',
aten_name='fft_ifft2',
ref=np.fft.ifft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ifftn',
aten_name='fft_ifftn',
ref=np.fft.ifftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ihfft',
aten_name='fft_ihfft',
ref=np.fft.ihfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
check_batched_grad=False),
SpectralFuncInfo('fft.ihfft2',
aten_name='fft_ihfft2',
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ihfftn',
aten_name='fft_ihfftn',
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.irfft',
aten_name='fft_irfft',
ref=np.fft.irfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.irfft2',
aten_name='fft_irfft2',
ref=np.fft.irfft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.irfftn',
aten_name='fft_irfftn',
ref=np.fft.irfftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
OpInfo('fft.fftshift',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=lambda *a, **kw: list(sample_inputs_fftshift(*a, **kw)),
supports_out=False,
supports_forward_ad=True,
),
OpInfo('fft.ifftshift',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=lambda *a, **kw: list(sample_inputs_fftshift(*a, **kw)),
supports_out=False,
supports_forward_ad=True,
),
OpInfo('stft',
decorators=[
skipCPUIfNoFFT,
DecorateInfo(unittest.skip("Skipped! stft does not match the native function"),
'TestJit', 'test_variant_consistency_jit'),
],
dtypes=floating_and_complex_types(),
sample_inputs_func=lambda *a, **kw: list(sample_inputs_stft(*a, **kw)),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
),
OpInfo('istft',
decorators=[
skipCPUIfNoFFT,
DecorateInfo(unittest.skip("Skipped! istft does not match the native function"),
'TestJit', 'test_variant_consistency_jit'),
# gradcheck fails on ROCm (gh-68429)
DecorateInfo(skipCUDAIfRocm, 'TestGradients', 'test_fn_grad'),
],
dtypes=floating_and_complex_types(),
sample_inputs_func=lambda *a, **kw: list(sample_inputs_istft(*a, **kw)),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
),
UnaryUfuncInfo('floor',
ref=np.floor,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True),
OpInfo('flip',
op=torch.flip,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_flip,
supports_forward_ad=True,
supports_out=False),
OpInfo('fliplr',
op=torch.fliplr,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
supports_forward_ad=True,
supports_out=False),
OpInfo('flipud',
op=torch.flipud,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
supports_forward_ad=True,
supports_out=False),
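    # NOTE: the Bessel-function entries below (i0, special.i0e, special.i1, special.i1e) use
    # scipy.special references, so their ref falls back to _NOTHING when TEST_SCIPY is False.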
UnaryUfuncInfo('i0',
ref=np_unary_ufunc_integer_promotion_wrapper(
scipy.special.i0) if TEST_SCIPY else _NOTHING,
aliases=('special.i0',),
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 5e-1}),),
backward_dtypesIfCPU=floating_types(),
backward_dtypesIfCUDA=floating_types(),
backward_dtypesIfROCM=floating_types(),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_i0_i1),
UnaryUfuncInfo('special.i0e',
aten_name='special_i0e',
ref=scipy.special.i0e if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 3e-1}),),
backward_dtypesIfCPU=floating_types(),
backward_dtypesIfCUDA=floating_types(),
backward_dtypesIfROCM=floating_types(),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.i1',
aten_name='special_i1',
ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True,
decorators=(
DecorateInfo(toleranceOverride({
torch.float32: tol(atol=1e-4, rtol=0),
torch.bool: tol(atol=1e-4, rtol=0)})),
),
skips=(
# TODO: FIXME: jiterator does not support casting to complex outs
DecorateInfo(unittest.skip("FIXME: Jiterator does not support complex outs!"),
"TestUnaryUfuncs",
"test_out_arg_all_dtypes",
device_type='cuda'),
)),
UnaryUfuncInfo('special.i1e',
aten_name='special_i1e',
ref=scipy.special.i1e if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.ndtr',
aten_name='special_ndtr',
decorators=(precisionOverride({torch.bfloat16: 5e-3,
torch.float16: 5e-4}),),
ref=scipy.special.ndtr if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16),
safe_casts_outputs=True),
BinaryUfuncInfo('floor_divide',
dtypes=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
),
UnaryUfuncInfo('frexp',
op=torch.frexp,
ref=np.frexp,
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
                   # skip testing torch.frexp as it is not yet supported on the ROCm platform
decorators=[],
supports_out=False,
supports_forward_ad=True,
skips=(
                       # skips the tests below because torch.frexp returns a tuple-like (mantissa, exponent) output,
                       # while these tests currently require the output to be a single tensor.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'),
                       # skips the test_reference_numerics tests due to an error in Windows CI:
                       # np.frexp returns the exponent as np.intc dtype on Windows,
                       # and np.intc has no corresponding torch dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=IS_WINDOWS),
)),
OpInfo('ge',
aliases=('greater_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('geqrf',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_geqrf,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
OpInfo('gt',
aliases=('greater',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
UnaryUfuncInfo('imag',
ref=np.imag,
dtypes=complex_types(),
supports_out=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
# RuntimeError: view_as_real doesn't work on unresolved conjugated tensors.
check_batched_forward_grad=False,
skips=(
# Skip since real and imag don't have out variants.
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('gradient',
dtypes=floating_and_complex_types_and(torch.int8, torch.int16,
torch.int32, torch.int64,
torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=(
               # the following tests give a runtime error with an undefined value tensor
               # see discussion: https://github.com/pytorch/pytorch/issues/56660
# RuntimeError:
# Arguments for call are not valid.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_gradient),
OpInfo('inverse',
op=torch.inverse,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('isin',
dtypes=all_types(),
dtypesIfCUDA=all_types_and(torch.half),
supports_autograd=False,
sample_inputs_func=sample_inputs_isin,
skips=(
# https://github.com/pytorch/pytorch/issues/67432
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), # noqa: B950
)),
OpInfo('kthvalue',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_kthvalue,
error_inputs_func=error_inputs_kthvalue),
OpInfo('le',
aliases=('less_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('linalg.det',
op=torch.linalg.det,
aliases=('det', ),
dtypes=floating_and_complex_types(),
backward_dtypes=floating_and_complex_types(),
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],
supports_inplace_autograd=False,
skips=(
# https://github.com/pytorch/pytorch/issues/67512
DecorateInfo(unittest.skip("67512"), 'TestCommon', 'test_noncontiguous_samples'),
)),
OpInfo('linalg.det',
op=torch.linalg.det,
variant_test_name='singular',
aliases=('det', ),
dtypes=double_types(),
backward_dtypes=double_types(),
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det_singular,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],
supports_inplace_autograd=False,
skips=(
# https://github.com/pytorch/pytorch/issues/67512
DecorateInfo(unittest.skip("67512"), 'TestCommon', 'test_noncontiguous_samples'),
# Will be removed once https://github.com/pytorch/pytorch/issues/62328 is fixed
# Probable fix (open PR): https://github.com/pytorch/pytorch/pull/62570
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', device_type='cuda',
dtypes=(torch.complex128,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
# This test fails because singular inputs cannot be reliably
# generated unless we're using double types
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', 'test_unsupported_dtypes'),
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', 'test_unsupported_backward',
dtypes=(torch.float32, torch.complex64,)),
)),
OpInfo('linalg.cholesky',
aten_name='linalg_cholesky',
dtypes=floating_and_complex_types(),
# TODO: RuntimeError: While computing batched gradients,
# got: vmap: Calling Tensor.as_strided is not supported
# unless the batch dims being vmapped over are at the front of the tensor (in memory layout).
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
OpInfo('linalg.cholesky_ex',
aten_name='linalg_cholesky_ex',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.cond',
aten_name='linalg_cond',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_cond,
check_batched_gradgrad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.eig',
aten_name='linalg_eig',
op=torch.linalg.eig,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_eig,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Disabled due to https://github.com/pytorch/pytorch/issues/67367
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'),),
),
OpInfo('linalg.eigvals',
aten_name='linalg_eigvals',
op=torch.linalg.eigvals,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Disabled due to https://github.com/pytorch/pytorch/issues/67367
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'),
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
OpInfo('linalg.eigh',
aten_name='linalg_eigh',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# See: https://github.com/pytorch/pytorch/issues/67367
# This DecorateInfo should change to `dtypes=complex_dtypes()` after the above
# has been resolved.
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=floating_and_complex_types()),),
),
OpInfo('linalg.eigvalsh',
aten_name='linalg_eigvalsh',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Gradcheck for complex is not implemented yet
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
OpInfo('linalg.householder_product',
aten_name='linalg_householder_product',
op=torch.linalg.householder_product,
aliases=('orgqr', ),
dtypes=floating_and_complex_types(),
# TODO: backward uses in-place operations that vmap doesn't like
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_householder_product,
decorators=[
skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack,
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)})),
]),
OpInfo('linalg.lstsq',
aten_name='linalg_lstsq',
dtypes=floating_and_complex_types(),
supports_out=True,
sample_inputs_func=sample_inputs_linalg_lstsq,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# we skip gradient checks for this suite as they are tested in
# variant_test_name='grad_oriented'
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
)),
OpInfo('linalg.lstsq',
aten_name='linalg_lstsq',
variant_test_name='grad_oriented',
           # gradcheck for forward AD fails with multi-Tensor outputs
op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0],
supports_out=False,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_lstsq,
supports_autograd=True,
supports_forward_ad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# tests do not work with passing lambda for op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('linalg.matrix_power',
aliases=('matrix_power',),
aten_name='linalg_matrix_power',
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
supports_forward_ad=True,
check_batched_grad=False,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],
sample_inputs_func=sample_inputs_linalg_matrix_power,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),),
OpInfo('linalg.multi_dot',
# Need this lambda because gradcheck does not work with TensorList inputs
aten_name='linalg_multi_dot',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
supports_inplace_autograd=False,
# Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_multi_dot,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples'),
# Fails on XLA.
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
)),
OpInfo('linalg.norm',
op=torch.linalg.norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_norm,
aten_name='linalg_norm',
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
OpInfo('linalg.matrix_norm',
aten_name='linalg_matrix_norm',
dtypes=floating_and_complex_types(),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_matrix_norm,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
OpInfo('linalg.qr',
aten_name='linalg_qr',
op=torch.linalg.qr,
dtypes=floating_and_complex_types(),
# batched gradients do not work for empty inputs
# https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085
check_batched_gradgrad=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_qr,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.slogdet',
aten_name='linalg_slogdet',
op=torch.linalg.slogdet,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_slogdet,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
)),
OpInfo('linalg.vector_norm',
op=torch.linalg.vector_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_vector_norm,
aten_name='linalg_vector_norm'),
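    # NOTE: log, log10 and log2 below use a NumericsFilter to avoid sampling near |z| -> 0,
    # where the reference comparison becomes unstable; log1p only restricts its domain to (-1, None).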
UnaryUfuncInfo('log',
ref=np.log,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
# log(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
UnaryUfuncInfo('log10',
ref=np.log10,
domain=(0, None),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
# log10(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
UnaryUfuncInfo('log1p',
ref=np.log1p,
aliases=('special.log1p',),
domain=(-1, None),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
safe_casts_outputs=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True),
UnaryUfuncInfo('log2',
ref=np.log2,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
),
# log2(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
BinaryUfuncInfo('ldexp',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_binary_pwise,
promotes_int_to_float=True,
supports_out=True,
skips=(
# RuntimeError: mul(): functions with out=... arguments don't support
# automatic differentiation, but one of the arguments requires grad
# https://github.com/pytorch/pytorch/issues/68966
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
# FIXME: ldexp does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
),
decorators=[
DecorateInfo(
toleranceOverride({
torch.complex64: tol(atol=1e-05, rtol=1e-05)
}),
'TestCommon', device_type='cpu',
),
], ),
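    # NOTE: logaddexp and logaddexp2 below build their sample inputs inline via a lambda that
    # returns a single pair of (S, S) tensors instead of using a shared sample_inputs_* helper.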
OpInfo('logaddexp',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),
args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),
OpInfo('logaddexp2',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),
args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),
UnaryUfuncInfo('logical_not',
ref=np.logical_not,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_autograd=False,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# The function variant always returns BoolTensor
# while the inplace variant preserves the input dtype.
# >>> t = torch.randn(3)
# >>> torch.logical_not(t)
# tensor([False, False, False])
# >>> torch.logical_not(t).dtype
# torch.bool
# >>> t.logical_not_().dtype
# torch.float32
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
)),
OpInfo('lt',
aliases=('less',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('lu',
op=torch.lu,
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
# we use in-place operations which cannot be avoided.
# This causes vmap failures, hence we skip batched gradient checks
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_inputs_lu,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# we skip jit tests because `lu` is a torch function
# RuntimeError:
# 'Tensor (inferred)' object has no attribute or method 'lu'.:
# File "<string>", line 3
# def the_method(i0):
# return i0.lu(True, True)
# ~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('lu_solve',
op=torch.lu_solve,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_lu_solve,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# RuntimeError: lu_unpack: LU_pivots is expected to be a contiguous tensor of torch.int32 dtype
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950
)),
OpInfo('lu_unpack',
op=torch.lu_unpack,
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
# we use in-place operations which cannot be avoided.
# This causes vmap failures, hence we skip batched gradient checks
check_batched_grad=False,
supports_out=True,
sample_inputs_func=sample_inputs_lu_unpack,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# LU_pivots is expected to be a contiguous tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950
# cuda gradchecks are slow
# see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad', device_type='cuda'),
)),
OpInfo('masked_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_fill,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False),
OpInfo('masked_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_scatter,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False),
OpInfo('masked_select',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_masked_select),
OpInfo('matrix_exp',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
aliases=('linalg.matrix_exp',),
sample_inputs_func=sample_inputs_matrix_exp,
           # Needs to construct a 2n x 2n matrix by copy_-ing into it
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
),
OpInfo('matmul',
aliases=('linalg.matmul',),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_matmul,
skips=(
# ROCm intermittently fails the test with standard atol/rtol
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}),
'TestCommon', 'test_noncontiguous_samples',
active_if=TEST_WITH_ROCM),
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu', dtypes=(torch.long,)),
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestOpInfo',
device_type='xla', dtypes=(torch.long,)),
)),
OpInfo('max',
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_forward_ad=True),
OpInfo('max',
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),
OpInfo('median',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of median do support out
supports_out=False,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('nanmedian',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of nanmedian do support out
supports_out=False,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('var_mean',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),
backward_dtypes=floating_types_and(torch.half),
backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.half),
# TODO: some signatures of var_mean do support out
supports_out=False,
supports_forward_ad=True,
skips=(
# https://github.com/pytorch/pytorch/issues/67539
DecorateInfo(unittest.skip("67539"), 'TestCommon', 'test_noncontiguous_samples',
active_if=TEST_WITH_ASAN, device_type='cpu'),
# TODO: FIXME: complex inputs requiring grad error in forward
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# TODO: review with var_mean tests in test_autograd.py
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'))),
OpInfo('std_mean',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),
backward_dtypes=floating_types_and(torch.half),
backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.half),
# TODO: some signatures of std_mean do support out
supports_out=False,
supports_forward_ad=True,
skips=(
# https://github.com/pytorch/pytorch/issues/67539
DecorateInfo(unittest.skip("67539"), 'TestCommon', 'test_noncontiguous_samples',
active_if=TEST_WITH_ASAN, device_type='cpu'),
# TODO: FIXME: complex inputs requiring grad error in forward
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# TODO: fix along with var_mean autograd tests
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'))),
OpInfo('meshgrid',
variant_test_name='variadic_tensors',
ref=np.meshgrid,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),
skips=[
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Skip operator schema test because this is a functional and not an operator
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
],
supports_out=False,
supports_forward_ad=True),
OpInfo('meshgrid',
variant_test_name='list_of_tensors',
           # Unlike the variant above, we do not use np.meshgrid as a
           # ref since it does not officially support a list of numpy
           # arrays.
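           # Both call forms, as exercised by the two variants (an illustrative sketch only):
           #   torch.meshgrid(x, y)    -> covered by the 'variadic_tensors' variant above
           #   torch.meshgrid([x, y])  -> covered by this 'list_of_tensors' variant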
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'),
skips=[
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
],
assert_autodiffed=True,
supports_out=False,
autodiff_nonfusible_nodes=[],
supports_forward_ad=True),
OpInfo('min',
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_forward_ad=True),
OpInfo('min',
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),
OpInfo('quantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
OpInfo('nanquantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
BinaryUfuncInfo(
'max',
aliases=('maximum',),
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_binary,
supports_forward_ad=True,
assert_autodiffed=True,
ref=np.maximum,
skips=(
# FIXME: maximum does not accept scalar inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
# TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo(
'maximum',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,
ref=np.maximum,
skips=(
# FIXME: maximum does not accept scalar inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
# TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo(
'min',
aliases=('minimum',),
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_binary,
supports_forward_ad=True,
assert_autodiffed=True,
ref=np.minimum,
skips=(
# FIXME: min does not accept scalar inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
# TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo(
'minimum',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,
ref=np.minimum,
skips=(
# FIXME: minimum does not accept scalar inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
# TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo('logical_and',
ref=np.logical_and,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
always_returns_bool=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# FIXME: logical_and does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('logical_or',
ref=np.logical_or,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
always_returns_bool=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# FIXME: logical_or does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('logical_xor',
ref=np.logical_xor,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
always_returns_bool=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# FIXME: logical_xor does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('bitwise_or',
ref=np.bitwise_or,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
skips=(
# TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo('bitwise_xor',
ref=np.bitwise_xor,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
skips=(
# TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo('heaviside',
ref=lambda a, b: (
# necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64
np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b)
),
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
# FIXME: heaviside does not accept scalar inputs
skips=(
# NumPy's heaviside promotes bool to float16
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_reference_numerics_heavisidel',
dtypes=(torch.bool,)),
# RuntimeError: heaviside is not yet implemented for tensors with different dtypes.
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion'),
# PyTorch's heaviside does not appear to propagate NaNs
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('lcm',
ref=np.lcm,
dtypes=integral_types_and(),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
skips=(
# TODO: FIXME: lcm doesn't support scalars
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('gcd',
ref=np.gcd,
dtypes=integral_types_and(),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.int8,)),
# TODO: FIXME: jiterator doesn't support non-tensor inputs
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_broadcast_python_scalar'),
# TODO: FIXME: jiterator doesn't support casts to unsupported types
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'))),
BinaryUfuncInfo('isclose',
ref=np.isclose,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_isclose,
supports_autograd=False,
supports_out=False,
skips=(
# RuntimeError: Short did not match Int
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion'),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
# FIXME: isclose does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
    # `softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
# https://github.com/pytorch/pytorch/issues/68752
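    # A minimal sketch of the difference (illustrative only, not executed by this suite):
    #   >>> t = torch.arange(4)                          # integral dtype
    #   >>> torch.softmax(t, dim=0)                      # errors: softmax is not implemented for integral dtypes
    #   >>> torch.softmax(t, dim=0, dtype=torch.float)   # works: the input is cast to float first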
OpInfo('softmax',
aliases=('special.softmax', 'nn.functional.softmax',),
aten_name='softmax',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
supports_out=False),
OpInfo('softmax',
aliases=('special.softmax', 'nn.functional.softmax',),
variant_test_name="with_dtype",
aten_name='softmax',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True,
supports_out=False),
    # `softmin` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
# https://github.com/pytorch/pytorch/issues/68752
OpInfo('nn.functional.softmin',
aten_name='softmin',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_jit_shape_analysis=False,
assert_autodiffed=False,
supports_out=False),
OpInfo('nn.functional.softmin',
variant_test_name="with_dtype",
aten_name='softmin',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=False,
supports_out=False),
OpInfo(
"nn.functional.cross_entropy",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_cross_entropy,
supports_out=False,
decorators=(
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}),
"TestJit",
"test_variant_consistency_jit",
device_type="cpu",
),
),
skips=(
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536
# test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked
# 1536 bytes CUDA memory on device 0
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
)
),
OpInfo('nn.functional.normalize',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_normalize),
OpInfo('aminmax',
ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(onlyNativeDeviceTypes,),
supports_autograd=False,
sample_inputs_func=sample_inputs_aminmax),
OpInfo('as_strided',
op=lambda x, size, stride, storage_offset=0:
torch.as_strided(x, size, stride, storage_offset=storage_offset),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_as_strided,
skips=(
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
# With rtol=1e-07 and atol=1e-07, found 1 element(s) (out of 1) whose difference(s)
# exceeded the margin of error (including 0 nan comparisons). The greatest difference
# was 1.0 (1.0 vs. -0.0), which occurred at index 0.
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# AssertionError: False is not true : Scalars failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),)),
OpInfo('nn.functional.cosine_similarity',
aten_name="cosine_similarity",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_cosine_similarity),
OpInfo('nn.functional.adaptive_avg_pool1d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool1d),
OpInfo('nn.functional.adaptive_avg_pool2d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, int]'. :
# File "<string>", line 3
# def the_method(i0):
# return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool2d),
OpInfo('nn.functional.adaptive_avg_pool3d',
dtypes=floating_types_and(torch.half),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, NoneType, NoneType]'. :
# File "<string>", line 3
#
# def the_method(i0):
# return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
#
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool3d),
OpInfo('nn.functional.adaptive_max_pool1d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool1d),
OpInfo('nn.functional.adaptive_max_pool2d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, int]'. :
# File "<string>", line 3
# def the_method(i0):
# return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool2d),
OpInfo('nn.functional.adaptive_max_pool3d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, NoneType, NoneType]'. :
# File "<string>", line 3
#
# def the_method(i0):
# return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
#
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool3d),
OpInfo('nn.functional.avg_pool1d',
aten_name='avg_pool1d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_avgpool1d),
OpInfo('nn.functional.avg_pool3d',
aten_name='avg_pool3d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_avgpool3d),
OpInfo('nn.functional.relu',
aten_name="relu",
supports_autograd=True,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_activation_relu,
supports_out=False,
supports_forward_ad=True),
OpInfo('nn.functional.conv_transpose1d',
aten_name='conv_transpose1d',
aliases=('conv_transpose1d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv_transpose1d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv_transpose2d',
aten_name='conv_transpose2d',
aliases=('conv_transpose2d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv_transpose2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv_transpose3d',
aten_name='conv_transpose3d',
aliases=('conv_transpose3d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv_transpose3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_noncontiguous_samples', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv1d',
aliases=('conv1d',),
aten_name='conv1d',
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv1d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv2d',
aliases=('conv2d',),
aten_name='conv2d',
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=partial(sample_inputs_conv2d),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.group_norm',
aten_name='group_norm',
aliases=('group_norm',),
ref=reference_group_norm,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))
],
sample_inputs_func=sample_inputs_group_norm,),
OpInfo('nn.functional.instance_norm',
# no ref because instance_norm will often have numerical instability (large numbers or nan)
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))
],
sample_inputs_func=sample_inputs_instance_norm,),
OpInfo('nn.functional.layer_norm',
aten_name='layer_norm',
aliases=('layer_norm',),
ref=reference_layer_norm,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}),
'TestCommon', 'test_reference_testing'
)
],
sample_inputs_func=sample_inputs_layer_norm,),
OpInfo('nn.functional.local_response_norm',
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))
],
sample_inputs_func=sample_inputs_local_response_norm,),
OpInfo('nn.functional.pad',
variant_test_name='constant',
aten_name='constant_pad_nd',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'),
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='reflect',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='replicate',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='circular',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'),
supports_forward_ad=True,
check_batched_grad=False,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
supports_out=False),
OpInfo('nn.functional.hardswish',
aten_name="hardswish",
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardswish,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=False,
supports_forward_ad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::hardswish"]),
OpInfo('nn.functional.unfold',
aten_name='im2col',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_unfold,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='nearest',
supports_autograd=True,
dtypes=floating_types_and(torch.uint8),
dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),
sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='linear',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bilinear',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bicubic',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='trilinear',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='area',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'area'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.upsample_bilinear',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.upsample_nearest',
supports_autograd=True,
dtypes=floating_types_and(torch.uint8),
dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample, 'nearest'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.leaky_relu',
aliases=None,
aten_name="leaky_relu",
sample_inputs_func=sample_inputs_leaky_relu,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::leaky_relu"]),
OpInfo('nn.functional.avg_pool2d',
aten_name='avg_pool2d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_avgpool2d),
OpInfo('nn.functional.fractional_max_pool2d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool2d,
decorators=[
# FIXME: both derivatives are implemented incorrectly
# https://github.com/pytorch/pytorch/issues/69322
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
# FIXME: produces incorrect output on non-contiguous inputs
# https://github.com/pytorch/pytorch/issues/69325
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
], ),
OpInfo('nn.functional.fractional_max_pool3d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool3d,
decorators=[
# FIXME: both derivatives are implemented incorrectly
# https://github.com/pytorch/pytorch/issues/69322
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
# FIXME: produces incorrect output on non-contiguous inputs
# https://github.com/pytorch/pytorch/issues/69325
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
], ),
OpInfo('nn.functional.max_pool1d',
aten_name='max_pool1d',
supports_autograd=True,
supports_out=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_pool2d',
aten_name='max_pool2d',
supports_autograd=True,
# Vmap is not happy with non-contiguous (channels_last) inputs
check_batched_gradgrad=False,
supports_out=False,
assert_jit_shape_analysis=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_pool3d',
aten_name='max_pool3d',
supports_autograd=True,
supports_out=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
# TODO: investigate nondeterminism
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.linear',
aten_name='linear',
supports_autograd=True,
sample_inputs_func=sample_inputs_linear,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if CUDA11OrLater else []),
# linear calls mm under the hood which is nondeterministic on CUDA
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms
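           # (hence the nondeterminism tolerance below instead of exact gradcheck comparisons)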
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False),
OpInfo('nn.functional.bilinear',
aten_name='bilinear',
supports_autograd=True,
sample_inputs_func=sample_inputs_bilinear,
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+ and SM53+
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
),
supports_forward_ad=False,
supports_out=False),
OpInfo('nn.functional.glu',
aten_name='glu',
supports_autograd=True,
sample_inputs_func=sample_inputs_glu,
dtypes=floating_types(),
dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_out=False),
UnaryUfuncInfo(
'nn.functional.elu',
ref=lambda x, alpha=1.0, inplace=False:
np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'alpha': 0.8}, {'alpha': 0.8}),
inplace_variant=lambda x, alpha=1.0:
torch.nn.functional.elu(x, alpha, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
OpInfo(
'nn.functional.prelu',
ref=lambda x, weight:
np.maximum(0., x) + np.minimum(0., x) *
(weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_inputs_func=sample_inputs_nn_functional_prelu,
decorators=[
# FIXME: second derivative is implemented but seems to be incorrect
# https://github.com/pytorch/pytorch/issues/68760
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
# https://github.com/pytorch/pytorch/issues/68752
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ],
),
UnaryUfuncInfo(
'nn.functional.celu',
ref=lambda x, alpha=1.0, inplace=False:
np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'alpha': 0.8}, {'alpha': 0.8}),
inplace_variant=lambda x, alpha=1.0:
torch.nn.functional.celu(x, alpha, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
UnaryUfuncInfo(
'nn.functional.rrelu',
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs),
ref=_NOTHING,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'lower': 0., 'upper': 1.}, {'lower': 0., 'upper': 1.}),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(partial(torch.nn.functional.rrelu, inplace=True), input, *args, **kwargs),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
),
# Probably because we have used lambda for the op here
# AssertionError: JIT Test does not execute any logic
DecorateInfo(
unittest.skip("Skipped!"),
'TestJit', 'test_variant_consistency_jit'
), ],
),
UnaryUfuncInfo(
'nn.functional.selu',
ref=lambda x, inplace=False:
1.0507009873554804934193349852946 * (
np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1))
),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-2, rtol=1.8e-2),
torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
UnaryUfuncInfo(
'nn.functional.silu',
ref=lambda x, inplace=False:
x / (1 + np.exp(-x)),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=False,
assert_autodiffed=False,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-3, rtol=1e-3),
torch.bfloat16: tol(atol=1e-4, rtol=1e-4)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
skips=[
# FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j)
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', dtypes=(torch.complex64,)), ],
),
UnaryUfuncInfo(
'nn.functional.hardsigmoid',
ref=reference_hardsigmoid,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=False,
supports_out=False,
inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ],
skips=[
# still want to test that first derivative works though second derivative isn't supported
DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_gradgrad"),
# produces 0 instead of nan on ROCM
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_extremal",
dtypes=(torch.bfloat16, torch.float16, torch.float32,), device_type='cuda',
active_if=(TEST_WITH_ROCM)), ]
),
UnaryUfuncInfo(
'nn.functional.logsigmoid',
aten_name="log_sigmoid",
ref=reference_logsigmoid,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
),
UnaryUfuncInfo(
'nn.functional.mish',
ref=lambda x: x * np.tanh(reference_softplus(x)),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
inplace_variant=partial(torch.nn.functional.mish, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda',), ],
),
UnaryUfuncInfo(
'nn.functional.softsign',
ref=lambda x: x / (np.abs(x) + 1),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ],
skips=(
# pytorch computes (0+nanj), numpy computes (-5e-18-1j) for input (-501.-1.0000e+20j)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',
"test_reference_numerics_hard", dtypes=(torch.complex64,)),),
),
UnaryUfuncInfo(
'nn.functional.tanhshrink',
ref=lambda x: x - np.tanh(x),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), ],
skips=(
# in each case, pytorch will produce a nan while numpy will not
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_normal",
dtypes=(torch.complex64,), active_if=(IS_MACOS)),
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_hard",
dtypes=(torch.complex64,), active_if=(IS_MACOS)),
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_extremal",
dtypes=(torch.complex64,), device_type='cpu',
active_if=(IS_MACOS or IS_WINDOWS)),)
),
OpInfo(
'nn.functional.threshold',
ref=lambda x, threshold, value: np.where(x > threshold, x, value).astype(x.dtype),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_inputs_func=sample_inputs_threshold,
),
OpInfo('nextafter',
dtypes=floating_types_and(torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_nextafter),
OpInfo('topk',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_topk),
    # Multiple variants for batch_norm to test with cuDNN enabled and with cuDNN disabled
# See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details
OpInfo('nn.functional.batch_norm',
aten_name='batch_norm',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_batch_norm),
# This variant tests batch_norm with cuDNN disabled only on CUDA devices
OpInfo('nn.functional.batch_norm',
variant_test_name='without_cudnn',
aten_name='batch_norm',
dtypes=empty_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[onlyCUDA, disablecuDNN],
sample_inputs_func=sample_inputs_batch_norm),
    # We have to add two OpInfo entries for `igamma` and `igammac`. The first is the
    # standard entry; the second runs gradcheck tests on the second argument.
OpInfo('igamma',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
aliases=('torch.special.gammainc',),
dtypesIfCUDA=floating_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('igamma',
variant_test_name='grad_other',
           # Since the autograd formula is implemented only for `other` and
           # the gradcheck test verifies the formula for the input in SampleInput,
           # we permute the arguments.
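           # (sketch: with this permutation, the differentiated SampleInput tensor lands in
           #  igamma's `other` slot, which is the argument the backward formula covers)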
op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs),
inplace_variant=None,
method_variant=None,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types(),
backward_dtypesIfCUDA=floating_types(),
supports_inplace_autograd=False,
skips=(
# test does not work with passing lambda for op
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
                           # test fails because we permute the arguments for the function variant
                           # but not for the inplace or method variants.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('igammac',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
aliases=('torch.special.gammaincc',),
dtypesIfCUDA=floating_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('igammac',
variant_test_name='grad_other',
           # Since the autograd formula is implemented only for `other` and
           # the gradcheck test verifies the formula for the input in SampleInput,
           # we permute the arguments.
op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs),
inplace_variant=None,
method_variant=None,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types(),
backward_dtypesIfCUDA=floating_types(),
supports_inplace_autograd=False,
skips=(
# test does not work with passing lambda for op
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
                           # test fails because we permute the arguments for the function variant
                           # but not for the inplace or method variants.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('nn.functional.softshrink',
aten_name="softshrink",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=False,
),
OpInfo('nn.functional.hardshrink',
aten_name="hardshrink",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::hardshrink"]),
OpInfo('nn.functional.hardtanh',
aten_name="hardtanh",
dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16),
backward_dtypesIfCPU=all_types(),
dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::hardtanh"],
),
OpInfo('nn.functional.gelu',
aten_name="gelu",
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_gelu,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::gelu"]),
OpInfo('nn.functional.relu6',
aten_name="relu6",
dtypes=all_types_and(torch.bfloat16),
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::relu6"]),
OpInfo('mm',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_mm),
OpInfo('mode',
op=torch.mode,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_mode,),
MvlGammaInfo(variant_test_name='mvlgamma_p_1',
domain=(1, None),
skips=skips_mvlgamma(),
sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),
MvlGammaInfo(variant_test_name='mvlgamma_p_3',
domain=(2, None),
skips=skips_mvlgamma(skip_redundant=True) + (
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),
MvlGammaInfo(variant_test_name='mvlgamma_p_5',
domain=(3, None),
skips=skips_mvlgamma(skip_redundant=True) + (
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),
OpInfo('ne',
aliases=('not_equal',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('narrow',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_narrow),
UnaryUfuncInfo('neg',
aliases=('negative', ),
ref=np.negative,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
error_inputs_func=error_inputs_neg,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True,),
OpInfo('dist',
op=torch.dist,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_dist),
OpInfo('outer',
op=torch.outer,
aliases=('ger', ),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_outer,),
OpInfo('ormqr',
op=torch.ormqr,
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_ormqr,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),
OpInfo('permute',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_jit_shape_analysis=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_permute),
OpInfo('pow',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
           # Because AVX2 is currently not fully supported for Float16, log_vml_cpu can't be enabled
           # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently
           # unsupported on CPU.
backward_dtypes=floating_and_complex_types_and(torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_pow,
supports_inplace_autograd=False,
supports_forward_ad=True,
assert_autodiffed=True,
),
OpInfo('float_power',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_pow,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view', device_type='cuda'),),),
OpInfo('qr',
op=torch.qr,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_qr,
# batched gradients do not work for empty inputs
# https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085
check_batched_gradgrad=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
UnaryUfuncInfo('rad2deg',
ref=np.degrees,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
UnaryUfuncInfo('real',
ref=np.real,
dtypes=complex_types(),
supports_out=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Skip since real and imag don't have out variants.
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('roll',
ref=np.roll,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_roll),
OpInfo('rot90',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_rot90),
UnaryUfuncInfo('round',
ref=np.round,
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True,),
UnaryUfuncInfo('sin',
ref=np.sin,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),
UnaryUfuncInfo('sinc',
ref=np_sinc_with_fp16_as_fp32,
aliases=('special.sinc',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2,
torch.float16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49133
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.cfloat]),
)),
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('sign',
ref=reference_sign,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
)),
UnaryUfuncInfo('sgn',
ref=reference_sgn,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
# Reference: https://github.com/pytorch/pytorch/issues/53958
                       # The test fails when comparing NaN values because `equal_nan` is True
                       # when comparing the CPU tensors.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.complex64, torch.complex128]),
# Reference: https://github.com/pytorch/pytorch/issues/48486
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.complex64]),
# The complex formula might be wrong
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=complex_types()),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD',
dtypes=complex_types()),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('split',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=False),
supports_forward_ad=True,
supports_out=False,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_autodiffed=True),
OpInfo('split',
variant_test_name='list_args',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=True),
supports_forward_ad=True,
supports_out=False),
OpInfo('split_with_sizes',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_split_with_sizes,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
supports_out=False,
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('__radd__',
op=torch.Tensor.__radd__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __radd__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__radd__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::add'],),
OpInfo('__rdiv__',
op=torch.Tensor.__rdiv__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __rdiv__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rdiv__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
supports_forward_ad=True,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),
OpInfo('__rmul__',
op=torch.Tensor.__rmul__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __rmul__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rmul__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::mul'],),
OpInfo('__rand__',
op=torch.Tensor.__rand__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__ror__',
op=torch.Tensor.__ror__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rxor__',
op=torch.Tensor.__rxor__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rmatmul__',
op=torch.Tensor.__rmatmul__,
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else [],
torch.complex64, torch.complex128),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else [],
torch.complex64, torch.complex128),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_matmul,
supports_out=False,
decorators=(
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu', dtypes=(torch.long,)),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view'),
# Fails on XLA.
# AssertionError: False is not true : Tensors failed to compare as equal
DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
),
skips=(
# RuntimeError:
# object has no attribute __rmatmul__:
# File "<string>", line 3
# def the_method(i0, i1):
# return torch.__rmatmul__(i0, i1)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
)),
OpInfo('__rmod__',
op=torch.Tensor.__rmod__,
dtypes=floating_types_and(torch.bfloat16, torch.half,),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __rmod__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rmod__(i0, 3.14)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
           # Autograd can be supported once torch.remainder(Tensor, Tensor) supports
           # autograd w.r.t. the second argument.
           # https://github.com/pytorch/pytorch/pull/58476/files#r637167630
supports_autograd=False,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::remainder'],),
OpInfo('__rpow__',
op=torch.Tensor.__rpow__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
# Reference: https://github.com/pytorch/pytorch/issues/54774
# "log2" "_vml_cpu" not implemented for Half
backward_dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_forward_ad=True,
skips=(
# RuntimeError:
# object has no attribute __rpow__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rpow__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::pow'],),
OpInfo('__rsub__',
op=torch.Tensor.__rsub__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __rsub__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rsub__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::rsub'],),
OpInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
variant_test_name='rsub_tensor',
supports_out=False,
supports_inplace_autograd=False,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":52,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.cfloat, torch.cdouble]), # noqa: B950
),
sample_inputs_func=partial(sample_inputs_rsub, variant='tensor'),),
OpInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
variant_test_name='rsub_scalar',
supports_out=False,
supports_inplace_autograd=False,
sample_inputs_func=partial(sample_inputs_rsub, variant='scalar'),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":52,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.cfloat, torch.cdouble]), # noqa: B950
),
assert_autodiffed=True,),
OpInfo('select',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_select,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_out=False),
OpInfo('select_scatter',
dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_select_scatter,
supports_forward_ad=True,
supports_out=False),
OpInfo('slice_scatter',
dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_slice_scatter,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('signbit',
ref=np.signbit,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False,),
OpInfo('solve',
op=torch.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
UnaryUfuncInfo('tan',
ref=np.tan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.float64],
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
# tan(pi/2 * odd_number) is nan
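                   # Rough sketch of the intent (assuming NumericsFilter replaces elements where
                   # `condition` holds with `safe_val` before comparing against the reference):
                   # inputs near odd multiples of pi/2 are masked out, e.g.
                   #   close_to_int(torch.tensor(math.pi / 2) / (math.pi * 0.5))  # True -> masked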
reference_numerics_filter=NumericsFilter(
condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)),
UnaryUfuncInfo('tanh',
ref=np.tanh,
aliases=('nn.functional.tanh',),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "tanh_backward_cpu" not implemented for 'BFloat16'
backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
                       # The alias nn.functional.tanh will produce (because the warning string is saved):
                       # "RuntimeError: Expected to not find "tanh" but found it"
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
# tan(j * pi/2 * odd_number) is nan
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 0.5j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)),
OpInfo('tensor_split',
ref=np.array_split,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
sample_inputs_func=sample_inputs_tensor_split,),
OpInfo('hsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_hsplit,),
OpInfo('vsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_vsplit,),
OpInfo('dsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_dsplit,),
OpInfo('triangular_solve',
op=torch.triangular_solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
decorators=[skipCUDAIfNoMagma]),
UnaryUfuncInfo('trunc',
aliases=('fix', ),
ref=np.trunc,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True),
UnaryUfuncInfo('exp2',
aliases=('special.exp2', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True),
UnaryUfuncInfo('expm1',
aliases=('special.expm1', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
safe_casts_outputs=True,
assert_autodiffed=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('nan_to_num',
ref=np.nan_to_num,
dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
                   # We pass numpy kwargs via sample_kwargs because numpy performs the comparison
                   # for BFloat16 in float, since it currently doesn't support BFloat16.
                   # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556
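                   # Sketch of the convention assumed here: sample_kwargs returns a pair of dicts,
                   # (kwargs for the torch op, kwargs for the numpy reference); for bfloat16 inputs
                   # the reference is given posinf/neginf from torch.finfo(torch.bfloat16) so that
                   # numpy's float computation saturates at the same bounds as torch's bfloat16.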
sample_kwargs=lambda device, dtype, input: ({},
{'posinf': torch.finfo(torch.bfloat16).max,
'neginf': torch.finfo(torch.bfloat16).min})
if dtype is torch.bfloat16 else ({}, {})),
UnaryUfuncInfo('reciprocal',
ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
safe_casts_outputs=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('rsqrt',
ref=lambda x: np.reciprocal(np.sqrt(x)),
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.half: 5e-2}),),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
handles_complex_extremals=False),
UnaryUfuncInfo('sqrt',
ref=np.sqrt,
supports_sparse=True,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_sparse_csr=True,
decorators=(precisionOverride({torch.bfloat16: 7e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_MACOS),
# Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
safe_casts_outputs=True,
handles_complex_extremals=False),
UnaryUfuncInfo('square',
ref=np.square,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/52549
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.cfloat, torch.cdouble]),
# >>> t = torch.tensor(complex(-0.01, float("inf")))
# >>> np.square(t.numpy())
# (-inf-infj)
# >>> t.square()
# tensor(-inf-infj)
# >>> t.cuda().square()
# tensor(inf+nanj, device='cuda:0')
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
),),
OpInfo('lerp',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_lerp,
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('linalg.inv',
aten_name='linalg_inv',
op=torch.linalg.inv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
),
OpInfo('linalg.inv_ex',
aten_name='linalg_inv_ex',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
UnaryUfuncInfo('angle',
ref=np.angle,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
supports_forward_ad=True,
supports_sparse_csr=True,
supports_complex_to_float=True,
skips=(
# The complex formula might be wrong
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=complex_types()),),),
UnaryUfuncInfo('isfinite',
ref=np.isfinite,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_autograd=False,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/66402
DecorateInfo(unittest.expectedFailure, "TestUnaryUfuncs", "test_reference_numerics_hard",
device_type='cpu', dtypes=(torch.complex64,), active_if=not (IS_MACOS or IS_WINDOWS)),
)),
UnaryUfuncInfo('isinf',
ref=np.isinf,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False),
UnaryUfuncInfo('isposinf',
ref=np.isposinf,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False),
UnaryUfuncInfo('isneginf',
ref=np.isneginf,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False),
UnaryUfuncInfo('isreal',
ref=np.isreal,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_autograd=False),
UnaryUfuncInfo('isnan',
ref=np.isnan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False),
OpInfo('linalg.solve',
aten_name='linalg_solve',
op=torch.linalg.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.solve_triangular',
aten_name='linalg_solve_triangular',
op=torch.linalg.solve_triangular,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve_triangular,
# linalg.solve_triangular cannot be batched over because of a call to out.copy_(result);
supports_forward_ad=True),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
op=torch.linalg.pinv,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_pinv,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# errors with "leaked XXXX bytes CUDA memory on device 0"
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),
)),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
variant_test_name='singular',
# pinv is Frechet-differentiable in a rank-preserving neighborhood,
# so we feed inputs that are the products of two full-rank factors,
# to avoid any rank changes caused by the perturbations in the gradcheck
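           # A sketch of the construction (assuming the sampled factors are tall): for a of shape
           # (m, k) and b of shape (n, k), a @ b.mT has rank at most k, so the input is
           # rank-deficient, yet small perturbations of a and b leave that rank unchanged.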
op=lambda a, b: torch.linalg.pinv(a @ b.mT),
dtypes=floating_and_complex_types(),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_pinv_singular,
# Only large tensors show issues with implicit backward used prior to
# explicit backward implementation.
decorators=[slowTest, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# test does not work with passing lambda for op
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('eig',
op=torch.eig,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_eig,
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
skipCUDAIfRocm
],),
OpInfo('einsum',
# we need this lambda because SampleInput expects tensor input as the first argument
# TODO(@heitorschueroff) update SampleInput to handle such cases
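           # A sketch of why the lambda is needed: the SampleInput stores the list of operand
           # tensors as its `input` and the equation string as a positional argument, so the call
           # arrives as op(tensors, equation); the lambda reorders this back into the
           # torch.einsum(equation, tensors) calling convention.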
op=lambda tensors, equation: torch.einsum(equation, tensors),
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),
supports_out=False,
supports_forward_ad=True,
           # See https://github.com/pytorch/pytorch/issues/66357
           check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_einsum,
skips=(
# test does not work with passing lambda for op
# there's a test `test_einsum` in `test_jit.py` to handle this case
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('svd',
op=torch.svd,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_svd,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
]),
OpInfo('linalg.svd',
op=torch.linalg.svd,
aten_name='linalg_svd',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_svd,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
]),
OpInfo('linalg.svdvals',
op=torch.linalg.svdvals,
aten_name='linalg_svdvals',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_svdvals,
check_batched_gradgrad=False,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCPUIfNoLapack]),
OpInfo('polar',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_polar),
# TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.
    # To test reference numerics against multiple values of the argument `n`,
    # we make multiple OpInfo entries, each corresponding to a different value of n (currently 0 to 4).
    # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.
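    # For illustration (assumption based on how sample_kwargs is used in this file): each variant
    # fixes `n` by returning the same kwargs for both the torch op and the reference function,
    # e.g. ({'n': 2}, {'n': 2}) drives torch.polygamma(2, x) and the SciPy-backed reference with n=2.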
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
# A separate OpInfo entry for special.polygamma is needed to reorder the arguments
# for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939
UnaryUfuncInfo('special.polygamma',
op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),
variant_test_name='special_polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_1',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_2',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_3',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_4',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
OpInfo('ravel',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_ravel,
),
OpInfo('reshape',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_view_reshape,
supports_out=False,
supports_forward_ad=True,
),
OpInfo('reshape_as',
op=lambda x, other: x.reshape_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_reshape_as,
supports_out=False,
supports_forward_ad=True,
),
OpInfo('view',
op=lambda x, shape: x.view(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_view_reshape,
),
OpInfo('view_as',
op=lambda x, other: x.view_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_view_as_reshape_as,
),
OpInfo('atleast_1d',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_atleast1d2d3d,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
),
OpInfo('atleast_2d',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_atleast1d2d3d,
),
OpInfo('atleast_3d',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_atleast1d2d3d,
),
OpInfo('pinverse',
op=torch.pinverse,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('gather',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_gather,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
),
OpInfo('index_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index_fill),
OpInfo('index_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index_copy,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_select',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_index_select,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_add',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index_add,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('__getitem__',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=False,
supports_scripting=False,
op=torch.Tensor.__getitem__,
skips=(
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),
),
assert_jit_shape_analysis=False, # TODO: support index.Tensor()
sample_inputs_func=sample_inputs_getitem,),
OpInfo('index_put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=True,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
test_neg_view=False,
sample_inputs_func=sample_inputs_index_put,
skips=(
# RuntimeError: The following operation failed in the TorchScript interpreter.
# Traceback of TorchScript (most recent call last):
# File "<string>", line 3, in forward
# def the_method(i0, i1: List[torch.Tensor], i2):
# return torch.index_put(i0, i1, i2, accumulate=False)
# ~~~~~~~~~~~~~~~ <--- HERE
# RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('sort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_sort,
skips=(
# sort does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# RuntimeError not raised
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
)),
OpInfo('unique',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
sample_inputs_func=sample_inputs_unique,
supports_out=False,
supports_autograd=False,
skips=(
# RuntimeError:
# 'Tensor (inferred)' object has no attribute or method 'unique'.:
# File "<string>", line 3
#
# def the_method(i0):
# return i0.unique(sorted=False, return_inverse=False, return_counts=False, dim=None)
# ~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('unique_consecutive',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
sample_inputs_func=sample_inputs_unique_consecutive,
supports_out=False,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
check_batched_gradgrad=False, # vmap complains of the sizes
sample_inputs_func=sample_inputs_put),
OpInfo('take',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
check_batched_grad=False, # vmap complains of the sizes
supports_forward_ad=True,
sample_inputs_func=sample_inputs_take),
OpInfo('scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter,),
OpInfo('bfloat16',
op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bfloat16',
op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bool',
op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bool',
op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('byte',
op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('byte',
op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('char',
op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('char',
op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('double',
op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_forward_ad=True,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('double',
op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_forward_ad=True,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('float',
op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('float',
op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('half',
op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('half',
op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('int',
op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('int',
op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('long',
op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('long',
op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('short',
op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('short',
op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('empty_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('zeros_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('ones_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('randn_like',
dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex64, torch.complex128),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randn_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('rand_like',
dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex64, torch.complex128),
op=lambda inp, *args, **kwargs:
               wrapper_set_seed(torch.rand_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('randint_like',
dtypes=all_types_and(torch.half, torch.bfloat16),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randint_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_randint_like,
supports_autograd=False,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('full_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_full_like,
supports_autograd=False,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('new_zeros',
op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
supports_autograd=False),
OpInfo('new_ones',
op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
supports_autograd=False),
OpInfo('new_empty',
op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
supports_autograd=False),
OpInfo('new_full',
op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_new_full,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
supports_autograd=False),
OpInfo('scatter_add',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_add,
supports_out=False
),
OpInfo('stack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_stack,
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('hstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('hypot',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_hypot,
),
OpInfo('histogram',
dtypes=floating_types(),
dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU
sample_inputs_func=sample_inputs_histogram,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0):
# return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False)
# ~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Not Implemented on XLA.
DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla'),
)),
OpInfo('histogramdd',
dtypes=floating_types(),
dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU
sample_inputs_func=sample_inputs_histogramdd,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('histc',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64),
sample_inputs_func=sample_inputs_histc,
supports_out=True,
supports_autograd=False,
skips=(
# CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor
# "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast
# from a result of dtype torch.float32 into an out= with dtype torch.long"
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'),
)),
OpInfo('bincount',
dtypes=integral_types_and(),
sample_inputs_func=sample_inputs_bincount,
supports_out=False,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bucketize',
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_bucketize,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('searchsorted',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_searchsorted,
supports_autograd=False,
ref=reference_searchsorted,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('cat',
ref=lambda input_seq, dim=0, **kwargs: np.concatenate(input_seq, axis=dim, **kwargs),
aliases=('concat',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_cat_concat,
supports_forward_ad=True,
assert_autodiffed=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# RuntimeError: Arguments for call not valid.
# Expected a value of type 'List[Tensor]' for argument
# 'tensors' but instead found type 'Tensor (inferred)'.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),
OpInfo('vstack',
aliases=('row_stack',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# RuntimeError: _fn() Expected a value of type
# 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),
OpInfo('dstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('unfold',
op=lambda x, *args: x.unfold(*args),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Skip operator schema test because this is a functional and not an operator
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
sample_inputs_func=sample_inputs_unfold),
OpInfo('msort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=all_types_and(torch.float16),
check_batched_gradgrad=False,
skips=(
# msort does not correctly warn when resizing out= inputs.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Expected RuntimeError when doing an unsafe cast from a result of dtype
# torch.float32 into an out= with dtype torch.long
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
),
sample_inputs_func=sample_inputs_msort),
OpInfo('movedim',
aliases=('moveaxis',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_movedim_moveaxis),
OpInfo('renorm',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_renorm),
ShapeFuncInfo('repeat',
op=lambda x, dims: x.repeat(dims),
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_repeat_tile),
OpInfo('squeeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_jit_shape_analysis=True,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_squeeze),
OpInfo('fill_',
op=lambda x, scalar: torch.fill_(x.clone(), scalar),
method_variant=None,
inplace_variant=torch.Tensor.fill_,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
skips=(
# JIT has issue when op is passed as lambda
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_fill_),
OpInfo('resize_',
op=lambda x, shape: x.clone().resize_(shape),
method_variant=None,
inplace_variant=torch.Tensor.resize_,
# the test fails because resize_ doesn't work with imag views as expected by the test
# https://github.com/pytorch/pytorch/issues/65945
test_neg_view=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
skips=(
# resize_ is raising an error on input that requires grad on purpose
DecorateInfo(
unittest.skip('Skipped! Resizing of variables that require grad is not supported.'),
'TestGradients',
'test_nondifferentiable',
),
DecorateInfo(unittest.skip("Allowed exception"), 'TestCommon', 'test_composite_compliance'),
),
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('resize_as_',
op=lambda x, other: torch.resize_as_(x.clone(), other),
method_variant=None,
inplace_variant=torch.Tensor.resize_as_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
skips=(
# resize_ is raising an error on input that requires grad on purpose
DecorateInfo(
unittest.skip('Skipped! Resizing of variables that require grad is not supported.'),
'TestGradients',
'test_nondifferentiable',
),
),
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('take_along_dim',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_take_along_dim,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
ShapeFuncInfo('tile',
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_repeat_tile),
OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid'
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_trapezoid),
OpInfo('trapezoid',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_trapezoid),
OpInfo('cumulative_trapezoid',
dtypes=all_types_and_complex_and(),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_out=False,
sample_inputs_func=sample_cumulative_trapezoid),
OpInfo('unsqueeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
sample_inputs_func=sample_unsqueeze),
OpInfo('xlogy',
aliases=('special.xlogy',),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_xlogy),
OpInfo('zero_',
op=lambda x: torch.zero_(x.clone()),
method_variant=None,
inplace_variant=torch.Tensor.zero_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT has issue when op is passed as lambda
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_zero_),
OpInfo('special.xlog1py',
aten_name='special_xlog1py',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_xlog1py),
OpInfo('special.zeta',
aten_name='special_zeta',
dtypes=all_types_and(torch.bool),
supports_autograd=False,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_binary_pwise),
# OpInfo entry to verify the gradient formula of `other`/`q`
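    # For illustration: the lambda below maps op(q, x) to torch.special.zeta(x, q),
    # so gradcheck differentiates with respect to `q`.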
OpInfo('special.zeta',
op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs),
aten_name='special_zeta',
variant_test_name='grad',
dtypes=all_types_and(torch.bool),
supports_autograd=True,
safe_casts_outputs=True,
skips=(
# Lambda doesn't work in JIT test
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),
),
sample_inputs_func=sample_inputs_zeta),
OpInfo('logsumexp',
aliases=('special.logsumexp',),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_logsumexp),
OpInfo('trace',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_trace),
OpInfo('transpose',
aliases=('swapdims', 'swapaxes'),
assert_jit_shape_analysis=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_transpose_swapdims),
OpInfo('T',
op=lambda x: x.T,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=( # Lambda doesn't work in JIT test
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_T),
OpInfo('H',
op=lambda x: x.H,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=( # Lambda doesn't work in JIT test
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_T),
OpInfo('mT',
op=lambda x: x.mT,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=( # Lambda doesn't work in JIT test
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_adjoint),
OpInfo('mH',
op=lambda x: x.mH,
aliases=('adjoint',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=( # Lambda doesn't work in JIT test
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_adjoint),
OpInfo('tril',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('triu',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('kron',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_kron),
OpInfo('inner',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_inner,
),
OpInfo('tensordot',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tensordot,
skips=(
# Skip operator schema test because this is a functional and not an operator.
# Reference: https://github.com/pytorch/pytorch/issues/54574
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)
),
OpInfo('to_sparse',
op=lambda x, *args: x.to_sparse(*args),
sample_inputs_func=sample_inputs_to_sparse,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
backward_dtypes=floating_types(),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
skips=(
# NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend
DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'),
# TODO: FIXME: complex inputs requiring grad error in forward
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# JIT has issue when op is passed as lambda
# NotImplementedError: Cannot access storage of SparseTensorImpl
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Allowed exception: sparse tensors don't have strides
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)
),
OpInfo('logcumsumexp',
dtypes=floating_types_and(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(),
skips=(
# AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'),
),
sample_inputs_func=sample_inputs_logcumsumexp),
UnaryUfuncInfo('sigmoid',
aliases=('special.expit', 'nn.functional.sigmoid'),
ref=reference_sigmoid if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.complex64: 1e-1,
torch.bfloat16: 1e-2}),),
skips=(
# TODO: FIXME: sigmoid fails on complex inputs that require grad
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# Reference: https://github.com/pytorch/pytorch/issues/56012
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.complex64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.complex64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
                       # The alias nn.functional.sigmoid will produce (because of the warning string saved):
# "RuntimeError: Expected to not find "sigmoid" but found it"
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping')),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
assert_autodiffed=True,
# sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero
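                   # (for illustration: exp(-1j * pi * odd_number) == -1, so 1 + exp(-z) == 0 at those points)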
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 1j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)),
UnaryUfuncInfo('digamma',
ref=scipy.special.digamma if TEST_SCIPY else _NOTHING,
aliases=('special.psi', 'special.digamma',),
decorators=(precisionOverride({torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
safe_casts_outputs=True),
UnaryUfuncInfo('special.entr',
ref=scipy.special.entr if TEST_SCIPY else _NOTHING,
aten_name='special_entr',
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-1,
torch.bfloat16: 1e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16, torch.float16]),
),
supports_inplace_autograd=False,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_entr),
UnaryUfuncInfo('special.ndtri',
ref=scipy.special.ndtri if TEST_SCIPY else _NOTHING,
domain=(0, 1),
aten_name='special_ndtri',
dtypes=all_types_and(torch.bool),
safe_casts_outputs=True),
UnaryUfuncInfo('erf',
ref=scipy.special.erf if TEST_SCIPY else _NOTHING,
aliases=('special.erf', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_sparse=True,
supports_sparse_csr=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfc',
ref=scipy.special.erfc if TEST_SCIPY else _NOTHING,
aliases=('special.erfc', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfinv',
ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING,
aliases=('special.erfinv', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2,
torch.float32: 1e-4}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_sparse_csr=True,
domain=(-1, 1),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
)),
UnaryUfuncInfo('lgamma',
ref=reference_lgamma if TEST_SCIPY else _NOTHING,
aliases=('special.gammaln', ),
decorators=(precisionOverride({torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
),
safe_casts_outputs=True,
                   # lgamma has multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
OpInfo(
'logdet',
dtypes=floating_types(),
supports_out=False,
sample_inputs_func=sample_inputs_logdet,
decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma, skipCUDAIfRocm)),
    # `log_softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
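    # For illustration: for floating inputs, torch.log_softmax(t, dim=0) keeps the input dtype,
    # while torch.log_softmax(t, dim=0, dtype=torch.float64) computes in and returns float64.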
OpInfo(
'log_softmax',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=False,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_autodiffed=True),
OpInfo(
'log_softmax',
variant_test_name='dtype',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True),
UnaryUfuncInfo('logit',
ref=scipy.special.logit if TEST_SCIPY else _NOTHING,
domain=(0, 1),
aliases=('special.logit', ),
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_logit,
safe_casts_outputs=True),
OpInfo('where',
# Currently only the `input` is tested in gradcheck.
           # If we pass `condition` first, none of the inputs that support
           # autograd will be tested. Hence the following lambda.
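           # For illustration: op(x, cond, y) below is equivalent to torch.where(cond, x, y),
           # so the differentiable tensor `x` is the first positional argument.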
op=lambda self, condition, other: torch.where(condition, self, other),
sample_inputs_func=sample_inputs_where,
supports_out=False,
skips=(
# test does not work with passing lambda for op
# AssertionError: False is not true :
# Failure in testing nodes' autodifferentiation.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)),
OpInfo('nonzero',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_nonzero,
supports_autograd=False,
skips=(
# https://github.com/pytorch/pytorch/issues/67458
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# nonzero is not raising a warning when the out is resized
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
# `torch.norm` has multiple code paths depending on the value of `p`.
    # These paths have different dtype support. Also, JIT supports
    # most variants but not all of them. So we split the OpInfo entries
    # for `norm` based on the code paths and JIT support.
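    # For illustration: torch.norm(t) takes the default (Frobenius/2-norm) path,
    # torch.norm(t, p='nuc') the nuclear-norm path, and torch.norm(t, p=float('inf')) the inf-norm path.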
OpInfo('norm',
sample_inputs_func=sample_inputs_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16)),
OpInfo('norm',
variant_test_name='nuc',
sample_inputs_func=sample_inputs_norm_nuc,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types(),
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950
)
),
OpInfo('norm',
variant_test_name='fro',
sample_inputs_func=sample_inputs_norm_fro,
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# Arguments for call are not valid.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950
)),
OpInfo('norm',
variant_test_name='inf',
sample_inputs_func=sample_inputs_norm_inf,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# https://github.com/pytorch/pytorch/issues/67517
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
               # the following 2 tests failed intermittently
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', device_type='cpu', dtypes=(torch.complex128,)), # noqa: B950
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad', device_type='cpu', dtypes=(torch.complex128,)), # noqa: B950
)
),
OpInfo('t',
sample_inputs_func=sample_inputs_t,
supports_out=False,
supports_forward_ad=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
assert_autodiffed=True,),
UnaryUfuncInfo('special.erfcx',
ref=scipy.special.erfcx if TEST_SCIPY else _NOTHING,
aten_name='special_erfcx',
decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),),
dtypes=all_types_and(torch.bool),
safe_casts_outputs=True),
OpInfo(
"nn.functional.dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs),
ref=_NOTHING,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# Probably because we have used lambda for the op here
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
            # The inplace variant dispatches to the dropout kernel, while on CUDA
            # the op dispatches to _fused_dropout (under a few more conditions),
            # hence different values and this skip here
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),
            # On CUDA, the op is dispatched (under a few more conditions) to
            # _fused_dropout, which doesn't support forward AD
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', device_type='cuda'),),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_inputs_dropout,
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.feature_alpha_dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),
ref=_NOTHING,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# Probably because we have used lambda for the op here
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
supports_out=False,
sample_inputs_func=sample_inputs_dropout,
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.one_hot",
ref=reference_one_hot,
supports_out=False,
dtypes=_dispatch_dtypes((torch.int64,)),
sample_inputs_func=sample_inputs_one_hot,
),
OpInfo(
"nn.functional.embedding",
# We use lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
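        # For illustration: op(weight, idx) below is equivalent to
        # torch.nn.functional.embedding(idx, weight).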
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_embedding,
skips=(
# Does not work with lambda
# Raises : JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Reference: https://github.com/pytorch/pytorch/issues/67084
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),
),
supports_out=False,
),
OpInfo(
"nn.functional.embedding_bag",
# We use lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
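        # For illustration: op(weight, idx) below is equivalent to
        # torch.nn.functional.embedding_bag(idx, weight).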
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
# backward is not supported for mode `max` and dtype `bfloat16`
backward_dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_embedding_bag,
skips=(
# Does not work with lambda
# Raises : JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
supports_gradgrad=False,
),
OpInfo(
"nn.functional.softplus",
ref=reference_softplus,
sample_inputs_func=sample_inputs_softplus,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
),
OpInfo(
"linalg.tensorinv",
ref=np.linalg.tensorinv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_tensorinv,
supports_forward_ad=True,
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
),
OpInfo(
"linalg.tensorsolve",
ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims),
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_tensorsolve,
supports_forward_ad=True,
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
),
OpInfo(
"nn.functional.mse_loss",
ref=reference_mse_loss,
sample_inputs_func=sample_inputs_mse_loss,
supports_out=False,
dtypes=floating_types_and(torch.float16),
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.grid_sample",
ref=_NOTHING,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_out=False,
sample_inputs_func=sample_inputs_grid_sample,
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15),
OpInfo(
"argwhere",
ref=np.argwhere,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_argwhere,
),
ReductionOpInfo(
'all',
identity=True,
supports_multiple_dims=False,
supports_out=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.all),
skips=(
# FIXME: does not support passing keepdim without dim
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'any',
identity=False,
supports_multiple_dims=False,
supports_out=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.any),
skips=(
# FIXME: does not support passing keepdim without dim
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'amax',
nan_policy='propagate',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=reference_reduction_numpy(np.amax),
skips=(
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'amin',
nan_policy='propagate',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=reference_reduction_numpy(np.amin),
skips=(
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'argmax',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'argmin',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'count_nonzero',
identity=0,
supports_out=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_reduction_count_nonzero,
ref=reference_reduction_numpy(np.count_nonzero),
skips=(
# FIXME: count_nonzero does not accept keepdim kwarg
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
),
),
ReductionOpInfo(
'mean',
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
assert_autodiffed=True,
assert_jit_shape_analysis=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.mean),
skips=(
# FIXME: mean does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: mean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: mean does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
'nanmean',
nan_policy='omit',
assert_autodiffed=True,
promotes_int_to_float=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),
ref=reference_reduction_numpy(np.nanmean),
skips=(
# AssertionError: False is not true :
# Failure in testing nodes' autodifferentiation.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
            # FIXME: nanmean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
device_type='cuda', dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
'std',
nan_policy='propagate',
supports_out=False,
assert_autodiffed=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
ref=reference_std_var(np.std),
generate_args_kwargs=generate_std_var_kwargs,
skips=(
# FIXME: cannot specify keepdim without dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: dim=None not supported
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
            # TODO(@heitorschueroff) std returns float for complex types
# need to find a better way to model result dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_result_dtype'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'),
# NumPy is giving NaN for this
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'),
),
),
ReductionOpInfo(
'var',
nan_policy='propagate',
supports_out=False,
assert_autodiffed=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
ref=reference_std_var(np.var),
generate_args_kwargs=generate_std_var_kwargs,
skips=(
# FIXME: cannot specify keepdim without dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: dim=None not supported
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
            # TODO(@heitorschueroff) var returns float for complex types
# need to find a better way to model result dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_result_dtype'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'),
# NumPy is giving NaN for this
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'),
),
),
ReductionOpInfo(
'prod',
identity=1,
nan_policy='propagate',
supports_multiple_dims=False,
supports_out=False,
promotes_int_to_int64=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_prod,
ref=reference_reduction_numpy(np.prod),
skips=(
# FIXME: prod does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: prod reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: prod does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16, torch.complex64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.uint8, torch.float16, torch.complex64]),
),
),
ReductionOpInfo(
'sum',
identity=0,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.sum),
skips=(
# FIXME: sum does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: sum does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.float16]),
),
),
ReductionOpInfo(
'nansum',
identity=0,
nan_policy='omit',
supports_out=False,
promotes_int_to_int64=True,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.nansum),
skips=(
# FIXME: nansum does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: nansum reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: nansum does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.float16]),
),
),
ReductionOpInfo(
'_masked.sum',
ref=reference_reduction_numpy(np.sum),
method_variant=None,
identity=0,
nan_policy='propagate',
supports_out=False,
promotes_int_to_int64=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}),
'TestReductions', 'test_ref_small_input'),
],
sample_inputs_func=sample_inputs_masked_reduction
),
ReductionOpInfo(
'_masked.prod',
ref=reference_reduction_numpy(np.prod),
method_variant=None,
identity=1,
nan_policy='propagate',
supports_out=False,
promotes_int_to_int64=True,
# FIXME: "prod_cpu" not implemented for 'BFloat16'
# FIXME: "prod_cpu" not implemented for 'Half'
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_ref_duplicate_values'),
],
sample_inputs_func=sample_inputs_masked_reduction
),
ReductionOpInfo(
'_masked.amax',
nan_policy='propagate',
supports_out=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.amax),
skips=(
# FIXME: amax reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: Unknown builtin op: aten::iinfo
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.amin',
nan_policy='propagate',
supports_out=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.amin),
skips=(
            # FIXME: amin reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: Unknown builtin op: aten::iinfo
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.mean',
ref=reference_reduction_numpy(np.mean) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,
method_variant=None,
nan_policy='propagate',
supports_out=False,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
skips=(
            # FIXME: mean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
],
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.norm',
identity=0,
method_variant=None,
nan_policy='propagate',
supports_out=False,
promotes_int_to_float=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
            # FIXME: norm reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# torch.jit.frontend.NotSupportedError: Compiled functions
# can't take variable number of arguments or use
# keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_masked_norm,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.var',
ref=reference_reduction_numpy(np.var) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,
method_variant=None,
nan_policy='propagate',
supports_out=False,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
skips=(
            # FIXME: var reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestReductions', 'test_ref_small_input'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestMasked', 'test_reference_masked'),
],
sample_inputs_func=sample_inputs_masked_var,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
OpInfo(
'_masked.softmax',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
# torch.jit.frontend.NotSupportedError: Compiled
# functions can't take variable number of arguments or
# use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_out=False),
OpInfo(
'_masked.log_softmax',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
# torch.jit.frontend.NotSupportedError: Compiled
# functions can't take variable number of arguments or
# use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}),
'TestMasked', 'test_reference_masked'),
],
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_out=False),
OpInfo(
'_masked.softmin',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
# torch.jit.frontend.NotSupportedError: Compiled
# functions can't take variable number of arguments or
# use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_out=False),
OpInfo(
'_masked.normalize',
method_variant=None,
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_normalize,
skips=(
# torch.jit.frontend.NotSupportedError: Compiled
# functions can't take variable number of arguments or
# use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: "clamp_min_cpu" not implemented for 'Half'
DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_reference_masked',
device_type='cpu', dtypes=[torch.half]),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_out=False),
OpInfo(
"nn.functional.ctc_loss",
ref=_NOTHING,
dtypes=floating_types(),
supports_out=False,
sample_inputs_func=sample_inputs_ctc_loss,
skips=(
# https://github.com/pytorch/pytorch/issues/67462
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_grad",
dtypes=(torch.float64,),
),
# RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_gradgrad",
dtypes=(torch.float64,),
),
# RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
# Operation calls data_ptr() somewhere; needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
),
OpInfo(
"nn.functional.cosine_embedding_loss",
ref=_NOTHING,
dtypes=all_types_and(torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
sample_inputs_func=sample_inputs_cosine_embedding_loss,
),
OpInfo(
"nn.functional.nll_loss",
ref=_NOTHING,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_nll_loss,
skips=(
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0, i1):
# return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32))
# ~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.gaussian_nll_loss",
ref=_NOTHING,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_gaussian_nll_loss,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.hinge_embedding_loss",
ref=_NOTHING,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_hinge_embedding_loss,
),
OpInfo(
"nn.functional.huber_loss",
ref=_NOTHING,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_huber_loss,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
)
),
OpInfo(
"nn.functional.poisson_nll_loss",
ref=_NOTHING,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_poisson_nll_loss,
skips=(
# https://github.com/pytorch/pytorch/issues/67461
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_grad",
dtypes=(torch.float64,),
),
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_gradgrad",
dtypes=(torch.float64,),
),
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_forward_mode_AD",
dtypes=(torch.float64,),
),
),
),
OpInfo(
"argsort",
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_argsort,
supports_out=False,
supports_autograd=False,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
OpInfo(
"repeat_interleave",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_repeat_interleave,
supports_out=False,
supports_forward_ad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pairwise_distance",
ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: (
np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p)
),
sample_inputs_func=sample_inputs_pairwise_distance,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pixel_shuffle",
sample_inputs_func=sample_inputs_pixel_shuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pixel_unshuffle",
sample_inputs_func=sample_inputs_pixel_unshuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
)
]
# Common operator groupings
unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)]
binary_ufuncs = [op for op in op_db if isinstance(op, BinaryUfuncInfo)]
spectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)]
sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse]
sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr]
shape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)]
reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo)]
reference_filtered_ops = [op for op in reduction_ops if op.ref not in (_NOTHING, None)]
reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('_masked.')]
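# Illustrative only (not part of this file's API surface): test files typically consume
# these groupings by parametrizing a test template over them, e.g. with the `ops`
# decorator from torch.testing._internal.common_device_type:
#
#   @ops(unary_ufuncs)
#   def test_some_property(self, device, dtype, op):
#       for sample in op.sample_inputs(device, dtype):
#           result = op(sample.input, *sample.args, **sample.kwargs)
#           ...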
# TODO: review porting these to make_tensor
def index_variable(shape, max_indices, device=torch.device('cpu')):
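    # Returns a LongTensor of the given shape with uniformly random entries in [0, max_indices).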
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()
return index
def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):
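    # Returns a 2-D LongTensor of gather indices: each slice along `index_dim` is a random
    # permutation of range(max_indices) truncated to the other dimension's size; with
    # duplicate=True two slices along the batch dimension are made identical.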
assert len(shape) == 2
assert index_dim < 2
batch_dim = 1 - index_dim
index = torch.zeros(*shape, dtype=torch.long, device=device)
for i in range(shape[index_dim]):
index.select(index_dim, i).copy_(
torch.randperm(max_indices, device=device)[:shape[batch_dim]])
if duplicate:
index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.bool).bernoulli_()
def mask_not_all_zeros(shape):
assert len(shape) > 0
while True:
result = torch.randn(shape).gt(0)
if result.sum() > 0:
return result
# TODO: move all tri/tril/triu testing to tensor creation op test suite and remove
# these from here
def _compare_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
if row == 0 or col == 0:
        # have to handle this separately as tril and triu do not take
        # an empty matrix as input
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
else:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.tril(offset).nonzero().to(dtype).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.triu(offset).nonzero().to(dtype).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
def _compare_large_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \
.nonzero()[-100:-1, :].transpose(0, 1).to(device)
torch.cuda.empty_cache()
r = torch.tril_indices(
row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \
.nonzero()[-100:-1, :].transpose(0, 1).to(device)
torch.cuda.empty_cache()
r = torch.triu_indices(
row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
# (
# row
# col
# offset (optional)
# dtype (optional)
# )
tri_tests_args = [
(1, 1),
(3, 3),
(3, 3, 1),
(3, 3, 2),
(3, 3, 200),
(3, 3, -1),
(3, 3, -2),
(3, 3, -200),
(0, 3, 0),
(0, 3, 1),
(0, 3, -1),
(3, 0, 0),
(3, 0, 1),
(3, 0, -1),
(0, 0, 0),
(0, 0, 1),
(0, 0, -1),
(3, 6, 0),
(3, 6, 1),
(3, 6, 3),
(3, 6, 9),
(3, 6, -1),
(3, 6, -3),
(3, 6, -9),
(6, 3, 0),
(6, 3, 1),
(6, 3, 3),
(6, 3, 9),
(6, 3, -1),
(6, 3, -3),
(6, 3, -9),
(258, 253, 1, torch.float32),
(257, 258, 1, torch.float64),
(258, 258, 1, torch.short),
(3, 513, 1, torch.long),
(513, 3, 1, torch.int),
(513, 0, 1, torch.double),
(1024, 1024),
(1024, 1024, 500, torch.float32),
(1024, 1024, 1023),
(1024, 1024, -500),
(1023, 1025),
(1025, 1023, 1022),
(1024, 1024, -500),
(3, 2028),
(3, 2028, 1),
(3, 2028, -1),
(2028, 3),
(2028, 1),
(2028, 1, -1)
]
tri_large_tests_args: List[Tuple[int, ...]] = [
# Large test cases below are deliberately commented out to speed up CI
# tests and to avoid OOM error. When modifying implementations of
# tril_indices and triu_indices, please enable these tests and make sure
# they pass.
#
# (1, 268435455),
# (5000, 5000),
# (10000, 10000),
# (268435455, 1),
# (134217727, 2, 1),
# (2, 134217727, 1),
# (536870901, 1),
# (1, 536870901),
# (268435455, 2, 1),
# (2, 268435455, 1)
]
def run_additional_tri_tests(self, device):
x = torch.ones(
3, 3, dtype=torch.long, device=device, layout=torch.strided)
l = x.tril(0).nonzero().transpose(0, 1)
u = x.triu(0).nonzero().transpose(0, 1)
self.assertEqual(l, torch.tril_indices(3, 3, device=device))
self.assertEqual(
l, torch.tril_indices(3, 3, device=device, layout=torch.strided))
self.assertEqual(u, torch.triu_indices(3, 3, device=device))
self.assertEqual(
u, torch.triu_indices(3, 3, device=device, layout=torch.strided))
self.assertRaises(
RuntimeError,
lambda: torch.triu_indices(
1, 1, device=device, layout=torch.sparse_coo))
self.assertRaises(
RuntimeError,
lambda: torch.tril_indices(
1, 1, device=device, layout=torch.sparse_coo))
# TODO: move into common_utils.py or the test suite(s) that use this
def unpack_variables(args):
if isinstance(args, tuple):
return tuple(unpack_variables(elem) for elem in args)
else:
return args
class dont_convert(tuple):
pass
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
# TODO: move into common_utils.py or the test suite(s) that use this
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):
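    # Converts legacy call_args/call_kwargs specifications (shapes, tensors, callables, ...)
    # into concrete inputs on the requested device and dtype, conjugating complex tensors
    # and setting requires_grad on floating point and complex tensors as requested.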
if not isinstance(call_args, tuple):
call_args = (call_args,)
def map_arg(arg):
def maybe_non_contig(tensor):
return tensor if not non_contiguous else make_non_contiguous(tensor)
def conjugate(tensor):
return tensor.conj()
if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
return arg
elif isinstance(arg, tuple) and len(arg) == 0:
var = conjugate(torch.randn((), dtype=dtype, device=device))
var.requires_grad = requires_grad
return var
elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
# double check casting
elif isinstance(arg, non_differentiable):
if isinstance(arg.tensor, torch.Tensor):
if arg.tensor.dtype == torch.float:
return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))
if arg.tensor.dtype == torch.cfloat:
return conjugate(maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
elif isinstance(arg, torch.Tensor):
if arg.dtype == torch.float:
arg = arg.double()
if arg.dtype == torch.cfloat:
arg = arg.to(torch.cdouble)
if arg.is_complex() != dtype.is_complex:
raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ",
"which is not supported for now")
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
return v
elif callable(arg):
return map_arg(arg(dtype=dtype, device=device))
else:
return arg
args_out = tuple(map_arg(arg) for arg in call_args)
kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
return args_out, kwargs_out
from functools import wraps, partial
from itertools import product, chain
import itertools
import collections
import copy
from enum import Enum
import operator
import random
import unittest
import math
import torch
import numpy as np
from torch._six import inf
import collections.abc
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from torch.testing import make_non_contiguous, make_tensor
from torch.testing._internal.common_dtype import (
_dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types,
floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and,
all_types, double_types, empty_types
)
from torch.testing._internal.common_device_type import \
(onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, precisionOverride,
toleranceOverride, tol, has_cusolver)
from torch.testing._internal.common_cuda import CUDA11OrLater, SM53OrLater, SM60OrLater
from torch.testing._internal.common_utils import \
(is_iterable_of_tensors,
random_symmetric_matrix, random_symmetric_psd_matrix,
make_fullrank_matrices_with_distinct_singular_values,
random_symmetric_pd_matrix, make_symmetric_matrices,
make_symmetric_pd_matrices, random_square_matrix_of_rank,
random_fullrank_matrix_distinct_singular_value,
TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY,
torch_to_numpy_dtype_dict, TEST_WITH_ASAN,
GRADCHECK_NONDET_TOL, slowTest, noncontiguous_like)
import torch.testing._internal.opinfo_helper as opinfo_helper
from setuptools import distutils
has_scipy_fft = False
if TEST_SCIPY:
import scipy.special
try:
import scipy.fft
has_scipy_fft = True
except ModuleNotFoundError:
pass
# Reasonable testing sizes for dimensions
L = 20
M = 10
S = 5
# Unique value to distinguish default from anything else
_NOTHING = object()
class DecorateInfo(object):
"""Describes which test, or type of tests, should be wrapped in the given
decorators when testing an operator. Any test that matches all provided
arguments will be decorated. The decorators will only be applied if the
active_if argument is True."""
__slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']
def __init__(self, decorators, cls_name=None, test_name=None, *,
device_type=None, dtypes=None, active_if=True):
self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]
self.cls_name = cls_name
self.test_name = test_name
self.device_type = device_type
self.dtypes = dtypes
self.active_if = active_if
def is_active(self, cls_name, test_name, device_type, dtype):
return (
self.active_if and
(self.cls_name is None or self.cls_name == cls_name) and
(self.test_name is None or self.test_name == test_name) and
(self.device_type is None or self.device_type == device_type) and
(self.dtypes is None or dtype in self.dtypes)
)
class SampleInput(object):
"""Represents sample inputs to a function."""
__slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']
def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=""):
# input is the first input to the op and must be either a Tensor or TensorList (Sequence[Tensor]).
# This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
        # ops with TensorList inputs do not support method or inplace variants.
assert isinstance(input, torch.Tensor) or is_iterable_of_tensors(input)
self.input: Union[torch.Tensor, Sequence[torch.Tensor]] = input
self.args = args
self.kwargs = kwargs if kwargs is not None else {}
self.output_process_fn_grad = output_process_fn_grad
self.name = name
# Specifies if `self.input` is broadcasted or not,
# given that the operator supports broadcasting.
# This field is used to verify the behavior for inplace variant.
#
# If a SampleInput is marked with `broadcasts_input=True`,
        # it is verified that we get a `RuntimeError` with this sample and
        # the inplace variant. Inplace grad{grad} tests are also skipped
        # for such inputs (as they would error out otherwise).
self.broadcasts_input = broadcasts_input
def _repr_helper(self, formatter):
# Helper function to return the details of the SampleInput as `str`
        # It consolidates all the fields of SampleInput and allows formatting
        # the fields like `input`, `args`, etc. with a `formatter` callable
        # to customize the representation.
# Look at `summary` method for example.
arguments = [
f'input={formatter(self.input)}',
f'args={formatter(self.args)}',
f'kwargs={formatter(self.kwargs)}',
f'output_process_fn_grad={self.output_process_fn_grad}',
f'broadcasts_input={self.broadcasts_input}',
f'name={repr(self.name)}']
return f'SampleInput({", ".join(a for a in arguments if a is not None)})'
def __repr__(self):
return self._repr_helper(lambda x: x)
def summary(self):
# Returns the SampleInput details in a more
# friendly format.
# It formats `Tensor` and `TensorList`
# in a more condensed representation.
def formatter(arg):
# Format any instance of `Tensor` (standalone, in list, or in dict)
# by Tensor[TensorShape]
# Eg. Tensor with shape (3, 4) is formatted as Tensor[3, 4]
if isinstance(arg, torch.Tensor):
shape = str(tuple(arg.shape)).replace('(', '').replace(')', '')
return f"Tensor[{shape}]"
elif isinstance(arg, dict):
return {k: formatter(v) for k, v in arg.items()}
elif is_iterable_of_tensors(arg):
return "TensorList[" + ", ".join(map(formatter, arg)) + "]"
elif isinstance(arg, (list, tuple)): # Handle list, tuple
return "(" + ",".join(map(formatter, arg)) + ")"
return repr(arg)
return self._repr_helper(formatter)
# Applies the transform f(t) -> t to each tensor and dtype in the SampleInput
def transform(self, f):
def tt(t):
def _tt(t):
return f(t)
if isinstance(t, torch.Tensor):
return _tt(t)
elif isinstance(t, torch.dtype):
return _tt(t)
elif isinstance(t, list):
return list(map(tt, t))
elif isinstance(t, tuple):
return tuple(map(tt, t))
elif isinstance(t, dict):
return {k: tt(v) for k, v in t.items()}
else:
return t
sample_tt_input, tt_args, tt_kwargs = tt(self.input), tt(self.args), tt(self.kwargs)
return (sample_tt_input, tt_args, tt_kwargs)
# Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)
# Converts tensors to ndarrays by calling .detach().cpu().numpy() on them
# Converts dtypes by remapping them using torch_to_numpy_dtype_dict
def numpy(self):
def to_numpy(t):
if isinstance(t, torch.Tensor):
return t.detach().cpu().numpy()
elif isinstance(t, torch.dtype):
return torch_to_numpy_dtype_dict[t]
return self.transform(to_numpy)
def noncontiguous(self):
def to_noncontiguous(t):
if isinstance(t, torch.Tensor):
return noncontiguous_like(t)
if isinstance(t, torch.dtype):
return t
return self.transform(to_noncontiguous)
class ErrorInput(object):
"""
A SampleInput that will cause the operation to throw an error plus information
about the resulting error.
"""
__slots__ = ['sample_input', 'error_type', 'error_regex']
def __init__(self, sample_input, *, error_type, error_regex):
self.sample_input = sample_input
self.error_type = error_type
self.error_regex = error_regex
class AliasInfo(object):
"""Class holds alias information. For example, torch.abs ->
torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
"""
def __init__(self, alias_name):
self.name = alias_name
self.op = _getattr_qual(torch, alias_name)
self.method_variant = getattr(torch.Tensor, alias_name, None)
self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)
def __call__(self, *args, **kwargs):
return self.op(*args, **kwargs)
# Extension of getattr to support qualified names
# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
try:
for path in name.split('.'):
obj = getattr(obj, path)
return obj
except AttributeError:
if default is not _NOTHING:
return default
else:
raise
# test if a tensor is close to an integer
def close_to_int(x, eps=0.1):
if x.is_complex():
y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x))))
else:
y = torch.abs(torch.frac(x))
return (y < eps) | (y > (1 - eps))
NumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_val'])
# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# The majority of this note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
# See also: PyTorch's GitHub wiki on running and writing tests
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do three things:
#
# 1) to allow systematic testing over all PyTorch's operators
#   2) to simplify operator testing by autogenerating many tests
# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# All these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests that could be automatically generated
# still have to be written manually.
#
# It's helpful to understand that OpInfos are both about test simplification and
# modularity. PyTorch is a complicated framework with many interrelated systems,
# too many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo to understand how to test its forward
# mode AD or NNC support, that testing is typically handled automatically just
# by defining an OpInfo.
#
# It's often surprising to OpInfo writers that just implementing an OpInfo
# typically can't verify an operator is actually implemented correctly:
#
# "If an OpInfo doesn't validate my op works as expected, what's the point
# of it?"
#
# But that is exactly the point: OpInfos are intended to let you focus on testing
# the operator logic you're familiar with instead of having to write tests for
# how the operator interacts with each of PyTorch's many systems.
#
# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
# validate your op works as expected, but that's only in special
# cases. See below for details.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
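#
# A hypothetical entry illustrating the three categories (the operator name,
# dtypes, and sample inputs function below are placeholders, not a real op):
#
#   OpInfo('foo',
#          dtypes=floating_types(),                        # 1) metadata
#          supports_out=False,
#          skips=(                                         # 2) test directives
#              DecorateInfo(unittest.skip("Skipped!"), 'TestJit',
#                           'test_variant_consistency_jit'),
#          ),
#          sample_inputs_func=sample_inputs_foo),          # 3) sample inputs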
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return a list of SampleInputs (see the class description above).
# Each SampleInput defines an "input", "args", "kwargs",
# an "output_process_fn_grad" function, the "broadcasts_input" bool and
# a "name".
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
#
# "broadcasts_input" is a bool indicated if the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
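#
# A minimal sketch of such a function, using the make_tensor helper imported
# above (the operator and its extra arguments here are placeholders):
#
#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#       make_arg = partial(make_tensor, device=device, dtype=dtype,
#                          requires_grad=requires_grad)
#       return [
#           SampleInput(make_arg((S, S))),
#           SampleInput(make_arg((S, S)), args=(1,), kwargs={'keepdim': True}),
#       ]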
#
# THE (OPTIONAL) ERROR INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# OpInfos may optionally specify "error inputs" through an error function. If
# specified, test_errors in test_ops.py will call the op with these inputs
# and validate that the desired error is thrown.
#
# Error inputs automate a common testing pattern where multiple inputs are
# passed to an operation and the errors they throw are reviewed. Tests
# written in this style should be ported to the new OpInfo pattern.
#
# Error inputs are specified using the ErrorInput class, which contains
# a SampleInput (see above) and data about the expected error.
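#
# A hypothetical error inputs function might look like the following (the
# operator and the expected error message are placeholders):
#
#   def error_inputs_foo(op_info, device, **kwargs):
#       si = SampleInput(make_tensor((S,), device=device, dtype=torch.float32))
#       return [ErrorInput(si, error_type=RuntimeError,
#                          error_regex="expected a 2-D tensor")]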
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
# in test_ops.py, but some system-specific tests are defined in those
# systems' test files, and subclass-specific tests are defined in the test
# file that corresponds to that subclass (see below).
# Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
# test_ops.py:
#
# - that its supported dtypes are specified correctly
# - that the operation produces the same results when called with noncontiguous inputs
# - that it supports the out= argument properly (if it allows out=),
# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# - that it works with the conjugate view bit properly
# - that its function, method, and inplace variants perform the same operation
# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
# do the same thing).
# - that its inplace variant preserves the input's storage
# - that its gradient formula is implemented correctly, and that it supports
#     gradgrad, complex grad and gradgrad, and forward mode AD properly for
# the op's function and inplace variants (method variants are skipped
# to reduce test time).
# - that the operation performs the same operation when traced or scripted
# using the jit
# - that the operation is autodifferentiated by the jit as expected
# - that the operator's aliases, if any, perform the same operation and that
# the jit understands the alias
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
# and test_fx.py. These tests validate that operators work with NNC and FX
# as expected.
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# very unique class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
# - torch
# - torch.fft
# - torch.linalg,
# - torch.special
# - torch.nn.functional
# then you should typically add an OpInfo for it.
#
# As mentioned a couple times above, implementing an OpInfo is not
# usually sufficient testing (unless the operator is a unary elementwise
# operator). The OpInfo will only test the properties described in the
# "WHAT'S TESTED" section. It DOES NOT verify that the operator is
# implemented correctly.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
# be consumed by a variety of systems it can be hard to understand how to
# deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
# function must be defined, and the operator's dtypes must be specified.
# Once that's done you should run the operator's tests in test_ops.py
# (these can be filtered using the "-k" argument in pytest). Tests that
# fail should provide an error message that describes what to change about
# your OpInfo. You don't need to worry about changing an OpInfo's default
# values unless a test yells at you.
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
# your test provides a clear error message describing what to do when it
# fails. You should not assume the OpInfo implementer is familiar with your
# system.
#
# If you see a confusing error message while developing an OpInfo then please
# file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating, but
# it's probably necessary as long as OpInfos don't require
# learning about all the systems that consume them. One thing that can help
# is the get_supported_dtypes() function defined in opinfo_helper.py. This
# function can be used to programmatically specify the dtypes an operator
# supports, and is especially useful if writing an OpInfo on a machine
# without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
# the great majority of PyTorch's (public) operators.
#
# Classes and methods for the operator database
class OpInfo(object):
"""Operator information and helper functions for acquiring it."""
def __init__(self,
name, # the string name of the function
*,
ref=None, # An optional reference function that accepts ndarrays (AKA "NumPy arrays").
# If given, the op will be compared with its reference on each of its sample inputs.
# the following metadata describes the operator, its variants,
# and its aliases, if any
aliases=None, # iterable of aliases, e.g. ("absolute",) for torch.abs
variant_test_name='', # additional string to include in the test name
# this is useful when an op needs multiple OpInfos,
# like divide does, often because it's really several
# different ops behind the scenes
op=None, # the function variant of the operation, populated as torch.<name> if None
method_variant=_NOTHING, # explicitly specifies the method variant of the operator
# if _NOTHING (default), the method variant will be autopopulated
# if None, then the OpInfo specifies no method variant
inplace_variant=_NOTHING, # explicitly specifies the inplace variant of the operator
                                            # if _NOTHING (default), the inplace variant will be autopopulated
                                            # if None, then the OpInfo specifies no inplace variant
# the following metadata are test directives for skipping or
# modifying tests
skips=tuple(), # information about which tests to skip
decorators=tuple(), # decorators to apply to generated tests
# the following are pointers to functions to generate certain classes
# of inputs
sample_inputs_func=None, # function to generate sample inputs
error_inputs_func=None, # function to generate inputs that will throw errors
# the following metadata relates to dtype support and is tested for correctness in test_ops.py
dtypes, # dtypes this function works with on the CPU,
# inherited by other device types that don't specify their own dtypes
# the following dtypesIf... options override the dtypes value
# on their respective device types
dtypesIfCPU=None, # dtypes this function is expected to work with on the CPU,
# typically unnecessary since it's (now) redundant with the dtypes kwarg above
dtypesIfCUDA=None, # dtypes this function is expected to work with on CUDA
dtypesIfROCM=None, # dtypes this function is expected to work with on ROCM
backward_dtypes=None, # backward dtypes this function is expected to work with
backward_dtypesIfCPU=None, # backward dtypes this function is expected to work with on CPU
backward_dtypesIfCUDA=None, # backward dtypes this function is expected to work with on CUDA
backward_dtypesIfROCM=None, # backward dtypes this function is expected to work with on ROCM
default_test_dtypes=None, # dtypes to test with by default. Tests are instantiated with
# these dtypes for the op unless otherwise specified.
# This is helpful in reducing the test matrix.
# the following metadata describes the operators out= support
supports_out=True, # whether the op supports the out kwarg
# defaults to True, if the op does not allow the out kwarg or
# supports it incorrectly then test_out in test_ops.py should fail
safe_casts_outputs=False, # whether op allows safe casting when writing to out arguments
# the following metadata relates to autograd support
supports_autograd=True, # whether the operation supports backward mode AD
# if true, gradient correctness is tested in test_ops.py
# using the op's sample inputs
supports_gradgrad=None, # whether the op supports second order gradients
# if true, gradgrad correctness is tested in test_ops.py
# defaults to support_autograd's value
supports_inplace_autograd=None, # whether the operation supports inplace autograd
# if true, tested in test_ops.py
# defaults to supports_autograd's value
supports_forward_ad=False, # Whether the operation support forward mode AD
# If the value is True, we check that the gradients are correct
# If the value is False, we test that forward grad is not implemented
gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs), # wrapper function for gradcheck
check_batched_grad=None, # whether to check batched grad when doing gradcheck
# defaults to support_autograd's value
check_batched_gradgrad=None, # whether to check batched grad grad when doing gradgradcheck
# default's to support_gradgrad's value
check_batched_forward_grad=None, # whether to check batched forward grad when doing gradcheck
# defaults to the value of `supports_forward_ad and check_batched_grad`
gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck
                 gradcheck_fast_mode=None,  # Whether to use the fast implementation for gradcheck/gradgradcheck.
# When set to None, defers to the default value provided by the wrapper
# function around gradcheck (testing._internal.common_utils.gradcheck)
# the following metadata relates to JIT support and is tested for correctness in test_ops.py
aten_name=None, # name of the corresponding aten:: operator
assert_autodiffed=False, # if a op's aten::node is expected to be symbolically autodiffed
autodiff_nonfusible_nodes=None, # a list of strings with node names that are expected to be in a
# DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
# default is populated to be ['aten::(name of Python operator)']
autodiff_fusible_nodes=None, # a list of strings with node names that are expected to be in FusionGroups
# inside of DifferentiableGraphs when this operation is autodiffed.
# Ex: ['aten::add', 'aten::mm'], defaults to an empty list
# Note: currently no ops use fusible nodes
# the following metadata relates to sparse support and is used in test_sparse.py
supports_sparse=False, # whether the op supports sparse inputs
                 supports_scripting=True,  # if False, scripting tests are skipped and only tracing tests are run
# the following metadata relates to sparse csr support and is used in test_sparse_csr.py
supports_sparse_csr=False, # whether the op supports sparse csr inputs
# the following metadata relates to complex support and is checked in test_ops.py
test_conjugated_samples=True,
test_neg_view=True,
assert_jit_shape_analysis=False, # assert that jit shape analysis fully propagates shape
):
dtypes_args = (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM)
# Validates the dtypes are generated from the dispatch-related functions
for dtype_list in dtypes_args:
assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))
self.name = name
self.ref = ref
self.aten_name = aten_name if aten_name is not None else name
self.variant_test_name = variant_test_name
# Attribute to verify dynamic_dtypes are used.
self.dynamic_dtypes = any(map(lambda dtypes: isinstance(
dtypes, opinfo_helper._dynamic_dispatch_dtypes), dtypes_args))
if self.dynamic_dtypes:
            # Make sure `dtypesIfCUDA` is dynamic, if dynamic dispatch is used for CPU
# This is because, below we set dtypesIfCUDA to dtypes if they are None.
assert isinstance(dtypesIfCUDA, opinfo_helper._dynamic_dispatch_dtypes), \
(f"To use dynamic dypes for operator {name}, "
"acquire the dtypes dynamically for argument `dtypesIfCUDA`."
"This is to ensure that CUDA dtypes are acquired correctly as they"
"differ from CPU dtypes occasionally")
self.dtypes = set(dtypes)
# NOTE: backward dtypes must be acquired before forward dtypes
# since they fallback to explicit (not implicit!) specifications of
# forward dtypes
self.backward_dtypes = set(backward_dtypes) if backward_dtypes is not None else self.dtypes
self.backward_dtypesIfCPU = set(backward_dtypesIfCPU) if backward_dtypesIfCPU is not None else (
backward_dtypes if backward_dtypes is not None
else dtypesIfCPU if dtypesIfCPU is not None
else dtypes)
self.backward_dtypesIfCUDA = set(backward_dtypesIfCUDA) if backward_dtypesIfCUDA is not None else (
backward_dtypes if backward_dtypes is not None
else dtypesIfCUDA if dtypesIfCUDA is not None
else dtypes)
self.backward_dtypesIfROCM = set(backward_dtypesIfROCM) if backward_dtypesIfROCM is not None else (
backward_dtypesIfCUDA if backward_dtypesIfCUDA is not None
else backward_dtypes if backward_dtypes is not None
else dtypesIfROCM if dtypesIfROCM is not None
else dtypesIfCUDA if dtypesIfCUDA is not None
else dtypes)
self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes
self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes
self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypesIfCUDA
self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None
# NOTE: if the op is unspecified it is assumed to be under the torch namespace
self.op = op if op else _getattr_qual(torch, self.name)
method_variant = getattr(torch.Tensor, name, None) if method_variant is _NOTHING else method_variant
# attributes like real, imag are not callable
self.method_variant = method_variant if callable(method_variant) else None
inplace_name = name + "_"
self.inplace_variant = getattr(torch.Tensor, inplace_name, None) \
if inplace_variant is _NOTHING else inplace_variant
self.operator_variant = getattr(operator, name, None)
self.supports_out = supports_out
self.safe_casts_outputs = safe_casts_outputs
self.decorators = (*decorators, *skips)
self.sample_inputs_func = sample_inputs_func
self.error_inputs_func = error_inputs_func
self.assert_autodiffed = assert_autodiffed
self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []
if autodiff_nonfusible_nodes is None:
self.autodiff_nonfusible_nodes = ['aten::' + self.name]
else:
self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes
# Autograd support
# Autograd flags that don't depend on backward AD
self.supports_autograd = supports_autograd
self.supports_forward_ad = supports_forward_ad
self.gradcheck_fast_mode = gradcheck_fast_mode
self.gradcheck_wrapper = gradcheck_wrapper
self.gradcheck_nondet_tol = gradcheck_nondet_tol
# Autograd flags that depend on backward AD only
# - If setting has been explicitly set, raise error if inconsistent
if supports_gradgrad is None:
supports_gradgrad = supports_autograd
else:
assert not (supports_gradgrad and not supports_autograd), (
"supports_gradgrad refines the part of autograd is supported, so it should "
"not be set if supports_autograd is False")
if check_batched_grad is None:
check_batched_grad = supports_autograd or supports_forward_ad
else:
assert not (check_batched_grad and not (supports_autograd or supports_forward_ad)), (
"check_batched_grad refines the part of autograd that will be checked (by gradcheck), so "
"it should not be set if supports_autograd is False")
if check_batched_gradgrad is None:
check_batched_gradgrad = supports_gradgrad
else:
assert not (check_batched_gradgrad and not supports_gradgrad), (
"check_batched_gradgrad refines the part of autograd that will be checked (by "
"gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd "
"is False.")
if check_batched_forward_grad is None:
check_batched_forward_grad = supports_forward_ad
else:
assert not (check_batched_forward_grad and not supports_forward_ad), (
"check_batched_forward_grad should only be used when supports_forward_ad "
"is True. It is used to disable the test in the specific cases "
"where the op supports both forward ad but fails to compute "
"batched forward grad.")
self.supports_gradgrad = supports_gradgrad
self.check_batched_grad = check_batched_grad
self.check_batched_gradgrad = check_batched_gradgrad
self.check_batched_forward_grad = check_batched_forward_grad
# Autograd flags that depend on both forward AD and backward AD
if supports_inplace_autograd is None:
supports_inplace_autograd = supports_autograd or supports_forward_ad
else:
assert not (supports_inplace_autograd and not supports_autograd and not supports_forward_ad), (
"supports_inplace_autograd refines the part of autograd that is supported, so "
"it should not be set if both supports_autograd and supports_forward_ad are False")
self.supports_inplace_autograd = supports_inplace_autograd
self.supports_sparse = supports_sparse
self.supports_sparse_csr = supports_sparse_csr
self.aliases = ()
if aliases is not None:
self.aliases = tuple(AliasInfo(a) for a in aliases) # type: ignore[assignment]
self.supports_scripting = supports_scripting
self.assert_jit_shape_analysis = assert_jit_shape_analysis
self.test_conjugated_samples = test_conjugated_samples
self.test_neg_view = test_neg_view
def __call__(self, *args, **kwargs):
"""Calls the function variant of the operator."""
return self.op(*args, **kwargs)
def get_op(self):
"""Returns the function variant of the operator, torch.<op_name>."""
return self.op
def get_method(self):
"""Returns the method variant of the operator, torch.Tensor.<op_name>.
Returns None if the operator has no method variant.
"""
return self.method_variant
def get_inplace(self):
"""Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
Returns None if the operator has no inplace variant.
"""
return self.inplace_variant
def get_operator_variant(self):
"""Returns operator variant of the operator, e.g. operator.neg
Returns None if the operator has no operator variant.
"""
return self.operator_variant
def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs but with the tensor input or first
tensor in a sequence input conjugated.
"""
# TODO: Remove the try/except once all operators have sample_inputs_func with
# **kwargs in their signature.
try:
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
except TypeError:
samples = self.sample_inputs_func(self, device, dtype, requires_grad)
conj_samples = list(samples)
def conjugate(tensor):
_requires_grad = tensor.requires_grad
with torch.no_grad():
tensor = tensor.conj()
return tensor.requires_grad_(_requires_grad)
for i in range(len(samples)):
sample = conj_samples[i]
# Note: it is assumed that the input here is either a tensor or tensorlist
if isinstance(sample.input, torch.Tensor):
sample.input = conjugate(sample.input)
else:
with torch.no_grad():
sample.input[0] = conjugate(sample.input[0])
return tuple(conj_samples)
def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs.
These samples should be sufficient to test the function works correctly
with autograd, TorchScript, etc.
"""
# TODO: Remove the try/except once all operators have sample_inputs_func with
# **kwargs in their signature.
try:
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
except TypeError:
samples = self.sample_inputs_func(self, device, dtype, requires_grad)
if kwargs.get('include_conjugated_inputs'):
conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)
samples_list = list(samples)
samples_list.extend(conj_samples)
samples = tuple(samples_list)
return samples
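# Illustrative usage sketch (not part of this module's logic): a typical test
# consumes the generated SampleInputs by calling the op variant under test on
# each one; the device, dtype and `op` below are arbitrary placeholders.
#
#   for sample in op.sample_inputs('cpu', torch.float32, requires_grad=True):
#       result = op(sample.input, *sample.args, **sample.kwargs)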
def error_inputs(self, device, **kwargs):
"""
Returns an iterable of ErrorInputs.
"""
return self.error_inputs_func(self, device, **kwargs)
def get_decorators(self, test_class, test_name, device, dtype):
'''Returns the decorators targeting the given test.'''
result = []
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(test_class, test_name, device, dtype):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
def supported_dtypes(self, device_type):
if device_type == 'cpu':
return self.dtypesIfCPU
if device_type == 'cuda':
return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA
else:
return self.dtypes
def supported_backward_dtypes(self, device_type):
if not self.supports_autograd:
return set()
backward_dtypes = None
if device_type == 'cpu':
backward_dtypes = self.backward_dtypesIfCPU
elif device_type == 'cuda':
backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA
else:
backward_dtypes = self.backward_dtypes
allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16)
return set(allowed_backward_dtypes).intersection(backward_dtypes)
def supports_complex_autograd(self, device_type):
if device_type == 'cpu':
return any(dtype.is_complex for dtype in self.backward_dtypesIfCPU)
if device_type == 'cuda':
if TEST_WITH_ROCM:
return any(dtype.is_complex for dtype in self.backward_dtypesIfROCM)
else:
return any(dtype.is_complex for dtype in self.backward_dtypesIfCUDA)
else:
return any(dtype.is_complex for dtype in self.backward_dtypes)
def supports_dtype(self, dtype, device_type):
return dtype in self.supported_dtypes(device_type)
def default_test_dtypes(self, device_type):
"""Returns the default dtypes used to test this operator on the device.
Equal to the operator's default_test_dtypes filtered to remove dtypes
not supported by the device.
"""
supported = self.supported_dtypes(device_type)
return (supported if self._default_test_dtypes is None
else supported.intersection(self._default_test_dtypes))
@property
def formatted_name(self):
"""Returns a formatted full name for this OpInfo that can be used in test names."""
variant = '_' + self.variant_test_name.replace('.', '_') if self.variant_test_name else ''
return '{}{}'.format(self.name.replace('.', '_'), variant)
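# For example (illustrative values): an OpInfo with name 'linalg.norm' and
# variant_test_name 'subgradients_at_zero' gets the formatted name
# 'linalg_norm_subgradients_at_zero'.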
def _generate_reduction_inputs(device, dtype, requires_grad):
"""Generates input tensors for testing reduction operators"""
yield make_tensor([], device, dtype, requires_grad=requires_grad)
yield make_tensor([2], device, dtype, requires_grad=requires_grad)
yield make_tensor([3, 5], device, dtype, requires_grad=requires_grad)
yield make_tensor([3, 2, 1, 2], device, dtype, requires_grad=requires_grad)
def _generate_reduction_kwargs(ndim, supports_multiple_dims=True):
"""Generates a subset of all valid dim and keepdim kwargs given ndim that
is appropriate for testing reduction operators.
"""
# Test default dim and keepdim
yield {}
# Test reducing inner and outer most dimensions
yield {'dim': 0, 'keepdim': True}
yield {'dim': -1, 'keepdim': False}
# Test reducing middle dimension
if ndim > 2:
yield {'dim': ndim // 2, 'keepdim': True}
if supports_multiple_dims:
# Test reducing all dimensions
yield {'dim': tuple(range(ndim)), 'keepdim': False}
# Test reducing both first and last dimensions
if ndim > 1:
yield {'dim': (0, -1), 'keepdim': True}
# Test reducing every other dimension starting with the second
if ndim > 3:
yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}
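# For example, for ndim=3 with supports_multiple_dims=True the generator above
# yields, in order: {}, {'dim': 0, 'keepdim': True}, {'dim': -1, 'keepdim': False},
# {'dim': 1, 'keepdim': True}, {'dim': (0, 1, 2), 'keepdim': False} and
# {'dim': (0, -1), 'keepdim': True}.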
def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for reduction operators."""
# TODO(@heitorschueroff) Once all reduction operators are using
# ReductionOpInfo use op_info.supports_multiple_dims directly.
supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True)
# TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
# use op_info.generate_args_kwargs directly.
generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {}))
inputs: List[SampleInput] = []
for t in _generate_reduction_inputs(device, dtype, requires_grad):
for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):
kwargs.update(reduction_kwargs)
inputs.append(SampleInput(
t.detach().clone().requires_grad_(requires_grad),
args=args,
kwargs=kwargs))
return inputs
def _generate_masked_op_mask(input_shape, device, **kwargs):
yield None
yield make_tensor(input_shape, device, torch.bool, requires_grad=False)
if len(input_shape) > 2:
# broadcast last mask dimension:
yield make_tensor(input_shape[:-1] + (1,), device, torch.bool, requires_grad=False)
# broadcast middle mask dimension:
yield make_tensor(input_shape[:1] + (1,) + input_shape[2:], device, torch.bool, requires_grad=False)
# broadcast first mask dimension:
yield make_tensor((1,) + input_shape[1:], device, torch.bool, requires_grad=False)
# mask.ndim < input.ndim
yield make_tensor(input_shape[1:], device, torch.bool, requires_grad=False)
# mask.ndim == 1
yield make_tensor(input_shape[-1:], device, torch.bool, requires_grad=False)
# masks that require broadcasting of inputs (mask.ndim >
# input.ndim) are not supported; however, we may
# reconsider this if there is demand for this kind of
# degenerate case.
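# For example, for input_shape=(3, 4, 5) the generator above yields masks of
# shapes (in order): None, (3, 4, 5), (3, 4, 1), (3, 1, 5), (1, 4, 5), (4, 5)
# and (5,), all of dtype torch.bool except the leading None.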
def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked reduction operators.
A masked reduction operator is a reduction operator with a trailing
optional mask argument. A mask is a bool tensor with the same
shape as the input or a shape that is broadcastable to the input shape.
"""
inputs: List[SampleInput] = []
kwargs['supports_multiple_dims'] = op_info.supports_multiple_dims
for sample_input in sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):
sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
if (not requires_grad and dtype.is_floating_point and
sample_input.input.ndim == 2 and mask is not None and
mask.shape == sample_input.input.shape):
for v in [torch.inf, -torch.inf, torch.nan]:
t = sample_input.input.clone()
t.diagonal()[:] = v
inputs.append(SampleInput(t.detach().requires_grad_(requires_grad),
args=sample_input_args,
kwargs=sample_input_kwargs))
return inputs
def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked norm.
"""
inputs: List[SampleInput] = []
for ord in [2.0, 1, float('inf'), float('-inf'), 0]:
for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
def sample_inputs_masked_var(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked var.
"""
inputs: List[SampleInput] = []
for unbiased in [False, True]:
for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
if sample_input.args:
dim = sample_input.args[0]
sample_input_args = sample_input.args[:1] + (unbiased,) + sample_input.args[1:]
sample_input_kwargs = sample_input.kwargs.copy()
else:
dim = sample_input.kwargs.get('dim')
sample_input_args = sample_input.args
sample_input_kwargs = dict(sample_input.kwargs, unbiased=unbiased)
if requires_grad:
inmask = torch._masked._input_mask(sample_input.input, *sample_input_args, **sample_input_kwargs)
orig_count = torch._masked.sum(inmask.new_ones(sample_input.input.shape, dtype=torch.int64),
dim, keepdim=True, mask=inmask)
if orig_count.min() <= int(unbiased):
# Skip samples that lead to singularities in the var
# computation, resulting in nan values in both var and the
# autograd output, which test_grad_fn cannot handle
# correctly.
continue
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
class ReductionOpInfo(OpInfo):
"""Reduction operator information.
An operator is a reduction operator if it reduces one or more dimensions of
the input tensor to a single value. Reduction operators must implement the
following signature:
- `op(input, *args, dim=None, keepdim=False, **kwargs) -> Tensor`
ReductionOpInfo tests that reduction operators implement a consistent API.
Optional features such as reducing over multiple dimensions are captured in
the optional keyword parameters of the ReductionOpInfo constructor.
If a reduction operator does not yet implement the full required API of
reduction operators, this should be documented by skipping the failing
tests rather than adding optional parameters to ReductionOpInfo.
NOTE
The API for reduction operators has not yet been finalized and some
requirements may change.
See tests in test/test_reductions.py
"""
def __init__(
self, name, *,
# The identity value for the operator if it has one.
identity: Optional[Any] = None,
# The nan policy for the operator if it implements one.
# - propagate: NaN values are propagated to the output
# - omit: NaN values are discarded during the reduction
nan_policy: Optional[str] = None,
# Whether the operator supports reducing multiple dimensions.
supports_multiple_dims: bool = True,
# Whether the operator promotes integral to floating point dtypes.
promotes_int_to_float: bool = False,
# Whether the operator promotes all integral dtypes to int64.
promotes_int_to_int64: bool = False,
# If a specific dtype is given, then the operator always returns that
# dtype irrespective of the input dtype. If None, the operator returns
# the dtype according to the type promotion rules above.
result_dtype: Optional[torch.dtype] = None,
# ReductionOpInfo tests generate their own input, dim and keepdim
# arguments and call this function to generate tuples of extra args and
# kwargs to use when calling the op. This is required for operators that
# have other required parameters besides the input tensor.
generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}),
# Options from the OpInfo base class
**kwargs,
):
assert nan_policy in (None, 'propagate', 'omit')
# These are mutually exclusive options
assert not (result_dtype and promotes_int_to_float)
assert not (result_dtype and promotes_int_to_int64)
assert not (promotes_int_to_float and promotes_int_to_int64)
# Default sample_inputs_func for ReductionOpInfo which augments sample
# inputs from sample_inputs_reduction with the args and kwargs from
# generate_args_kwargs. This is only used if sample_inputs_func is None.
def sample_inputs_func(*args, **kwargs):
kwargs['supports_multiple_dims'] = supports_multiple_dims
kwargs['generate_args_kwargs'] = generate_args_kwargs
return sample_inputs_reduction(*args, **kwargs)
# Override OpInfo defaults and call base class __init__
kwargs.setdefault('inplace_variant', None)
kwargs.setdefault('sample_inputs_func', sample_inputs_func)
kwargs.setdefault('default_test_dtypes', (
torch.uint8, torch.int64, torch.float16, torch.bfloat16, torch.float32, torch.complex64))
super(ReductionOpInfo, self).__init__(name, **kwargs)
self.identity = identity
self.nan_policy = nan_policy
self.supports_multiple_dims = supports_multiple_dims
self.promotes_int_to_float = promotes_int_to_float
self.promotes_int_to_int64 = promotes_int_to_int64
self.result_dtype = result_dtype
self.generate_args_kwargs = generate_args_kwargs
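# Illustrative sketch (not an actual entry in the operator database): a simple
# reduction such as torch.sum could be described roughly as
#
#   ReductionOpInfo(
#       'sum',
#       identity=0,
#       supports_multiple_dims=True,
#       promotes_int_to_int64=True)
#
# with dtype, skip and decorator information supplied through the usual OpInfo
# keyword arguments; the flags used for real entries are set where the operator
# database is constructed.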
def sample_inputs_unary(op_info, device, dtype, requires_grad, **kwargs):
low, high = op_info.domain
low = low if low is None else low + op_info._domain_eps
high = high if high is None else high - op_info._domain_eps
if op_info.supports_sparse_csr:
# Tensors with dim=2 for sparse CSR testing
return (SampleInput(make_tensor((L, L), device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)),)
else:
return (SampleInput(make_tensor((L,), device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device=device, dtype=dtype,
low=low, high=high,
requires_grad=requires_grad)))
# Metadata class for unary "universal functions (ufuncs)" that accept a single
# tensor and share common properties.
class UnaryUfuncInfo(OpInfo):
"""Operator information for 'universal unary functions (unary ufuncs).'
These are functions of a single tensor with common properties like:
- they are elementwise functions
- the input shape is the output shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCUDA=None,
dtypesIfROCM=None,
default_test_dtypes=(
torch.uint8, torch.long, torch.half, torch.bfloat16,
torch.float32, torch.cfloat), # dtypes which tests check by default
domain=(None, None), # the [low, high) domain of the function
handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)
handles_extremals=True, # whether the op correctly handles extremal values (like inf)
handles_complex_extremals=True, # whether the op correctly handles complex extremals (like inf -infj)
supports_complex_to_float=False, # op supports safely casting from complex input to real output, e.g. angle
sample_inputs_func=sample_inputs_unary,
sample_kwargs=lambda device, dtype, input: ({}, {}),
supports_sparse=False,
reference_numerics_filter=None, # Filter for singular input values for test_reference_numerics_normal
**kwargs):
super(UnaryUfuncInfo, self).__init__(name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
default_test_dtypes=default_test_dtypes,
sample_inputs_func=sample_inputs_func,
supports_sparse=supports_sparse,
**kwargs)
self.ref = ref
self.domain = domain
self.handles_large_floats = handles_large_floats
self.handles_extremals = handles_extremals
self.handles_complex_extremals = handles_complex_extremals
self.supports_complex_to_float = supports_complex_to_float
self.reference_numerics_filter = reference_numerics_filter
# test_unary_ufuncs.py generates its own inputs to test the consistency
# of the operator on sliced tensors, non-contig tensors, etc.
# `sample_kwargs` is a utility function to provide kwargs
# along with those inputs if required (e.g. clamp).
# It should return two dictionaries: the first holding kwargs for the
# torch operator and the second for the reference NumPy operator.
self.sample_kwargs = sample_kwargs
# Epsilon to ensure grad and gradgrad checks don't test values
# outside a function's domain.
self._domain_eps = 1e-5
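# Illustrative sketch (not an actual entry in the operator database): a unary
# ufunc such as torch.exp could be described roughly as
#
#   UnaryUfuncInfo(
#       'exp',
#       ref=np.exp,
#       dtypes=floating_types()),
#
# where `ref` is the NumPy reference that test_unary_ufuncs.py compares the
# torch implementation against.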
def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
args_cases = (
# Cases with tensor indices.
(torch.tensor([1, 2, 3]),),
(torch.tensor(1),),
(torch.tensor([1, 2, 3]), 1),
(torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1),
# Cases with list of indices.
((2, 4),),
((2, 4), 1),
((2, 4), -1),
# Cases with integer section.
(3,),
(3, 1),
(3, -1),
)
def generator():
for args in args_cases:
yield SampleInput(make_input((S, S, S)), args=args)
return list(generator())
def sample_inputs_linalg_det(op_info, device, dtype, requires_grad):
kw = dict(device=device, dtype=dtype)
inputs = [
make_tensor((S, S), **kw),
make_tensor((1, 1), **kw), # 1x1
random_symmetric_matrix(S, **kw), # symmetric
random_symmetric_psd_matrix(S, **kw), # symmetric_psd
random_symmetric_pd_matrix(S, **kw), # symmetric_pd
random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null
random_square_matrix_of_rank(S, 1, **kw), # rank1
random_square_matrix_of_rank(S, 2, **kw), # rank2
random_fullrank_matrix_distinct_singular_value(S, **kw), # distinct_singular_value
make_tensor((3, 3, S, S), **kw), # batched
make_tensor((3, 3, 1, 1), **kw), # batched_1x1
random_symmetric_matrix(S, 3, **kw), # batched_symmetric
random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd
random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd
random_fullrank_matrix_distinct_singular_value(S, 3, 3, **kw), # batched_distinct_singular_values
make_tensor((0, 0), **kw),
make_tensor((0, S, S), **kw),
]
for t in inputs:
t.requires_grad = requires_grad
return [SampleInput(t) for t in inputs]
def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_singular_matrix_batch_base(size, rank):
assert size[-1] == size[-2]
assert rank > 0 and rank <= size[-1]
with torch.no_grad():
n = size[-1]
a = make_arg(size[:-2] + (n, rank)) / 10
b = make_arg(size[:-2] + (rank, n)) / 10
x = a @ b
lu, pivs = x.lu()
p, l, u = torch.lu_unpack(lu, pivs)
u_diag_abs = u.diagonal(0, -2, -1).abs()
u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values
u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices
u.diagonal(0, -2, -1).div_(u_diag_abs_largest)
u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps
matrix = p @ l @ u
assert (matrix.det().abs() < torch.finfo(dtype).eps * torch.linalg.matrix_norm(matrix)).all().item()
matrix.requires_grad_(requires_grad)
return matrix
def sample_generator():
for batch, size in product(((), (2,), (2, 2)), range(6)):
shape = batch + (size, size)
for rank in range(1, size):
yield make_singular_matrix_batch_base(shape, rank)
return [SampleInput(t) for t in sample_generator()]
def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad):
# (<matrix_size>, (<batch_sizes, ...>))
test_sizes = [
(1, ()),
(2, (0,)),
(2, (2,)),
]
inputs = []
for matrix_size, batch_sizes in test_sizes:
size = batch_sizes + (matrix_size, matrix_size)
for n in (0, 3, 5):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(t, args=(n,)))
for n in [-4, -2, -1]:
t = random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_sizes, device=device, dtype=dtype)
t.requires_grad = requires_grad
inputs.append(SampleInput(t, args=(n,)))
return inputs
def sample_inputs_hsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((6,), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),
SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),)
def sample_inputs_vsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((6, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),
SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),)
def sample_inputs_dsplit(op_info, device, dtype, requires_grad):
return (SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=([1, 2, 3],),),
SampleInput(make_tensor((S, S, 6), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(2,),),)
def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad):
# Each test case consists of the sizes in the chain of multiplications
# e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)
test_cases = [
[1, 2, 1],
[2, 0, 2],
[0, 2, 2],
[2, 2, 2, 2],
[2, 3, 4, 5],
[5, 4, 0, 2],
[2, 4, 3, 5, 3, 2]
]
result = []
for sizes in test_cases:
tensors = []
for size in zip(sizes[:-1], sizes[1:]):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
tensors.append(t)
result.append(SampleInput(tensors))
return result
def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):
sizes = ((2, 2), (2, 3, 2))
ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)
dims = ((-2, -1), (-1, 0))
inputs: List[SampleInput] = []
for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):
t = make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(t, args=(ord, dim, keepdim)))
return inputs
def sample_inputs_linalg_norm(op_info, device, dtype, requires_grad):
test_sizes = [
(S,),
(0,),
(S, S),
(0, 0),
(S, 0),
(0, S),
(S, S, S),
(0, S, S),
(S, 0, S),
(0, 0, 0),
]
vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)
matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)
inputs = []
for test_size in test_sizes:
is_vector_norm = len(test_size) == 1
is_matrix_norm = len(test_size) == 2
for keepdim in [False, True]:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype, low=None, high=None,
requires_grad=requires_grad),
kwargs=dict(
keepdim=keepdim)))
if not (is_vector_norm or is_matrix_norm):
continue
ords = vector_ords if is_vector_norm else matrix_ords
for ord in ords:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(ord,),
kwargs=dict(
keepdim=keepdim)))
if ord in ['nuc', 'fro']:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
kwargs=dict(
ord=ord,
keepdim=keepdim,
dim=(0, 1))))
return inputs
def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# input shape, output shape, output stride, output storage offset
test_cases = [
((1,), (1,), (1,), 0),
((3, 3), (2, 2), (1, 2), 0),
((3, 3), (2, 2), (1, 2), 1),
((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0),
((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0),
]
samples = []
for input_shape, output_shape, stride, storage_offset in test_cases:
input_t = make_arg(input_shape)
kwargs = dict(storage_offset=storage_offset)
samples.append(SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs))
return samples
def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs):
inputs = (
(0,),
(0, 1),
(0, 1, 2, 3),
)
rvals = [1, 2, 4]
products = product(inputs, rvals, [False, True])
samples = []
for input_data, r, with_replacement in products:
input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad)
kwargs = dict(r=r, with_replacement=with_replacement)
samples.append(SampleInput(input_t, kwargs=kwargs))
return tuple(samples)
def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# constructs 1-D tensors with varying number of elements
a = make_arg((0,))
b = make_arg((0, 1))
c = make_arg((0, 1, 2, 3))
samples = []
# sample with only 1 tensor
samples.append(SampleInput(
a
))
# sample with 2 tensors
samples.append(SampleInput(
a,
args=(b,)
))
# sample with 3 tensors
samples.append(SampleInput(
a,
args=(b, c)
))
return tuple(samples)
def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input_shape, dict of dim and eps
cases: Tuple[tuple, dict] = ( # type: ignore[assignment]
((S, S), {'dim': 1}),
((S, 2), {'dim': -1}),
((S,), {'dim': 0, 'eps': 0.5}),
((), {'dim': 0}),
((S, S, M), {'dim': 2}),
((S, S), {})
)
def generator():
for input_shape, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs)
# Test for Broadcasting
yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})
yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2})
yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1})
return list(generator())
def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
# Ordered as: input shape, kwargs for training, momentum, eps
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}),
((3, 2, 4), {'training': False, 'momentum': -1.2}),
((3, 1), {'training': True, 'momentum': 0.0}),
((0,), {'training': True}),
((0,), {'training': False}),
((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}),
((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}),
((2, 1), {}),
)
def generator():
for input_shape, kwargs in cases:
# args: running mean, running var, weight and bias should necessarily be of shape: (channels,)
channels = input_shape[1] if len(input_shape) > 1 else 0
weight = make_arg(channels) if channels > 0 else None
bias = make_arg(channels) if channels > 0 else None
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
yield SampleInput(
make_arg(input_shape),
args=(
running_mean,
running_var,
weight,
bias
),
kwargs=kwargs
)
# Checking for permutations of weights and biases as `None`
weights = [channels, None, None]
biases = [None, channels, None]
is_training = [True, False, False]
for weight_channels, bias_channels, training in zip(weights, biases, is_training):
yield SampleInput(
make_arg(input_shape),
args=(
running_mean,
running_var,
make_arg(weight_channels) if weight_channels is not None else None,
make_arg(bias_channels) if bias_channels is not None else None
),
kwargs={'training': training}
)
# Test case for no optional kwargs
# running_mean and running_var are required in evaluation mode (training: False) but not in training mode
yield SampleInput(make_arg((1, 2, 3)), args=(None, None), kwargs={'training': True})
return list(generator())
def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
(()),
((S, )),
((S, S)),
((S, M, S))
)
def generator():
for shape in cases:
yield SampleInput(make_arg(shape))
return list(generator())
def sample_inputs_nn_functional_prelu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
(()),
((S, )),
((S, S)),
((S, M, S))
)
def generator():
for shape in cases:
for weight in [-1., 0., 0.8, 1.]:
weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg(shape), kwargs=dict(weight=weight_tensor))
if len(shape) >= 2:
channel_size = shape[1]
yield SampleInput(make_arg(shape), kwargs=dict(weight=make_arg((channel_size,))))
return list(generator())
def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (2,), '2'),
((S, S), (0,), '0'),
((S, S), (0.5,), '0_5'),
((S, S), (1,), '1'),
((S, S), (3,), '3'),
((S, S), (-1,), 'neg_1'),
((S, S), (-2,), 'neg_2'),
((S, S), (-0.5,), 'neg_0_5'),
((S, S), (-1.5,), 'neg_1_5'),
)
cases_nonzero_input = (
((S, S, S), (1.5,), '1_5_default'),
((S, S, S), (1.5, 1), '1_5_dim'),
((S, S, S), (1.5, -1), '1_5_neg_dim'),
((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'),
((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'),
)
cases_negdim_base = (
((S, S), (-2, 1,), 'neg_2_2_dim'),
((S, S), (-1, 1,), 'neg_1_2_dim'),
((S, S), (0, 1,), '0_2_dim'),
((S, S), (1, 1,), '1_2_dim'),
((S, S), (2, 1,), '2_2_dim'),
((S, S), (3, 1,), '3_2_dim'),
((S, S, S), (2, 1), '2_dim'),
((S, S, S), (3, 1), '3_dim'),
((S, S, S), (2, 1, True), 'keepdim_2_dim'),
((S, S, S), (3, 1, True), 'keepdim_3_dim'),
((), (2, 0), '2_dim_scalar'),
((), (3, 0), '3_dim_scalar'),
((), (2, 0, True), 'keepdim_2_dim_scalar'),
((), (3, 0, True), 'keepdim_3_dim_scalar'),
)
cases_negdim = []
for case in cases_negdim_base:
cases_negdim.append(case)
shape, args, name = case
new_args = copy.deepcopy(list(args))
new_args[1] *= -1
cases_negdim.append((shape, tuple(new_args), name.replace("_dim", "_neg_dim")))
def generator():
for shape, args, name in itertools.chain(cases, cases_negdim):
yield SampleInput(make_arg(shape), args=args, name=name)
for shape, args, name in cases_nonzero_input:
yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name)
return list(generator())
def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (), 'default'),
((S, S), ('fro',), 'fro_default'),
((S, S), ('fro', [0, 1],), 'fro'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), ('nuc',), 'nuc'),
((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S), (-inf,), '-inf'),
((S, S), (inf,), 'inf'),
((S, S), (inf, 1,), 'inf_2_dim'),
((S, S), (inf, -1,), 'inf_2_neg_dim'),
)
def generator():
for shape, args, name in cases:
yield SampleInput(make_arg(shape), args=args, name=name)
return list(generator())
def sample_inputs_linalg_vector_norm(op_info, device, dtype, requires_grad, **kwargs):
size_1D = (S,)
size_2D = (2, 2)
test_cases = [
# input size, ord, dim args
(size_1D, 2, None),
(size_1D, 2, (0,)),
(size_1D, 0, None),
(size_1D, 0, (0,)),
(size_1D, 0.9, None),
(size_1D, 0.9, (0,)),
(size_1D, 1, None),
(size_1D, 1, (0,)),
(size_1D, -2.1, None),
(size_1D, -2.1, (0,)),
(size_1D, inf, None),
(size_1D, inf, (0,)),
(size_1D, -inf, None),
(size_1D, -inf, (0,)),
(size_2D, 2, None),
(size_2D, 2, (0,)),
(size_2D, 2, (-1, 0)),
(size_2D, 0, None),
(size_2D, 0, (0,)),
(size_2D, 0, (-1, 0)),
(size_2D, 0.9, None),
(size_2D, 0.9, (0,)),
(size_2D, 0.9, (-1, 0)),
(size_2D, 1, None),
(size_2D, 1, (0,)),
(size_2D, 1, (-1, 0)),
(size_2D, -2.1, None),
(size_2D, -2.1, (0,)),
(size_2D, -2.1, (-1, 0)),
(size_2D, inf, None),
(size_2D, inf, (0,)),
(size_2D, inf, (-1, 0)),
(size_2D, -inf, None),
(size_2D, -inf, (0,)),
(size_2D, -inf, (-1, 0)),
]
inputs = []
for test_size, ord, dim in test_cases:
for keepdim in [False, True]:
inputs.append(SampleInput(
make_tensor(
test_size, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(ord,),
kwargs=dict(
keepdim=keepdim,
dim=dim)))
return inputs
# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensors and have common properties
class BinaryUfuncInfo(OpInfo):
"""Operator information for 'universal binary functions (binary ufuncs).'
These are functions of two tensors with common properties like:
- they are elementwise functions
- the output shape is determined by the input shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/stable/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(self, name, *,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
promotes_int_to_float=False, # Set to true if the op promotes integer inputs to float
always_returns_bool=False, # Set to true if the op always returns bool tensors
**kwargs):
super().__init__(name, **kwargs)
# [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = {}
self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = {}
self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs
self.promotes_int_to_float = promotes_int_to_float
self.always_returns_bool = always_returns_bool
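# Illustrative sketch (not an actual entry in the operator database): a binary
# ufunc such as torch.div could restrict the values generated for its
# right-hand operand to avoid dividing by zero, roughly as
#
#   BinaryUfuncInfo(
#       'div',
#       promotes_int_to_float=True,
#       rhs_make_tensor_kwargs=dict(exclude_zero=True)),
#
# sample_inputs_binary_pwise below forwards these kwargs to make_tensor when
# constructing the rhs operands.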
def _resolve_binary_pwise_kwargs(
op_info, *, op_kwargs=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None
):
"""Resolves default values for :func:`sample_inputs_binary_pwise`.
By default :attr:`op_kwargs`, :attr:`lhs_make_tensor_kwargs`, and :attr:`rhs_make_tensor_kwargs` are just empty
dictionaries. In case :attr:`op_info` is a :class:`BinaryUfuncInfo`, :attr:`BinaryUfuncInfo.lhs_make_tensor_kwargs`
and :attr:`BinaryUfuncInfo.rhs_make_tensor_kwargs` will be used as defaults.
"""
if op_kwargs is None:
op_kwargs = {}
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = op_info.lhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = op_info.rhs_make_tensor_kwargs if isinstance(op_info, BinaryUfuncInfo) else {}
return op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs
def sample_inputs_binary_pwise(
op_info,
device,
dtype,
requires_grad,
*,
python_scalars=False,
op_kwargs=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
**kwargs,
):
op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(
op_info,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
)
scalar = make_tensor((), device=device, dtype=dtype, **rhs_make_tensor_kwargs)
if python_scalars:
scalar = scalar.item() # type: ignore[assignment]
shapes = [
((), scalar),
((S,), scalar),
((S, 1), (S,)),
((M, S), scalar),
((S, M, S), (M, S)),
((S, M, S), (S, M, S)),
((M, 1, S), (M, S)),
((M, 1, S), (1, M, S)),
]
sample_inputs = []
for shape_lhs, shape_rhs_or_scalar in shapes:
lhs = make_tensor(
shape_lhs,
device=device,
dtype=dtype,
requires_grad=requires_grad,
**lhs_make_tensor_kwargs,
)
if isinstance(shape_rhs_or_scalar, tuple):
# shape
rhs = make_tensor(
shape_rhs_or_scalar,
device=device,
dtype=dtype,
requires_grad=requires_grad,
**rhs_make_tensor_kwargs,
)
broadcasts_input = torch.broadcast_shapes(shape_lhs, shape_rhs_or_scalar) != shape_lhs
else:
# scalar
rhs = shape_rhs_or_scalar # type: ignore[assignment]
broadcasts_input = False
sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=op_kwargs, broadcasts_input=broadcasts_input))
return sample_inputs
def sample_inputs_add_sub(
op_info,
device,
dtype,
requires_grad,
python_scalars=False,
alpha=1,
op_kwargs=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
**kwargs,
):
op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(
op_info,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
)
sample_inputs = sample_inputs_binary_pwise(
op_info,
device,
dtype,
requires_grad,
python_scalars=python_scalars,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
**kwargs,
)
lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)
rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)
sample_inputs.append(SampleInput(lhs, args=(rhs,), kwargs=dict(op_kwargs, alpha=alpha), broadcasts_input=False))
return sample_inputs
def sample_inputs_isclose(
op_info,
device,
dtype,
requires_grad,
python_scalars=False,
op_kwargs=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
**kwargs,
):
op_kwargs, lhs_make_tensor_kwargs, rhs_make_tensor_kwargs = _resolve_binary_pwise_kwargs(
op_info,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
)
sample_inputs = sample_inputs_binary_pwise(
op_info,
device,
dtype,
requires_grad,
python_scalars=python_scalars,
op_kwargs=op_kwargs,
lhs_make_tensor_kwargs=lhs_make_tensor_kwargs,
rhs_make_tensor_kwargs=rhs_make_tensor_kwargs,
**kwargs,
)
rtols = [0., 1e-7]
atols = [0., 1e-7]
equal_nans = [False, True]
products = product(rtols, atols, equal_nans)
for rtol, atol, equal_nan in products:
lhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **lhs_make_tensor_kwargs)
rhs = make_tensor((S, S), device=device, dtype=dtype, requires_grad=requires_grad, **rhs_make_tensor_kwargs)
sample_inputs.append(SampleInput(lhs, args=(rhs,),
kwargs=dict(op_kwargs, rtol=rtol, atol=atol, equal_nan=equal_nan)))
return sample_inputs
def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return (SampleInput(make_arg((1, 2))),
SampleInput(make_arg((2,))),
SampleInput(make_arg(())))
def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype)
first_shape, second_shape = (S, M), (M, S)
sample_inputs = []
sample_inputs.append(
SampleInput(make_arg(first_shape, requires_grad=requires_grad),
args=(make_arg(second_shape, requires_grad=requires_grad),)))
if dtype.is_complex:
sample_inputs.append(
SampleInput(make_arg(first_shape, requires_grad=requires_grad),
args=(make_arg(second_shape).conj().requires_grad_(requires_grad),)))
sample_inputs.append(
SampleInput(
make_arg(first_shape).transpose(0, 1).requires_grad_(requires_grad),
args=(make_arg(second_shape).transpose(0, 1).conj().requires_grad_(requires_grad),)))
return sample_inputs
def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):
alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)
beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)
tests_list = [
((2, 3), (2, 2), (2, 3), False)
]
tests_with_lhs_broadcasting = [
((1,), (2, 2), (2, 3), True),
((), (2, 2), (2, 3), True)
]
test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]
sample_inputs = []
for shape_a, shape_b, shape_c, broadcasts_input in test_cases:
sample_inputs.append(
SampleInput(
make_tensor(shape_a, device, dtype, requires_grad=requires_grad),
args=(
make_tensor(shape_b, device, dtype,
requires_grad=requires_grad),
make_tensor(shape_c, device, dtype,
requires_grad=requires_grad)),
kwargs={'alpha': alpha_val, 'beta': beta_val},
broadcasts_input=broadcasts_input))
if dtype.is_complex:
shape = (3, 3)
sample_inputs.append(
SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),
args=(
make_tensor(shape, device, dtype,
requires_grad=requires_grad).t().conj(),
make_tensor(shape, device, dtype,
requires_grad=requires_grad)),
kwargs={'alpha': alpha_val, 'beta': beta_val},))
sample_inputs.append(
SampleInput(make_tensor(shape, device, dtype, requires_grad=requires_grad),
args=(
make_tensor(shape, device, dtype,
requires_grad=requires_grad),
make_tensor(shape, device, dtype,
requires_grad=requires_grad).t().conj()),
kwargs={'alpha': alpha_val, 'beta': beta_val},))
return sample_inputs
def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((M, S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((M, M, S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):
sample_inputs = []
sample_inputs.append(SampleInput(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
))
if dtype.is_complex:
# dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor)
# is tested in test_conj_view (which tests operations with only conjugated input tensor
# -- not conjugated arg tensors)
sample_inputs.append(SampleInput(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
torch.conj(make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
)
))
return sample_inputs
def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases = (((S,), (S, M), (M,), 1, 1, False),
((S,), (S, M), (M,), 0.2, 0.6, False),
)
test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),
((1,), (S, M), (M,), 0.2, 0.6, True),
((), (S, M), (M,), 1, 1, True),
((), (S, M), (M,), 0.2, 0.6, True),
)
cases = test_cases + test_cases_with_broadcast
def generator():
# addmv performs: beta * M + alpha * (mat @ vec)
for M, mat, vec, beta, alpha, broadcasts_input in cases:
yield SampleInput(make_arg(M), args=(make_arg(mat), make_arg(vec)),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting
test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False),
((1,), (S, S, S), (S, S, M), 1, 1, True),
((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
((), (S, S, S), (S, S, M), 1, 1, True),
((), (S, S, S), (S, S, M), 0.6, 0.2, True),
]
def generator():
for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases:
if dtype.is_complex:
beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j)
yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting)
yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting)
return list(generator())
def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):
test_cases = [(((S, S), (S, S), (S, S)), False),
(((S, S), (S, 1), (1, S)), False),
(((1,), (S, S, 1), (1, S)), True),
(((), (), ()), False),
(((S, S), (), ()), True),
(((), (S, S, 1), (1, S)), True)
]
sample_inputs = []
for input_args, broadcasts_input in test_cases:
args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg
for arg in input_args)
sample_inputs.append(SampleInput(
args[0],
args=args[1:],
broadcasts_input=broadcasts_input))
args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg
for arg in input_args)
sample_inputs.append(SampleInput(
args[0],
args=args[1:],
kwargs=dict(value=3.14), broadcasts_input=broadcasts_input))
return tuple(sample_inputs)
def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):
test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),
((1,), (S, S, S), (S, S, M), 1, 1, True),
((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
((), (S, S, S), (S, S, M), 1, 1, True),
((), (S, S, S), (S, S, M), 0.6, 0.2, True),
]
sample_inputs = []
for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:
args = (make_tensor(input_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(batch1_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(batch2_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad))
sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),
kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))
if dtype.is_complex:
sample_inputs.append(SampleInput(
args[0].detach().clone().requires_grad_(requires_grad),
args=(args[1].detach().clone().requires_grad_(requires_grad),
args[2].detach().clone().requires_grad_(requires_grad)),
kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),
broadcasts_input=broadcasts_input))
if dtype.is_complex:
shapes = [(S, S, S), (S, M, S), (S, S, M)]
args = (make_tensor(shapes[0], device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(shapes[1], device, dtype,
low=None, high=None,
requires_grad=requires_grad),
make_tensor(shapes[2], device, dtype,
low=None, high=None,
requires_grad=requires_grad))
sample_inputs.append(
SampleInput(
args[0].transpose(-1, 1).detach().requires_grad_(requires_grad),
args=(args[1].transpose(-1, 1).conj().detach().requires_grad_(requires_grad),
args[2].transpose(-1, 1).conj().detach().requires_grad_(requires_grad)),
kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),))
return tuple(sample_inputs)
def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):
input1 = SampleInput(
make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))
input2 = SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
broadcasts_input=True)
if dtype.is_complex:
alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j
elif dtype.is_floating_point:
alpha, beta = 0.2, 0.6
else:
alpha, beta = 2, 3
input3 = SampleInput(
make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
kwargs=dict(beta=beta, alpha=alpha))
input4 = SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),
kwargs=dict(beta=beta, alpha=alpha),
broadcasts_input=True)
return (input1, input2, input3, input4)
def sample_inputs_xlogy(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
make_tensor((S, S), device, dtype, low=0, high=None, requires_grad=requires_grad),
)
),
)
def sample_inputs_xlog1py(self, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def generator():
# same shape
yield SampleInput(make_arg((S, S)), args=(make_arg((S, S), low=-1),))
# rhs broadcast
yield SampleInput(make_arg((S, S)), args=(make_arg((S,), low=-1),))
# all zero `x`
with torch.no_grad():
x = make_arg((S, S))
x.fill_(0)
yield SampleInput(x, args=(make_arg((S, S), low=-1),))
# randomly zero-masked `x`
x = make_arg((S, S))
y = make_arg((S, S), low=-1)
with torch.no_grad():
x[torch.rand(x.shape) > 0.5] = 0
yield SampleInput(x, args=(y,))
# Scalar x
# `input` has to be a tensor
# yield SampleInput(0, args=(make_arg((S, S), low=-1),))
# yield SampleInput(2.1, args=(make_arg((S, S), low=-1),))
# Scalar y
yield SampleInput(make_arg((S, S)), args=(-0.5,))
yield SampleInput(make_arg((S, S)), args=(1.2,))
return list(generator())
def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = ((), (S, S, S), (S,))
def generator():
for shape in cases:
yield(SampleInput(make_arg(shape)))
return list(generator())
def sample_inputs_logsumexp(self, device, dtype, requires_grad):
inputs = (
((), (0,), True),
((S, S), (1,), True),
((S, S), (1,), False)
)
samples = []
for shape, dim, keepdim in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(dim, keepdim)))
return tuple(samples)
def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):
inputs = [
((), {}),
((S, S), {}),
((0, S, 0), {}),
((S,), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), {'dtype': torch.double}),
((S,), {'device': 'cpu'}),
((S,), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), {'device': 'cuda'}))
samples = []
for shape, kwargs in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, kwargs=kwargs))
return tuple(samples)
def get_independent_tensor(tensor):
return tensor.detach().clone().requires_grad_(tensor.requires_grad)
def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs):
samples = []
low = 2
high = 10
for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs):
# With high
samples.append(SampleInput(
sample.input,
args=(high,) + sample.args,
kwargs=sample.kwargs))
# With low and high
samples.append(SampleInput(
get_independent_tensor(sample.input),
args=(low, high,) + sample.args,
kwargs=sample.kwargs))
return tuple(samples)
def sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):
inputs = [
((), (), {}),
((S, S), (2, 0), {}),
((0, S, 0), (3, 2, 2), {}),
((S,), (2, 3), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), (10,), {'dtype': torch.double}),
((S,), (1, 1, 12), {'device': 'cpu'}),
((S,), (2, 2, 2), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), (7, 2), {'device': 'cuda'}))
samples = []
for input_shape, output_shape, kwargs in inputs:
t = make_tensor(input_shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(output_shape,), kwargs=kwargs))
return tuple(samples)
def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs):
def get_val(dtype):
return make_tensor([], 'cpu', dtype).item()
samples = []
for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):
# The scalar we are passing to new_full must be the same dtype
# as the one of the resulting tensor
use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype
samples.append(SampleInput(
sample.input, args=sample.args + (get_val(use_dtype),), kwargs=sample.kwargs))
return tuple(samples)
def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs):
def get_val(dtype):
return make_tensor([], 'cpu', dtype).item()
inputs = [
((), get_val(dtype), {}),
((S, S), get_val(dtype), {}),
((0, S, 0), get_val(dtype), {}),
((S,), get_val(dtype), {'dtype': dtype, 'device': device}),
# Hard-code some dtypes/devices. We want to test cases where the
# (dtype, device) is different from the input's (dtype, device)
((S,), get_val(torch.double), {'dtype': torch.double}),
((S,), get_val(dtype), {'device': 'cpu'}),
((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}),
]
if torch.cuda.is_available():
inputs.append(((S,), get_val(dtype), {'device': 'cuda'}))
samples = []
for shape, fill_value, kwargs in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(t, args=(fill_value,), kwargs=kwargs))
return tuple(samples)
def sample_inputs_logcumsumexp(self, device, dtype, requires_grad):
inputs = (
((S, S, S), 0),
((S, S, S), 1),
((), 0),
)
samples = []
for large_number in (True, False):
for shape, dim in inputs:
t = make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad)
if large_number and t.dim() > 0:
with torch.no_grad():
t[0] = 10000
samples.append(SampleInput(t, args=(dim,)))
return tuple(samples)
def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):
return (SampleInput((make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad))),)
def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (2, 1, 0.5)),
((S, S, S), (2, -1, 0.5)),
((S, S, S), (1, 2, 3)),
((S, S, S), (float('inf'), 2, 0.5)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((1, 2, 3), (-1, -2)),
((1, 2, 3), (-1, 2)),
((1, 2, 3), (1, -2)),
((1, 2, 3), (1, 2)),
((), (0, 0)),
((1, ), (0, 0)),
((M, M), (0, 1)),
((S, S, S), (2, 0)), )
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((1, 2, 3), (), (M, M), (S, S, S), (S, M, S), (M, S, M, S))
return list(SampleInput(make_arg(shape)) for shape in shapes)
def sample_inputs_T(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((), (M, M))
return list(SampleInput(make_arg(shape)) for shape in shapes)
def sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates always-invertible inputs for linear algebra ops using
random_fullrank_matrix_distinct_singular_value.
The input is generated as the itertools.product of 'batches' and 'ns'.
In total this function generates 8 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 0]
out = []
for batch, n in product(batches, ns):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
out.append(SampleInput(a))
return out
def sample_inputs_linalg_pinv_singular(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to
test the backward method of `linalg_pinv`. That way we always preserve the rank of the
input no matter the perturbations applied to it by the gradcheck.
Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood.
"""
batches = [(), (0, ), (2, ), (1, 1)]
# A size of at least 30 is required to trigger failures in the previous implicit implementation
# of pinv's backward method, although such large sizes make the test slow.
size = [0, 3, 50]
def generate_samples():
for batch, m, n in product(batches, size, size):
for k in range(min(3, min(m, n))):
# Note that by making the columns of `a` and `b` orthonormal we make sure that
# the product matrix `a @ b.t()` has condition number 1 when restricted to its image
a = torch.rand(*batch, m, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)
b = torch.rand(*batch, n, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad)
yield SampleInput(a, args=(b,))
return list(generate_samples())
def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# autograd is not supported for inputs with zero number of elements
shapes = ((S, S),
(2, S, S),
(2, 1, S, S), )
def generator():
for shape in shapes:
yield SampleInput(make_arg(shape))
return list(generator())
def np_sinc_with_fp16_as_fp32(x):
# Wraps numpy's sinc function so that fp16 values are promoted to fp32
# before sinc is invoked. Context: numpy's sinc returns NaN when evaluated
# at 0 for fp16.
if x.dtype == np.float16:
return np.sinc(x.astype(np.float32))
else:
return np.sinc(x)
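# broadcast_to samples: each case pairs an input size with a target broadcast shape,
# covering singleton expansion, missing leading dims, and the 0-d -> 3-d case.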
def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((S, 1, 1), (S, S, S)),
((S, 1, S), (S, S, S)),
((S, 1), (S, S, S)),
((1,), (S, S, S)),
((1, S), (1, 1, S)),
((), ()),
((), (1, 3, 2)),
)
return tuple(
SampleInput(
make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(shape,)) for size, shape in test_cases)
def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),)
samples: List[SampleInput] = []
for shape, *other_shapes in test_cases:
samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))
return samples
def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases: Tuple[tuple] = (((1, S), (2, S), (3, S),),)
samples: List[SampleInput] = []
for shape, *other_shapes in test_cases:
samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))
return samples
def sample_inputs_bitwise_shift(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
(S, S, S),
(S,),
(),
)
sample_inputs = []
for size in test_cases:
tensor1 = make_tensor(size, device, dtype, low=-32, high=32, requires_grad=requires_grad)
tensor2 = make_tensor(size, device, dtype, low=0, high=5, requires_grad=requires_grad)
sample_inputs.append(SampleInput(tensor1, args=(tensor2,)))
sample_inputs.append(SampleInput(tensor1, args=(2,)))
return tuple(sample_inputs)
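# cdist samples: pairs of point sets with a matching trailing dimension, including batched,
# zero-sized, and broadcasting shapes, crossed with several p values and both euclidean
# compute modes ('use_mm_for_euclid_dist' / 'donot_use_mm_for_euclid_dist').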
def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):
small_S = 2
test_cases = (
((S, S, 2), (S, S + 1, 2)),
((S, S), (S, S)),
((S, S, S), (S, S, S)),
((3, 5), (3, 5)),
((2, 3, 5), (2, 3, 5)),
((1, 2, 3), (1, 2, 3)),
((1, 1), (S, 1)),
((0, 5), (4, 5)),
((4, 5), (0, 5)),
((0, 4, 5), (3, 5)),
((4, 5), (0, 3, 5)),
((0, 4, 5), (1, 3, 5)),
((1, 4, 5), (0, 3, 5)),
# Using S here would make this one test take 9s
((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),
((small_S, 1, 1, small_S), (1, small_S, small_S)),
((1, 1, small_S), (small_S, 1, small_S, small_S)),
)
samples = []
for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
# FIXME add an override for JIT and revert 0. back to 0
# since it's accepted by eager
for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]:
for t1_size, t2_size in test_cases:
# The args should never be non-contiguous as this is not supported in the backward
samples.append(SampleInput(
make_tensor(t1_size, device, dtype, requires_grad=requires_grad),
args=(make_tensor(t2_size, device, dtype, requires_grad=requires_grad), p, cm)))
return samples
def sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype,
low=None, high=None, requires_grad=requires_grad)
cases = (((S, S, S), (1,)),
((), (1,)),
# For requires_grad=False below,
# check https://github.com/pytorch/pytorch/issues/59137
((S, S, S), (make_arg((), requires_grad=False),)))
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_comparison_ops(self, device, dtype, requires_grad, **kwargs):
test_cases = (
((S, S, S), (S, S, S), False),
((S, S, S), (), False),
((S, S, S), (1,), False),
((S,), (1,), False),
((), (), False),
)
test_cases_lhs_broadcasting = (
((S, 1, S), (S, S, S), True),
((1,), (S, S, S), True),
((1, S), (1, 1, S), True),
((), (0,), True),
((), (S, S, S), True),
)
cases = test_cases + test_cases_lhs_broadcasting
sample_inputs = list(SampleInput(make_tensor(first_shape, device, dtype,
requires_grad=requires_grad),
args=(make_tensor(second_shape, device, dtype,
requires_grad=requires_grad),),
broadcasts_input=broadcasts_input)
for first_shape, second_shape, broadcasts_input in cases)
equal_tensors_non_bool = (
([[[-8, 6], [9, 0]], [[0, 5], [5, 7]]]),
([[[6, 5]], [[1, -5]]]),
([[2], [-1]]),
([0, -6]),
([3],),
)
equal_tensors_bool = (
([[[1, 0], [0, 0]], [[0, 1], [1, 0]]]),
([[[1, 1]], [[1, 0]]]),
([[1], [0]]),
([0, 1]),
([1],),
)
more_cases = equal_tensors_bool if dtype is torch.bool else equal_tensors_non_bool
more_inputs = list(SampleInput(torch.tensor(elements, device=device, dtype=dtype,
requires_grad=requires_grad),
args=(torch.tensor(elements, device=device, dtype=dtype,
requires_grad=requires_grad),))
for elements in more_cases)
sample_inputs = [*sample_inputs, *more_inputs]
return tuple(sample_inputs)
def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):
tensors = [
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
]
return (SampleInput(tensors, args=(0,)),)
def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment]
((S, S), (S, S), {'dim': -1}),
((S, S), (S, S), {'dim': 1}),
((M, S), (S, S), {'dim': 0}), # different shapes
((1, 2, 3), (1, 2, 3), {'dim': -2}),
((0,), (0,), {'dim': 0}), # empty tensor
((0, S), (S, S), {'dim': 0}),
((1,), (1,), {}) # dim not passed, fallback to default
)
def generator():
for input_shape1, input_shape2, kwargs in cases:
yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs)
return list(generator())
def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):
tensors = [
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
]
return (SampleInput(tensors),)
def sample_inputs_hypot(op_info, device, dtype, requires_grad):
input = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
args = make_tensor((S, S), device, dtype, requires_grad=requires_grad)
return (
SampleInput(input, args=(args,)),
)
def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, gather_variable((S, S), 1, M, True, device=device))),
SampleInput(
make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
# Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006
SampleInput(
make_tensor((S,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([], dtype=torch.uint8, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
)
def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):
return (SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S), 1, S, True, device=device), 0)),
# `indices` broadcast
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),
# `self` broadcast
SampleInput(make_tensor((1, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),
# without `dim` arg
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device), )),
SampleInput(make_tensor((S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(gather_variable((S, S // 2), 0, S, True, device=device),)),
)
def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):
test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment]
((S, S, S), {}),
((S, S, S), {'dim': 1}),
((S, S, S), {'dim': 1, 'keepdim': True}),
((), {'dim': 0}),
((), {}),
((), {'dim': 0, 'keepdim': True}),
)
samples: List[SampleInput] = []
for shape, kwargs in test_cases:
samples.append(SampleInput(
make_tensor(shape, device, dtype, requires_grad=requires_grad),
kwargs=kwargs))
return samples
def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
test_cases = (
((1,), 0, None, None),
((S,), 0, None, None),
((S, 1), 0, None, None),
((S, 1), 1, None, None),
((S, S), 0, None, None),
((S, S), 1, None, None),
((S, S), 0, (1, S), (2, S)),
((S, S), 0, None, (2, S)),
((S, S, S), 1, None, None),
((S, S, S), 2, None, None),
((S, S, S), 1, (S, 1, S), (S, 1, S)),
((S, S, S), 2, (S, S, 1), (S, S, 1)),
((S, S, S), 2, (S, S, S), (S, S, S)),)
sample_inputs = []
for size, dim, size_prepend, size_append in test_cases:
prepend_size = 0 if (size_prepend is None) else size_prepend[dim]
append_size = 0 if (size_append is None) else size_append[dim]
dim_size = size[dim] + prepend_size + append_size
for n in range(dim_size):
input_tensor = make_arg(size)
prepend = make_arg(size_prepend) if size_prepend else None
append = make_arg(size_append) if size_append else None
sample_inputs.append(SampleInput(input_tensor, args=(n, dim, prepend, append,)))
# add some samples with n > dim_size
sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S + 1, 1,)))
sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S * 3 + 2, 2, make_arg((S, S, S)), make_arg((S, S, S)),)))
return sample_inputs
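# histogram samples: for every input size, the bins argument is exercised both as an int
# bin count and as an explicit edges tensor of bin_ct + 1 elements, with and without a
# per-element weight tensor and with density=False/True.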
def sample_inputs_histogram(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):
input_tensor = make_arg(size)
weight_tensor = make_arg(size) if weighted else None
sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
kwargs=dict(weight=weight_tensor, density=density)))
bins_tensor = make_arg((bin_ct + 1,))
sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
kwargs=dict(weight=weight_tensor, density=density)))
return sample_inputs
def sample_inputs_histogramdd(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S))
bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3))
sample_inputs = []
for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]):
input_tensor = make_arg(size)
bin_ct = bin_ct_pattern[:size[-1]]
weight_tensor = make_arg(size[:-1]) if weighted else None
sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
kwargs=dict(weight=weight_tensor, density=density)))
bins_tensor = [make_arg(ct + 1) for ct in bin_ct]
sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
kwargs=dict(weight=weight_tensor, density=density)))
return sample_inputs
def sample_inputs_histc(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, min, max in product(sizes, [0, -10], [0, 10]):
# construct sample input omitting bins arg
sample_inputs.append(SampleInput(make_arg(size),
kwargs=dict(min=min, max=max)))
# construct sample inputs with a few different bins values
for bins in [1, 3, 10]:
sample_inputs.append(SampleInput(make_arg(size),
kwargs=dict(bins=bins, min=min, max=max)))
return sample_inputs
def sample_inputs_bincount(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs = []
for size, weighted in product((S, M), [False, True]):
input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device)
weight_tensor = make_arg((size,)) if weighted else None
max_val = int(input_tensor.max().item())
for minlength in [0, max_val // 2, max_val, 2 * max_val]:
sample_inputs.append(SampleInput(input_tensor,
kwargs=dict(weights=weight_tensor, minlength=minlength)))
return sample_inputs
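# bucketize samples: boundaries come from sorting a random 1-d tensor via msort(), crossed
# with the out_int32 and right keyword arguments.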
def sample_inputs_bucketize(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for size, out_int32, right in product(sizes, [False, True], [False, True]):
input_tensor = make_arg(size)
boundaries = make_arg((S,)).msort()
sample_inputs.append(SampleInput(input_tensor, args=(boundaries, ),
kwargs=dict(out_int32=out_int32, right=right)))
return sample_inputs
def sample_inputs_searchsorted(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((0,), (M,), (0, 0), (M, M), (0, 0, 0), (M, M, M))
inputs = []
for size, noncontiguous, out_int32, right in product(sizes, [False, True], [False, True], [False, True]):
unsorted_tensor = make_arg(size, noncontiguous=noncontiguous)
input_tensor = make_arg(size, noncontiguous=noncontiguous)
if np.prod(size) == 0:
boundary_tensor = unsorted_tensor
sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous)
else:
boundary_tensor, sorter = torch.sort(unsorted_tensor)
side = "right" if right else "left"
inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right)))
inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side)))
inputs.append(
SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right, sorter=sorter)))
inputs.append(
SampleInput(unsorted_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side, sorter=sorter)))
return inputs
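# torch.gradient samples: spacing is given either as a scalar / list of scalars or as
# explicit coordinate tensors (one per differentiated dimension), combined with dim and
# edge_order choices.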
def sample_inputs_gradient(op_info, device, dtype, requires_grad):
sample_inputs = []
test_cases_float = (
((S,), None, None, 1),
((S,), 2., None, 1),
((S, S), None, None, 2),
((S, S), [2.0, 2.1], None, 1),
((S, S), [2.0, 2.1], (0, 1), 1),
((4, 4, 4), [2., 1.], (0, 1), 2),
)
for size, spacing, dim, edge_order in test_cases_float:
t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))
test_cases_tensor = (
((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),
((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),
)
for size, coordinates, dim, edge_order in test_cases_tensor:
t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)
coordinates_tensor_list = []
for coords in coordinates:
# `coords` always contains floating point values, and Python 3.10 no longer supports the
# implicit conversion of such values to an integer via `__int__`, so build the tensor first and then cast to `dtype`
# TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed
a = torch.tensor(coords, device=device)
coordinates_tensor_list.append(a.to(dtype))
sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order)))
return tuple(sample_inputs)
def sample_inputs_index_select(op_info, device, dtype, requires_grad):
return (
SampleInput(
make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, index_variable(2, S, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
SampleInput(
make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
)
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
test_args = [
([1, 2],),
(slice(0, 3),),
([slice(0, 3), 1],),
([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),
([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),
([slice(None), slice(None), [0, 3]],),
([slice(None), [0, 3], slice(None)],),
([[0, 3], slice(None), slice(None)],),
([[0, 3], [1, 2], slice(None)],),
([[0, 3], ],),
([[0, 3], slice(None)],),
([[0, 3], Ellipsis],),
([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),
(index_variable(2, S, device=device),),
(mask_not_all_zeros((S,)),),
]
return tuple(SampleInput(
make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=args)
for args in test_args)
def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
for accumulate in [False, True]:
# Test with indices arg
inputs.append(SampleInput(
make_tensor((S, S,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
(index_variable(2, S, device=device), ),
make_tensor((2, S), device, dtype, low=None, high=None)),
kwargs=dict(accumulate=accumulate)))
# Test with mask arg
mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))
inputs.append(SampleInput(
make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(
(mask, ),
make_tensor((S,), device, dtype, low=None, high=None),),
kwargs=dict(accumulate=accumulate)))
return inputs
# Still missing: a test for the nondeterminism of this operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index_add(op_info, device, dtype, requires_grad, **kwargs):
# These tests are pretty much the same as those for index_copy.
# Perhaps merge?
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
t = make_arg((S, S))
s = make_arg((S, S))
idx = make_arg((S,), dtype=torch.int64, low=0, high=S, requires_grad=False)
samples = [SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(1,
idx.detach().clone(),
s.detach().clone().requires_grad_(requires_grad)))]
for alpha in (-1, 0, 2):
samples.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(1,
idx.detach().clone(),
s.detach().clone().requires_grad_(requires_grad)),
kwargs=dict(alpha=alpha)))
# Add scalar cases
scalar_sizes = [(), (1,)]
ts = (make_arg(size) for size in scalar_sizes)
idxs = (make_arg(size, dtype=torch.int64, low=0, high=1, requires_grad=False) for size in scalar_sizes)
ss = (make_arg(size) for size in scalar_sizes)
samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(0, idx.detach().clone(), s.detach().clone())) for t, idx, s in product(ts, idxs, ss))
samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(0, idx.detach().clone(), s.detach().clone()),
kwargs=dict(alpha=a)) for t, idx, s, a in product(ts, idxs, ss, [-1, 0, 2]))
return samples
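# sort samples: a large 1-d permutation, small 3-d permutations over every dim with both
# descending flags, scalar tensors, and stable-sort kwargs (the stable schema is only
# sampled on CPU here, since CUDA support is still pending per the comment below).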
def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):
def small_3d_unique():
res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)
res = res.to(dtype).requires_grad_(requires_grad)
return res
def large_1d_unique():
res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
res = res.to(dtype).requires_grad_(requires_grad)
return res
samples = []
# Test case for large tensor.
samples.append(SampleInput(large_1d_unique()))
# Test cases for small 3d tensors.
# Imitates legacy tests from test/test_torch.py
dims = range(-3, 3)
flag = [True, False]
for dim, descending, stable in product(dims, flag, flag):
# default schema without stable sort
samples.append(SampleInput(small_3d_unique(),
args=(dim, descending)))
# schema with stable sort, no CUDA support yet
if torch.device(device).type == 'cpu':
samples.append(
SampleInput(small_3d_unique(),
kwargs=dict(dim=dim, descending=descending, stable=stable))
)
# Test cases for scalar tensor
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad)))
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),
args=(0,)))
samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),
args=(0, True)))
# Test cases for stable sort
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(stable=True)))
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(dim=0, stable=True)))
samples.append(SampleInput(small_3d_unique(),
kwargs=dict(dim=0, descending=True, stable=True)))
return samples
def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S))
samples = []
for x_size in sizes:
# the threshold and value args must be numbers
samples.append(SampleInput(make_arg(x_size), args=(make_arg(()).item(), make_arg(()).item())))
return samples
def sample_inputs_argsort(*args, **kwargs):
return [sample_input for sample_input in sample_inputs_sort(*args, **kwargs) if "stable" not in sample_input.kwargs]
def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs):
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
sample_inputs = []
for shape, sorted, return_inverse, return_counts, dim in \
product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]):
# torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim
if 0 in shape and shape.index(0) != dim:
continue
# skip invalid dim args
if dim is not None and (dim < -len(shape) or dim >= len(shape)):
continue
kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
# construct a test case with only one distinct value
input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
# construct a test case with mixed 0s and 1s
input_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\
.to(dtype).requires_grad_(requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
# construct a test case with many different values
input_t = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))
return sample_inputs
def sample_inputs_unique_consecutive(*args, **kwargs):
def generator():
for sample_input in sample_inputs_unique(*args, **kwargs):
if not sample_input.kwargs["sorted"]:
sample_input.kwargs.pop("sorted")
yield sample_input
return list(generator())
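# index_fill samples: fill with a Python number or a 0-d tensor along each of three
# dimensions, using positive, negative, scalar, and duplicated indices.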
def sample_inputs_index_fill(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
index_tensor = partial(torch.tensor, device=device, dtype=torch.long)
samples = []
fill_val = torch.tensor(-1 + 1j if dtype.is_complex else -1)
idx = index_variable(1, S, device=device)
ndim = 3
for d in range(ndim):
samples.append(SampleInput(make_arg((S,) * ndim), args=(d, idx, fill_val)))
samples.append(SampleInput(make_arg((S,) * ndim), args=(d, -idx - 1, fill_val)))
def unique_idx(numel, max_idx):
# Generate a vector of `numel` unique random indices
# in the range [0, max_idx).
indices = random.sample(range(max_idx), numel)
return index_tensor(indices)
samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), 2)))
samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), make_arg(()))))
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor(0), 2)))
samples.append(SampleInput(make_arg(()), args=(0, index_tensor([0]), 2)))
samples.append(SampleInput(make_arg(()), args=(0, index_tensor(0), 2)))
# Duplicate indices
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0]), 2)))
samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0, 2]), make_arg(()))))
return samples
def sample_inputs_max_min_binary(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
args_for_binary_op = (
((S, S, S), (S, S, S),),
((S, S, S), (S,),),
((S,), (S, S, S),),
((S, 1, S), (S, S),),
((S, S), (S, S),),
((), (),),
((S, S, S), (),),
((), (S, S, S),),
)
inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=(make_tensor(other_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),),))
for input_tensor, other_tensor in args_for_binary_op)
return inputs
def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((0, 8, 8), (5,)),
((3, 8, 8), 5),
((3, 8, 8), 1)
)
def generator():
for input_shape, output_size in cases:
yield SampleInput(make_arg(input_shape), args=(output_size,))
return list(generator())
def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((1, 8, 8, 8), (5, 7)),
((2, 8, 8, 8), (None, 7)),
((1, 8, 4, 3), (5, None)),
((1, 8, 4, 3), (None, None)),
((1, 8, 4, 3), (5)),
)
def generator():
for input_shape, output_size in cases:
yield SampleInput(make_arg(input_shape), args=(output_size,))
return list(generator())
def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
((0, 8, 8, 8, 8), (5, 7, 4)),
((1, 8, 4, 3, 7), (None, None, None)),
((1, 8, 4, 3, 7), (1, 1, 1)),
((3, 3, 8, 8, 6), (5, 7, None)),
((1, 3, 8, 8, 6), (5, None, 2)),
((3, 3, 8, 8, 6), (None, 3, 2)),
)
def generator():
for input_shape, output_size in cases:
yield SampleInput(make_arg(input_shape), args=(output_size,))
return list(generator())
def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8), (5,)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((3, 4, 4), 3),
((3, 4, 4), 1)
)
def generator():
for shapes, return_idx in product(cases, (True, False)):
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
return list(generator())
def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8, 8), (5, 7)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((1, 4, 4, 4), (2, 3)),
((2, 4, 4, 4), (None, 3)),
((2, 4, 4, 4), (1, 1)),
((1, 4, 4, 3), (3, None)),
((1, 4, 4, 3), (None, None)),
((1, 4, 4, 3), (3)),
)
def generator():
for shapes, return_idx in product(cases, (True, False)):
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
return list(generator())
def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as (input shape, output size)
cases = (
# ((0, 8, 8, 8, 8), (5, 7, 4)),
# 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
((1, 4, 4, 3, 5), (None, None, None)),
((1, 4, 4, 3, 5), (1, 1, 1)),
((3, 3, 4, 4, 6), (2, 3, None)),
((1, 3, 4, 4, 6), (3, None, 2)),
((3, 3, 4, 4, 6), (None, 3, 2)),
)
def generator():
for shapes, return_idx in product(cases, (True, False)):
yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
return list(generator())
class _TestParamsMaxPoolBase(object):
def __init__(self):
self.kwargs = {
'kernel_size': [3],
'stride': [2, None],
'ceil_mode': [True, False],
'padding': [0, 1],
'dilation': [1],
'return_indices': [True, False]
}
self.shapes = [
[1, 2, None], # batch
[2], # channels
[3, 6] # signal
]
def _gen_shape(self):
for shape in product(*self.shapes):
# shape[0] being None indicates a missing batch dimension
if shape[0] is None:
shape = shape[1:]
yield shape, torch.contiguous_format
# only 2d pooling inputs, i.e. rank-4 (N, C, H, W) tensors, support the channels_last memory format
if len(self.shapes) == 4 and len(shape) == 4:
yield shape, torch.channels_last
def _gen_kwargs(self):
keys = self.kwargs.keys()
for values in product(*self.kwargs.values()):
yield dict(zip(keys, values))
def gen_input_params(self):
yield from product(self._gen_shape(), self._gen_kwargs())
class _TestParamsMaxPool1d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3,)]
self.kwargs['stride'] += [(2,)]
self.kwargs['padding'] += [(1,)]
self.kwargs['dilation'] += [(1,)]
class _TestParamsMaxPool2d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3, 2)]
self.kwargs['stride'] += [(2, 1)]
self.kwargs['padding'] += [(1, 1)]
self.kwargs['dilation'] += [(1, 2)]
self.shapes.append([6])
class _TestParamsMaxPool3d(_TestParamsMaxPoolBase):
def __init__(self):
super().__init__()
self.kwargs['kernel_size'] += [(3, 2, 3)]
self.kwargs['stride'] += [(2, 1, 2)]
self.kwargs['dilation'] += [(1, 2, 1)]
self.shapes.append([6])
self.shapes.append([5])
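# max_pool samples are driven by the parameter classes above: op_info.name picks the
# 1d/2d/3d class, and every (shape, memory_format) x kwargs combination becomes one
# SampleInput. Rough sketch of what the generator yields (illustrative only):
#   for (shape, memory_format), pool_kwargs in _TestParamsMaxPool2d().gen_input_params():
#       ...  # build a tensor of `shape` in `memory_format`, then call max_pool2d(t, **pool_kwargs)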
def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
params_generator_type_dict = {
'nn.functional.max_pool1d': _TestParamsMaxPool1d,
'nn.functional.max_pool2d': _TestParamsMaxPool2d,
'nn.functional.max_pool3d': _TestParamsMaxPool3d,
}
def generator():
params_generator = params_generator_type_dict[op_info.name]()
for (shape, memory_format), kwargs in params_generator.gen_input_params():
arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad)
yield SampleInput(arg, kwargs=kwargs)
return list(generator())
def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((2, 1, 4, 5), {'p': 1., 'dim': 2}),
((2, 3, 4, 5), {'p': 2., 'dim': 1}),
((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),
((1, 3, 4, 5), {'p': -1., 'dim': 1}),
((1, 3, 4, 5), {'p': 0., 'dim': -1}),
((), {'p': 1.2, 'dim': 0}),
((2, 3, 4, 5), {}),
((2, 3, 4, 5), {'eps': 1e-4}))
def generator():
for input_shape, kwargs in cases:
yield SampleInput(make_arg(input_shape), kwargs=kwargs)
return list(generator())
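# conv_transpose1d samples: each case is (input shape, weight shape, bias shape or None)
# plus a kwargs dict of stride / padding / output_padding / groups / dilation, matching
# torch.nn.functional.conv_transpose1d's signature. The conv_transpose2d/3d samplers below
# follow the same layout with higher-rank shapes.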
def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4), (3, 3, 3), (3,),
{'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}),
((2, 2, 4), (2, 2, 4), (4,),
{'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}),
((1, 1, 4), (1, 1, 4), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}),
((1, 1, 4), (1, 2, 3), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5), (4, 8, 3), None,
{})
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4, 4), (3, 3, 3, 3), (3,),
{'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),
((2, 2, 4, 4), (2, 2, 4, 5), (4,),
{'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),
((1, 1, 4, 5), (1, 1, 4, 3), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 1, 4, 3), (1, 2, 3, 4), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5, 5), (4, 8, 3, 3), None,
{})
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, output_padding, groups, dilation)
cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,),
{'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}),
((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,),
{'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}),
((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,),
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}),
((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None,
{'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None,
{})
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias,
# and a dict of values of (stride, padding, dilation, groups)
cases: Tuple = (
((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}),
((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}),
((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}),
((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}),
# With defaults
((1, 4, 5), (3, 4, 3), None, {}),
)
# TODO: (@krshrimali), add error_inputs_func once https://github.com/pytorch/pytorch/pull/67354 is merged
# Should replace test_conv_modules_raise_error_on_incorrect_input_size and test_conv_shapecheck
# in test/test_nn.py
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as shapes for input, weight, bias
# and a dict of values of (stride, padding, groups, dilation)
cases: Tuple = (
((1, 3, 4, 4), (3, 3, 3, 3), (3,),
{'stride': (2, 2), 'padding': 2, 'groups': 1}),
((2, 4, 8, 8), (2, 2, 3, 3), (2,),
{'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
((1, 2, 4, 3), (4, 2, 3, 4), None,
{'stride': 2, 'padding': 1, 'groups': 1}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 2, 'padding': "valid"}),
((1, 4, 5, 5), (1, 4, 2, 3), (1,),
{'stride': 1, 'padding': "same", 'dilation': 3}),
# Below are the group related samples from common_nn.py
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}),
((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}),
((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}),
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}),
((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}),
((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}),
((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}),
# With defaults
((1, 4, 5, 5), (3, 4, 3, 3), None, {}),
)
def generator():
for input_shape, weight, bias, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(
make_arg(weight),
make_arg(bias) if bias is not None else bias
), kwargs=kwargs)
return list(generator())
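# group_norm samples: each case is (input shape, num_groups, eps); weight and bias are
# built with input_shape[1] (num_channels) elements, and one extra sample omits all
# optional arguments.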
def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, num groups, and eps
cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment]
((1, 6, 3), 2, 0.5),
((2, 6, 3), 2, -0.5),
((1, 2), 1, None),
((0, 2), 1, None),
)
def generator():
for input_shape, num_groups, eps in cases:
# weight and bias should each have num_channels (= input_shape[1]) elements
weight = make_arg(input_shape[1])
bias = make_arg(input_shape[1])
kwargs = {'weight': weight, 'bias': bias} if eps is None else {'weight': weight, 'bias': bias, 'eps': eps}
yield SampleInput(
make_arg(input_shape),
args=(num_groups,),
kwargs=kwargs
)
# Without any optional args
yield SampleInput(make_arg((1, 2)), args=(1,))
return list(generator())
def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)
# Ordered as: input shape, kwargs for momentum, eps
cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment]
((S, S, S), {'momentum': 0.5, 'eps': 0.6}),
((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}),
((3, 2, 4), {'momentum': -1.2}),
((3, 2, 4), {'momentum': 0.0}),
((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
)
def generator():
for input_shape, kwargs in cases:
# args: running mean, running var, weight and bias should necessarily be of shape: (channels,)
channels = input_shape[1]
weight = make_arg(channels)
bias = make_arg(channels)
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
new_kwargs = {
'running_mean': running_mean,
'running_var': running_var,
'weight': weight,
'bias': bias,
**kwargs
}
yield SampleInput(
make_arg(input_shape),
args=(),
kwargs=new_kwargs
)
# Checking for permutations of weights and biases as `None`
# instance_norm assumes that if there's a bias, there's a weight
weights = [channels, None]
biases = [None, None]
for weight_channels, bias_channels in zip(weights, biases):
running_mean = make_arg_without_requires_grad(channels, low=0)
running_var = make_arg_without_requires_grad(channels, low=0)
yield SampleInput(
make_arg(input_shape),
args=(),
kwargs={
'running_mean': running_mean,
'running_var': running_var,
'weight': make_arg(weight_channels) if weight_channels is not None else None,
'bias': make_arg(bias_channels) if bias_channels is not None else None
}
)
# Test case for no optional kwargs
yield SampleInput(make_arg((1, 2, 3)), kwargs={})
return list(generator())
def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, normalized_shape and a kwarg dict for eps
cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 2, 3), (1, 2, 3), {'eps': 0.5}),
((2, 2, 3), (2, 3), {'eps': -0.5}),
((1,), (1,), {}),
((1, 2), (2,), {}),
((0, 1), (1,), {}),
)
def generator():
for input_shape, normalized_shape, kwargs in cases:
# Shape of weight and bias should be the same as normalized_shape
weight = make_arg(normalized_shape)
bias = make_arg(normalized_shape)
yield SampleInput(
make_arg(input_shape),
args=(normalized_shape, weight, bias),
kwargs=kwargs
)
# Without any optional args
yield SampleInput(make_arg((1, 2)), args=((2,),))
# TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs,
# enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400
# With weight and a `None` bias
# yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None))
# With `None` weight and bias (tests failing for this, see the link above)
# yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,))))
return list(generator())
def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Ordered as input shape, size and a kwarg dict for alpha, beta, and k
cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment]
((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}),
((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}),
((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}),
((1, 6, 3), 2, {'alpha': 3e-05}),
((1, 6, 3), 2, {'beta': 0.5}),
((1, 6, 3), 2, {'k': 1.25}),
((1, 6, 3), 2, {}),
((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
)
def generator():
for input_shape, size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs)
return list(generator())
def sample_inputs_hardswish(self, device, dtype, requires_grad):
N = 5
# Make sure we are testing the -3 to 3 range; the default is -10 to 10, so this may be unnecessary.
tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]
return tensors
def sample_inputs_linear(self, device, dtype, requires_grad):
features_options = [[3, 4], [8, 8]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for has_bias, (in_feat, out_feat), batch_shape in \
itertools.product([True, False], features_options, batch_options):
input_tensor = create_tensor(batch_shape + [in_feat])
weight = create_tensor([out_feat, in_feat])
if not has_bias:
sample_inputs.append(SampleInput(input_tensor, args=(weight,)))
continue
bias = create_tensor([out_feat])
sample_inputs.append(SampleInput(input_tensor, args=(weight, bias)))
return sample_inputs
def sample_inputs_bilinear(self, device, dtype, requires_grad):
features_options = [[3, 4, 5], [8, 8, 8]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \
itertools.product([True, False], features_options, batch_options):
input_tensor1 = create_tensor(batch_shape + [in_feat1])
input_tensor2 = create_tensor(batch_shape + [in_feat2])
weight = create_tensor([out_feat, in_feat1, in_feat2])
if not has_bias:
sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight,)))
continue
bias = create_tensor([out_feat])
sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight, bias)))
return sample_inputs
def sample_inputs_glu(self, device, dtype, requires_grad):
features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]]
batch_options: List[List[int]] = [
[], # no batch
[0],
[8],
[2, 3],
]
create_tensor = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-2, high=2)
sample_inputs = []
for features, batch_shape in itertools.product(features_options, batch_options):
ndim = len(features) + len(batch_shape)
for dim in range(ndim):
input_tensor = create_tensor(batch_shape + features)
dim_size = input_tensor.size(dim)
if dim_size > 0 and dim_size % 2 == 0:
sample_inputs.append(SampleInput(input_tensor, args=(dim,)))
return sample_inputs
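# interpolate samples: for each mode, inputs of the appropriate rank are resized both to
# explicit target sizes and by scale factors (1.7 and 0.6), with align_corners variations
# for the modes that accept it.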
def sample_inputs_interpolate(mode, self, device, dtype, requires_grad):
N, C = 2, 3
D = 4
S = 3
L = 5
align_corners_options: Tuple[Any, ...] = (None,)
if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):
align_corners_options = (True, False, None)
ranks_for_mode = {
'nearest': [1, 2, 3],
'linear': [1],
'bilinear': [2],
'bicubic': [2],
'trilinear': [3],
'area': [1, 2, 3]
}
def shape(size, rank, with_batch_channel=True):
if with_batch_channel:
return tuple([N, C] + ([size] * rank))
return tuple([size] * rank)
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
sample_inputs = []
for align_corners in align_corners_options:
for rank in ranks_for_mode[mode]:
sample_inputs.extend([
SampleInput(make_arg(shape(D, rank)),
args=(shape(S, rank, False), None, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(shape(L, rank, False), None, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(None, 1.7, mode, align_corners)),
SampleInput(make_arg(shape(D, rank)),
args=(None, 0.6, mode, align_corners)),
])
return sample_inputs
def sample_inputs_upsample(mode, self, device, dtype, requires_grad):
N, C = 2, 3
D = 4
S = 3
L = 5
ranks_for_mode = {
'nearest': [1, 2, 3],
'bilinear': [2],
}
def shape(size, rank, with_batch_channel=True):
if with_batch_channel:
return tuple([N, C] + ([size] * rank))
return tuple([size] * rank)
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
sample_inputs = []
for rank in ranks_for_mode[mode]:
sample_inputs.extend([
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(size=shape(S, rank, False))),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(size=shape(L, rank, False))),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(scale_factor=1.7)),
SampleInput(make_arg(shape(D, rank)),
kwargs=dict(scale_factor=0.6)),
])
return sample_inputs
def sample_inputs_gelu(self, device, dtype, requires_grad):
N = 5
tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-3, high=3)) for _ in range(1, N)]
return tensors
def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
args_for_reduction_with_dim = (
((S, S, S), (1,),),
((S, S, S), (1, True, ),),
((), (0,),),
((), (0, True,),),
)
inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=args,))
for input_tensor, args in args_for_reduction_with_dim)
return inputs
def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
inputs.append(SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad),))
inputs.append(SampleInput(make_tensor((), device, dtype,
low=None, high=None,
requires_grad=requires_grad),))
return inputs
def _generate_nan_reduction_inputs(device, dtype, requires_grad):
yield from _generate_reduction_inputs(device, dtype, requires_grad)
yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad)
yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad)
def sample_inputs_nan_reduction(supports_multiple_dims):
# Generates sample inputs for reduction ops that contain the input tensor
# and dim and keepdim kwargs. If a reduction op needs to test additional
# args/kwargs then create a separate sample_inputs function
def fn(op_info, device, dtype, requires_grad):
inputs = []
for t in _generate_nan_reduction_inputs(device, dtype, requires_grad):
# Add case without dim and keepdim kwargs
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad)))
for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
kwargs=kwargs))
return inputs
return fn
def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad):
test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1))
test_interpolations = ['linear', 'midpoint']
inputs = []
for quantiles in test_quantiles:
for t in _generate_reduction_inputs(device, dtype, requires_grad):
# Add case without dim and keepdim kwargs
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(quantiles,)))
for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):
# Interpolation kwarg for now is only supported when providing both dim and keepdim
kwargs.setdefault('dim', 0)
kwargs.setdefault('keepdim', False)
for interpolation in test_interpolations:
kwargs['interpolation'] = interpolation
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(quantiles,), kwargs=kwargs))
return inputs
def sample_inputs_reduction_count_nonzero(*args, **kwargs):
"""Sample inputs for count_nonzero"""
samples: List[SampleInput] = sample_inputs_reduction(*args, **kwargs)
# count_nonzero does not support keepdim yet
for sample in samples:
sample.kwargs.pop('keepdim', None)
return samples
def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad):
N = 10
tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
requires_grad=requires_grad)) for _ in range(1, N)]
return tensors
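# fractional_max_pool2d samples: every (input shape, kernel_size) case is tested with an
# int output_size, a tuple output_size, and an output_ratio, each with and without
# return_indices. The 3d sampler below mirrors this structure.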
def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size
cases = (((1, 3, 9, 9), 3),
((1, 3, 9, 9), (4, 4)),
((1, 3, 9, 9), (6, 6)),
((2, 3, 9, 9), (3, 3)),
((1, 1, 4, 4), (2, 2)),
((1, 2, 6, 6), (4, 4)))
samples = []
for input_shape, kernel_size in cases:
for return_indices in [False, True]:
# test case passing a single output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2), return_indices=return_indices)
))
# test case passing a tuple output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2, 3), return_indices=return_indices)
))
# test case passing an output ratio
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_ratio=(0.5, 0.5), return_indices=return_indices)
))
return samples
def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size
cases = (((2, 3, 5, 5, 5), (2, 2, 2)),
((1, 2, 6, 5, 4), 2),
((1, 2, 5, 6, 5), (2, 3, 2)),
((1, 2, 6, 6, 6), (2, 3, 2)),
((1, 1, 7, 6, 7), (2, 3, 4)),
((1, 1, 4, 5, 4), (2, 2, 1)),
((1, 1, 8, 7, 6), (4, 3, 2)),
((0, 1, 4, 5, 4), (2, 2, 1)))
samples = []
for input_shape, kernel_size in cases:
for return_indices in [False, True]:
# test case passing a single output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2), return_indices=return_indices)
))
# test case passing a tuple output size
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_size=(2, 3, 2), return_indices=return_indices)
))
# test case passing an output ratio
samples.append(SampleInput(
make_arg(input_shape),
args=(kernel_size,),
kwargs=dict(output_ratio=(0.5, 0.5, 0.5), return_indices=return_indices)
))
return samples
def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),
((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),
((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),
((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),
((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),
((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))
def generator():
for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:
yield SampleInput(make_arg(input_shape),
args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))
# Case with just input_shape and kernel_size
yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))
return list(generator())
def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, kwargs
cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
((2, 3, 9), (3,), dict()),
((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)),
((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)),
((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)),
((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)),
((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)),
((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)),
((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)),
((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)),
]
def generator():
for input_shape, kernel_size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)
return list(generator())
def sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
# Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
((2, 3, 3, 4, 4), (2, 2, 2), dict()),
((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True,
count_include_pad=False, divisor_override=2)),
((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True,
count_include_pad=True, divisor_override=2)),
((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)),
((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False,
count_include_pad=False, divisor_override=2)),
((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=-2)),
((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True,
count_include_pad=True, divisor_override=None)),
((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None)),
]
def generator():
for input_shape, kernel_size, kwargs in cases:
yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)
return list(generator())
def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):
def get_tensor_input(size):
return make_tensor(size, device, dtype, requires_grad=requires_grad)
inputs = []
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))
inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1,)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))
inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))
return inputs
def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
arg_a = make_tensor((S,), device, dtype, requires_grad=requires_grad)
arg_b = make_tensor((M,), device, dtype, requires_grad=requires_grad)
inputs.append(SampleInput(arg_a, args=(arg_b,)))
return inputs
def sample_inputs_igamma_igammac(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, low=1e-3)
cases = (((S, S), (S, S), False),
((S, S), (S, ), False),
((S, ), (S, S), True),
((), (), False))
def generator():
for shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape, requires_grad=requires_grad),
args=(make_arg(other_shape, requires_grad=False),),
broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_dist(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))
ps = (2, 4)
def generate_samples():
for size_x, size_y, p in product(sizes, sizes, ps):
yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))
return list(generate_samples())
# TODO: add samples that exercise the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index_copy(op_info, device, dtype, requires_grad, **kwargs):
def make_arg(shape, low=None, high=None, dtype=dtype, requires_grad=requires_grad):
return make_tensor(shape, device=device, dtype=dtype,
low=low, high=high, requires_grad=requires_grad)
t = make_arg((S, S))
s = make_arg((S, S))
# idx is a permutation of 0...S-1 for this function to be deterministic
idx = torch.randperm(S, device=device, dtype=torch.int64)
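    # Using a permutation guarantees that every destination row is written exactly once; with
    # repeated indices the result would depend on write order, which is the nondeterminism
    # tracked in the issue linked above.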
samples = [SampleInput(t, args=(1, idx, s))]
# Add scalar cases
scalar_sizes = [(), (1,)]
ts = (make_arg(size) for size in scalar_sizes)
idxs = (make_arg(size, dtype=torch.int64, low=0, high=1, requires_grad=False) for size in scalar_sizes)
ss = (make_arg(size) for size in scalar_sizes)
samples.extend(SampleInput(t.detach().clone().requires_grad_(requires_grad),
args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))
return samples
def sample_inputs_mode(op_info, device, dtype, requires_grad):
    cases = (
        ((S, S, S), (),),
        ((S, S, S), (1, ),),
        ((S, S, S), (1, True, ),),
        ((), (),),
        ((), (0,),),
        ((), (0, True,),),
    )
    inputs = [SampleInput(make_tensor(input_tensor, device, dtype,
                                      low=None, high=None,
                                      requires_grad=requires_grad),
                          args=args)
              for input_tensor, args in cases]
    return inputs
# TODO: add samples that exercise the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_put(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)
S = 3
def gen_inputs():
# Generic inputs
idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]
idx_list = [idx, -idx - 1]
for idx, acc in product(idx_list, (True, False)):
yield SampleInput(input=make_arg((S, S)),
args=(idx.detach().clone(),
make_arg((S,)),
acc))
# Scalar cases
scalar_sizes = [(), (1,)]
tgt_gen = (make_arg(size) for size in scalar_sizes)
idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
src_gen = (make_arg(size) for size in scalar_sizes)
for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):
yield SampleInput(input=tgt.detach().clone().requires_grad_(requires_grad),
args=(idx.detach().clone(),
src.detach().clone().requires_grad_(requires_grad),
acc))
# Empty cases
tgt_sizes = [(0,), (), (1,), (3, 2)]
tgt_gen = (make_arg(size) for size in tgt_sizes)
idx = make_idx((0,), high=1)
src = make_arg((0,))
        for tgt, acc in product(tgt_gen, (True, False)):
yield SampleInput(input=tgt.detach().clone().requires_grad_(requires_grad),
args=(idx.detach().clone(),
src.detach().clone().requires_grad_(requires_grad),
acc))
return list(gen_inputs())
def sample_inputs_take(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)
S = 3
def gen_inputs():
# Generic inputs: take S elements out of S * S
index = make_idx((S,), high=(S * S))
for idx in (index, -index - 1):
yield SampleInput(input=make_arg((S, S)), args=(idx,))
# Scalar cases
scalar_sizes = [(), (1,)]
src_gen = (make_arg(size) for size in scalar_sizes)
idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
for src, idx in product(src_gen, idx_gen):
yield SampleInput(input=src.detach().clone().requires_grad_(requires_grad),
args=(idx.detach().clone(),))
# Empty cases
src_sizes = [(0,), (), (1,), (3, 2)]
src_gen = (make_arg(size) for size in src_sizes)
idx = make_idx((0,), high=1)
for src in src_gen:
yield SampleInput(input=src.detach().clone().requires_grad_(requires_grad),
args=(idx.detach().clone(),))
return list(gen_inputs())
def sample_movedim_moveaxis(op_info, device, dtype, requires_grad):
return (
SampleInput(
make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=([0, 1, 2, 3], [3, 2, 1, 0])),
SampleInput(
make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=([0, -1, -2, -3], [-3, -2, -1, -0]))
)
def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))
if requires_grad:
# Tests for variant_consistency_jit, grad, gradgrad
# are slower. Use smaller bags of `rep_dims` and `shapes`
# in this case.
rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment]
shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment]
samples = []
for rep_dim, shape in product(rep_dims, shapes):
# `torch.repeat` errors for `len(rep_dims) < t.dim()`,
# so we filter such combinations.
if op_info.name == 'repeat' and len(rep_dim) < len(shape):
continue
samples.append(SampleInput(make_arg(shape), args=(rep_dim,),))
return samples
def sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_args = (
((S, S, S), (1, 2, 2)),
((S, S, S), (-1, 2, 2)),
((S, S, S), (1, 0, 0)),
((S, S, S), (-1, 0, 0)),
)
def generator():
for shape, args in shapes_and_args:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
yield SampleInput(tensor, args=args)
return list(generator())
def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
y_shape_x_shape_and_kwargs = [
((2, 3), (2, 3), {}),
((2, 3), (2, 3), {'dim': 1}),
((6,), (6,), {}),
((6,), None, {}),
# When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #61619
# ((6,0), (6,0), {}),
((2, 3), (1, 3), {}),
((3, 3), (3, 3), {}),
((3, 3), (3, 3), {'dim': -2}),
((5,), None, {'dx': 2.0}),
((2, 2), None, {'dx': 3.0})
]
samples = []
for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
if x_shape is not None:
x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
else:
samples.append(SampleInput(y_tensor, kwargs=kwarg))
return samples
def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
y_shape_x_shape_and_kwargs = [
((2, 3), (2, 3), {}),
((2, 3), (2, 3), {'dim': 1}),
((6,), (6,), {}),
((6,), None, {}),
# When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #61619
# ((6,0), (6,0), {}),
((2, 3), (1, 3), {}),
((3, 3), (3, 3), {}),
((3, 3), (3, 3), {'dim': -2}),
((5,), None, {'dx': 2.0}),
((2, 2), None, {'dx': 3.0})
]
samples = []
for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
y_tensor = make_tensor(y_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
if x_shape is not None:
x_tensor = make_tensor(x_shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
else:
samples.append(SampleInput(y_tensor, kwargs=kwarg))
return samples
def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_axes = [
((3, 4, 5), 0),
((3, 4, 5), 1),
((3, 4, 5), 3),
((3, 4, 5), -1),
((3, 4, 5), -3),
((), 0)
]
samples = []
for shape, axis in shapes_and_axes:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
samples.append(SampleInput(tensor, args=(axis,),))
return samples
def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):
shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5))
kernel_sizes = (2, (2, 2), (3, 3))
dilations = (1, 2, (1, 2))
paddings = (0, 1, (1, 1))
strides = (1, 2, (1, 2))
def generator():
cases = product(shapes, kernel_sizes, dilations, paddings, strides)
for shape, kernel_size, dilation, padding, stride in cases:
tensor = make_tensor(shape, device, dtype, requires_grad=requires_grad)
yield SampleInput(tensor, args=(kernel_size, dilation, padding, stride))
# With default args
yield SampleInput(make_tensor((1, 1, 5, 5), device, dtype, requires_grad=requires_grad),
args=((3, 3),))
return list(generator())
def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):
shapes_and_args = (
((S, 1, S, 1), ()),
((1, 1, 1, 1), ()),
((S, 1, S, 1), (1,)),
((S, 1, S, 1), (-1,)),
((S, 1, S, 1), (2,)),
((S, 1, S, 1), (-2,)),
((), (0, )),
)
def generator():
for shape, args in shapes_and_args:
tensor = make_tensor(shape, device, dtype, low=None, high=None,
requires_grad=requires_grad)
yield SampleInput(tensor, args=args)
return list(generator())
def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):
assert mode in ('constant', 'reflect', 'replicate', 'circular')
if mode in ['reflect', 'replicate']:
cases: tuple = ( # ignore
((1, 3), (1, 2)),
((1, 3), (0, 1)),
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((1, 3, 3), (0, 2, 0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
elif mode == 'constant':
cases = (
((1, 3), (1, 2)),
((1, 3), (0, 1)),
((1, 3), (0, 2, 0, 1)),
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((0, 3, 3), (0, 2, 0, 1)),
((0, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((1, 3, 3), (0, 2, 0, 1)),
((1, 3, 3), (1, 1, 1, 1, 1, 1)),
((0, 3, 3, 3), (1, 2)),
((0, 3, 3, 3), (0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((3, 3, 5, 5), (1, 2)),
((3, 3, 5, 5), (0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
((1, 3, 3, 3, 3), (1, 2)),
((1, 3, 3, 3, 3), (0, 1)),
((1, 3, 3, 3, 3), (0, 2, 0, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
else: # mode == 'circular'
if dtype == torch.bool:
            # test_dtypes fails on ASAN for this case with:
# runtime error: load of value 190, which is not a valid value for type 'bool'
# Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562
# Reference Issue: https://github.com/pytorch/pytorch/issues/63034
cases = (
((2, 3, 3), (1, 2)),
((1, 3, 3), (1, 2)),
)
else:
cases = (
((0, 3, 3), (1, 2)),
((0, 3, 3), (0, 1)),
((1, 3, 3), (1, 2)),
((1, 3, 3), (0, 1)),
((0, 3, 3, 3), (0, 2, 0, 1)),
((3, 3, 5, 5), (0, 2, 0, 1)),
((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
((1, 3, 4, 4), (-1, 1, -2, 1)),
)
make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def generator():
if mode == 'constant':
# Default args
yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))
if mode in ['reflect', 'replicate', 'circular']:
for shape, pad in cases:
yield SampleInput(make_inp(shape), args=(pad, mode))
else: # mode == 'constant'
for pad_value in (1., 2.):
for shape, pad in cases:
yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))
return list(generator())
# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet
# Creates matrices with a positive nonzero determinant
def sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):
def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):
u, s, vh = torch.linalg.svd(A, full_matrices=False)
s.clamp_(min=min_singular_value)
A = (u * s.unsqueeze(-2)) @ vh
det = A.det()
if sign is not None:
if A.dim() == 2:
if (det < 0) ^ (sign < 0):
A[0, :].neg_()
else:
cond = ((det < 0) ^ (sign < 0)).nonzero()
if cond.size(0) > 0:
for i in range(cond.size(0)):
A[list(cond[i])][0, :].neg_()
return A
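    # Note: negating a single row of A multiplies det(A) by -1 (the determinant is multilinear in
    # the rows), and the singular values were clamped away from zero above, so the row negations in
    # make_nonzero_det are enough to reach the requested determinant sign without making it zero.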
samples = []
# cases constructed using make_tensor()
tensor_shapes = (
(S, S),
(1, 1),
(3, 3, S, S),
(3, 3, 1, 1)
)
for shape in tensor_shapes:
t = make_tensor(shape, device=device, dtype=dtype)
d = make_nonzero_det(t).requires_grad_(requires_grad)
samples.append(SampleInput(d))
# cases constructed using:
# 1) make_symmetric_matrices
# 2) make_symmetric_pd_matrices
# 3) make_fullrank_matrices_with_distinct_singular_values
symmetric_shapes = (
(S, S),
(3, S, S),
)
def _helper(constructor, *shape, **kwargs):
t = constructor(*shape, device=device, dtype=dtype)
d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)
samples.append(SampleInput(d))
for shape in symmetric_shapes:
_helper(make_symmetric_matrices, *shape)
_helper(make_symmetric_pd_matrices, *shape)
_helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)
return tuple(samples)
def np_unary_ufunc_integer_promotion_wrapper(fn):
# Wrapper that passes PyTorch's default scalar
# type as an argument to the wrapped NumPy
# unary ufunc when given an integer input.
    # This mimics PyTorch's integer->floating point
# type promotion.
#
# This is necessary when NumPy promotes
# integer types to double, since PyTorch promotes
# integer types to the default scalar type.
# Helper to determine if promotion is needed
def is_integral(dtype):
return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]
@wraps(fn)
def wrapped_fn(x):
# As the default dtype can change, acquire it when function is called.
# NOTE: Promotion in PyTorch is from integer types to the default dtype
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
if is_integral(x.dtype):
return fn(x.astype(np_dtype))
return fn(x)
return wrapped_fn
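# Illustrative sketch of why the wrapper is needed (np.sin is just an example ufunc here): with
# torch.get_default_dtype() == torch.float32, torch.sin on an int64 tensor returns float32, while
# np.sin on an int64 array returns float64, so a reference comparison could fail on dtype alone.
#   np_sin = np_unary_ufunc_integer_promotion_wrapper(np.sin)
#   np_sin(np.array([1, 2, 3], dtype=np.int64))  # computed in float32, matching PyTorch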
def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
nd_tensor = partial(make_tensor, (S, S + 1, S + 2), device=device,
dtype=dtype, requires_grad=requires_grad)
oned_tensor = partial(make_tensor, (31,), device=device,
dtype=dtype, requires_grad=requires_grad)
if self.ndimensional == SpectralFuncType.ND:
return [
SampleInput(nd_tensor(),
kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(s=(8,))),
SampleInput(oned_tensor()),
*(SampleInput(nd_tensor(),
kwargs=dict(dim=dim))
for dim in [-1, -2, -3, (0, -1)]),
]
elif self.ndimensional == SpectralFuncType.TwoD:
return [
SampleInput(nd_tensor(),
kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(s=(6, 8))),
SampleInput(nd_tensor(),
kwargs=dict(dim=0)),
SampleInput(nd_tensor(),
kwargs=dict(dim=(0, -1))),
SampleInput(nd_tensor(),
kwargs=dict(dim=(-3, -2, -1))),
]
else:
return [
SampleInput(nd_tensor(),
kwargs=dict(n=10, dim=1, norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(n=7)),
SampleInput(oned_tensor()),
*(SampleInput(nd_tensor(),
kwargs=dict(dim=dim))
for dim in [-1, -2, -3]),
]
def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_input(()), kwargs=dict(repeats=2)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2, dim=1)),
SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=torch.arange(3, device=device), dim=1))
]
SpectralFuncType = Enum('SpectralFuncType', ('OneD', 'TwoD', 'ND'))
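# SpectralFuncType records how many dimensions a transform operates over (presumably the
# torch.fft.fft-style one-dimensional ops, the torch.fft.fft2-style two-dimensional ops and the
# torch.fft.fftn-style N-dimensional ops). sample_inputs_spectral_ops above branches on it:
# OneD ops receive 'n'/'dim' kwargs, while TwoD and ND ops receive 's'/'dim' tuples.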
# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
"""Operator information for torch.fft transforms. """
def __init__(self,
name, # the string name of the function
*,
ref=None, # Reference implementation (probably in np.fft namespace)
dtypes=floating_and_complex_types(),
ndimensional: SpectralFuncType,
sample_inputs_func=sample_inputs_spectral_ops,
decorators=None,
**kwargs):
decorators = list(decorators) if decorators is not None else []
decorators += [
skipCPUIfNoFFT,
skipCUDAIfRocm,
]
super().__init__(name=name,
dtypes=dtypes,
decorators=decorators,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
self.ndimensional = ndimensional
def sample_inputs_stft(op_info, device, dtype, requires_grad):
def mt(shape, **kwargs):
return make_tensor(shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt(100), kwargs=dict(n_fft=10))
for center in [False, True]:
yield SampleInput(mt(10), kwargs=dict(n_fft=7, center=center))
yield SampleInput(mt((10, 100)), kwargs=dict(n_fft=16, hop_length=4, center=center))
window = make_tensor(16, low=.5, high=2.0, dtype=dtype, device=device)
yield SampleInput(
mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
yield SampleInput(
mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
if not dtype.is_complex:
yield SampleInput(
mt((10, 100)), kwargs=dict(n_fft=16, window=window, onesided=False))
def sample_inputs_istft(op_info, device, dtype, requires_grad):
def mt(shape, **kwargs):
real_shape = shape if dtype.is_complex else shape + (2,)
return make_tensor(real_shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10))
yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False))
yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True))
for center in [False, True]:
yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center))
yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center))
window = make_tensor(10, low=.5, high=2.0, dtype=dtype, device=device)
yield SampleInput(mt((10, 10, 6)), kwargs=dict(
n_fft=10, window=window, center=center, return_complex=dtype.is_complex))
yield SampleInput(mt((10, 10, 10)), kwargs=dict(
n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True))
real_window = window if not dtype.is_complex else window.real
yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center))
def sample_inputs_fftshift(op_info, device, dtype, requires_grad):
def mt(shape, **kwargs):
return make_tensor(shape, device=device, dtype=dtype,
requires_grad=requires_grad, **kwargs)
yield SampleInput(mt((9, 10)))
yield SampleInput(mt((50,)), kwargs=dict(dim=0))
yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,)))
yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1)))
yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2)))
class ShapeFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""
def __init__(self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCUDA=None,
dtypesIfROCM=None,
sample_inputs_func=None,
**kwargs):
super(ShapeFuncInfo, self).__init__(name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
def sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False):
if same_size:
return [make_tensor((N, N), device, dtype, noncontiguous=noncontiguous) for _ in range(N)]
else:
return [make_tensor((N - i, N - i), device, dtype, noncontiguous=noncontiguous) for i in range(N)]
def get_foreach_method_names(name):
# get torch inplace reference function
op_name = "_foreach_" + name
inplace_op_name = "_foreach_" + name + "_"
op = getattr(torch, op_name, None)
inplace_op = getattr(torch, inplace_op_name, None)
ref = getattr(torch, name, None)
ref_inplace = getattr(torch.Tensor, name + "_", None)
return op, inplace_op, ref, ref_inplace
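# For example, get_foreach_method_names("add") is expected to resolve to
# (torch._foreach_add, torch._foreach_add_, torch.add, torch.Tensor.add_), with None returned for
# any variant that does not exist on the torch namespace.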
class ForeachFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for foreach functions"""
def __init__(self,
name,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
dtypesIfROCM=None,
safe_casts_outputs=True,
supports_alpha_param=False,
sample_inputs_func=sample_inputs_foreach,
**kwargs):
super().__init__(
"_foreach_" + name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
safe_casts_outputs=safe_casts_outputs,
sample_inputs_func=sample_inputs_func,
**kwargs
)
foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name)
self.method_variant = foreach_method
self.inplace_variant = foreach_method_inplace
self.ref = torch_ref_method
self.ref_inplace = torch_ref_inplace
self.supports_alpha_param = supports_alpha_param
if name == "norm":
self.ref = torch.linalg.vector_norm
def sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False):
# Generate Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
inputs = (
torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix
torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices
random_hermitian_pd_matrix(S, dtype=dtype, device=device), # single matrix
random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), # batch of matrices
)
test_cases = (torch.linalg.cholesky(a) for a in inputs)
out = []
for a in test_cases:
a.requires_grad = requires_grad
out.append(SampleInput(a))
out.append(SampleInput(a.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=True)))
return out
def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_well_conditioned_matrix
device = torch.device(device)
drivers: Tuple[str, ...]
if device.type == 'cuda':
drivers = ('gels',)
else:
drivers = ('gels', 'gelsy', 'gelss', 'gelsd')
# we generate matrices of shape (..., n + delta, n)
deltas: Tuple[int, ...]
if device.type == 'cpu' or has_cusolver():
deltas = (-1, 0, +1)
# only square systems if Cusolver is not available
        # because we solve a lstsq problem with a transposed matrix in the backward
else:
deltas = (0,)
out = []
for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas):
shape = batch + (3 + delta, 3)
a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
a.requires_grad_(requires_grad)
b = make_tensor(shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
out.append(SampleInput(a, args=(b,), kwargs=dict(driver=driver)))
return out
def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
"""
This function generates input for torch.linalg.householder_product (torch.orgqr).
The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors.
Empty, square, rectangular, batched square and batched rectangular input is generated.
"""
# Each column of the matrix is getting multiplied many times leading to very large values for
# the Jacobian matrix entries and making the finite-difference result of grad check less accurate.
# That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.
samples = (
SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((2, 1, S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((2, 1, S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),
SampleInput(make_tensor((0, 0), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
# m = n = S, k = S - 2
SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S - 2,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
# m = S, n = S -1, k = S - 2
SampleInput(make_tensor((S, S - 1), device, dtype, low=-2, high=2, requires_grad=requires_grad),
args=(make_tensor((S - 2,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),
)
return samples
def sample_inputs_ormqr(op_info, device, dtype, requires_grad):
# create a helper function wrapping `make_tensor`
make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def gen_inputs():
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
tf = [True, False]
for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):
reflectors = make_input((*batch, m, n))
tau = make_input((*batch, min(m, n)))
other_matrix_shape = (m, n) if left else (n, m)
other = make_input((*batch, *other_matrix_shape))
kwargs = {"left": left, "transpose": transpose}
yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)
return tuple(gen_inputs())
def sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):
"""
    This function always generates positive-definite input for torch.linalg.cholesky using
    random_hermitian_pd_matrix.
    The input is generated as the itertools.product of 'batches', 'ns' and the 'upper' flag.
    In total this function generates 16 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices,
(1, 1) - 1x1 batch of matrices
'ns' gives 0x0 and 5x5 matrices.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
"""
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 0]
out = []
for batch, n, upper in product(batches, ns, [True, False]):
a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
out.append(SampleInput(a, kwargs={"upper": upper}))
return out
def sample_inputs_symeig(op_info, device, dtype, requires_grad=False):
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for o in out:
o.kwargs = {"upper": bool(np.random.choice([True, False])),
"eigenvectors": True}
# A gauge-invariant function
o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))
return out
def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False):
"""
    This function generates input for torch.linalg.eig.
"""
def out_fn(output):
return output[0], abs(output[1])
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument.
"""
def out_fn(output):
if isinstance(output, tuple):
# eigh function
return output[0], abs(output[1])
else:
# eigvalsh function
return output
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.kwargs = {"UPLO": np.random.choice(["L", "U"])}
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False):
def out_fn(output):
return output[1]
samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
for sample in samples:
sample.output_process_fn_grad = out_fn
return samples
def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs):
"""
    This function generates input for torch.linalg.pinv with different 'rcond' values.
"""
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)
real_dtype = out[0].input.real.dtype if dtype.is_complex else dtype
    samples = []
    for o in out:
        # requires_grad path for the rcond tensor is not implemented, so rcond itself never requires grad.
        # Build one SampleInput per rcond value instead of overwriting kwargs on the same sample.
        for rcond in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)):
            samples.append(SampleInput(o.input.detach().clone().requires_grad_(requires_grad),
                                       kwargs={"rcond": rcond}))
    return samples
def sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.pinv with hermitian=True keyword argument.
"""
out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)
for o in out:
o.kwargs = {"hermitian": True}
return out
def sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):
"""
    This function always generates solvable input for torch.linalg.solve.
    Using random_fullrank_matrix_distinct_singular_value gives non-singular (=invertible, =solvable) matrices 'a'.
The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.
The second input is generated as the product of 'batches', 'ns' and 'nrhs'.
In total this function generates 18 SampleInputs
'batches' cases include:
() - single input,
(0,) - zero batched dimension,
(2,) - batch of two matrices.
'ns' gives 0x0 and 5x5 matrices.
and 'nrhs' controls the number of vectors to solve for:
() - using 1 as the number of vectors implicitly
(1,) - same as () but explicit
(3,) - solve for 3 vectors.
Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs.
torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow
1D tensors (vectors) as the right-hand-side.
Once torch.solve / triangular_solve / cholesky_solve and its testing are removed,
'vector_rhs_allowed' may be removed here as well.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, )]
ns = [5, 0]
if vector_rhs_allowed:
nrhs = [(), (1,), (3,)]
else:
nrhs = [(1,), (3,)]
out = []
for n, batch, rhs in product(ns, batches, nrhs):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
a.requires_grad = requires_grad
b = torch.randn(*batch, n, *rhs, dtype=dtype, device=device)
b.requires_grad = requires_grad
out.append(SampleInput(a, args=(b,)))
return out
def sample_inputs_linalg_solve_triangular(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
bs = (1, 2, 0)
ns = (3, 0)
ks = (1, 3, 0)
def gen_inputs():
for b, n, k, (left, upper, uni) in product(bs, ns, ks, product((True, False), repeat=3)):
with torch.no_grad():
if b == 1:
A = make_arg((n, n)) if left else make_arg((k, k))
B = make_arg((n, k))
else:
A = make_arg((b, n, n)) if left else make_arg((b, k, k))
B = make_arg((b, n, k))
if uni:
# Not really necessary, but writing it for consistency
A.diagonal(0, -2, -1).fill_(1.)
else:
d = A.diagonal(0, -2, -1)
d[d.abs() < 1e-6] = 1.
if upper:
A.triu_()
else:
A.tril_()
kwargs = {"upper": upper, "left": left, "unitriangular": uni}
if requires_grad:
for grad_A, grad_B in product((True, False), repeat=2):
# Either A or B needs to have a gradient
if not grad_A and not grad_B:
continue
A.requires_grad_(grad_A)
B.requires_grad_(grad_B)
yield SampleInput(A, args=(B,), kwargs=kwargs)
else:
yield SampleInput(A, args=(B,), kwargs=kwargs)
return list(gen_inputs())
def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):
"""
    This function always generates solvable input for legacy solve functions
(the ones that are not in torch.linalg module).
The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation
should have b.ndim >= 2, vectors are not allowed.
Also the arguments order is swapped.
"""
out = sample_inputs_linalg_solve(
op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False
)
# Reverses tensor order
for sample in out:
sample.input, sample.args = sample.args[0], (sample.input,)
return out
def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs):
out = sample_inputs_linalg_cholesky_inverse(
op_info, device, dtype, requires_grad=False
)
for sample in out:
psd_matrix = sample.input
sample.input = make_tensor(psd_matrix.shape, device, dtype, requires_grad=requires_grad, low=None, high=None)
sample.args = (psd_matrix.requires_grad_(requires_grad),)
return out
def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):
# not needed once OpInfo tests support Iterables
def generate_samples():
batch_shapes = ((), (3,), (3, 3))
for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)):
shape = batch_shape + (S + size_delta, S)
input = make_tensor(shape, device, dtype, requires_grad=requires_grad, low=None, high=None)
yield SampleInput(input, args=(True, get_infos))
return list(generate_samples())
def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
batches = [(), (0, ), (2, )]
ns = [5, 3, 0]
nrhs = [0, 1, 6]
def generate_samples():
for n, batch, rhs in product(ns, batches, nrhs):
a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)
requires_grad_options = (False,) if not requires_grad else (True, False)
# we try all possible combinations of requires_grad for each input
for lu_requires_grad, b_requires_grad in product(requires_grad_options, requires_grad_options):
# when requires_grad == True, at least one input has to have requires_grad enabled
if requires_grad and not lu_requires_grad and not b_requires_grad:
continue
# we run LU several times to guarantee that the produced SampleInputs are independent
                # this is especially important when setting different requires_grad for the same tensors!
lu, pivs = a.lu()
lu.requires_grad = lu_requires_grad
b = torch.randn(*batch, n, rhs, dtype=dtype, device=device)
b.requires_grad = b_requires_grad
yield SampleInput(b, args=(lu, pivs))
return list(generate_samples())
def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):
# not needed once OpInfo tests support Iterables
def generate_samples():
for lu_sample in sample_inputs_lu(op_info, device, dtype, requires_grad, **kwargs):
lu_data, pivots = lu_sample.input.lu()
yield SampleInput(lu_data, args=(pivots,))
# generate rectangular inputs
lu_data_shape = lu_data.shape
batch_shape = lu_data_shape[:-2]
n = lu_data_shape[-2]
for shape_inc in ((1, 0), (0, 1)):
lu_data, pivots = make_tensor(
batch_shape + (n + shape_inc[0], n + shape_inc[1]),
device, dtype,
requires_grad=False,
low=None, high=None
).lu()
lu_data.requires_grad_(requires_grad)
yield SampleInput(lu_data, args=(pivots,))
return list(generate_samples())
def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))
def generator():
for arg in args:
yield SampleInput(make_arg((S, S, S)), args=arg)
return list(generator())
def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
args = ((1, (0, 1),),
(1, (1, 2),),
(1, (1, -1),),
())
def generator():
for arg in args:
yield SampleInput(make_arg((S, S, S)), args=arg)
return list(generator())
def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):
tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype,
requires_grad=requires_grad)
tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype,
requires_grad=requires_grad)
return [
SampleInput(tensor_nd()),
SampleInput(tensor_nd(), kwargs=dict(dim=1)),
SampleInput(tensor_nd(), kwargs=dict(dim=1, unbiased=True, keepdim=True)),
SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=True, keepdim=True)),
SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=False, keepdim=False)),
SampleInput(tensor_nd(), kwargs=dict(dim=(1,), correction=S // 2)),
SampleInput(tensor_nd(), kwargs=dict(dim=None, correction=0, keepdim=True)),
]
def _generate_correlation_inputs(device, dtype, requires_grad):
shapes = [(2,), (1, 2), (3, 2), (2, 3)]
for shape in shapes:
yield make_tensor(shape, device, dtype, requires_grad=requires_grad)
def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs):
return [SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)]
def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs):
inputs = []
for t in _generate_correlation_inputs(device, dtype, requires_grad):
inputs.append(SampleInput(t))
num_observations = t.numel() if t.ndimension() < 2 else t.size(1)
fweights = make_tensor((num_observations,), device, torch.int, low=1, high=10)
aweights = make_tensor((num_observations,), device, torch.float, low=0, high=1, requires_grad=requires_grad)
for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]):
inputs.append(SampleInput(t.detach().clone().requires_grad_(requires_grad),
kwargs={'correction': correction, 'fweights': fw, 'aweights': aw}))
return inputs
def _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):
"""
This function generates input for torch.svd with distinct singular values so that autograd is always stable.
Matrices of different size:
square matrix - S x S size
    tall matrix - S x (S-2)
wide matrix - (S-2) x S
and batched variants of above are generated.
Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied on the output of torch.svd
It is needed for autograd checks, because backward of svd doesn't work for an arbitrary loss function.
"""
from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
    # svd and linalg.svd return V and V.conj().T, respectively. So we need to slice
# along different dimensions when needed (this is used by
# test_cases2:wide_all and wide_all_batched below)
if is_linalg_svd:
def slice_V(v):
return v[..., :(S - 2), :]
def uv_loss(usv):
u00 = usv[0][0, 0]
v00_conj = usv[2][0, 0]
return u00 * v00_conj
else:
def slice_V(v):
return v[..., :, :(S - 2)]
def uv_loss(usv):
u00 = usv[0][0, 0]
v00_conj = usv[2][0, 0].conj()
return u00 * v00_conj
test_cases1 = ( # some=True (default)
# loss functions for complex-valued svd have to be "gauge invariant",
        # i.e. loss functions shouldn't change when the sign of the singular vectors changes.
# the simplest choice to satisfy this requirement is to apply 'abs'.
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),
lambda usv: usv[1]), # 'check_grad_s'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),
lambda usv: abs(usv[0])), # 'check_grad_u'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),
lambda usv: abs(usv[2])), # 'check_grad_v'
# this test is important as it checks the additional term that is non-zero only for complex-valued inputs
# and when the loss function depends both on 'u' and 'v'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device),
uv_loss), # 'check_grad_uv'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))), # 'wide'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:, :(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'tall'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device),
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :(S - 2), :],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'wide_batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :, :(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))), # 'tall_batched'
)
test_cases2 = ( # some=False
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:(S - 2)],
lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))), # 'wide_all'
(random_fullrank_matrix_distinct_singular_value(S, dtype=dtype, device=device)[:, :(S - 2)],
lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))), # 'tall_all'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :(S - 2), :],
lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))), # 'wide_all_batched'
(random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype, device=device)[..., :, :(S - 2)],
lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))), # 'tall_all_batched'
)
out = []
for a, out_fn in test_cases1:
a.requires_grad = requires_grad
if is_linalg_svd:
kwargs = {'full_matrices': False}
else:
kwargs = {'some': True}
out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))
for a, out_fn in test_cases2:
a.requires_grad = requires_grad
if is_linalg_svd:
kwargs = {'full_matrices': True}
else:
kwargs = {'some': False}
out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))
return out
def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = [((1, 2, 3, 4), (0, 2, 3, 1)),
((1, 2, 3, 4), (0, -2, -1, 1)),
((), ()),
((1, 2, 3, 4), (2, 1, 3, 0))]
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=(args,))
return list(generator())
# Based on erstwhile method_tests tests & some tensor_op_tests for pow
def sample_inputs_pow(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype)
samples = []
if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:
test_cases = (
((2, 2), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, False),
((2, 2), 0, 5, 1e-3, requires_grad, (1,), 0, 1, 0.1, requires_grad, False),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (), 0.1, 1.1, 0, False, False),
((2, 2), 0, 5, 1e-3, requires_grad, (), 0.1, 1.1, 1, False, False),
)
tests_require_resizing = (
((1,), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, requires_grad),
((2, 1, 2), 0, 5, 1e-3, requires_grad, (1, 2, 1), 0, 1, 0.1, requires_grad, requires_grad),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (1, S, 1), 0, 1, 0.1, requires_grad, requires_grad),
)
cases = test_cases + tests_require_resizing
samples = []
for (shape_b, low_b, high_b, additive_b, b_grad, shape_e, low_e,
high_e, additive_e, e_grad, broadcasts_input) in cases:
si = SampleInput((make_arg(shape_b, low=low_b, high=high_b) + additive_b).requires_grad_(b_grad),
args=((make_arg(shape_e, low=low_e, high=high_e) + additive_e).requires_grad_(e_grad),),
broadcasts_input=broadcasts_input)
samples.append(si)
tensor_scalar_inputs = (
((2, 2), 0, 5, 1e-3, requires_grad, (3.14,)),
((), 1e-3, 1e-3 + 1, 0, requires_grad, (3.14,))
)
more_samples = list(SampleInput(
(make_arg(shape, high=high, low=low) + additive).requires_grad_(b_grad),
args=exp)
for shape, low, high, additive, b_grad, exp in tensor_scalar_inputs)
samples = [*samples, *more_samples]
elif dtype in [torch.complex64, torch.complex128]:
args_tuple = (
((2, 2), 0, 5, requires_grad, (3.14,)),
((), 0, 1, requires_grad, (3.14,)),
((), 0, 1, requires_grad, (3.14j,))
)
samples = list(SampleInput(
(make_arg(shape, high=high, low=low) + 1e-3 * (1 + 1j)).requires_grad_(b_grad),
args=arg)
for shape, low, high, b_grad, arg in args_tuple)
else: # integral dtype
exp_tuple = (1, 2, 3)
samples = list(SampleInput(
make_arg((2, 2), requires_grad=requires_grad),
args=(arg,))
for arg in exp_tuple)
samples.append(SampleInput(
make_arg((2, 2), requires_grad=requires_grad),
args=(make_arg((2, 2), requires_grad=requires_grad),)))
return tuple(samples)
def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):
return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=False)
def sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False, **kwargs):
return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=True)
def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 2, 0]
samples = []
for batch, (m, n) in product(batches, product(ns, ns)):
a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)
samples.append(SampleInput(a))
return samples
def sample_inputs_softshrink_hardshrink_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):
N = 10
tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
requires_grad=requires_grad)) for _ in range(1, N)]
return tensors
def sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):
eigvecs = make_tensor((S, S), device=device, dtype=dtype,
low=None, high=None)
eigvals = make_tensor((S,), device=device, dtype=dtype,
low=None, high=None)
    # we produce only diagonalizable inputs which do not have
# complex eigenvalues for real inputs, as there is no
# backward implementation for real inputs with complex
# eigenvalues yet.
input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()
input.requires_grad_(requires_grad)
def process_output(eigpair):
eigvals, eigvecs = eigpair
if dtype.is_complex:
# eig produces eigenvectors which are normalized to 1 norm.
# Note that if v is an eigenvector, so is v * e^{i \phi},
# and |v| = |v * e^{i \phi}| = 1.
# This, however, makes the eigenvector backward computation process
# rather unstable unless the objective function is gauge-invariant,
# that is if f(z) == f(|z|), for example.
# Hence for complex inputs we ignore the phases and return only
# the absolute values.
return eigvals, eigvecs.abs()
else:
return eigvals, eigvecs
return [
SampleInput(
input,
kwargs=dict(eigenvectors=True),
output_process_fn_grad=process_output
),
]
def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):
def c(t):
return t.detach().clone().requires_grad_(requires_grad)
x = make_tensor((3,), device, dtype, requires_grad=requires_grad)
y = make_tensor((4,), device, dtype, requires_grad=requires_grad)
A = make_tensor((2, 3,), device, dtype, requires_grad=requires_grad)
B = make_tensor((1, 3,), device, dtype, requires_grad=requires_grad)
C = make_tensor((1, 2, 3,), device, dtype, requires_grad=requires_grad)
D = make_tensor((1, 3, 4,), device, dtype, requires_grad=requires_grad)
E = make_tensor((4, 4,), device, dtype, requires_grad=requires_grad)
H = make_tensor((3, 3,), device, dtype, requires_grad=requires_grad)
I = make_tensor((1, 3, 1,), device, dtype, requires_grad=requires_grad)
inputs = []
# Vector operations
inputs.append(SampleInput([c(x)], args=('i->',))) # sum
inputs.append(SampleInput([c(x), c(y)], args=('i,j->ij',))) # outer
# Matrix operations
inputs.append(SampleInput([c(A)], args=("ij->i",))) # col sum
inputs.append(SampleInput([c(A), c(B)], args=("ij,kj->ik",))) # matmul
inputs.append(SampleInput([c(A), c(E)], args=("ij,Ab->ijAb",))) # matrix outer product
# Tensor operations
inputs.append(SampleInput([c(C), c(D)], args=("aij,ajk->aik",))) # batch matmul
inputs.append(SampleInput([c(D), c(E)], args=("aij,jk->aik",))) # tensor matrix contraction
inputs.append(SampleInput([c(C), c(B)], args=("ijk,ik->j",))) # non contiguous
# Test diagonals
inputs.append(SampleInput([c(I)], args=('iji->j',))) # non-contiguous trace
# Test ellipsis
inputs.append(SampleInput([c(H)], args=("i...->...",)))
inputs.append(SampleInput([c(C), c(x)], args=('...ik, ...j -> ij',)))
return inputs
def sample_inputs_linalg_qr(op_info, device, dtype, requires_grad=False, **kwargs):
"""
This function generates input for torch.linalg.qr
The input is generated as the itertools.product of 'batches' and 'ns'.
"""
batches = [(), (0,), (2, ), (1, 1)]
ns = [5, 2, 0]
out = []
for batch, (m, n) in product(batches, product(ns, ns)):
a = torch.randn(*batch, m, n, dtype=dtype, device=device, requires_grad=requires_grad)
out.append(SampleInput(a))
return out
def sample_inputs_geqrf(op_info, device, dtype, requires_grad=False):
batches = [(), (0, ), (2, ), (1, 1)]
ns = [5, 2, 0]
samples = []
for batch, (m, n) in product(batches, product(ns, ns)):
# TODO: CUDA path doesn't work with batched or empty inputs
if torch.device(device).type == 'cuda' and (batch != () or m == 0 or n == 0):
continue
a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)
samples.append(SampleInput(a))
return samples
def sample_inputs_flip(op_info, device, dtype, requires_grad):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((S, M, S), (S, 0, M))
all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())
def gen_samples():
for size, dims in product(sizes, all_dims):
yield SampleInput(make_arg(size), kwargs={"dims": dims})
return list(gen_samples())
def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):
tensors = (
make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)
)
return [SampleInput(tensor) for tensor in tensors]
def sample_inputs_fmod_remainder(op_info, device, dtype, requires_grad, *, autodiffed=False, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
if autodiffed:
samples = (
((S, S, S), 1.5, False),
((), 1.5, False),
)
else:
cases = (
((S, S, S), (), False),
((S, S, S), (S, S, S), False),
((S, S, S), (S,), False),
)
# Sample inputs with scalars as torch tensors
cases_with_tensor_scalar = (
((), torch.tensor(1, dtype=dtype, device=device, requires_grad=False), False),
)
# Sample inputs with broadcasting
cases_with_broadcasting = (
((S,), (S, S, S), True),
((S, 1, S), (S, S, S), True),
((), (S, S, S), True),
)
samples = cases + cases_with_tensor_scalar + cases_with_broadcasting # type: ignore[assignment]
def generator():
for shape, arg_other, broadcasts_input in samples:
if isinstance(arg_other, tuple):
arg = make_arg(arg_other, requires_grad=False, exclude_zero=True)
else:
                # arg_other is a scalar or a torch.tensor
                arg = arg_other
            yield SampleInput(make_arg(shape), args=(arg,), broadcasts_input=broadcasts_input)
return list(generator())
# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!
def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):
x = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
lb = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
ub = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)
def detach(tensor):
return tensor.clone().detach_().requires_grad_(requires_grad)
return [
SampleInput(detach(x), args=(lb, ub)),
SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),
SampleInput(detach(x), args=(detach(lb[:, :1]),)),
]
def sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad):
tensors = (
make_tensor((2, 3, 2), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((2, 0, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
if dtype is torch.uint8:
min_max_vals = ((2, 5), (3, 7))
else:
min_max_vals = ((0, 1), (-1, 1))
output = [SampleInput(
tensor.detach().clone().requires_grad_(requires_grad),
args=vals) for tensor, vals in product(tensors, min_max_vals)]
output += [
SampleInput(tensors[0].detach().clone().requires_grad_(requires_grad),
args=(0.5, None)),
SampleInput(tensors[0].detach().clone().requires_grad_(requires_grad),
args=(None, 0.5))]
    scalar_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)
    output.append(SampleInput(scalar_tensor, args=(0.0, 1.0)))
return output
def sample_kwargs_clamp_scalar(device, dtype, input):
if dtype is torch.uint8:
min_val, max_val = (random.randint(1, 3), random.randint(4, 8))
elif dtype.is_floating_point:
min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8)) # type: ignore[assignment]
else:
min_val, max_val = (random.randint(-8, 0), random.randint(1, 8))
return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val}
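# The two dicts above exist because the torch op and its NumPy reference spell the bounds differently:
# torch.clamp(x, min=..., max=...) versus np.clip(a, a_min=..., a_max=...).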
def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs):
sample0 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),))
sample1 = SampleInput(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),),
kwargs={'dim': 1})
sample2 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),
args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),),
kwargs={'dim': -1})
return (sample0, sample1, sample2)
def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):
def make_arg(shape):
# shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)
def prod_zeros(dim_select):
assert len(dim_select) == 2
result = make_arg(3 * (S,))
with torch.no_grad():
result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()
result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()
result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()
return result
# will not be needed once OpInfo tests support Iterables
def sample_generator():
for dim in range(3):
yield SampleInput(make_arg((S, S, S)), args=(dim,))
# Scalar tensors and empty tensor
for size in [(), (1,), (0,)]:
yield SampleInput(make_arg(size), args=(0,))
yield SampleInput(prod_zeros([0, 1]), args=(1,))
yield SampleInput(prod_zeros([0, 2]), args=(1,))
yield SampleInput(prod_zeros([1, 2]), args=(1,))
# test dtype kwarg
yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})
return list(sample_generator())
def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):
return [SampleInput(make_tensor((S, 2), device, dtype, requires_grad=requires_grad),)]
def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):
tensors = (
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
make_tensor((), device, dtype, requires_grad=requires_grad)
)
return [SampleInput(tensor) for tensor in tensors]
def sample_inputs_copysign(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor(*shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
cases = [
# no broadcast
((S, S, S), (S, S, S), False),
# broadcast rhs
((S, S, S), (S, S), False),
# scalar
((S, S), 3.14, False),
# scalar positive zero
((S, S), 0.0, False),
# scalar negative zero
((S, S), -0.0, False),
]
# broadcast lhs
cases.append(((S, S), (S, S, S), True))
# broadcast all
cases.append(((S, 1, S), (M, S), True))
def generator():
for input_shape, arg_val, broadcasts_input in cases:
if isinstance(arg_val, tuple):
arg = _make_tensor(*arg_val)
else:
# arg_val is scalar
arg = arg_val
yield SampleInput(_make_tensor(*input_shape), args=(arg, ), broadcasts_input=broadcasts_input)
return list(generator())
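# Illustrative sketch of why +0.0 and -0.0 are separate scalar cases above: copysign
# takes the magnitude of the first argument and the sign bit of the second, and -0.0
# carries a negative sign bit (the helper below is for illustration only).
def _example_copysign_signed_zero():
    t = torch.tensor([1.0, 2.0])
    assert torch.equal(torch.copysign(t, 0.0), torch.tensor([1.0, 2.0]))
    assert torch.equal(torch.copysign(t, -0.0), torch.tensor([-1.0, -2.0]))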
def sample_inputs_prod(op_info, device, dtype, requires_grad):
def make_arg(shape):
# shrink values to be in the interval [-1, +1] for better precision in gradgradcheck
return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)
def prod_single_zero():
result = make_arg(2 * (S,))
with torch.no_grad():
result[0, 1] = 0
return result
# will not be needed once OpInfo tests support Iterables
def sample_generator():
for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):
# only Tensor, ignore other inputs
yield SampleInput(sample.input.detach().clone().requires_grad_(requires_grad))
yield sample
# Generates samples with keepdim = True
for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):
sample.kwargs['keepdim'] = True
yield sample
yield SampleInput(prod_single_zero())
yield SampleInput(make_arg((3, 3, 3)), args=(1,))
yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})
# test zero scalar tensor
zero = make_arg(())
with torch.no_grad():
zero.zero_()
yield SampleInput(zero.detach().clone().requires_grad_(requires_grad))
yield SampleInput(zero.detach().clone().requires_grad_(requires_grad), args=(0,))
yield SampleInput(zero.detach().clone().requires_grad_(requires_grad),
args=(0,),
kwargs={'keepdim': True})
return list(sample_generator())
def error_inputs_neg(op_info, device, **kwargs):
si = SampleInput(torch.tensor((False, True), device=device))
msg = ("Negation, the `\\-` operator, on a bool tensor is not supported."
" If you are trying to invert a mask, use the `\\~` or"
" `logical_not\\(\\)` operator instead.")
return (ErrorInput(si, error_type=RuntimeError, error_regex=msg),)
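# Illustrative sketch of the alternatives named in the error message above: `-` is
# rejected on bool tensors, while `~` and `logical_not()` invert a mask (the helper
# below is for illustration only).
def _example_bool_mask_inversion():
    mask = torch.tensor([False, True])
    assert torch.equal(~mask, torch.tensor([True, False]))
    assert torch.equal(mask.logical_not(), torch.tensor([True, False]))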
def sample_inputs_nextafter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (
((S, S), (S, S), False),
((S, S), (S,), False),
((S, ), (S, S), True)
)
def generator():
for shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape), args=(make_arg(other_shape),), broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):
vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))
tensors = (
make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((3, 5), device, dtype, low=None, high=None, requires_grad=requires_grad),
make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),
)
args = ((), (2,), (-2,), (1,), (2,))
samples = []
for tensor, arg in product(tensors, args):
samples.append(SampleInput(tensor.detach().clone().requires_grad_(requires_grad), args=arg))
return samples + [vec_sample]
def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# Shapes for 2D Tensors
shapes_2d = ((M, M), (3, 5), (5, 3))
# Shapes for 3D Tensors
shapes_3d = ((M, M, M),)
args_2d = ((), (2,), (-2,), (1,))
args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))
def generator():
for shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):
yield SampleInput(make_arg(shape), args=arg)
return list(generator())
def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# Shapes for 2D Tensors
shapes_2d = ((M, M), (3, 5), (5, 3))
# Shapes for 3D Tensors
shapes_3d = ((M, M, M),)
args_2d = ((), (2,), (-2,), (1,))
args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))
def generator():
for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):
input_ = make_arg(input_shape)
# We can programmatically figure out the right shape for src:
# It should be the same size as input.diagonal(other_args...)
if not isinstance(arg, tuple):
arg_tuple = (arg,)
else:
arg_tuple = arg
src_shape = input_.diagonal(*arg_tuple).size()
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *arg_tuple))
return list(generator())
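# Illustrative sketch of the shape rule used above: `src` must match the size of
# input.diagonal(*args); for example, the offset-1 diagonal of a (3, 4) tensor has
# length 3 (the helper below is for illustration only).
def _example_diagonal_scatter_src_shape():
    inp = torch.zeros(3, 4)
    src = torch.ones(inp.diagonal(1).size())
    out = torch.diagonal_scatter(inp, src, 1)
    assert out.diagonal(1).eq(1).all()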
def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=lambda x: x.to_dense()),
SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=lambda x: x.to_dense()),)
def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs):
batch_size, num_classes = shape = (2, 3)
reductions = ("mean", "sum", "none")
input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [
(shape, dict()),
((*shape, 1), dict()),
((*shape, 1, 2), dict()),
((*shape, 1, 2, 3), dict()),
*[(shape, dict(reduction=reduction)) for reduction in reductions],
*[
(
shape,
dict(
weight=make_tensor((num_classes,), device=device, dtype=dtype),
reduction=reduction,
),
)
for reduction in reductions
],
(shape, dict(ignore_index=1)),
]
sample_inputs = []
for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)):
input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad)
if probabilities_target:
# ignore_index is not supported for probabilities target
if "ignore_index" in kwargs:
continue
target = make_tensor(
input_shape,
low=0,
high=1,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
else:
target = make_tensor(
(batch_size, *input_shape[2:]),
low=0,
high=num_classes,
device=device,
dtype=torch.long,
)
if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]):
# make sure at least one item in target is not ignored
target[0] = random.sample(set(range(num_classes)) - {kwargs["ignore_index"]}, 1)[0]
sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs))
return sample_inputs
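# Illustrative sketch of the two target conventions exercised above, assuming a PyTorch
# version where F.cross_entropy accepts probability targets: class indices are long
# tensors of shape (N, ...), probabilities are floating tensors shaped like the input
# (the helper below is for illustration only).
def _example_cross_entropy_targets():
    import torch.nn.functional as F
    logits = torch.randn(2, 3)
    class_target = torch.tensor([0, 2])                     # class indices
    prob_target = torch.softmax(torch.randn(2, 3), dim=-1)  # class probabilities
    F.cross_entropy(logits, class_target)
    F.cross_entropy(logits, prob_target)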
# Used for log_softmax, softmax, softmin
def sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = [
((S, ), (0, )),
((S, S), (0, )),
((S, S), (1, )),
((S, S), (-1, )),
((S, M, S), (2, )),
]
# PyTorch on XLA throws an error when a dim argument is passed for a 0d tensor.
# See https://github.com/pytorch/xla/issues/3061 for more details.
if torch.device(device).type != 'xla':
cases.append(((), (0, )))
return [
SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None)
for shape, dim in cases
]
def sample_inputs_masked_softmax(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs):
"""Sample inputs for masked softmax, log_softmax, and softmin.
A masked normalization operator is a reduction operator with an
optional trailing mask argument. The mask is a bool tensor with the
same shape as the input, or a shape that is broadcastable to the
input shape.
"""
inputs: List[SampleInput] = []
for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs):
for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs):
sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs)
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
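# Illustrative reference sketch, not the torch._masked implementation: one common way to
# read the mask semantics above is that masked-out (False) positions behave as if they
# were -inf before normalizing, with the bool mask broadcast to the input shape
# (the helper below is for illustration only).
def _example_masked_softmax_reference(x, mask, dim):
    return x.masked_fill(~mask, float('-inf')).softmax(dim=dim)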
def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for masked normalize.
"""
inputs: List[SampleInput] = []
for ord in [2.0, 1, float('inf'), float('-inf'), 0]:
for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs):
sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()
inputs.append(SampleInput(sample_input.input.detach().clone().requires_grad_(requires_grad),
args=sample_input_args, kwargs=sample_input_kwargs))
return inputs
def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):
low, high = op_info.domain
# Note: the operator is very sensitive at points near the
# start and end of its domain and produces NaN for float16
# if domain_eps is 1e-5.
domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2
low = low + domain_eps
high = high - domain_eps
samples = (
SampleInput(make_tensor((S, S, S), device, dtype, low=low, high=high, requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), device, dtype, low=low,
high=high, requires_grad=requires_grad), args=(0.2,)),
SampleInput(make_tensor((), device, dtype, low=low, high=high, requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype, low=low,
high=high, requires_grad=requires_grad), args=(0.2,)),
)
return samples
def sample_inputs_isin(op_info, device, dtype, requires_grad):
element = make_tensor((L,), device, dtype, low=None, high=None, requires_grad=requires_grad)
indices = torch.randint(0, L, size=[S])
test_elements = element[indices].clone()
return [
SampleInput(element, args=(test_elements,))
]
def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def samples_generator():
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))
yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))
yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),
broadcasts_input=True)
samples = tuple(samples_generator())
return samples
def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def sample_generator():
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))
yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, make_arg(())),
broadcasts_input=True)
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, 10),
broadcasts_input=True)
samples = tuple(sample_generator())
return samples
def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):
samples = (
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn(M, M, device=device) > 0,)),
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M,), device=device) > 0,)),
SampleInput(make_tensor((M,), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
SampleInput(make_tensor((M, 1, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.tensor(1, device=device, dtype=torch.bool),)),
SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.tensor(1, device=device, dtype=torch.bool),)),
SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),
args=(torch.randn((M, M), device=device) > 0,)),
)
return samples
def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):
samples = (
SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad)),
SampleInput(make_tensor((S, S, S), device, dtype, requires_grad=requires_grad)),
)
return samples
def sample_inputs_matmul(op_info, device, dtype, requires_grad):
test_cases = (((L,), (L,)),
((S, M), (M,)),
((M,), (M, S)),
((S, M), (M, S)),
((S, 0), (0, M)),
((S, S, M), (M,)),
((S, S, M), (M, S)),
((S, S, 0), (0, S)),
((M,), (S, M, S)),
((S, M), (S, M, S)),
((0, 0), (S, 0, 0)),
((S, S, M, M), (S, S, M, S)),
((S, S, M, M), (M,)),
((M,), (S, S, M, S)))
sample_inputs = []
for lhs_shape, rhs_shape in test_cases:
lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
if op_info.name == 'matmul':
sample_inputs.append(SampleInput(lhs, args=(rhs,)))
elif op_info.name == '__rmatmul__':
sample_inputs.append(SampleInput(rhs, args=(lhs,)))
else:
raise RuntimeError("`op_info.name` must be 'matmul' or '__rmatmul__'")
return tuple(sample_inputs)
def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype,
requires_grad: bool,
*, variant: str) -> List[SampleInput]:
if variant == 'variadic':
def make_inputs(
tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
List[torch.Tensor]],
Tuple[torch.Tensor, ...]]:
return tensors[0], tuple(tensors[1:])
elif variant == 'list':
def make_inputs(
tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor,
List[torch.Tensor]],
Tuple[torch.Tensor, ...]]:
return tensors, ()
else:
raise ValueError(
'Unsupported variant, must be one of {"variadic", "list"}. '
f'Got "{variant}".')
SCALAR = torch.Size([])
VECTOR = torch.Size([3])
test_cases: List[List[torch.Size]] = [
[SCALAR],
[VECTOR],
[VECTOR, SCALAR],
[VECTOR, SCALAR, VECTOR],
[VECTOR, SCALAR, VECTOR, SCALAR],
]
sample_inputs = []
for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}):
input, args = make_inputs(
[make_tensor(shape, device, dtype, requires_grad=requires_grad)
for shape in shapes])
sample_inputs.append(SampleInput(input=input, args=args,
kwargs=dict(indexing=indexing)))
return sample_inputs
def sample_inputs_polar(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
samples = (
SampleInput(_make_tensor_helper((S, S), low=0), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper((), low=0), args=(_make_tensor_helper(()),)),
)
return samples
def sample_inputs_complex(op_info, device, dtype, requires_grad, **kwargs):
def _make_tensor_helper(shape):
return make_tensor(shape, device, dtype, requires_grad=requires_grad)
samples = (
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),
)
return samples
def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
def generator():
for shape, n in product(tensor_shapes, ns):
yield SampleInput(make_arg(shape), args=(n,))
return list(generator())
def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
tensor_shapes = ((S, S), ())
ns = (1, 2, 3, 4, 5)
# Since the accepted lower bound for the input
# to mvlgamma depends on the `p` argument,
# the following function computes the lower bound
# that we pass to `make_tensor`.
def compute_min_val(p):
return (p - 1.) / 2
def generator():
for shape, n in product(tensor_shapes, ns):
min_val = compute_min_val(n)
if not dtype.is_floating_point:
# Round-up minimum value for integral dtypes
min_val += 1
yield SampleInput(make_arg(shape, low=min_val), args=(n,))
return list(generator())
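# Illustrative sketch of the lower bound computed above: torch.mvlgamma(x, p) requires
# every element of x to be strictly greater than (p - 1) / 2 (the helper below is for
# illustration only).
def _example_mvlgamma_domain():
    p = 3
    min_val = (p - 1.) / 2                 # == 1.0 for p == 3
    x = torch.full((2, 2), min_val + 0.5)  # all elements above the bound
    return torch.mvlgamma(x, p)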
# Since `mvlgamma` has multiple entries,
# there are multiple common skips for the additional
# entries. The following function is a helper to that end.
def skips_mvlgamma(skip_redundant=False):
skips = (
# outside domain values are hard error for mvlgamma op.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'),
)
if skip_redundant:
# Redundant tests
skips = skips + ( # type: ignore[assignment]
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
)
return skips
# To test reference numerics against multiple values of the argument `p`,
# we make multiple OpInfo entries, each corresponding to a different value of p.
# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.
# Class `MvlGammaInfo` already contains the basic information related to the operator;
# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which
# differ between the entries.
class MvlGammaInfo(UnaryUfuncInfo):
def __init__(self, variant_test_name, domain, skips, sample_kwargs):
super(MvlGammaInfo, self).__init__(
'mvlgamma',
ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,
aliases=('special.multigammaln',),
variant_test_name=variant_test_name,
domain=domain,
decorators=(precisionOverride({torch.float16: 5e-2}),),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half),
sample_inputs_func=sample_inputs_mvlgamma,
safe_casts_outputs=True,
supports_forward_ad=True,
skips=skips,
sample_kwargs=sample_kwargs)
def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):
low, _ = op_info.domain
if requires_grad:
low = 0 + op_info._domain_eps
return (SampleInput(make_tensor((L,), device, dtype,
low=low,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
low=low,
requires_grad=requires_grad)))
def sample_inputs_zeta(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
samples = (SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),
args=(make_arg((S,), low=2, requires_grad=False),)),
SampleInput(make_arg((S,), low=1, requires_grad=requires_grad),
args=(3.,)),
)
return samples
# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`
# supports an `exclude` argument.
# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617
def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):
samples = (SampleInput(make_tensor((S,), device, dtype,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
requires_grad=requires_grad)))
if requires_grad and op_info.op == torch.special.i0e:
# NOTE: `i0e`'s first-order gradient is not continuous
# at `0`, hence we don't test `i0e` with any input being `0`.
# TODO: Remove this when `make_tensor` supports excluding `0`.
with torch.no_grad():
for sample in samples:
t = sample.input
t[t == 0] = torch.finfo(dtype).eps # type: ignore[index]
elif requires_grad and op_info.op != torch.special.i0e:
# Special Case for gradient
# Sample with `0` in the input
t = make_tensor((S,), device, dtype,
requires_grad=requires_grad)
with torch.no_grad():
t[0] = 0
samples += (SampleInput(t),) # type: ignore[assignment]
return samples
def sample_inputs_rsub(op_info, device, dtype, requires_grad, variant='tensor', **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _samples_with_alpha_helper(args, alphas, filter_fn=lambda arg_alpha: True):
filtered_product = filter(filter_fn, product(args, alphas)) # type: ignore[var-annotated]
return (SampleInput(input.detach().clone().requires_grad_(requires_grad),
args=(arg,), kwargs=dict(alpha=alpha))
for (input, arg), alpha in filtered_product)
int_alpha, float_alpha, complex_alpha = 2, 0.1, 1 + 0.6j
if variant == 'tensor':
samples = (
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S,)),)),
SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper((S, S)),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),
SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper((S,)),)),
SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper(()),)),
)
if dtype.is_complex:
alphas = [int_alpha, float_alpha, complex_alpha]
elif dtype.is_floating_point:
alphas = [int_alpha, float_alpha]
else:
alphas = [int_alpha]
args = ((_make_tensor_helper((S, S)), _make_tensor_helper((S, S))),
(_make_tensor_helper((S, S)), _make_tensor_helper((S,))),
(_make_tensor_helper(()), _make_tensor_helper(())))
samples += tuple(_samples_with_alpha_helper(args, alphas)) # type: ignore[assignment]
elif variant == 'scalar':
# Scalar Other
samples = (SampleInput(_make_tensor_helper((S, S)), args=(0.5,)),
SampleInput(_make_tensor_helper(()), args=(0.5,)),
SampleInput(_make_tensor_helper((S, S)), args=(1.5j,)),
SampleInput(_make_tensor_helper(()), args=(1.5j,)),
SampleInput(_make_tensor_helper((S, S)), args=(0.4 + 1.2j,)),
SampleInput(_make_tensor_helper(()), args=(1.2 + 1.76j,)))
scalar_args = [(_make_tensor_helper((S, S)), 0.5), (_make_tensor_helper(()), 0.5),
(_make_tensor_helper((S, S)), 2.7j), (_make_tensor_helper(()), 2.7j),
(_make_tensor_helper((S, S)), 1 - 2.7j), (_make_tensor_helper(()), 1 + 2.7j)]
alphas = [int_alpha, float_alpha, complex_alpha]
def filter_fn(arg_alpha):
arg, alpha = arg_alpha
if isinstance(alpha, complex):
if dtype.is_complex or isinstance(arg[1], complex):
return True
else:
# complex alpha is valid only if either `self` or `other` is complex
return False
# Non-Complex Alpha
return True
# Samples with alpha (scalar version) covers the following cases
# self | other | alpha
# -----------------------------------------
# real | real | real (int and float)
# real | complex | real and complex
# complex | real | real and complex
# complex | complex | real and complex
#
# It does not cover
# real | real | complex
# x = torch.randn(2, requires_grad=True, dtype=torch.float64)
# torch.rsub(x, 1, alpha=1. + 1.6j)
# RuntimeError: value cannot be converted to type double without overflow: (-1,-1.6)
samples += tuple(_samples_with_alpha_helper(scalar_args, alphas, filter_fn=filter_fn)) # type: ignore[assignment]
else:
raise Exception("Invalid variant!")
return samples
def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
samples = [
SampleInput(_make_tensor_helper((S, S, S)), args=(0,)),
SampleInput(_make_tensor_helper((S, S, S)), args=(1,)),
SampleInput(_make_tensor_helper(()), args=(0,)),
]
if supports_dtype_kwargs:
# NOTE: if `dtype` is not the same as the input's dtype, then inplace variants fail with
# `provided dtype must match the dtype of self tensor in cumsum`
samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype}))
return samples
def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):
test_cases = (
((), (0, 1, 1)),
((S, S, S, S), (0, 3, 1)),
((S, S, S, S), (1, 3, 1)),
((S, S, S, S), (2, 3, 1)),
((S, S, S, S), (3, 3, 1)),
((S, S, S, S), (0, 3, 2)),
((S, S, S, S), (1, 3, 2)),
((S, S, S, S), (2, 3, 2)),
((S, S, S, S), (3, 3, 2)),
((S, S, S, S), (0, 4, 1)),
((S, S, S, S), (1, 4, 1)),
((S, S, S, S), (2, 4, 1)),
((S, S, S, S), (3, 4, 1)),
((M,), (0, 3, 1)),
((M,), (0, 3, 2)),
((M,), (0, 3, 3)),
((1000,), (0, 3, 11)),
((1000,), (0, 2, 27)),
((10, 10), (0, 1, 2)),
((10, 10), (1, 2, 3)),
((10, 10), (1, 2, 2)),
((S, S, S), (2, 3, 2)),
)
sample_inputs = []
for shape, arguments in test_cases:
sample_inputs += [SampleInput(make_tensor(shape, device, dtype,
low=None, high=None,
requires_grad=requires_grad),
args=arguments)]
return sample_inputs
def sample_inputs_atan2(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (
((S, S, S), (S, S, S), False),
((), (), False),
((S, S, S), (S,), False),
((S,), (S, S, S), True),
((S, 1, S), (S, S), True),
)
def generator():
for x_shape, y_shape, broadcasts_input in cases:
yield SampleInput(make_arg(x_shape), args=(make_arg(y_shape),),
broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
if list_args:
cases = (
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),),
((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),)
)
else:
cases = ( # type: ignore[assignment]
((S, S, S), (2,)),
((S, S, S), (S, 1)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
cases = (((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)),
((S, S, S), ([int(S / 3), S - int(S / 3), 0],)),
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], 2)),
((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], -2)),
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_msort(op_info, device, dtype, requires_grad):
def apply_grad(t):
if dtype in floating_types_and(torch.float16, torch.bfloat16):
t.requires_grad_(requires_grad)
def large_1d_unique(dtype, device):
res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
res = res.to(dtype)
apply_grad(res)
return res
samples = []
# Test case for large tensor.
largesample = SampleInput(large_1d_unique(dtype, device))
sample = SampleInput(make_tensor((S, M, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad))
return [largesample, sample]
def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
samples = (
# no broadcast
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),
# broadcast rhs
SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),
# scalar tensor
SampleInput(make_arg(()), args=(make_arg(()), 0.4)),
# broadcast rhs scalar-tensor
SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),
# broadcast rhs with weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),
# broadcast rhs and weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),
# broadcast lhs
SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# scalar broadcast_lhs
SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# broadcast all
SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),
# tensor broadcast all
SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),
broadcasts_input=True),
# no broadcast with weight tensor
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), make_arg((S, S)))),
# broadcast lhs with weight tensor
SampleInput(make_arg((S,)), args=(make_arg((S, S)), make_arg((S, S))), broadcasts_input=True),
# broadcast lhs and weight tensor
SampleInput(make_arg((S,)), args=(make_arg((S, S, S)), make_arg((S, S))), broadcasts_input=True),
# broadcast lhs and weight tensor variant
SampleInput(make_arg((S, S)), args=(make_arg((S, S, S)), make_arg((S,))), broadcasts_input=True),
)
if dtype.is_complex:
samples = samples + ( # type: ignore[assignment]
# no broadcast
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),
# broadcast rhs
SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),
# scalar tensor
SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),
SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),
# broadcast rhs scalar-tensor
SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)),
SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),
)
return samples
def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):
cases = (
((2, 2, 2), (2, 2, 2), (2)),
((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])),
)
samples = []
for first_shape, second_shape, dims in cases:
samples.append(SampleInput(make_tensor(first_shape, device, dtype,
requires_grad=requires_grad),
args=(make_tensor(second_shape, device, dtype,
requires_grad=requires_grad),),
kwargs=dict(dims=dims,)))
return tuple(samples)
def sample_inputs_kron(op_info, device, dtype, requires_grad):
test_cases = (
((S, S), (M, L)),
)
sample_inputs = []
for input_shape, other_shape in test_cases:
input = make_tensor(input_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
other = make_tensor(other_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)
sample = SampleInput(input, args=(other,))
sample_inputs.append(sample)
return tuple(sample_inputs)
def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):
return (
SampleInput(
make_tensor((S, ), device, dtype, requires_grad=requires_grad),
args=(
make_tensor((S, ), device, dtype, requires_grad=requires_grad),
)
),
SampleInput(
make_tensor((), device, dtype, requires_grad=requires_grad),
args=(
make_tensor((S, S), device, dtype, requires_grad=requires_grad),
)
),
)
def sample_inputs_scatter(op_info, device, dtype, requires_grad):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
(_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
(_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor(()), (0, zero.clone().detach(), _tensor(()))),
(_tensor(()), (0, zero.clone().detach(), 2.5)),
)
samples = []
for tensor, args in test_cases:
samples.append(SampleInput(tensor, args=args))
if not requires_grad:
samples.append(SampleInput(
tensor.clone().detach(),
args=args, kwargs={'reduce': 'add'}
))
if dtype.is_floating_point:
samples.append(SampleInput(
tensor.clone().detach(),
args=args, kwargs={'reduce': 'multiply'}
))
return samples
def sample_inputs_scatter_add(op_info, device, dtype, requires_grad):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
def _gather(shape, index_dim, max_indices):
return gather_variable(shape, index_dim, max_indices, device=device)
zero = torch.tensor(0, dtype=torch.long, device=device)
test_cases = (
(_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),
(_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),
(_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),
(_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),
(_tensor(()), (0, zero.clone().detach(), _tensor(()))),
)
return [SampleInput(tensor, args=args) for tensor, args in test_cases]
def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):
samples = (SampleInput(make_tensor((S, S, S), device, dtype,
low=None, high=None,
requires_grad=requires_grad)),
SampleInput(make_tensor((), device, dtype,
low=None, high=None,
requires_grad=requires_grad)),)
return samples
def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((M, M), ()),
((M, M), (2,),),
((S, M, M), ()),
((S, M, M), (2,)),
((3, 3, S, S), ()),)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def generator():
yield SampleInput(make_arg((S, M, S)))
yield SampleInput(make_arg(()))
return list(generator())
def sample_inputs_contiguous(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def generator():
yield SampleInput(make_arg((S, S)))
return list(generator())
def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
# list of tuples (shape, shape) defining the shapes of the input and output tensors
sample_shapes = [
((), ()),
((S,), (1,)),
((S, S), (1, 1)),
((S, S), (1, S)),
((S, S), (S, S)),
((S, S, S), (S, 1, S)),
]
samples = []
for input_shape, output_shape in sample_shapes:
input_t = make_arg(input_shape)
samples.append(SampleInput(input_t, args=(output_shape,)))
return samples
def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (S * S, S)),
((), ()),
((), (1, 1, 1)),
)
def generator():
for shape, args_or_shape in cases:
# Update `args` based on operator
if op_info.name == 'resize_':
# resize_ takes shape/tuple of ints,
args = (args_or_shape, )
elif op_info.name == 'resize_as_':
# resize_as_ takes another tensor
args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment]
else:
raise ValueError("sample_inputs_resize_ops is being used with incorrect operator")
yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))
return list(generator())
def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (S * S, S)),
((S * S, S), (S, S, S)),
((S * S, S), (S, -1, S)),
((S * S * 2, S), (S, -1)),
((S,), (S,)),
((), ()),
((), (1,)))
def generator():
for case in cases:
shape, args = case
inp = make_arg(shape, requires_grad=requires_grad)
yield(SampleInput(inp, args=(args, )))
if op_info.name != "view" and len(shape) >= 2:
yield(SampleInput(
inp.detach().clone().transpose(0, 1).requires_grad_(requires_grad),
args=(args, )))
return list(generator())
def sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (S * S, S)),
((), ()),
((), (1, 1)),
)
def generator():
for case in cases:
shape, shape_other = case
inp = make_arg(shape, requires_grad=requires_grad)
yield(SampleInput(inp, args=(make_arg(shape_other, requires_grad=False),)))
if op_info.name != "view_as" and len(shape) >= 2:
yield(SampleInput(
inp.detach().clone().transpose(0, 1).requires_grad_(requires_grad),
args=(make_arg(shape_other, requires_grad=False),)))
return list(generator())
def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs):
input_list = []
shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),)
make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
samples = []
for shape in shapes:
input_list.append(make_tensor_partial(shape))
samples.append(SampleInput(make_tensor_partial(shape)))
samples.append(SampleInput(input_list, ))
return samples
def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (1, 2)),
((S, S, S), (-1, 2)),
((S, S, S), (-1, -1)),
((S, S, S), (1, -1)),
((S,), (0, 2))
)
def generator():
for shape, args in cases:
yield SampleInput(make_arg(shape), args=args)
return list(generator())
def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, S, S), (S, S), (1, 2)),
((S, S, S), (S, S), (-1, 2)),
((S, S, S), (S, S), (-1, -1)),
((S, S, S), (S, S), (1, -1)),
((S,), (), (0, 2))
)
def generator():
for input_shape, src_shape, args in cases:
input_ = make_arg(input_shape)
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *args))
return list(generator())
def sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)),
((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)),
((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)),
((L, L, L), (L, L, L,), (1, 0, L, 1)),
((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)),
((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)),
((L, L, L), (L, L, L,), (2, 0, L, 1)),
((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)),
((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)),
)
def generator():
for input_shape, src_shape, args in cases:
input_ = make_arg(input_shape)
src = make_arg(src_shape)
yield SampleInput(input_, args=(src, *args))
return list(generator())
def sample_inputs_rbinops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):
def _make_tensor_helper(shape, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
scalar: Union[int, float, complex] = 3
if dtype.is_floating_point:
scalar = 3.14
elif dtype.is_complex:
scalar = 3.14j
samples = [
SampleInput(_make_tensor_helper((S, S, S)), args=(scalar,)),
SampleInput(_make_tensor_helper(()), args=(scalar,)),
]
return samples
def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
cases = (((S, 1, 1), (S, S, S)),
((S, 1, S), (S, S, S)),
((S, 1, S), (-1, S, -1)),
((S, 1, S), (-1, S, S)),
((S, 1), (S, S, S)),
((1,), (S, S, S)),
((1, S), (1, 1, S)),
((), ()),
((), (1, 3, 2)),
)
def generator():
for case in cases:
shape, args = case
yield(SampleInput(make_arg(shape), args=(args, )))
return list(generator())
def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((),
(2, 3))
memory_format_options = [None, torch.contiguous_format]
def generator():
for shape, memory_format in itertools.product(shapes, memory_format_options):
yield SampleInput(make_arg(shape),
kwargs={'memory_format': memory_format} if memory_format else {})
return list(generator())
def sample_inputs_conversion_channels_last(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
return [
# Channels last case: input must be 4d
SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})
]
def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, 1, 1), (S, S, S)),
((), ()),
((), (1, 1)),
)
def generator():
for shape, shape_other in cases:
yield(SampleInput(make_arg(shape, requires_grad=requires_grad),
args=(make_arg(shape_other, requires_grad=False), )))
return list(generator())
def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
def make_bool_mask(shape):
# Make sure at least one element is nonzero,
# except for an empty tensor
mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
if mask_t.numel() == 0:
return mask_t
elif mask_t.numel() == 1:
mask_t.fill_(True)
return mask_t
if mask_t.sum() == 0:
def random_index(shape):
return tuple(map(lambda max_idx: random.randint(0, max_idx), shape))
mask_t[random_index(mask_t.shape)] = True
return mask_t
return mask_t
cases = (((M, M), (M, M), (M, M), False),
((M, 1, M), (M, M), (M, M, 1), True),
((), (), (), False),
((M, 1, M), (), (M, M, 1), True),
((), (M, M), (), True),)
def generator():
for shape, mask_shape, other_shape, broadcasts_input in cases:
yield SampleInput(make_arg(shape),
args=(make_bool_mask(mask_shape), make_arg(other_shape)),
broadcasts_input=broadcasts_input)
return list(generator())
def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))
inputs = []
for shape in sizes:
# construct input without any non-zero elements
zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
inputs.append(zeros)
# construct input with mixed zero and non-zero elements
mixed = make_arg(shape).requires_grad_(False)
mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
mixed[mask_t] = 0
inputs.append(mixed)
def generator():
for input_t, as_tuple in product(inputs, [False, True]):
yield(SampleInput(input_t.detach().clone().requires_grad_(requires_grad),
kwargs=dict(as_tuple=as_tuple)))
return list(generator())
def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device)
cases = (((S, S, S), (2,)),
((S, S, S), (S, 1)),
((S, S, S), (S, -1)))
def generator():
for case in cases:
shape, args = case
yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))
return list(generator())
def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs):
def _tensor(shape, dtype=dtype, low=None, high=None):
return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)
test_cases = [
(_tensor((S, S, S)), (2,)),
(_tensor((S, S, S)), (2, 1,)),
(_tensor((S, S, S)), (2, -1,)),
(_tensor((S, S, S)), (2, 1, True,)),
(_tensor((S, S, S)), (2, -1, True,)),
(_tensor((S,)), (2, 0,)),
(_tensor((S,)), (2, 0, True,)),
(_tensor(()), (1,)),
(_tensor(()), (1, 0,)),
(_tensor(()), (1, 0, True))
]
return [SampleInput(tensor, args=args) for tensor, args in test_cases]
def error_inputs_kthvalue(op_info, device, **kwargs):
# tests overlapping output fails
t = make_tensor(10, dtype=torch.float32, device=device)
indices = torch.empty((), device=device, dtype=torch.long)
si = SampleInput(t, args=(5,), kwargs={'out': (t, indices)})
k_out_of_range_err = "selected number k out of range for dimension"
return (ErrorInput(si, error_type=RuntimeError, error_regex="unsupported operation"),
ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3, 0)),
error_type=RuntimeError, error_regex=k_out_of_range_err),
ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3,)),
error_type=RuntimeError, error_regex=k_out_of_range_err),
ErrorInput(SampleInput(torch.tensor(2, device=device), args=(3,)),
error_type=RuntimeError, error_regex=k_out_of_range_err),)
def sample_inputs_dropout(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_arg()),
SampleInput(make_arg(), kwargs=dict(p=0.0)),
SampleInput(make_arg(), kwargs=dict(p=1.0)),
SampleInput(make_arg(), kwargs=dict(training=False)),
]
def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape):
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def make_long_input(shape, *, low, high, noncontiguous=False):
return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high,
noncontiguous=noncontiguous)
def make_per_sample_weight(flag, idx):
# a tensor of float / double weights, or None
# to indicate all weights should be taken to be 1
if flag:
return make_input(idx.shape)
return None
def generator():
offsets = torch.tensor([0, 3], device=device, dtype=torch.long)
for generate_per_sample_weight in (True, False):
for mode in ('sum', 'mean', 'max'):
# per_sample_weights is only supported for mode='sum' (got mode='****')
if generate_per_sample_weight and mode in ('mean', 'max'):
continue
# 1-D index tensor
idx = make_long_input((S,), low=0, high=M)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': offsets, 'mode': mode,
'per_sample_weights': per_sample_weights})
idx = make_long_input((S,), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': offsets, 'mode': mode,
'per_sample_weights': per_sample_weights})
# bag with zero length
idx = make_long_input((S,), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long),
'mode': mode,
'per_sample_weights': per_sample_weights})
# 2-D index tensor
idx = make_long_input((S, S), low=0, high=M)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})
idx = make_long_input((S, S), low=0, high=M, noncontiguous=True)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((M, S)), args=(idx,),
kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})
# The gradient vector at `padding_idx` is not updated.
# Negative padding_idx
idx = make_long_input((6,), low=0, high=S)
idx[0] = 4
idx[4] = 4
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((S, S)), args=(idx,),
kwargs={'padding_idx': -1, 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights},)
idx = make_long_input((3, 3), low=0, high=S)
# Positive padding_idx
idx[0, 0] = 2
idx[1, 1] = 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(make_input((S, S)), args=(idx,),
kwargs={'padding_idx': 2, 'mode': mode,
'per_sample_weights': per_sample_weights},)
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S))
offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long)
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},)
if not requires_grad:
# Following inputs return different gradient from the numerical gradient.
# This is expected and relevant tests are present in `test_nn.py`.
# Due to inplace renorming of weight, the numerical gradient doesn't match the
# analytical gradient.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'max_norm': 1., 'mode': mode,
'per_sample_weights': per_sample_weights},)
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'max_norm': 1., 'norm_type': 1.0,
'mode': mode, 'offsets': offsets,
'per_sample_weights': per_sample_weights},)
if mode != 'max':
# Scale the gradient based on the inverse frequency of a particular index.
# Note : max mode does not support sparse weights
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 1
idx[0, 1] = 1
weights = make_input((S, S))
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'scale_grad_by_freq': True, 'mode': mode,
'per_sample_weights': per_sample_weights},)
# gradcheck not implemented for sparse tensors.
# Note : max mode does not support sparse weights
idx = make_long_input((6, ), low=0, high=S)
weights = make_input((S, S))
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights})
idx = make_long_input((6, ), low=0, high=S)
idx[0] = 1 # freq more than 1
idx[1] = 1 # freq more than 1
idx[3] = 0 # padding_idx
weights = make_input((S, S)) * 2
per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0,
'max_norm': 1., 'offsets': offsets,
'mode': mode, 'per_sample_weights': per_sample_weights})
return list(generator())
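# Illustrative sketch of the 1-D index + offsets convention used above: with a 1-D
# `input`, `offsets` marks where each bag starts, so [0, 3] splits six indices into two
# bags of three (the helper below is for illustration only).
def _example_embedding_bag_offsets():
    import torch.nn.functional as F
    weight = torch.randn(10, 4)
    indices = torch.tensor([1, 2, 4, 5, 4, 3])
    offsets = torch.tensor([0, 3])
    out = F.embedding_bag(indices, weight, offsets=offsets, mode='sum')
    assert out.shape == (2, 4)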
def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape):
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def make_long_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)
def generator():
# 0-D index tensor
idx = make_long_input((), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
# 1-D index tensor
idx = make_long_input((S,), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
# 2-D index tensor
idx = make_long_input((S, S), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
if not requires_grad:
# Following inputs return different gradient from the numerical gradient.
# This is expected and relevant tests are present in `test_nn.py`.
# The gradient vector at `padding_idx` is not updated.
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 2
idx[1, 1] = 2
yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},)
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 4
idx[1, 1] = 4
yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)
# Due to inplace renorming of weight, the numerical gradient doesn't match the
# analytical gradient.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},)
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},)
# Scale the gradient based on the inverse frequency of a particular index.
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 1
idx[0, 1] = 1
weights = make_input((S, S))
yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)
# gradcheck not implemented for sparse tensors.
idx = make_long_input((2, 2), low=0, high=S)
weights = make_input((S, S))
yield SampleInput(weights, args=(idx,), kwargs={'sparse': True})
idx = make_long_input((3, 3), low=0, high=S)
idx[0, 0] = 1 # freq more than 1
idx[0, 1] = 1 # freq more than 1
idx[1, 0] = 0 # padding_idx
weights = make_input((S, S)) * 2
yield SampleInput(weights, args=(idx,),
kwargs={'sparse': True, 'scale_grad_by_freq': True,
'padding_idx': 0, 'max_norm': 1.})
return list(generator())
def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad)
shapes = ((), (S,), (L, M, S))
num_classess = (-1, 10)
return [
SampleInput(
make_input(
shape,
low=0,
high=10 if num_classes == -1 else num_classes // 2,
),
kwargs=dict(num_classes=num_classes),
)
for shape, num_classes in itertools.product(shapes, num_classess)
]
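# Illustrative sketch of the num_classes conventions exercised above: -1 means the number
# of classes is inferred as max(input) + 1 (the helper below is for illustration only).
def _example_one_hot_num_classes():
    import torch.nn.functional as F
    idx = torch.tensor([0, 2, 1])
    assert F.one_hot(idx, num_classes=-1).shape == (3, 3)
    assert F.one_hot(idx, num_classes=5).shape == (3, 5)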
def sample_inputs_softplus(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad)
return [
SampleInput(make_input()),
SampleInput(make_input(), kwargs=dict(beta=3)),
SampleInput(make_input(low=1), kwargs=dict(threshold=1)),
]
def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs):
def make_input():
return make_fullrank_matrices_with_distinct_singular_values(12, 12, device=device, dtype=dtype)
# lhs / rhs shape can have any number of dimensions as long as their product equals 12
shapes = [
((2, 2, 3), (12, 1)),
((4, 3), (6, 1, 2)),
]
samples = []
for shape_lhs, shape_rhs in shapes:
inp = make_input().reshape(*shape_lhs, *shape_rhs).detach()
inp.requires_grad_(requires_grad)
samples.append(SampleInput(inp, kwargs=dict(ind=len(shape_lhs))))
return samples
def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs):
a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
# Zero-dim tensors are not supported in NumPy, so we skip them for now.
# NumPy is used in reference check tests.
# See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix.
# a_shapes += [(0, 0, 1, 2, 3, 0)]
dimss = [None, (0, 2)]
def gen_inputs():
for a_shape, dims in itertools.product(a_shapes, dimss):
a = make_tensor(a_shape, dtype=dtype, device=device, requires_grad=requires_grad)
b = make_tensor(a_shape[:2], dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(a, args=(b,), kwargs=dict(dims=dims))
return list(gen_inputs())
def sample_inputs_mse_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shapes_and_kwargs = [
((), None),
((S,), dict(reduction="mean")),
((S,), dict(reduction="sum")),
((S,), dict(reduction="none")),
((S, S), None),
((S, S, S), None),
]
return [
SampleInput(_make_tensor(shape), args=(_make_tensor(shape),), kwargs=kwargs)
for shape, kwargs in shapes_and_kwargs
]
def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
batch_size = 2
num_channels = 3
modes = ("bilinear", "nearest")
align_cornerss = (False, True)
padding_modes = ("zeros", "border", "reflection")
sample_inputs = []
for dim in (2, 3):
modes_ = (*modes, "bicubic") if dim == 2 else modes
for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss):
sample_inputs.append(
SampleInput(
_make_tensor((batch_size, num_channels, *[S] * dim)),
args=(_make_tensor((batch_size, *[S] * dim, dim)),),
kwargs=dict(
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
)
)
)
return sample_inputs
def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def make_target(shape):
shape = () if len(shape) == 1 else (shape[0], )
t = torch.randint(0, 2, shape, device=device, dtype=torch.long)
# Label with -1 or 1
t = t * 2 - 1
target = t.to(dtype=dtype).detach()
return target
def gen_inputs():
shapes = ((S, S), (S,))
reductions = ('none', 'mean', 'sum')
for s, r in product(shapes, reductions):
yield SampleInput(
make_input(s),
args=(make_input(s), make_target(s)),
kwargs=dict(reduction=r, margin=random.uniform(-1, 1))
)
return list(gen_inputs())
def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs):
input_length = 50
batch = 16
num_char = 20
target_length = 30
def make_log_probs(s):
t = make_tensor(s, device=device, dtype=dtype)
log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad)
return log_probs
def gen_inputs():
reductions = ('none', 'mean', 'sum')
zero_inf = (True, False)
for r, z in product(reductions, zero_inf):
log_probs = make_log_probs((input_length, batch, num_char))
targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device)
input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device)
target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device)
yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z))
return list(gen_inputs())
def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
shape = (2, 3)
num_classes = shape[1]
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_weight = partial(make_tensor, shape=(num_classes,), device=device, dtype=dtype)
def make_target(shape, zeros=False):
s = (shape[0], *shape[2:]) if len(shape) > 1 else ()
if zeros:
return torch.zeros(s, device=device, dtype=torch.long)
else:
return make_tensor(s,
low=0,
high=shape[1] if len(shape) > 1 else shape[0],
device=device,
dtype=torch.long)
def gen_shape_kwargs():
# Batched, non-batched and 2d
shapes = (shape, (num_classes,), shape + (2, 2))
reductions = ('none', 'mean', 'sum')
for reduction, s in product(reductions, shapes):
yield make_input(s), make_target(s), dict(reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction)
yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction)
t = make_target(s)
ignore = num_classes // 2
# If "mean", nll returns NaN, so it's not differentiable at those points
if t.eq(ignore).all() and reduction == "mean":
t.fill_(0)
yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction)
# Test ignoring all the targets
# If "mean", nll returns NaN, so it's not differentiable at those points
if reduction != "mean":
yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction)
def gen_inputs():
for input, target, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target,), kwargs=kwargs)
return list(gen_inputs())
def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs):
def generator():
yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad))
mask = torch.tensor([[0, 1, 0, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 1, 0],
[1, 0, 1, 1, 0],
[1, 0, 0, 1, 0]], dtype=torch.bool, device=device)
t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)
with torch.no_grad():
t[mask] = 0
yield SampleInput(t)
t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)
with torch.no_grad():
t[mask] = 0
yield SampleInput(t)
t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad)
yield SampleInput(t)
yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad))
yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad))
return list(generator())
def _generate_sample_shape_reduction():
shapes = ((S,), (S, S), (S, S, S))
reductions = ('none', 'mean', 'sum')
for s, r in product(shapes, reductions):
yield s, r
def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_var = partial(make_tensor, low=0, device=device, dtype=dtype, requires_grad=requires_grad)
def gen_shape(shape):
yield shape
# Broadcast
yield (*shape[:-1], 1)
yield shape[:-1]
def gen_shape_kwargs():
for s, r in _generate_sample_shape_reduction():
for t_s, v_s in product(gen_shape(s), gen_shape(s)):
yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(full=True, reduction=r)
)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(eps=random.uniform(1e-6, 1e-3), reduction=r)
)
yield (
_make_tensor(s), _make_tensor(t_s), make_var(v_s),
dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r)
)
def gen_inputs():
for input, target, var, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target, var, ), kwargs=kwargs)
return list(gen_inputs())
def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
for s, r in _generate_sample_shape_reduction():
yield _make_tensor(s), _make_tensor(s), dict(reduction=r)
def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
def gen_inputs():
for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
d['margin'] = random.uniform(-9, 9)
yield SampleInput(input, args=(target, ), kwargs=d)
return list(gen_inputs())
def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs):
def gen_inputs():
for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
d['delta'] = random.uniform(1e-3, 9)
yield SampleInput(input, args=(target, ), kwargs=d)
return list(gen_inputs())
def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
def gen_shape_kwargs():
for s, r in _generate_sample_shape_reduction():
for li in (True, False):
for f in (True, False):
yield (
_make_tensor(s), _make_tensor(s),
dict(log_input=li, full=f, reduction=r)
)
yield (
_make_tensor(s), _make_tensor(s),
dict(log_input=li, full=f,
eps=random.uniform(1e-8, 1e-3),
reduction=r)
)
def gen_inputs():
for input, target, kwargs in gen_shape_kwargs():
yield SampleInput(input, args=(target, ), kwargs=kwargs)
return list(gen_inputs())
def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs):
make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
shape = (3,)
batched_shape = (2, *shape)
shapes_and_kwargs = [
(shape, None),
(batched_shape, None),
(shape, dict(keepdim=True)),
(batched_shape, dict(keepdim=True)),
(shape, dict(p=5.0)),
(shape, dict(p=-1.0)),
(shape, dict(eps=1.0)),
]
return [
SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs
]
def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs):
return [
SampleInput(
make_tensor((1, 9, 2, 2), device=device, dtype=dtype, requires_grad=requires_grad),
kwargs=dict(upscale_factor=upscale_factor),
)
for upscale_factor in (1, 3)
]
def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs):
return [
SampleInput(
make_tensor((1, 1, 6, 6), device=device, dtype=dtype, requires_grad=requires_grad),
kwargs=dict(downscale_factor=downscale_factor),
)
for downscale_factor in (1, 3)
]
def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs):
samples = []
    sample_shapes = [(), (S,), (S, S, S)]
atols = [1e-2, 1e-16]
rtols = [1e-1, 0.5]
eps = 1e-8
for s, rtol, atol in product(sample_shapes, rtols, atols):
# close sample
t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
close = (t + atol).detach().requires_grad_(requires_grad)
close_sample = SampleInput(t, args=(close,), kwargs=dict(rtol=rtol, atol=atol))
samples.append(close_sample)
# random sample
a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad)
r_sample = SampleInput(a, args=(b,), kwargs=dict(rtol=rtol, atol=atol))
samples.append(r_sample)
return samples
foreach_unary_op_db: List[OpInfo] = [
ForeachFuncInfo('exp'),
ForeachFuncInfo('acos'),
ForeachFuncInfo('asin'),
ForeachFuncInfo('atan'),
ForeachFuncInfo('cos'),
ForeachFuncInfo('cosh'),
ForeachFuncInfo('log'),
ForeachFuncInfo('log10'),
ForeachFuncInfo('log2'),
ForeachFuncInfo('tan'),
ForeachFuncInfo('tanh'),
ForeachFuncInfo('sin'),
ForeachFuncInfo('sinh'),
ForeachFuncInfo(
'neg',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex(),
sample_inputs_func=sample_inputs_foreach,
safe_casts_outputs=False,
),
ForeachFuncInfo(
'sqrt',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
),
ForeachFuncInfo(
'ceil',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'erf',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'erfc',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'expm1',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'floor',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'log1p',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'round',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'frac',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'reciprocal',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'sigmoid',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
),
ForeachFuncInfo(
'trunc',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
'abs',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
safe_casts_outputs=False,
supports_forward_ad=True,
),
]
foreach_binary_op_db: List[OpInfo] = [
ForeachFuncInfo(
"add",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
),
ForeachFuncInfo(
"sub",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
),
ForeachFuncInfo(
"mul",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
),
ForeachFuncInfo(
"div",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
),
]
foreach_pointwise_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"addcmul",
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
),
ForeachFuncInfo(
"addcdiv",
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
),
]
foreach_minmax_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"maximum",
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
),
ForeachFuncInfo(
"minimum",
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool),
),
]
foreach_reduce_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"norm",
dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
),
]
def reference_sign(x):
if x.dtype == np.bool_:
# `np.sign` doesn't support `bool`.
# >>> np.sign(True)
# ufunc 'sign' did not contain a loop
# with signature matching types dtype('bool') -> dtype('bool')
return np.sign(x, dtype=np.uint8).astype(np.bool_)
return np.sign(x)
def reference_sgn(x):
    # NumPy doesn't have an equivalent to `torch.sgn` for complex dtypes.
    # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j,
    # while `torch.sgn` returns 0 if abs(input) == 0 else input / abs(input).
if x.dtype not in [np.complex64, np.complex128]:
return reference_sign(x)
out = (x / np.abs(x))
if out.ndim == 0:
# Handle x == 0 case
if (x == 0):
# Can't assign to np.complex object
# So make a new one.
return np.array(complex(0, 0), dtype=x.dtype)
return out
# Handle x == 0 case
mask = (x == 0)
out[mask] = complex(0, 0)
return out
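# Illustrative sketch (purely explanatory; this helper is hypothetical and not
# registered with any OpInfo): contrasts np.sign with the torch.sgn-style
# behavior that reference_sgn emulates for a complex zero.
def _example_reference_sgn():
    z = np.array([0j, 3 + 4j], dtype=np.complex64)
    with np.errstate(divide='ignore', invalid='ignore'):
        # reference_sgn returns 0 for a zero element and z / |z| otherwise,
        # matching torch.sgn; np.sign follows its own convention for complex inputs.
        torch_like = reference_sgn(z)
    return torch_like, np.sign(z)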
def reference_sigmoid(x):
    # 'scipy.special.expit' is not supported for complex inputs, so fall back to the naive formula there
if x.dtype in [np.complex64, np.complex128]:
return (1 / (1 + np.exp(-x)))
return scipy.special.expit(x)
def reference_logsigmoid(x):
return np.where(
x < 0,
x - np.log1p(np.exp(x)),
-np.log1p(np.exp(-x)))
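# Illustrative sketch (hypothetical helper, not used by the tests): the two
# branches above are a numerically stable rewrite of log(sigmoid(x)); for
# moderate inputs they agree with the naive formula up to floating-point error.
def _example_logsigmoid_equivalence():
    x = np.linspace(-10.0, 10.0, 5)
    naive = np.log(1.0 / (1.0 + np.exp(-x)))
    return np.allclose(naive, reference_logsigmoid(x))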
def reference_hardsigmoid(x):
intermediate = x / 6 + 0.5
y = np.clip(intermediate, 0, None)
return np.where(y > 1, 1, y).astype(x.dtype)
def reference_lgamma(x):
    # scipy.special.gammaln returns `-inf` when the input is `-inf`,
    # while PyTorch (like C and C++) returns `inf` when the input is `-inf`.
# Reference:
# https://en.cppreference.com/w/cpp/numeric/math/lgamma
# https://en.cppreference.com/w/c/numeric/math/lgamma
# To handle the above discrepancy,
# we replace -inf with inf so values
# that were originally -inf map to inf as expected
if x.dtype.kind == 'f':
x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)
out = scipy.special.gammaln(x)
if x.dtype == np.float16:
        # `scipy.special.gammaln` returns a float32 output when the input is float16,
        # while `torch.lgamma` preserves `float16`. Due to the smaller range of float16,
        # the PyTorch version outputs `inf` where SciPy returns finite values.
out = out.astype(np.float16)
return out
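# Illustrative sketch (hypothetical helper): shows the -inf handling described
# in the comments above. scipy.special.gammaln maps a -inf input to -inf,
# whereas reference_lgamma first rewrites -inf inputs to inf so the result
# matches torch.lgamma's (and C/C++ lgamma's) convention of returning inf.
def _example_lgamma_neg_inf():
    x = np.array([float('-inf'), 0.5], dtype=np.float64)
    return scipy.special.gammaln(x), reference_lgamma(x)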
def reference_polygamma(x, n):
# WEIRD `scipy.special.polygamma` behavior
# >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype
# dtype('float64')
# >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype
# dtype('float32')
#
# Thus we cast output to the default torch dtype.
np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
return scipy.special.polygamma(n, x).astype(np_dtype)
def reference_mvlgamma(x, d):
if x.dtype == np.float16:
return scipy.special.multigammaln(x, d).astype(np.float16)
return scipy.special.multigammaln(x, d)
def reference_softplus(input, beta=1, threshold=20):
non_linear = input * beta <= threshold
output = input.copy()
output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta
return output
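# Illustrative sketch (hypothetical helper): reference_softplus only evaluates
# log(1 + exp(beta * x)) / beta where beta * x <= threshold and passes the
# input through unchanged elsewhere, mirroring the linear regime of
# torch.nn.functional.softplus.
def _example_softplus_regions():
    x = np.array([-2.0, 0.0, 25.0], dtype=np.float64)
    # With the defaults beta=1 and threshold=20, the last element stays 25.0.
    return reference_softplus(x)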
def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray:
if num_classes == -1:
num_classes = int(np.amax(a) + 1)
idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes
one_hot = np.zeros((a.size, num_classes), dtype=a.dtype)
np.put(one_hot, idcs, 1)
return one_hot.reshape(*a.shape, -1)
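# Illustrative sketch (hypothetical helper): the flat-index trick above scatters
# a single 1 per row of an (a.size, num_classes) buffer via np.put and then
# restores the original batch shape with a trailing class dimension.
def _example_one_hot():
    a = np.array([[0, 2], [1, 1]], dtype=np.int64)
    # Returns shape (2, 2, 3) with a 1 at each element's class index.
    return reference_one_hot(a, num_classes=3)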
def reference_mse_loss(input, target, reduction="mean"):
se = (input - target) ** 2
if reduction == "mean":
return np.mean(se)
elif reduction == "sum":
return np.sum(se)
else: # reduction == "none"
return se
def wrapper_set_seed(op, input, *args, **kwargs):
"""Wrapper to set seed manually for some functions like dropout
See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details.
"""
torch.manual_seed(42)
return op(input, *args, **kwargs)
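# Illustrative sketch (hypothetical helper): wrapper_set_seed makes a
# nondeterministic op such as dropout reproducible across invocations, which is
# what reference comparisons of random ops rely on.
def _example_wrapper_set_seed():
    t = torch.ones(4)
    first = wrapper_set_seed(torch.nn.functional.dropout, t, 0.5)
    second = wrapper_set_seed(torch.nn.functional.dropout, t, 0.5)
    # Expected to be True because the seed is reset to 42 before each call.
    return torch.equal(first, second)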
def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5):
feature_size = np.prod(normalized_shape)
inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload]
mean = inp_view.mean(axis=-1, keepdims=True)
var = inp_view.var(axis=-1, ddof=0, keepdims=True)
Y = (inp_view - mean) / np.sqrt(var + eps)
if weight is None and bias is not None:
Y = Y + bias.reshape(-1)
elif weight is not None and bias is None:
Y = Y * weight.reshape(-1)
elif weight is not None and bias is not None:
Y = Y * weight.reshape(-1) + bias.reshape(-1)
return Y.reshape(*inp.shape)
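# Illustrative sketch (hypothetical helper; agreement is only expected up to
# floating-point tolerance): compares the NumPy layer-norm reference above with
# torch.nn.functional.layer_norm on a small random input.
def _example_layer_norm_reference():
    inp = np.random.randn(2, 3, 4).astype(np.float32)
    normalized_shape = (3, 4)
    ref = reference_layer_norm(inp, normalized_shape)
    out = torch.nn.functional.layer_norm(torch.from_numpy(inp), normalized_shape).numpy()
    return np.allclose(ref, out, atol=1e-5)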
def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5):
inp_view = inp
if np.prod(inp.shape) != 0:
inp_view = inp.reshape((inp.shape[0], num_groups, -1))
mean = inp_view.mean(axis=-1, keepdims=True)
var = inp_view.var(axis=-1, ddof=0, keepdims=True)
Y = (inp_view - mean) / np.sqrt(var + eps)
Y = Y.reshape(inp.shape)
if weight is not None:
        # weight is a vector whose length equals the number of channels
if len(Y.shape) > 2:
weight = np.tile(np.expand_dims(weight, 1), [1] + list(inp.shape[2:]))
Y = Y * weight
if bias is not None:
        # bias is a vector whose length equals the number of channels
if len(Y.shape) > 2:
bias = np.tile(np.expand_dims(bias, 1), [1] + list(inp.shape[2:]))
Y = Y + bias
return Y
# A custom reference function is used because np.searchsorted only takes a string `side` argument (instead of both
# `right` and `side`) and has no `out_int32` argument. Additionally, NumPy doesn't support searchsorted with ND
# arrays, so this splits those into stacked 1D cases.
def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None):
side = 'right' if (right or side == 'right') else 'left'
if len(sorted_sequence.shape) == 1 :
ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter)
return ret.astype(np.int32) if out_int32 else ret
elif sorted_sequence.shape[0] == 0:
if sorter is not None:
sorter = sorter.flatten()
ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter)
ret = ret.astype(np.int32) if out_int32 else ret
return ret.reshape(boundary.shape)
else:
# numpy searchsorted only supports 1D inputs so we split up ND inputs
orig_shape = boundary.shape
num_splits = np.prod(sorted_sequence.shape[:-1])
splits = range(0, num_splits)
sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)
if sorter is not None:
sorter = sorter.reshape(num_splits, -1)
split_sequence = [sorted_sequence[i] for i in splits]
split_boundary = [boundary[i] for i in splits]
split_sorter = [sorter[i] if (sorter is not None) else None for i in splits]
split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort)
for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)]
split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret
return np.stack(split_ret).reshape(orig_shape)
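# Illustrative sketch (hypothetical helper): for an ND sorted_sequence the
# reference above loops over stacked 1D rows, since np.searchsorted itself only
# handles 1D sequences; each row of `boundary` is searched in the matching row
# of `sorted_sequence`, mirroring torch.searchsorted's batched semantics.
def _example_searchsorted_nd():
    sorted_sequence = np.array([[1, 3, 5], [2, 4, 6]])
    boundary = np.array([[2, 5, 9], [1, 4, 7]])
    return reference_searchsorted(sorted_sequence, boundary)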
def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
"""Gradcheck wrapper for functions that take Hermitian matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the Hermitian property of the input.
"""
return op(input + input.mH, *args, **kwargs)
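# Illustrative sketch (hypothetical helper, not a registered gradcheck_wrapper):
# symmetrizing with input + input.mH keeps the argument Hermitian even after the
# finite-difference perturbations that gradcheck applies to `input`.
def _example_hermitian_symmetrization():
    a = torch.randn(3, 3, dtype=torch.complex64)
    h = a + a.mH
    # Expected to be True: h is Hermitian by construction.
    return torch.allclose(h, h.mH)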
def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):
"""Gradcheck wrpper for functions that take lower or upper triangular matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the triangular property of the input.
    `idx` is used to specify which `args[idx]` is to be triangularized.
"""
triangular_arg = args[idx].triu() if upper else args[idx].tril()
return op(*args[:idx], triangular_arg, *args[idx + 1:], upper, **kwargs)
def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked operations.
When mask is specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
mask = kwargs.get('mask')
if mask is not None:
output_mask = torch._masked._output_mask(op, input, *args, **kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output
def reference_reduction_numpy(f, supports_keepdims=True):
"""Wraps a NumPy reduction operator.
The wrapper function will forward dim, keepdim, mask, and identity
kwargs to the wrapped function as the NumPy equivalent axis,
    keepdims, where, and initial kwargs, respectively.
Args:
f: NumPy reduction operator to wrap
supports_keepdims (bool, optional): Whether the NumPy operator accepts
keepdims parameter. If it does not, the wrapper will manually unsqueeze
the reduced dimensions if it was called with keepdim=True. Defaults to True.
Returns:
Wrapped function
"""
@wraps(f)
def wrapper(x: np.ndarray, *args, **kwargs):
# Copy keys into a set
keys = set(kwargs.keys())
dim = kwargs.pop('dim', None)
keepdim = kwargs.pop('keepdim', False)
if 'dim' in keys:
dim = tuple(dim) if isinstance(dim, Sequence) else dim
            # NumPy reductions don't accept dim=0 for scalar inputs,
            # so we convert dim to None when it refers to the scalar's only "dimension"
if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}:
kwargs['axis'] = None
else:
kwargs['axis'] = dim
if 'keepdim' in keys and supports_keepdims:
kwargs['keepdims'] = keepdim
if 'mask' in keys:
mask = kwargs.pop('mask')
if mask is not None:
kwargs['where'] = mask.cpu().numpy()
if 'identity' in keys:
identity = kwargs.pop('identity')
if identity is not None:
if identity.dtype is torch.bfloat16:
identity = identity.cpu().to(torch.float32)
else:
identity = identity.cpu()
kwargs['initial'] = identity.numpy()
if 'unbiased' in keys:
unbiased = kwargs.pop('unbiased')
if unbiased is not None:
kwargs['ddof'] = int(unbiased)
result = f(x, *args, **kwargs)
# Unsqueeze reduced dimensions if NumPy does not support keepdims
if keepdim and not supports_keepdims and x.ndim > 0:
dim = list(range(x.ndim)) if dim is None else dim
result = np.expand_dims(result, dim)
return result
return wrapper
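# Illustrative sketch (hypothetical usage, not one of the actual references
# below): wrapping np.sum lets it be called with torch-style dim/keepdim kwargs,
# which the wrapper translates into NumPy's axis/keepdims.
def _example_reference_reduction_numpy():
    ref_sum = reference_reduction_numpy(np.sum)
    x = np.arange(6, dtype=np.float64).reshape(2, 3)
    # Returns an array of shape (2, 1) with values [[3.], [12.]].
    return ref_sum(x, dim=1, keepdim=True)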
def reference_std_var(f):
"""Forwards unbiased/correction kwargs as NumPy's equivalent ddof"""
g = reference_reduction_numpy(f)
@wraps(g)
def wrapper(x: np.ndarray, *args, **kwargs):
assert not ('unbiased' in kwargs and 'correction' in kwargs)
if 'unbiased' in kwargs:
kwargs['ddof'] = int(kwargs.pop('unbiased'))
elif 'correction' in kwargs:
kwargs['ddof'] = kwargs.pop('correction')
return g(x, *args, **kwargs)
return wrapper
def generate_std_var_kwargs(t: torch.Tensor, **kwargs):
"""Generates unbiased/correction kwargs for std/var operators"""
yield ((), {'unbiased': True})
yield ((), {'unbiased': False})
# Currently, calling std with correction is only enabled when
# both dim and keepdim are provided.
if 'dim' in kwargs and 'keepdim' in kwargs:
yield ((), {'correction': 0})
yield ((), {'correction': 1})
numel = torch.tensor(t.shape)[kwargs.get('dim')].prod()
yield ((), {'correction': numel // 2})
def ref_pairwise_distance(input1, input2):
    pass  # placeholder reference; no NumPy implementation is provided here
# Operator database (sorted alphabetically)
op_db: List[OpInfo] = [
UnaryUfuncInfo('abs',
aliases=('absolute', ),
ref=np.abs,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat]),
# Reference: https://github.com/pytorch/pytorch/issues/49224
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
# TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
# We can break the logic of the loop over all possible types but it is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes',
dtypes=[torch.cfloat, torch.cdouble]),
# The complex formula might be wrong
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=complex_types()),
),
supports_inplace_autograd=False,
assert_autodiffed=True,
supports_sparse_csr=True,
supports_forward_ad=True),
# NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
UnaryUfuncInfo('acos',
aliases=('arccos', ),
ref=np.arccos,
domain=(-1, 1),
handles_complex_extremals=False,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "rsqrt_cpu" not implemented for 'BFloat16'
backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-1,
torch.complex64: 1e-2}),),
safe_casts_outputs=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_grad',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD',
dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
aliases=('arccosh', ),
ref=np.arccosh,
domain=(1, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "rsqrt_cuda" not implemented for 'BFloat16'
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
# Reference: https://github.com/pytorch/pytorch/issues/50692
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
),
# acosh is not defined at x < 1 (real) or |z| < 1 (complex)
reference_numerics_filter=NumericsFilter(
condition=lambda x: (torch.abs(x) < 1 if x.is_complex() else x < 1),
safe_val=2)),
BinaryUfuncInfo('add',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \
else np.add(input, np.multiply(alpha, other)),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_add_sub, alpha=2),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values',
dtypes=(torch.complex64, torch.complex128)),
)),
BinaryUfuncInfo('mul',
aliases=('multiply',),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_binary_pwise, python_scalars=True)),
BinaryUfuncInfo('sub',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)),
aliases=('subtract',),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_add_sub, alpha=2, python_scalars=True),
supports_inplace_autograd=False,
decorators=(
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)}),
'TestBinaryUfuncs', 'test_reference_numerics'),
),
skips=(
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics',
dtypes=(torch.uint8,)),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.uint8,)),
)),
OpInfo('addmm',
# This addmm OpInfo is for when alpha and beta are not both equal to 1.
# alpha=beta=1 is tested in the following opinfo, because that special case will
# trigger addmm being decomposed by a jit pass.
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_addmm),
OpInfo('addmm',
# When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.
variant_test_name='decomposed',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_inplace_autograd=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),
OpInfo('addmv',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_addmv),
OpInfo('addbmm',
ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M),
np.multiply(np.asarray(alpha, dtype=batch1.dtype),
np.sum(np.matmul(batch1, batch2), axis=0))),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),
dtypesIfROCM=floating_types_and(torch.half),
backward_dtypesIfROCM=floating_types_and(torch.half),
supports_forward_ad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05),
torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_reference_testing')],
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# addbmm does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# https://github.com/pytorch/pytorch/issues/55907
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addbmm),
OpInfo('baddbmm',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,
*[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if SM53OrLater else [],
torch.complex64, torch.complex128),
supports_forward_ad=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view', device_type='cuda')],
sample_inputs_func=sample_inputs_baddbmm),
OpInfo('dot',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
),
OpInfo('vdot',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_dot_vdot,
supports_forward_ad=True,
),
OpInfo('bmm',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
),
sample_inputs_func=sample_inputs_bmm),
OpInfo('mv',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_mv),
OpInfo('addr',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
# Reference: https://github.com/pytorch/pytorch/issues/50747
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/50747
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
),
sample_inputs_func=sample_inputs_addr,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('addcmul',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_inplace_autograd=False,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
OpInfo('addcdiv',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
DecorateInfo(unittest.skip("Skipped!"),
'TestCommon',
'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
ref=np.arcsin,
domain=(-1, 1),
supports_sparse=True,
supports_sparse_csr=True,
supports_forward_ad=True,
safe_casts_outputs=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}),
'TestUnaryUfuncs', device_type='cuda'),
precisionOverride({torch.bfloat16: 1e-2}),
],
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
aliases=('arcsinh', ),
ref=np.arcsinh,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('atan',
aliases=('arctan', ),
ref=np.arctan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('atan2',
aliases=('arctan2',),
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_atan2,
),
UnaryUfuncInfo('atanh',
aliases=('arctanh', ),
ref=np.arctanh,
domain=(-1, 1),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
supports_inplace_autograd=False,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.cfloat]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.cfloat],
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('allclose',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
ref=np.allclose,
supports_autograd=False,
supports_forward_ad=False,
sample_inputs_func=sample_inputs_allclose,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('broadcast_to',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_broadcast_to),
OpInfo('broadcast_tensors',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_broadcast_tensors),
OpInfo('block_diag',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_block_diag),
OpInfo('bitwise_and',
dtypes=integral_types_and(torch.bool),
supports_autograd=False,
sample_inputs_func=sample_inputs_binary_pwise),
UnaryUfuncInfo('bitwise_not',
ref=np.bitwise_not,
dtypes=integral_types_and(torch.bool),
supports_autograd=False),
OpInfo('bitwise_left_shift',
op=torch.bitwise_left_shift,
dtypes=all_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_bitwise_shift),
OpInfo('bitwise_right_shift',
op=torch.bitwise_right_shift,
dtypes=all_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_bitwise_shift),
OpInfo('combinations',
op=torch.combinations,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_autograd=False,
supports_out=False,
sample_inputs_func=sample_inputs_combinations),
OpInfo('cartesian_prod',
op=torch.cartesian_prod,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_autograd=False,
supports_out=False,
sample_inputs_func=sample_inputs_cartesian_prod,
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.skip("Skipped!"),
'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)),
OpInfo('cdist',
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_cdist,
skips=(
# RuntimeError: _cdist_backward requires X1 to be contiguous
DecorateInfo(unittest.skip("_cdist_backward requires X1 to be contiguous"),
'TestCommon', 'test_noncontiguous_samples'),
)
),
UnaryUfuncInfo('ceil',
ref=np.ceil,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True),
OpInfo('cholesky',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
OpInfo('cholesky_inverse',
dtypes=floating_and_complex_types(),
backward_dtypes=floating_types(),
# TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs
# with complex dtype.
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_cholesky_inverse,
gradcheck_wrapper=gradcheck_wrapper_triangular_input,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# TODO: FIXME: cholesky_inverse throws an error in forward when requires_grad=True
# for complex tensors
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# cholesky_inverse does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),)),
OpInfo('cholesky_solve',
op=torch.cholesky_solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_cholesky_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# cholesky_solve does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),),),
OpInfo('chunk',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_chunk,
supports_forward_ad=True,
supports_out=False),
OpInfo('clone',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_clone,
supports_forward_ad=True,
supports_out=False),
OpInfo('contiguous',
op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_contiguous,
supports_forward_ad=True,
autodiff_fusible_nodes=['aten::contiguous'],
assert_jit_shape_analysis=True,
supports_out=False),
OpInfo('sum_to_size',
op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs),
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_sum_to_size,
supports_forward_ad=True,
supports_out=False,
skips=(
# RuntimeError: inputSet && outputSet
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),),),
OpInfo('symeig',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_symeig,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
    # NOTE: clamp has separate OpInfos for scalar min/max (unary op) vs. tensors
OpInfo('clamp',
aliases=('clip',),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_clamp),
UnaryUfuncInfo('clamp',
variant_test_name='scalar',
aliases=('clip', ),
decorators=(precisionOverride({torch.bfloat16: 7e-2, torch.float16: 1e-2}),),
ref=np.clip,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/54841
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
),
sample_kwargs=sample_kwargs_clamp_scalar,
sample_inputs_func=sample_inputs_clamp_scalar),
UnaryUfuncInfo('positive',
ref=np.positive,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
),
UnaryUfuncInfo('conj',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool,
torch.bfloat16, torch.half),
supports_sparse=True,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('conj_physical',
ref=np.conj,
dtypes=all_types_and_complex_and(torch.bool,
torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
# RuntimeError: inputSet && outputSet
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),
DecorateInfo(unittest.skip("Skipped! conj_physical_ not implemented for sparse"),
'TestSparseUnaryUfuncs', 'test_inplace'),
)),
OpInfo('resolve_conj',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_out=False,
),
OpInfo('resolve_neg',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_real,
supports_forward_ad=True,
supports_out=False,
),
OpInfo('view_as_real',
dtypes=complex_types(),
supports_forward_ad=True,
supports_out=False,
sample_inputs_func=sample_inputs_view_as_real,
test_conjugated_samples=False,
),
OpInfo('view_as_complex',
dtypes=floating_types_and(torch.half),
supports_out=False,
supports_forward_ad=True,
test_neg_view=False,
sample_inputs_func=sample_inputs_view_as_complex,
skips=(
# RuntimeError: Tensor must have a last dimension with stride 1
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"),
)),
OpInfo('complex',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_complex,
supports_forward_ad=True,
),
OpInfo('copysign',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_copysign,
supports_inplace_autograd=False,
supports_forward_ad=True,
),
OpInfo('corrcoef',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_corrcoef,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('cos',
ref=np.cos,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu',
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
OpInfo('cov',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_cov,
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0):
# return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950
# ~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('cross',
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
sample_inputs_func=sample_inputs_cross,
supports_forward_ad=True),
OpInfo('linalg.cross',
ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim),
op=torch.linalg.cross,
dtypes=all_types_and_complex(),
dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
aten_name='linalg_cross',
sample_inputs_func=sample_inputs_cross,
supports_forward_ad=True),
OpInfo('cumsum',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
# cumsum does not handle correctly out= dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_cumulative_ops),
OpInfo('cumprod',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
skips=(
# cumprod does not handle correctly out= dtypes
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
),
# gradgradcheck fails in fast_mode=True: #56275
sample_inputs_func=sample_inputs_cumprod,
gradcheck_fast_mode=False),
OpInfo('cummax',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('cummin',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
UnaryUfuncInfo('deg2rad',
ref=np.radians,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
OpInfo('diff',
op=torch.diff,
           # np.diff has np._NoValue as the default value for prepend and append; compare_with_reference breaks
           # if prepend/append are set to None when converting to numpy
ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: (
np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append)
),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diff),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='no_rounding_mode',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, python_scalars=True),
supports_forward_ad=True,
promotes_int_to_float=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='trunc_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode="trunc", python_scalars=True),
supports_forward_ad=True,
promotes_int_to_float=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
BinaryUfuncInfo('div',
aliases=('divide',),
variant_test_name='floor_rounding',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_binary_pwise, rounding_mode="floor", python_scalars=True),
supports_forward_ad=True,
promotes_int_to_float=True,
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
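    # The three 'div' variants above differ only in rounding_mode. A minimal sketch of the
    # behavior being exercised (illustrative only, not part of the test suite):
    #   >>> torch.div(torch.tensor(-7.), torch.tensor(2.))                         # true division
    #   tensor(-3.5000)
    #   >>> torch.div(torch.tensor(-7.), torch.tensor(2.), rounding_mode='trunc')  # round toward zero
    #   tensor(-3.)
    #   >>> torch.div(torch.tensor(-7.), torch.tensor(2.), rounding_mode='floor')  # round toward -inf
    #   tensor(-4.)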
BinaryUfuncInfo('true_divide',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
promotes_int_to_float=True,
sample_inputs_func=sample_inputs_binary_pwise,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
UnaryUfuncInfo('exp',
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
),
assert_autodiffed=True,
supports_forward_ad=True,
safe_casts_outputs=True),
OpInfo('expand',
op=lambda self, shape: self.expand(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_expand,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
supports_out=False),
OpInfo('expand_as',
op=lambda self, other: self.expand_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_expand_as,
supports_out=False),
OpInfo('diag',
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diag),
OpInfo('diag_embed',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('diagonal',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diagonal_diag_embed),
OpInfo('diagonal_scatter',
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_diagonal_scatter),
OpInfo('eq',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('fmax',
op=torch.fmax,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('fmin',
op=torch.fmin,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,),
OpInfo('fmod',
ref=np.fmod,
dtypes=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_fmod_remainder),
OpInfo('fmod',
ref=np.fmod,
variant_test_name='autodiffed',
dtypes=all_types_and(torch.float16, torch.bool),
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True)),
OpInfo('remainder',
ref=np.remainder,
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_fmod_remainder),
OpInfo('remainder',
ref=np.remainder,
variant_test_name='autodiffed',
dtypes=all_types_and(torch.float16, torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bool, torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True,
sample_inputs_func=partial(sample_inputs_fmod_remainder, autodiffed=True),
decorators=(
# Fails on XLA
# False is not true : Tensors failed to compare as equal!
# Attempted to compare equality of tensors with different dtypes
DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
)),
UnaryUfuncInfo('frac',
ref=lambda x: np.modf(x)[0],
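                   # np.modf returns a (fractional, integral) pair; indexing [0] keeps the
                   # fractional part, which is what torch.frac computes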
dtypes=floating_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
# Reference for disabling extremals
# https://github.com/pytorch/pytorch/issues/51948
handles_extremals=False),
SpectralFuncInfo('fft.fft',
aten_name='fft_fft',
ref=np.fft.fft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
),
SpectralFuncInfo('fft.fft2',
aten_name='fft_fft2',
ref=np.fft.fft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})],
),
SpectralFuncInfo('fft.fftn',
aten_name='fft_fftn',
ref=np.fft.fftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[precisionOverride(
{torch.float: 1e-4, torch.cfloat: 1e-4})],
),
SpectralFuncInfo('fft.hfft',
aten_name='fft_hfft',
ref=np.fft.hfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.hfft2',
aten_name='fft_hfft2',
ref=scipy.fft.hfft2 if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.hfftn',
aten_name='fft_hfftn',
ref=scipy.fft.hfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.rfft',
aten_name='fft_rfft',
ref=np.fft.rfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False),
SpectralFuncInfo('fft.rfft2',
aten_name='fft_rfft2',
ref=np.fft.rfft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[precisionOverride({torch.float: 1e-4})],),
SpectralFuncInfo('fft.rfftn',
aten_name='fft_rfftn',
ref=np.fft.rfftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[precisionOverride({torch.float: 1e-4})],),
SpectralFuncInfo('fft.ifft',
aten_name='fft_ifft',
ref=np.fft.ifft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types()),
SpectralFuncInfo('fft.ifft2',
aten_name='fft_ifft2',
ref=np.fft.ifft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ifftn',
aten_name='fft_ifftn',
ref=np.fft.ifftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ihfft',
aten_name='fft_ihfft',
ref=np.fft.ihfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
check_batched_grad=False),
SpectralFuncInfo('fft.ihfft2',
aten_name='fft_ihfft2',
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.ihfftn',
aten_name='fft_ihfftn',
ref=scipy.fft.ihfftn if has_scipy_fft else None,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and(torch.bool),
default_test_dtypes=floating_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 2e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.irfft',
aten_name='fft_irfft',
ref=np.fft.irfft,
ndimensional=SpectralFuncType.OneD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False),
SpectralFuncInfo('fft.irfft2',
aten_name='fft_irfft2',
ref=np.fft.irfft2,
ndimensional=SpectralFuncType.TwoD,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
SpectralFuncInfo('fft.irfftn',
aten_name='fft_irfftn',
ref=np.fft.irfftn,
ndimensional=SpectralFuncType.ND,
dtypes=all_types_and_complex_and(torch.bool),
default_test_dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
decorators=[
DecorateInfo(
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),
'TestFFT', 'test_reference_nd')],
),
OpInfo('fft.fftshift',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=lambda *a, **kw: list(sample_inputs_fftshift(*a, **kw)),
supports_out=False,
supports_forward_ad=True,
),
OpInfo('fft.ifftshift',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=lambda *a, **kw: list(sample_inputs_fftshift(*a, **kw)),
supports_out=False,
supports_forward_ad=True,
),
OpInfo('stft',
decorators=[
skipCPUIfNoFFT,
DecorateInfo(unittest.skip("Skipped! stft does not match the native function"),
'TestJit', 'test_variant_consistency_jit'),
],
dtypes=floating_and_complex_types(),
sample_inputs_func=lambda *a, **kw: list(sample_inputs_stft(*a, **kw)),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
),
OpInfo('istft',
decorators=[
skipCPUIfNoFFT,
DecorateInfo(unittest.skip("Skipped! istft does not match the native function"),
'TestJit', 'test_variant_consistency_jit'),
# gradcheck fails on ROCm (gh-68429)
DecorateInfo(skipCUDAIfRocm, 'TestGradients', 'test_fn_grad'),
],
dtypes=floating_and_complex_types(),
sample_inputs_func=lambda *a, **kw: list(sample_inputs_istft(*a, **kw)),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_out=False,
),
UnaryUfuncInfo('floor',
ref=np.floor,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True),
OpInfo('flip',
op=torch.flip,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_flip,
supports_forward_ad=True,
supports_out=False),
OpInfo('fliplr',
op=torch.fliplr,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
supports_forward_ad=True,
supports_out=False),
OpInfo('flipud',
op=torch.flipud,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_fliplr_flipud,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('i0',
ref=np_unary_ufunc_integer_promotion_wrapper(
scipy.special.i0) if TEST_SCIPY else _NOTHING,
aliases=('special.i0',),
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 5e-1}),),
backward_dtypesIfCPU=floating_types(),
backward_dtypesIfCUDA=floating_types(),
backward_dtypesIfROCM=floating_types(),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_i0_i1),
UnaryUfuncInfo('special.i0e',
aten_name='special_i0e',
ref=scipy.special.i0e if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.bfloat16: 3e-1,
torch.float16: 3e-1}),),
backward_dtypesIfCPU=floating_types(),
backward_dtypesIfCUDA=floating_types(),
backward_dtypesIfROCM=floating_types(),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.i1',
aten_name='special_i1',
ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True,
decorators=(
DecorateInfo(toleranceOverride({
torch.float32: tol(atol=1e-4, rtol=0),
torch.bool: tol(atol=1e-4, rtol=0)})),
),
skips=(
# TODO: FIXME: jiterator does not support casting to complex outs
DecorateInfo(unittest.skip("FIXME: Jiterator does not support complex outs!"),
"TestUnaryUfuncs",
"test_out_arg_all_dtypes",
device_type='cuda'),
)),
UnaryUfuncInfo('special.i1e',
aten_name='special_i1e',
ref=scipy.special.i1e if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool),
sample_inputs_func=sample_inputs_i0_i1,
safe_casts_outputs=True),
UnaryUfuncInfo('special.ndtr',
aten_name='special_ndtr',
decorators=(precisionOverride({torch.bfloat16: 5e-3,
torch.float16: 5e-4}),),
ref=scipy.special.ndtr if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16),
safe_casts_outputs=True),
BinaryUfuncInfo('floor_divide',
dtypes=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
rhs_make_tensor_kwargs=dict(exclude_zero=True),
),
UnaryUfuncInfo('frexp',
op=torch.frexp,
ref=np.frexp,
dtypes=floating_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
                   # skip testing torch.frexp as it is not supported by the ROCm platform yet
decorators=[],
supports_out=False,
supports_forward_ad=True,
skips=(
                       # skip the tests below as torch.frexp returns a tuple-like (mantissa, exponent) output,
                       # while these tests currently require the output to be a single tensor
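                       # For example (illustrative sketch, not part of the suite):
                       #   >>> mantissa, exponent = torch.frexp(torch.tensor([1., 8.]))
                       #   >>> mantissa, exponent
                       #   (tensor([0.5000, 0.5000]), tensor([1, 4], dtype=torch.int32))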
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'),
                       # skip test_reference_numerics due to an error in Windows CI:
                       # np.frexp returns the exponent as np.intc dtype on Windows,
                       # and np.intc does not have a corresponding torch dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=IS_WINDOWS),
)),
OpInfo('ge',
aliases=('greater_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('geqrf',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_geqrf,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
OpInfo('gt',
aliases=('greater',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
UnaryUfuncInfo('imag',
ref=np.imag,
dtypes=complex_types(),
supports_out=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
# RuntimeError: view_as_real doesn't work on unresolved conjugated tensors.
check_batched_forward_grad=False,
skips=(
# Skip since real and imag don't have out variants.
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('gradient',
dtypes=floating_and_complex_types_and(torch.int8, torch.int16,
torch.int32, torch.int64,
torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=(
               # the following tests give a runtime error with an undefined value tensor
               # see discussion: https://github.com/pytorch/pytorch/issues/56660
# RuntimeError:
# Arguments for call are not valid.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_gradient),
OpInfo('inverse',
op=torch.inverse,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('isin',
dtypes=all_types(),
dtypesIfCUDA=all_types_and(torch.half),
supports_autograd=False,
sample_inputs_func=sample_inputs_isin,
skips=(
# https://github.com/pytorch/pytorch/issues/67432
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), # noqa: B950
)),
OpInfo('kthvalue',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_kthvalue,
error_inputs_func=error_inputs_kthvalue),
OpInfo('le',
aliases=('less_equal',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('linalg.det',
op=torch.linalg.det,
aliases=('det', ),
dtypes=floating_and_complex_types(),
backward_dtypes=floating_and_complex_types(),
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],
supports_inplace_autograd=False,
skips=(
# https://github.com/pytorch/pytorch/issues/67512
DecorateInfo(unittest.skip("67512"), 'TestCommon', 'test_noncontiguous_samples'),
)),
OpInfo('linalg.det',
op=torch.linalg.det,
variant_test_name='singular',
aliases=('det', ),
dtypes=double_types(),
backward_dtypes=double_types(),
aten_name='linalg_det',
sample_inputs_func=sample_inputs_linalg_det_singular,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, skipCUDAIfRocm],
supports_inplace_autograd=False,
skips=(
# https://github.com/pytorch/pytorch/issues/67512
DecorateInfo(unittest.skip("67512"), 'TestCommon', 'test_noncontiguous_samples'),
# Will be removed once https://github.com/pytorch/pytorch/issues/62328 is fixed
# Probable fix (open PR): https://github.com/pytorch/pytorch/pull/62570
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', device_type='cuda',
dtypes=(torch.complex128,)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
# This test fails because singular inputs cannot be reliably
# generated unless we're using double types
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', 'test_unsupported_dtypes'),
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', 'test_unsupported_backward',
dtypes=(torch.float32, torch.complex64,)),
)),
OpInfo('linalg.cholesky',
aten_name='linalg_cholesky',
dtypes=floating_and_complex_types(),
# TODO: RuntimeError: While computing batched gradients,
# got: vmap: Calling Tensor.as_strided is not supported
# unless the batch dims being vmapped over are at the front of the tensor (in memory layout).
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
OpInfo('linalg.cholesky_ex',
aten_name='linalg_cholesky_ex',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_cholesky,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.cond',
aten_name='linalg_cond',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_cond,
check_batched_gradgrad=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('linalg.eig',
aten_name='linalg_eig',
op=torch.linalg.eig,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_eig,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Disabled due to https://github.com/pytorch/pytorch/issues/67367
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'),),
),
OpInfo('linalg.eigvals',
aten_name='linalg_eigvals',
op=torch.linalg.eigvals,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Disabled due to https://github.com/pytorch/pytorch/issues/67367
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'),
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
OpInfo('linalg.eigh',
aten_name='linalg_eigh',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# See: https://github.com/pytorch/pytorch/issues/67367
# This DecorateInfo should change to `dtypes=complex_dtypes()` after the above
# has been resolved.
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=floating_and_complex_types()),),
),
OpInfo('linalg.eigvalsh',
aten_name='linalg_eigvalsh',
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_linalg_eigh,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Gradcheck for complex is not implemented yet
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
OpInfo('linalg.householder_product',
aten_name='linalg_householder_product',
op=torch.linalg.householder_product,
aliases=('orgqr', ),
dtypes=floating_and_complex_types(),
# TODO: backward uses in-place operations that vmap doesn't like
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_householder_product,
decorators=[
skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack,
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)})),
]),
OpInfo('linalg.lstsq',
aten_name='linalg_lstsq',
dtypes=floating_and_complex_types(),
supports_out=True,
sample_inputs_func=sample_inputs_linalg_lstsq,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# we skip gradient checks for this suite as they are tested in
# variant_test_name='grad_oriented'
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
)),
OpInfo('linalg.lstsq',
aten_name='linalg_lstsq',
variant_test_name='grad_oriented',
           # gradcheck for forward AD fails with multi-Tensor outputs
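           # torch.linalg.lstsq returns a (solution, residuals, rank, singular_values) named
           # tuple; the lambda below indexes [0] so that gradcheck only sees the solution tensor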
op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0],
supports_out=False,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_lstsq,
supports_autograd=True,
supports_forward_ad=True,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
# tests do not work with passing lambda for op
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('linalg.matrix_power',
aliases=('matrix_power',),
aten_name='linalg_matrix_power',
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
supports_forward_ad=True,
check_batched_grad=False,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],
sample_inputs_func=sample_inputs_linalg_matrix_power,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),),
OpInfo('linalg.multi_dot',
# Need this lambda because gradcheck does not work with TensorList inputs
aten_name='linalg_multi_dot',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
supports_inplace_autograd=False,
# Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_multi_dot,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples'),
# Fails on XLA.
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
)),
OpInfo('linalg.norm',
op=torch.linalg.norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_norm,
aten_name='linalg_norm',
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
OpInfo('linalg.matrix_norm',
aten_name='linalg_matrix_norm',
dtypes=floating_and_complex_types(),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_matrix_norm,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
)),
OpInfo('linalg.qr',
aten_name='linalg_qr',
op=torch.linalg.qr,
dtypes=floating_and_complex_types(),
# batched gradients do not work for empty inputs
# https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085
check_batched_gradgrad=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_linalg_qr,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.slogdet',
aten_name='linalg_slogdet',
op=torch.linalg.slogdet,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_slogdet,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
)),
OpInfo('linalg.vector_norm',
op=torch.linalg.vector_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
sample_inputs_func=sample_inputs_linalg_vector_norm,
aten_name='linalg_vector_norm'),
UnaryUfuncInfo('log',
ref=np.log,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
# log(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
UnaryUfuncInfo('log10',
ref=np.log10,
domain=(0, None),
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_WINDOWS),
),
# log10(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
UnaryUfuncInfo('log1p',
ref=np.log1p,
aliases=('special.log1p',),
domain=(-1, None),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
safe_casts_outputs=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True),
UnaryUfuncInfo('log2',
ref=np.log2,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
),
# log2(z)->-inf for |z|->0
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)),
BinaryUfuncInfo('ldexp',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_binary_pwise,
promotes_int_to_float=True,
supports_out=True,
skips=(
# RuntimeError: mul(): functions with out=... arguments don't support
# automatic differentiation, but one of the arguments requires grad
# https://github.com/pytorch/pytorch/issues/68966
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
# FIXME: ldexp does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
),
decorators=[
DecorateInfo(
toleranceOverride({
torch.complex64: tol(atol=1e-05, rtol=1e-05)
}),
'TestCommon', device_type='cpu',
),
], ),
OpInfo('logaddexp',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),
args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),
OpInfo('logaddexp2',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:
(SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),
args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),
UnaryUfuncInfo('logical_not',
ref=np.logical_not,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_autograd=False,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# The function variant always returns BoolTensor
# while the inplace variant preserves the input dtype.
# >>> t = torch.randn(3)
# >>> torch.logical_not(t)
# tensor([False, False, False])
# >>> torch.logical_not(t).dtype
# torch.bool
# >>> t.logical_not_().dtype
# torch.float32
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
)),
OpInfo('lt',
aliases=('less',),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('lu',
op=torch.lu,
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
# we use in-place operations which cannot be avoided.
# This causes vmap failures, hence we skip batched gradient checks
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_inputs_lu,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# we skip jit tests because `lu` is a torch function
# RuntimeError:
# 'Tensor (inferred)' object has no attribute or method 'lu'.:
# File "<string>", line 3
# def the_method(i0):
# return i0.lu(True, True)
# ~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('lu_solve',
op=torch.lu_solve,
dtypes=floating_and_complex_types(),
check_batched_gradgrad=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_lu_solve,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# RuntimeError: lu_unpack: LU_pivots is expected to be a contiguous tensor of torch.int32 dtype
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950
)),
OpInfo('lu_unpack',
op=torch.lu_unpack,
dtypes=floating_and_complex_types(),
supports_inplace_autograd=False,
# we use in-place operations which cannot be avoided.
# This causes vmap failures, hence we skip batched gradient checks
check_batched_grad=False,
supports_out=True,
sample_inputs_func=sample_inputs_lu_unpack,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# LU_pivots is expected to be a contiguous tensor
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950
# cuda gradchecks are slow
# see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad', device_type='cuda'),
)),
OpInfo('masked_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_fill,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False),
OpInfo('masked_scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_scatter,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False),
OpInfo('masked_select',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_masked_select),
OpInfo('matrix_exp',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
aliases=('linalg.matrix_exp',),
sample_inputs_func=sample_inputs_matrix_exp,
# Needs to construct a 2nx2n matrix by copy_ ing into it
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
),
OpInfo('matmul',
aliases=('linalg.matmul',),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_matmul,
skips=(
# ROCm intermittently fails the test with standard atol/rtol
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}),
'TestCommon', 'test_noncontiguous_samples',
active_if=TEST_WITH_ROCM),
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu', dtypes=(torch.long,)),
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestOpInfo',
device_type='xla', dtypes=(torch.long,)),
)),
OpInfo('max',
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_forward_ad=True),
OpInfo('max',
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),
OpInfo('median',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of median do support out
supports_out=False,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('nanmedian',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
# TODO: some signatures of nanmedian do support out
supports_out=False,
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)),
OpInfo('var_mean',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),
backward_dtypes=floating_types_and(torch.half),
backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.half),
# TODO: some signatures of var_mean do support out
supports_out=False,
supports_forward_ad=True,
skips=(
# https://github.com/pytorch/pytorch/issues/67539
DecorateInfo(unittest.skip("67539"), 'TestCommon', 'test_noncontiguous_samples',
active_if=TEST_WITH_ASAN, device_type='cpu'),
# TODO: FIXME: complex inputs requiring grad error in forward
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# TODO: review with var_mean tests in test_autograd.py
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'))),
OpInfo('std_mean',
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False),
backward_dtypes=floating_types_and(torch.half),
backward_dtypesIfCPU=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.half),
# TODO: some signatures of std_mean do support out
supports_out=False,
supports_forward_ad=True,
skips=(
# https://github.com/pytorch/pytorch/issues/67539
DecorateInfo(unittest.skip("67539"), 'TestCommon', 'test_noncontiguous_samples',
active_if=TEST_WITH_ASAN, device_type='cpu'),
# TODO: FIXME: complex inputs requiring grad error in forward
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# TODO: fix along with var_mean autograd tests
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'))),
OpInfo('meshgrid',
variant_test_name='variadic_tensors',
ref=np.meshgrid,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),
skips=[
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Skip operator schema test because this is a functional and not an operator
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
],
supports_out=False,
supports_forward_ad=True),
OpInfo('meshgrid',
variant_test_name='list_of_tensors',
           # Unlike the variant above, we do not use np.meshgrid as a
           # ref since it does not officially support a list of numpy
           # arrays.
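           # Both call styles exercised by the two meshgrid variants, as a rough sketch
           # (illustrative only, not part of the suite):
           #   >>> x, y = torch.arange(2), torch.arange(3)
           #   >>> torch.meshgrid(x, y)     # variadic tensors ('variadic_tensors' variant)
           #   >>> torch.meshgrid([x, y])   # single list of tensors ('list_of_tensors' variant)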
dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16),
sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'),
skips=[
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
],
assert_autodiffed=True,
supports_out=False,
autodiff_nonfusible_nodes=[],
supports_forward_ad=True),
OpInfo('min',
variant_test_name='reduction_with_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_reduction_with_dim,
supports_forward_ad=True),
OpInfo('min',
variant_test_name='reduction_no_dim',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),
OpInfo('quantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
OpInfo('nanquantile',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_reduction_quantile,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)),
BinaryUfuncInfo(
'max',
aliases=('maximum',),
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_binary,
supports_forward_ad=True,
assert_autodiffed=True,
ref=np.maximum,
skips=(
# FIXME: maximum does not accept scalar inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
# TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo(
'maximum',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,
ref=np.maximum,
skips=(
# FIXME: maximum does not accept scalar inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
# TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo(
'min',
aliases=('minimum',),
variant_test_name='binary',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_max_min_binary,
supports_forward_ad=True,
assert_autodiffed=True,
ref=np.minimum,
skips=(
# FIXME: min does not accept scalar inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
# TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo(
'minimum',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_max_min_binary,
ref=np.minimum,
skips=(
# FIXME: minimum does not accept scalar inputs
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
# TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
),
),
BinaryUfuncInfo('logical_and',
ref=np.logical_and,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
always_returns_bool=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# FIXME: logical_and does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('logical_or',
ref=np.logical_or,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
always_returns_bool=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# FIXME: logical_or does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('logical_xor',
ref=np.logical_xor,
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
always_returns_bool=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# FIXME: logical_xor does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('bitwise_or',
ref=np.bitwise_or,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
skips=(
# TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo('bitwise_xor',
ref=np.bitwise_xor,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
skips=(
# TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half'
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'),
)),
BinaryUfuncInfo('heaviside',
ref=lambda a, b: (
# necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64
np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b)
),
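                    # casting back to np.int64 keeps the reference aligned with torch.heaviside,
                    # which preserves the integer dtype of its inputs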
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
# FIXME: heaviside does not accept scalar inputs
skips=(
# NumPy's heaviside promotes bool to float16
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_reference_numerics_heavisidel',
dtypes=(torch.bool,)),
# RuntimeError: heaviside is not yet implemented for tensors with different dtypes.
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion'),
# PyTorch's heaviside does not appear to propagate NaNs
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('lcm',
ref=np.lcm,
dtypes=integral_types_and(),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
skips=(
# TODO: FIXME: lcm doesn't support scalars
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_broadcast_python_scalar'),
)),
BinaryUfuncInfo('gcd',
ref=np.gcd,
dtypes=integral_types_and(),
sample_inputs_func=sample_inputs_binary_pwise,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_reference_numerics_small_values',
dtypes=(torch.int8,)),
# TODO: FIXME: jiterator doesn't support non-tensor inputs
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_broadcast_python_scalar'),
# TODO: FIXME: jiterator doesn't support casts to unsupported types
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion',
device_type='cuda'))),
BinaryUfuncInfo('isclose',
ref=np.isclose,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_isclose,
supports_autograd=False,
supports_out=False,
skips=(
# RuntimeError: Short did not match Int
DecorateInfo(unittest.expectedFailure,
'TestBinaryUfuncs',
'test_type_promotion'),
DecorateInfo(unittest.skip("Skipped!"),
'TestBinaryUfuncs',
'test_reference_numerics_extremal_values'),
# FIXME: isclose does not accept scalar inputs
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_broadcast_python_scalar'),
)),
    # `softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and one without.
    # https://github.com/pytorch/pytorch/issues/68752
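    # A minimal sketch of the difference (illustrative only, not part of the suite): without
    # `dtype`, softmax rejects integer inputs, while passing `dtype` casts the input first:
    #   >>> t = torch.arange(3)
    #   >>> torch.nn.functional.softmax(t, dim=0)                       # raises a RuntimeError
    #   >>> torch.nn.functional.softmax(t, dim=0, dtype=torch.float32)  # works, returns float32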
OpInfo('softmax',
aliases=('special.softmax', 'nn.functional.softmax',),
aten_name='softmax',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
supports_out=False),
OpInfo('softmax',
aliases=('special.softmax', 'nn.functional.softmax',),
variant_test_name="with_dtype",
aten_name='softmax',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True,
supports_out=False),
    # `softmin` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and one without.
    # https://github.com/pytorch/pytorch/issues/68752
OpInfo('nn.functional.softmin',
aten_name='softmin',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_jit_shape_analysis=False,
assert_autodiffed=False,
supports_out=False),
OpInfo('nn.functional.softmin',
variant_test_name="with_dtype",
aten_name='softmin',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=False,
supports_out=False),
OpInfo(
"nn.functional.cross_entropy",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_cross_entropy,
supports_out=False,
decorators=(
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}),
"TestJit",
"test_variant_consistency_jit",
device_type="cpu",
),
),
skips=(
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536
# test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked
# 1536 bytes CUDA memory on device 0
DecorateInfo(
unittest.expectedFailure,
"TestJit",
"test_variant_consistency_jit",
device_type="cuda",
),
)
),
OpInfo('nn.functional.normalize',
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_normalize),
OpInfo('aminmax',
ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),
dtypes=all_types_and(torch.bool),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(onlyNativeDeviceTypes,),
supports_autograd=False,
sample_inputs_func=sample_inputs_aminmax),
OpInfo('as_strided',
op=lambda x, size, stride, storage_offset=0:
torch.as_strided(x, size, stride, storage_offset=storage_offset),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_as_strided,
skips=(
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
# With rtol=1e-07 and atol=1e-07, found 1 element(s) (out of 1) whose difference(s)
# exceeded the margin of error (including 0 nan comparisons). The greatest difference
# was 1.0 (1.0 vs. -0.0), which occurred at index 0.
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# AssertionError: False is not true : Scalars failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'),)),
OpInfo('nn.functional.cosine_similarity',
aten_name="cosine_similarity",
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_cosine_similarity),
OpInfo('nn.functional.adaptive_avg_pool1d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool1d),
OpInfo('nn.functional.adaptive_avg_pool2d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, int]'. :
# File "<string>", line 3
# def the_method(i0):
# return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool2d),
OpInfo('nn.functional.adaptive_avg_pool3d',
dtypes=floating_types_and(torch.half),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, NoneType, NoneType]'. :
# File "<string>", line 3
#
# def the_method(i0):
# return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
#
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_avg_pool3d),
OpInfo('nn.functional.adaptive_max_pool1d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool1d),
OpInfo('nn.functional.adaptive_max_pool2d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, int]'. :
# File "<string>", line 3
# def the_method(i0):
# return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool2d),
OpInfo('nn.functional.adaptive_max_pool3d',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
decorators=(
# RuntimeError:
# adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor):
# Expected a value of type 'List[int]' for argument 'output_size' but
# instead found type 'Tuple[NoneType, NoneType, NoneType]'. :
# File "<string>", line 3
#
# def the_method(i0):
# return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
#
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_adaptive_max_pool3d),
OpInfo('nn.functional.avg_pool1d',
aten_name='avg_pool1d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_avgpool1d),
OpInfo('nn.functional.avg_pool3d',
aten_name='avg_pool3d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_avgpool3d),
OpInfo('nn.functional.relu',
aten_name="relu",
supports_autograd=True,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_activation_relu,
supports_out=False,
supports_forward_ad=True),
OpInfo('nn.functional.conv_transpose1d',
aten_name='conv_transpose1d',
aliases=('conv_transpose1d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv_transpose1d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv_transpose2d',
aten_name='conv_transpose2d',
aliases=('conv_transpose2d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv_transpose2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv_transpose3d',
aten_name='conv_transpose3d',
aliases=('conv_transpose3d',),
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv_transpose3d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_variant_consistency_eager', device_type='cuda'),
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }),
'TestCommon', 'test_noncontiguous_samples', device_type='cuda')],
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv1d',
aliases=('conv1d',),
aten_name='conv1d',
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
sample_inputs_func=sample_inputs_conv1d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.conv2d',
aliases=('conv2d',),
aten_name='conv2d',
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
           sample_inputs_func=sample_inputs_conv2d,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.group_norm',
aten_name='group_norm',
aliases=('group_norm',),
ref=reference_group_norm,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))
],
sample_inputs_func=sample_inputs_group_norm,),
OpInfo('nn.functional.instance_norm',
# no ref because instance_norm will often have numerical instability (large numbers or nan)
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))
],
sample_inputs_func=sample_inputs_instance_norm,),
OpInfo('nn.functional.layer_norm',
aten_name='layer_norm',
aliases=('layer_norm',),
ref=reference_layer_norm,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}),
'TestCommon', 'test_reference_testing'
)
],
sample_inputs_func=sample_inputs_layer_norm,),
OpInfo('nn.functional.local_response_norm',
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,))
],
sample_inputs_func=sample_inputs_local_response_norm,),
OpInfo('nn.functional.pad',
variant_test_name='constant',
aten_name='constant_pad_nd',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'),
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='reflect',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='replicate',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
OpInfo('nn.functional.pad',
variant_test_name='circular',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'),
supports_forward_ad=True,
check_batched_grad=False,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: falseINTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
supports_out=False),
OpInfo('nn.functional.hardswish',
aten_name="hardswish",
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_hardswish,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=False,
supports_forward_ad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::hardswish"]),
OpInfo('nn.functional.unfold',
aten_name='im2col',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_nn_unfold,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='nearest',
supports_autograd=True,
dtypes=floating_types_and(torch.uint8),
dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),
sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='linear',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bilinear',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='bicubic',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='trilinear',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
aten_name="interpolate",
variant_test_name='area',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_interpolate, 'area'),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.upsample_bilinear',
supports_autograd=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.upsample_nearest',
supports_autograd=True,
dtypes=floating_types_and(torch.uint8),
dtypesIfCUDA=floating_types_and(torch.half, torch.uint8),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample, 'nearest'),
skips=(
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.leaky_relu',
aliases=None,
aten_name="leaky_relu",
sample_inputs_func=sample_inputs_leaky_relu,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::leaky_relu"]),
OpInfo('nn.functional.avg_pool2d',
aten_name='avg_pool2d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types_and(torch.int64),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_avgpool2d),
OpInfo('nn.functional.fractional_max_pool2d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool2d,
decorators=[
# FIXME: both derivatives are implemented incorrectly
# https://github.com/pytorch/pytorch/issues/69322
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
# FIXME: produces incorrect output on non-contiguous inputs
# https://github.com/pytorch/pytorch/issues/69325
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
], ),
OpInfo('nn.functional.fractional_max_pool3d',
supports_autograd=True,
supports_out=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
test_neg_view=False,
sample_inputs_func=sample_inputs_fractional_max_pool3d,
decorators=[
# FIXME: both derivatives are implemented incorrectly
# https://github.com/pytorch/pytorch/issues/69322
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
# FIXME: produces incorrect output on non-contiguous inputs
# https://github.com/pytorch/pytorch/issues/69325
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'),
# FIXME: AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
], ),
OpInfo('nn.functional.max_pool1d',
aten_name='max_pool1d',
supports_autograd=True,
supports_out=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
               # Pre-existing condition; needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_pool2d',
aten_name='max_pool2d',
supports_autograd=True,
# Vmap is not happy with non-contiguous (channels_last) inputs
check_batched_gradgrad=False,
supports_out=False,
assert_jit_shape_analysis=True,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.max_pool3d',
aten_name='max_pool3d',
supports_autograd=True,
supports_out=False,
# TODO: add shape checks
assert_jit_shape_analysis=False,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
# TODO: investigate nondeterminism
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=sample_inputs_max_pool),
OpInfo('nn.functional.linear',
aten_name='linear',
supports_autograd=True,
sample_inputs_func=sample_inputs_linear,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
*[torch.bfloat16] if CUDA11OrLater else []),
# linear calls mm under the hood which is nondeterministic on CUDA
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False),
OpInfo('nn.functional.bilinear',
aten_name='bilinear',
supports_autograd=True,
sample_inputs_func=sample_inputs_bilinear,
dtypes=all_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+ and SM53+
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
),
supports_forward_ad=False,
supports_out=False),
OpInfo('nn.functional.glu',
aten_name='glu',
supports_autograd=True,
sample_inputs_func=sample_inputs_glu,
dtypes=floating_types(),
dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_out=False),
UnaryUfuncInfo(
'nn.functional.elu',
ref=lambda x, alpha=1.0, inplace=False:
np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)),
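        # The NumPy reference above follows the standard ELU definition:
        # ELU(x) = x for x > 0 and alpha * (exp(x) - 1) for x <= 0.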
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'alpha': 0.8}, {'alpha': 0.8}),
inplace_variant=lambda x, alpha=1.0:
torch.nn.functional.elu(x, alpha, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
OpInfo(
'nn.functional.prelu',
ref=lambda x, weight:
np.maximum(0., x) + np.minimum(0., x) *
(weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])),
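        # The reshape above places the per-channel weight on dim 1 (shape [1, C, 1, ...]) so it
        # broadcasts against inputs of shape (N, C, ...), mirroring PReLU's channel-wise weight.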
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_inputs_func=sample_inputs_nn_functional_prelu,
decorators=[
# FIXME: second derivative is implemented but seems to be incorrect
# https://github.com/pytorch/pytorch/issues/68760
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
# RuntimeError: Cannot insert a Tensor that requires grad as a constant.
# Consider making it a parameter or input, or detaching the gradient
# https://github.com/pytorch/pytorch/issues/68752
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ],
),
UnaryUfuncInfo(
'nn.functional.celu',
ref=lambda x, alpha=1.0, inplace=False:
np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'alpha': 0.8}, {'alpha': 0.8}),
inplace_variant=lambda x, alpha=1.0:
torch.nn.functional.celu(x, alpha, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
UnaryUfuncInfo(
'nn.functional.rrelu',
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs),
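        # rrelu samples its negative slope at random, so both the op and the inplace variant are
        # wrapped with wrapper_set_seed to keep results reproducible when variants are compared.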
ref=_NOTHING,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_kwargs=lambda device, dtype, input:
({'lower': 0., 'upper': 1.}, {'lower': 0., 'upper': 1.}),
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(partial(torch.nn.functional.rrelu, inplace=True), input, *args, **kwargs),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-03, rtol=1.2e-03),
torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03)
}),
'TestUnaryUfuncs', device_type='cuda',
),
# Probably because we have used lambda for the op here
# AssertionError: JIT Test does not execute any logic
DecorateInfo(
unittest.skip("Skipped!"),
'TestJit', 'test_variant_consistency_jit'
), ],
),
UnaryUfuncInfo(
'nn.functional.selu',
ref=lambda x, inplace=False:
1.0507009873554804934193349852946 * (
np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1))
),
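        # The two constants above are the standard SELU scale (~1.0507) and alpha (~1.6733)
        # from "Self-Normalizing Neural Networks" (Klambauer et al.).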
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-2, rtol=1.8e-2),
torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
),
UnaryUfuncInfo(
'nn.functional.silu',
ref=lambda x, inplace=False:
x / (1 + np.exp(-x)),
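        # The NumPy reference above is SiLU (a.k.a. swish): x * sigmoid(x).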
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=False,
assert_autodiffed=False,
supports_out=False,
inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({
torch.float16: tol(atol=1e-3, rtol=1e-3),
torch.bfloat16: tol(atol=1e-4, rtol=1e-4)
}),
'TestUnaryUfuncs', device_type='cuda',
), ],
skips=[
# FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j)
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', dtypes=(torch.complex64,)), ],
),
UnaryUfuncInfo(
'nn.functional.hardsigmoid',
ref=reference_hardsigmoid,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=False,
supports_out=False,
inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ],
skips=[
# still want to test that first derivative works though second derivative isn't supported
DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_gradgrad"),
# produces 0 instead of nan on ROCM
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_extremal",
dtypes=(torch.bfloat16, torch.float16, torch.float32,), device_type='cuda',
active_if=(TEST_WITH_ROCM)), ]
),
UnaryUfuncInfo(
'nn.functional.logsigmoid',
aten_name="log_sigmoid",
ref=reference_logsigmoid,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
),
UnaryUfuncInfo(
'nn.functional.mish',
ref=lambda x: x * np.tanh(reference_softplus(x)),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
inplace_variant=partial(torch.nn.functional.mish, inplace=True),
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda',), ],
),
UnaryUfuncInfo(
'nn.functional.softsign',
ref=lambda x: x / (np.abs(x) + 1),
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ],
skips=(
# pytorch computes (0+nanj), numpy computes (-5e-18-1j) for input (-501.-1.0000e+20j)
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',
"test_reference_numerics_hard", dtypes=(torch.complex64,)),),
),
UnaryUfuncInfo(
'nn.functional.tanhshrink',
ref=lambda x: x - np.tanh(x),
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
decorators=[
DecorateInfo(
toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), ],
skips=(
# in each case, pytorch will produce a nan while numpy will not
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_normal",
dtypes=(torch.complex64,), active_if=(IS_MACOS)),
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_hard",
dtypes=(torch.complex64,), active_if=(IS_MACOS)),
DecorateInfo(unittest.expectedFailure,
'TestUnaryUfuncs', "test_reference_numerics_extremal",
dtypes=(torch.complex64,), device_type='cpu',
active_if=(IS_MACOS or IS_WINDOWS)),)
),
OpInfo(
'nn.functional.threshold',
ref=lambda x, threshold, value: np.where(x > threshold, x, value).astype(x.dtype),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=False,
supports_autograd=True,
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
sample_inputs_func=sample_inputs_threshold,
),
OpInfo('nextafter',
dtypes=floating_types_and(torch.bfloat16),
supports_autograd=False,
sample_inputs_func=sample_inputs_nextafter),
OpInfo('topk',
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_topk),
    # Two variants for batch_norm, to test it both with cuDNN enabled and with cuDNN disabled
    # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details
OpInfo('nn.functional.batch_norm',
aten_name='batch_norm',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_batch_norm),
# This variant tests batch_norm with cuDNN disabled only on CUDA devices
OpInfo('nn.functional.batch_norm',
variant_test_name='without_cudnn',
aten_name='batch_norm',
dtypes=empty_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
decorators=[onlyCUDA, disablecuDNN],
sample_inputs_func=sample_inputs_batch_norm),
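    # (In the variant above, empty_types() for CPU together with the onlyCUDA decorator restricts
    # the tests to CUDA, and disablecuDNN exercises the native batch_norm kernel rather than cuDNN.)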
    # We have to add two OpInfo entries for `igamma` and `igammac`. The first is the
    # standard entry; the second runs gradcheck tests on the second argument.
OpInfo('igamma',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
aliases=('torch.special.gammainc',),
dtypesIfCUDA=floating_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('igamma',
variant_test_name='grad_other',
           # Since the autograd formula is implemented only for `other`, while the
           # gradcheck test verifies the formula for the SampleInput's input,
           # we permute the arguments.
op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs),
inplace_variant=None,
method_variant=None,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types(),
backward_dtypesIfCUDA=floating_types(),
supports_inplace_autograd=False,
skips=(
# test does not work with passing lambda for op
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
               # test fails because we permute the arguments of the function variant,
               # but not of the inplace or method variants.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_igamma_igammac),
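    # Note: with the permuted lambda above, the SampleInput's input tensor is forwarded as
    # torch.igamma's `other` argument (the one with an autograd formula), so gradcheck ends up
    # verifying the gradient with respect to `other`.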
OpInfo('igammac',
dtypes=floating_types_and(torch.bfloat16, torch.float16),
aliases=('torch.special.gammaincc',),
dtypesIfCUDA=floating_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('igammac',
variant_test_name='grad_other',
           # Since the autograd formula is implemented only for `other`, while the
           # gradcheck test verifies the formula for the SampleInput's input,
           # we permute the arguments.
op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs),
inplace_variant=None,
method_variant=None,
dtypes=floating_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types(),
backward_dtypesIfCUDA=floating_types(),
supports_inplace_autograd=False,
skips=(
# test does not work with passing lambda for op
# AssertionError: False is not true : Tensors failed to compare as equal!
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
               # test fails because we permute the arguments of the function variant,
               # but not of the inplace or method variants.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('nn.functional.softshrink',
aten_name="softshrink",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=False,
sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=False,
),
OpInfo('nn.functional.hardshrink',
aten_name="hardshrink",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::hardshrink"]),
OpInfo('nn.functional.hardtanh',
aten_name="hardtanh",
dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16),
backward_dtypesIfCPU=all_types(),
dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::hardtanh"],
),
OpInfo('nn.functional.gelu',
aten_name="gelu",
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_gelu,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_gradgrad=True,
supports_out=False,
autodiff_nonfusible_nodes=["aten::gelu"]),
OpInfo('nn.functional.relu6',
aten_name="relu6",
dtypes=all_types_and(torch.bfloat16),
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(torch.float16),
supports_autograd=True,
assert_autodiffed=True,
sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh,
supports_gradgrad=True,
supports_out=False,
supports_forward_ad=True,
autodiff_nonfusible_nodes=["aten::relu6"]),
OpInfo('mm',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_mm),
OpInfo('mode',
op=torch.mode,
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_mode,),
MvlGammaInfo(variant_test_name='mvlgamma_p_1',
domain=(1, None),
skips=skips_mvlgamma(),
sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),
MvlGammaInfo(variant_test_name='mvlgamma_p_3',
domain=(2, None),
skips=skips_mvlgamma(skip_redundant=True) + (
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),
MvlGammaInfo(variant_test_name='mvlgamma_p_5',
domain=(3, None),
skips=skips_mvlgamma(skip_redundant=True) + (
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),
OpInfo('ne',
aliases=('not_equal',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_autograd=False,
sample_inputs_func=sample_inputs_comparison_ops),
OpInfo('narrow',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_narrow),
UnaryUfuncInfo('neg',
aliases=('negative', ),
ref=np.negative,
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
error_inputs_func=error_inputs_neg,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True,),
OpInfo('dist',
op=torch.dist,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_dist),
OpInfo('outer',
op=torch.outer,
aliases=('ger', ),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_outer,),
OpInfo('ormqr',
op=torch.ormqr,
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_ormqr,
decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),
OpInfo('permute',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_jit_shape_analysis=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_permute),
OpInfo('pow',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
           # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled
# for Float16, causing this test to fail. pow's autograd for Float16 is thus currently
# unsupported on CPU.
backward_dtypes=floating_and_complex_types_and(torch.bfloat16),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_pow,
supports_inplace_autograd=False,
supports_forward_ad=True,
assert_autodiffed=True,
),
OpInfo('float_power',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_pow,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view', device_type='cuda'),),),
OpInfo('qr',
op=torch.qr,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_qr,
# batched gradients do not work for empty inputs
# https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085
check_batched_gradgrad=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
UnaryUfuncInfo('rad2deg',
ref=np.degrees,
decorators=(precisionOverride({torch.bfloat16: 7e-1,
torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
UnaryUfuncInfo('real',
ref=np.real,
dtypes=complex_types(),
supports_out=False,
supports_forward_ad=True,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Skip since real and imag don't have out variants.
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('roll',
ref=np.roll,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_roll),
OpInfo('rot90',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_rot90),
UnaryUfuncInfo('round',
ref=np.round,
aliases=('special.round',),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True,),
UnaryUfuncInfo('sin',
ref=np.sin,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
supports_sparse=True,
supports_sparse_csr=True,
supports_forward_ad=True,
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),
UnaryUfuncInfo('sinc',
ref=np_sinc_with_fp16_as_fp32,
aliases=('special.sinc',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
handles_large_floats=False,
handles_complex_extremals=False,
safe_casts_outputs=True,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2,
torch.float16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49133
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.cfloat]),
)),
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
# Reference: https://github.com/pytorch/pytorch/issues/48641
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.int8]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('sign',
ref=reference_sign,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
)),
UnaryUfuncInfo('sgn',
ref=reference_sgn,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
# Reference: https://github.com/pytorch/pytorch/issues/53958
                       # Test fails in comparison on NaN because `equal_nan` is True when
                       # comparing the CPU tensors.
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.complex64, torch.complex128]),
# Reference: https://github.com/pytorch/pytorch/issues/48486
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.complex64]),
# The complex formula might be wrong
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=complex_types()),
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD',
dtypes=complex_types()),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
OpInfo('split',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=False),
supports_forward_ad=True,
supports_out=False,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_autodiffed=True),
OpInfo('split',
variant_test_name='list_args',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=partial(sample_inputs_split, list_args=True),
supports_forward_ad=True,
supports_out=False),
OpInfo('split_with_sizes',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_split_with_sizes,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
supports_out=False,
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('__radd__',
op=torch.Tensor.__radd__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __radd__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__radd__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::add'],),
OpInfo('__rdiv__',
op=torch.Tensor.__rdiv__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __rdiv__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rdiv__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
supports_forward_ad=True,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),
OpInfo('__rmul__',
op=torch.Tensor.__rmul__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __rmul__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rmul__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::mul'],),
OpInfo('__rand__',
op=torch.Tensor.__rand__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__ror__',
op=torch.Tensor.__ror__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rxor__',
op=torch.Tensor.__rxor__,
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rmatmul__',
op=torch.Tensor.__rmatmul__,
dtypes=all_types_and_complex_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else [],
torch.complex64, torch.complex128),
backward_dtypesIfCUDA=floating_types_and(torch.float16,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else [],
torch.complex64, torch.complex128),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_matmul,
supports_out=False,
decorators=(
# https://github.com/pytorch/pytorch/issues/67470
DecorateInfo(unittest.skip("67470!"),
'TestCommon', 'test_noncontiguous_samples',
device_type='cpu', dtypes=(torch.long,)),
DecorateInfo(
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view'),
# Fails on XLA.
# AssertionError: False is not true : Tensors failed to compare as equal
DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
),
skips=(
# RuntimeError:
# object has no attribute __rmatmul__:
# File "<string>", line 3
# def the_method(i0, i1):
# return torch.__rmatmul__(i0, i1)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
)),
OpInfo('__rmod__',
op=torch.Tensor.__rmod__,
dtypes=floating_types_and(torch.bfloat16, torch.half,),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __rmod__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rmod__(i0, 3.14)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
           # Add autograd support once torch.remainder(Tensor, Tensor) supports
           # autograd for the second argument.
# https://github.com/pytorch/pytorch/pull/58476/files#r637167630
supports_autograd=False,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::remainder'],),
OpInfo('__rpow__',
op=torch.Tensor.__rpow__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
# Reference: https://github.com/pytorch/pytorch/issues/54774
# "log2" "_vml_cpu" not implemented for Half
backward_dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
supports_forward_ad=True,
skips=(
# RuntimeError:
# object has no attribute __rpow__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rpow__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::pow'],),
OpInfo('__rsub__',
op=torch.Tensor.__rsub__,
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
skips=(
# RuntimeError:
# object has no attribute __rsub__:
# File "<string>", line 3
# def the_method(i0):
# return torch.__rsub__(i0, 3.14j)
# ~~~~~~~~~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::rsub'],),
OpInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
variant_test_name='rsub_tensor',
supports_out=False,
supports_inplace_autograd=False,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":52,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.cfloat, torch.cdouble]), # noqa: B950
),
sample_inputs_func=partial(sample_inputs_rsub, variant='tensor'),),
OpInfo('rsub',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
variant_test_name='rsub_scalar',
supports_out=False,
supports_inplace_autograd=False,
sample_inputs_func=partial(sample_inputs_rsub, variant='scalar'),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
# RuntimeError: false
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":52,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.cfloat, torch.cdouble]), # noqa: B950
),
assert_autodiffed=True,),
OpInfo('select',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_select,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_out=False),
OpInfo('select_scatter',
dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_select_scatter,
supports_forward_ad=True,
supports_out=False),
OpInfo('slice_scatter',
dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_slice_scatter,
supports_forward_ad=True,
supports_out=False),
UnaryUfuncInfo('signbit',
ref=np.signbit,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False,),
OpInfo('solve',
op=torch.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
UnaryUfuncInfo('tan',
ref=np.tan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.float64],
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
# tan(pi/2 * odd_number) is nan
reference_numerics_filter=NumericsFilter(
condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)),
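# Note (illustrative): the NumericsFilter above masks reference inputs close to integer multiples
# of pi/2, which covers the poles of tan at the odd multiples, so the comparison against np.tan
# steers clear of those singular points.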
UnaryUfuncInfo('tanh',
ref=np.tanh,
aliases=('nn.functional.tanh',),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
# "tanh_backward_cpu" not implemented for 'BFloat16'
backward_dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=(IS_MACOS or IS_WINDOWS)),
                           # the alias nn.functional.tanh will produce (because the warning string is saved):
                           # "RuntimeError: Expected to not find "tanh" but found it"
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
# tan(j * pi/2 * odd_number) is nan
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 0.5j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)),
OpInfo('tensor_split',
ref=np.array_split,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
sample_inputs_func=sample_inputs_tensor_split,),
OpInfo('hsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_hsplit,),
OpInfo('vsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_vsplit,),
OpInfo('dsplit',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_dsplit,),
OpInfo('triangular_solve',
op=torch.triangular_solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_legacy_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
decorators=[skipCUDAIfNoMagma]),
UnaryUfuncInfo('trunc',
aliases=('fix', ),
ref=np.trunc,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
assert_autodiffed=True),
UnaryUfuncInfo('exp2',
aliases=('special.exp2', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True),
UnaryUfuncInfo('expm1',
aliases=('special.expm1', ),
ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
supports_sparse_csr=True,
safe_casts_outputs=True,
assert_autodiffed=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
)),
UnaryUfuncInfo('nan_to_num',
ref=np.nan_to_num,
dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),
supports_forward_ad=True,
supports_sparse=True,
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
                   # Passing numpy_kwargs via sample_kwargs because the NumPy reference comparison
                   # is done in float, as NumPy currently doesn't support BFloat16.
                   # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556
sample_kwargs=lambda device, dtype, input: ({},
{'posinf': torch.finfo(torch.bfloat16).max,
'neginf': torch.finfo(torch.bfloat16).min})
if dtype is torch.bfloat16 else ({}, {})),
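# Note (illustrative): for bfloat16 the torch op runs with default arguments while the NumPy
# reference gets explicit bounds, roughly
# >>> np.nan_to_num(x, posinf=torch.finfo(torch.bfloat16).max,
# ...               neginf=torch.finfo(torch.bfloat16).min)
# since NumPy computes in float and would otherwise substitute float32/float64 extremes instead
# of the bfloat16 extremes that torch.nan_to_num uses by default for a bfloat16 input.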
UnaryUfuncInfo('reciprocal',
ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
safe_casts_outputs=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('rsqrt',
ref=lambda x: np.reciprocal(np.sqrt(x)),
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
decorators=(precisionOverride({torch.half: 5e-2}),),
safe_casts_outputs=True,
assert_autodiffed=True,
supports_forward_ad=True,
handles_complex_extremals=False),
UnaryUfuncInfo('sqrt',
ref=np.sqrt,
supports_sparse=True,
domain=(0, None),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
supports_forward_ad=True,
supports_sparse_csr=True,
decorators=(precisionOverride({torch.bfloat16: 7e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
active_if=IS_MACOS),
# Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
safe_casts_outputs=True,
handles_complex_extremals=False),
UnaryUfuncInfo('square',
ref=np.square,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/52549
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.cfloat, torch.cdouble]),
# >>> t = torch.tensor(complex(-0.01, float("inf")))
# >>> np.square(t.numpy())
# (-inf-infj)
# >>> t.square()
# tensor(-inf-infj)
# >>> t.cuda().square()
# tensor(inf+nanj, device='cuda:0')
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16]),
),),
OpInfo('lerp',
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_lerp,
supports_forward_ad=True,
assert_autodiffed=True),
OpInfo('linalg.inv',
aten_name='linalg_inv',
op=torch.linalg.inv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
),
OpInfo('linalg.inv_ex',
aten_name='linalg_inv_ex',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_invertible,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
),
UnaryUfuncInfo('angle',
ref=np.angle,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
supports_forward_ad=True,
supports_sparse_csr=True,
supports_complex_to_float=True,
skips=(
# The complex formula might be wrong
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
dtypes=complex_types()),),),
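# Note (illustrative): supports_complex_to_float refers to angle returning a real-valued tensor
# for complex input, e.g.
# >>> torch.angle(torch.tensor(1 + 1j)).dtype
# torch.float32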
UnaryUfuncInfo('isfinite',
ref=np.isfinite,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_autograd=False,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/66402
DecorateInfo(unittest.expectedFailure, "TestUnaryUfuncs", "test_reference_numerics_hard",
device_type='cpu', dtypes=(torch.complex64,), active_if=not (IS_MACOS or IS_WINDOWS)),
)),
UnaryUfuncInfo('isinf',
ref=np.isinf,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False),
UnaryUfuncInfo('isposinf',
ref=np.isposinf,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False),
UnaryUfuncInfo('isneginf',
ref=np.isneginf,
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False),
UnaryUfuncInfo('isreal',
ref=np.isreal,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_autograd=False),
UnaryUfuncInfo('isnan',
ref=np.isnan,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_out=False,
supports_sparse=True,
supports_sparse_csr=True,
supports_autograd=False),
OpInfo('linalg.solve',
aten_name='linalg_solve',
op=torch.linalg.solve,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve,
check_batched_gradgrad=False,
supports_forward_ad=True,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('linalg.solve_triangular',
aten_name='linalg_solve_triangular',
op=torch.linalg.solve_triangular,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_solve_triangular,
# linalg.solve_triangular cannot be batched over because of a call to out.copy_(result);
supports_forward_ad=True),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
),
OpInfo('linalg.matrix_rank',
aten_name='linalg_matrix_rank',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
supports_autograd=False,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
op=torch.linalg.pinv,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_pinv,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# errors with "leaked XXXX bytes CUDA memory on device 0"
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),
)),
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
variant_test_name='singular',
# pinv is Frechet-differentiable in a rank-preserving neighborhood,
# so we feed inputs that are the products of two full-rank factors,
# to avoid any rank changes caused by the perturbations in the gradcheck
op=lambda a, b: torch.linalg.pinv(a @ b.mT),
dtypes=floating_and_complex_types(),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_pinv_singular,
           # Only large tensors expose issues with the implicit backward that was used
           # before the explicit backward implementation.
decorators=[slowTest, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# test does not work with passing lambda for op
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
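# Note (illustrative): with a of shape (n, k) and b of shape (m, k), a @ b.mT has rank at most k,
# and the small perturbations applied by gradcheck cannot raise the rank of the product above k,
# which keeps pinv differentiable in a neighborhood of the sample.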
OpInfo('linalg.pinv',
aten_name='linalg_pinv',
variant_test_name='hermitian',
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
),
OpInfo('eig',
op=torch.eig,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_eig,
decorators=[
skipCUDAIfNoMagma,
skipCPUIfNoLapack,
skipCUDAIfRocm
],),
OpInfo('einsum',
# we need this lambda because SampleInput expects tensor input as the first argument
# TODO(@heitorschueroff) update SampleInput to handle such cases
op=lambda tensors, equation: torch.einsum(equation, tensors),
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half,
*[torch.bfloat16] if (SM60OrLater and CUDA11OrLater) else []),
supports_out=False,
supports_forward_ad=True,
check_batched_forward_grad=False,
# See https://github.com/pytorch/pytorch/issues/66357
sample_inputs_func=sample_inputs_einsum,
skips=(
# test does not work with passing lambda for op
# there's a test `test_einsum` in `test_jit.py` to handle this case
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
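# Note (illustrative): torch.einsum takes the equation first, e.g.
# >>> torch.einsum('ij,jk->ik', a, b)
# whereas SampleInput supplies the tensor operands as the first argument, hence the wrapper
# lambda in the entry above.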
OpInfo('svd',
op=torch.svd,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_svd,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
]),
OpInfo('linalg.svd',
op=torch.linalg.svd,
aten_name='linalg_svd',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_svd,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCUDAIfRocm,
skipCPUIfNoLapack,
]),
OpInfo('linalg.svdvals',
op=torch.linalg.svdvals,
aten_name='linalg_svdvals',
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_linalg_svdvals,
check_batched_gradgrad=False,
decorators=[
skipCUDAIfNoMagmaAndNoCusolver,
skipCPUIfNoLapack]),
OpInfo('polar',
dtypes=floating_types(),
sample_inputs_func=sample_inputs_polar),
# TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.
# To test reference numerics against multiple values of argument `n`,
# we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4).
# We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
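# Note (illustrative): the wrapper lambda above adapts the OpInfo convention (tensor input first)
# to the actual signature, which takes the order n first, e.g.
# >>> torch.polygamma(0, torch.tensor([1.0]))   # the digamma function evaluated at 1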
# A separate OpInfo entry for special.polygamma is needed to reorder the arguments
# for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939
UnaryUfuncInfo('special.polygamma',
op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),
variant_test_name='special_polygamma_n_0',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_1',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_2',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_3',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
variant_test_name='polygamma_n_4',
ref=reference_polygamma if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_WITH_ROCM),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4}),
# polygamma functions have multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
OpInfo('ravel',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_ravel,
),
OpInfo('reshape',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_view_reshape,
supports_out=False,
supports_forward_ad=True,
),
OpInfo('reshape_as',
op=lambda x, other: x.reshape_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_view_as_reshape_as,
supports_out=False,
supports_forward_ad=True,
),
OpInfo('view',
op=lambda x, shape: x.view(shape),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
sample_inputs_func=sample_inputs_view_reshape,
),
OpInfo('view_as',
op=lambda x, other: x.view_as(other),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_view_as_reshape_as,
),
OpInfo('atleast_1d',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_atleast1d2d3d,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
),
OpInfo('atleast_2d',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_atleast1d2d3d,
),
OpInfo('atleast_3d',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]),
),
sample_inputs_func=sample_inputs_atleast1d2d3d,
),
OpInfo('pinverse',
op=torch.pinverse,
dtypes=floating_and_complex_types(),
check_batched_grad=False,
check_batched_gradgrad=False,
supports_forward_ad=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
sample_inputs_func=sample_inputs_linalg_invertible,
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack]),
OpInfo('gather',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_gather,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_forward_ad=True,
),
OpInfo('index_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index_fill),
OpInfo('index_copy',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index_copy,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_select',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_index_select,
supports_forward_ad=True,
assert_jit_shape_analysis=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('index_add',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_index_add,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('__getitem__',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=False,
supports_scripting=False,
op=torch.Tensor.__getitem__,
skips=(
# AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),
),
assert_jit_shape_analysis=False, # TODO: support index.Tensor()
sample_inputs_func=sample_inputs_getitem,),
OpInfo('index_put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=True,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
test_neg_view=False,
sample_inputs_func=sample_inputs_index_put,
skips=(
# RuntimeError: The following operation failed in the TorchScript interpreter.
# Traceback of TorchScript (most recent call last):
# File "<string>", line 3, in forward
# def the_method(i0, i1: List[torch.Tensor], i2):
# return torch.index_put(i0, i1, i2, accumulate=False)
# ~~~~~~~~~~~~~~~ <--- HERE
# RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('sort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_sort,
skips=(
# sort does not correctly warn when resizing out= inputs
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# RuntimeError not raised
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
)),
OpInfo('unique',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
sample_inputs_func=sample_inputs_unique,
supports_out=False,
supports_autograd=False,
skips=(
# RuntimeError:
# 'Tensor (inferred)' object has no attribute or method 'unique'.:
# File "<string>", line 3
#
# def the_method(i0):
# return i0.unique(sorted=False, return_inverse=False, return_counts=False, dim=None)
# ~~~~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('unique_consecutive',
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
sample_inputs_func=sample_inputs_unique_consecutive,
supports_out=False,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
check_batched_gradgrad=False, # vmap complains of the sizes
sample_inputs_func=sample_inputs_put),
OpInfo('take',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
check_batched_grad=False, # vmap complains of the sizes
supports_forward_ad=True,
sample_inputs_func=sample_inputs_take),
OpInfo('scatter',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter,),
OpInfo('bfloat16',
op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
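# Note (illustrative): the conversion ops in this group change the output dtype, e.g.
# >>> torch.ones(2, dtype=torch.float32).bfloat16().dtype
# torch.bfloat16
# which is why most of these conversion entries disable autograd testing.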
OpInfo('bfloat16',
op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bool',
op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bool',
op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('byte',
op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('byte',
op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('char',
op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('char',
op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('double',
op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_forward_ad=True,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('double',
op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_forward_ad=True,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('float',
op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('float',
op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('half',
op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('half',
op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('int',
op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('int',
op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('long',
op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('long',
op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('short',
op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('short',
op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='channels_last',
sample_inputs_func=sample_inputs_conversion_channels_last,
supports_autograd=False,
skips=(
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('empty_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('zeros_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('ones_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('randn_like',
dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex64, torch.complex128),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randn_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
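# Note (illustrative, assumption): wrapper_set_seed is assumed to fix the RNG seed before
# invoking the op, so the random samplers in these *_like entries produce reproducible values
# across the variant comparisons run by the tests.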
OpInfo('rand_like',
dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex64, torch.complex128),
op=lambda inp, *args, **kwargs:
               wrapper_set_seed(torch.rand_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_like_fns,
supports_autograd=False,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('randint_like',
dtypes=all_types_and(torch.half, torch.bfloat16),
op=lambda inp, *args, **kwargs:
wrapper_set_seed(torch.randint_like, inp, *args, **kwargs),
supports_out=False,
sample_inputs_func=sample_inputs_randint_like,
supports_autograd=False,
skips=(
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('full_like',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_full_like,
supports_autograd=False,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
OpInfo('new_zeros',
op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
supports_autograd=False),
OpInfo('new_ones',
op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
supports_autograd=False),
OpInfo('new_empty',
op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_new_fns,
skips=(
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'),
# Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
supports_autograd=False),
OpInfo('new_full',
op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_new_full,
skips=(
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
supports_autograd=False),
OpInfo('scatter_add',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_scatter_add,
supports_out=False
),
OpInfo('stack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_stack,
assert_autodiffed=True,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('hstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('hypot',
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_hypot,
),
OpInfo('histogram',
dtypes=floating_types(),
dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU
sample_inputs_func=sample_inputs_histogram,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0):
# return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False)
# ~~~~~~ <--- HERE
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Not Implemented on XLA.
DecorateInfo(unittest.expectedFailure, 'TestOpInfo', device_type='xla'),
)),
OpInfo('histogramdd',
dtypes=floating_types(),
dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU
sample_inputs_func=sample_inputs_histogramdd,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('histc',
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64),
sample_inputs_func=sample_inputs_histc,
supports_out=True,
supports_autograd=False,
skips=(
# CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor
# "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast
# from a result of dtype torch.float32 into an out= with dtype torch.long"
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'),
)),
OpInfo('bincount',
dtypes=integral_types_and(),
sample_inputs_func=sample_inputs_bincount,
supports_out=False,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('bucketize',
dtypes=all_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_bucketize,
supports_autograd=False,
skips=(
# JIT tests don't work with Tensor keyword arguments
DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('searchsorted',
dtypes=all_types(),
dtypesIfCPU=all_types_and(torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and(torch.float16),
sample_inputs_func=sample_inputs_searchsorted,
supports_autograd=False,
ref=reference_searchsorted,
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('cat',
ref=lambda input_seq, dim=0, **kwargs: np.concatenate(input_seq, axis=dim, **kwargs),
aliases=('concat',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_cat_concat,
supports_forward_ad=True,
assert_autodiffed=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# RuntimeError: Arguments for call not valid.
# Expected a value of type 'List[Tensor]' for argument
# 'tensors' but instead found type 'Tensor (inferred)'.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),
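# Note (illustrative): the ref lambda above mirrors the torch call with NumPy, roughly
# >>> np.concatenate([a, b], axis=0)   # ~ torch.cat([ta, tb], dim=0)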
OpInfo('vstack',
aliases=('row_stack',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# RuntimeError: _fn() Expected a value of type
# 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)),
OpInfo('dstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
)),
OpInfo('unfold',
op=lambda x, *args: x.unfold(*args),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
skips=(
# Skip operator schema test because this is a functional and not an operator
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
sample_inputs_func=sample_inputs_unfold),
OpInfo('msort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
dtypesIfROCM=all_types_and(torch.float16),
check_batched_gradgrad=False,
skips=(
# msort does not correctly warn when resizing out= inputs.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Expected RuntimeError when doing an unsafe cast from a result of dtype
# torch.float32 into an out= with dtype torch.long
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'),
),
sample_inputs_func=sample_inputs_msort),
OpInfo('movedim',
aliases=('moveaxis',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_movedim_moveaxis),
OpInfo('renorm',
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_renorm),
ShapeFuncInfo('repeat',
op=lambda x, dims: x.repeat(dims),
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_repeat_tile),
OpInfo('squeeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
assert_jit_shape_analysis=True,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_squeeze),
OpInfo('fill_',
op=lambda x, scalar: torch.fill_(x.clone(), scalar),
method_variant=None,
inplace_variant=torch.Tensor.fill_,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
skips=(
# JIT has issue when op is passed as lambda
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_fill_),
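# Note (illustrative): the functional wrapper above clones the input before the in-place call,
# so the shared sample tensors are not mutated across test variants; the resize_/resize_as_/zero_
# entries below follow the same pattern.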
OpInfo('resize_',
op=lambda x, shape: x.clone().resize_(shape),
method_variant=None,
inplace_variant=torch.Tensor.resize_,
# the test fails because resize_ doesn't work with imag views as expected by the test
# https://github.com/pytorch/pytorch/issues/65945
test_neg_view=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
skips=(
# resize_ is raising an error on input that requires grad on purpose
DecorateInfo(
unittest.skip('Skipped! Resizing of variables that require grad is not supported.'),
'TestGradients',
'test_nondifferentiable',
),
DecorateInfo(unittest.skip("Allowed exception"), 'TestCommon', 'test_composite_compliance'),
),
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('resize_as_',
op=lambda x, other: torch.resize_as_(x.clone(), other),
method_variant=None,
inplace_variant=torch.Tensor.resize_as_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
skips=(
# resize_ is raising an error on input that requires grad on purpose
DecorateInfo(
unittest.skip('Skipped! Resizing of variables that require grad is not supported.'),
'TestGradients',
'test_nondifferentiable',
),
),
sample_inputs_func=sample_inputs_resize_ops),
OpInfo('take_along_dim',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_take_along_dim,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
ShapeFuncInfo('tile',
ref=np.tile,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_repeat_tile),
OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid'
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_trapezoid),
OpInfo('trapezoid',
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_trapezoid),
OpInfo('cumulative_trapezoid',
dtypes=all_types_and_complex_and(),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_out=False,
sample_inputs_func=sample_cumulative_trapezoid),
OpInfo('unsqueeze',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
assert_jit_shape_analysis=True,
assert_autodiffed=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
sample_inputs_func=sample_unsqueeze),
OpInfo('xlogy',
aliases=('special.xlogy',),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_xlogy),
OpInfo('zero_',
op=lambda x: torch.zero_(x.clone()),
method_variant=None,
inplace_variant=torch.Tensor.zero_,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
# JIT has issue when op is passed as lambda
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_zero_),
OpInfo('special.xlog1py',
aten_name='special_xlog1py',
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
backward_dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_xlog1py),
OpInfo('special.zeta',
aten_name='special_zeta',
dtypes=all_types_and(torch.bool),
supports_autograd=False,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_binary_pwise),
# OpInfo entry to verify the gradient formula of `other`/`q`
OpInfo('special.zeta',
op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs),
aten_name='special_zeta',
variant_test_name='grad',
dtypes=all_types_and(torch.bool),
supports_autograd=True,
safe_casts_outputs=True,
skips=(
# Lambda doesn't work in JIT test
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),
),
sample_inputs_func=sample_inputs_zeta),
OpInfo('logsumexp',
aliases=('special.logsumexp',),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_logsumexp),
OpInfo('trace',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_inplace_autograd=False,
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_trace),
OpInfo('transpose',
aliases=('swapdims', 'swapaxes'),
assert_jit_shape_analysis=True,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
sample_inputs_func=sample_inputs_transpose_swapdims),
OpInfo('T',
op=lambda x: x.T,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=( # Lambda doesn't work in JIT test
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_T),
OpInfo('H',
op=lambda x: x.H,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=( # Lambda doesn't work in JIT test
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_T),
OpInfo('mT',
op=lambda x: x.mT,
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=( # Lambda doesn't work in JIT test
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_adjoint),
OpInfo('mH',
op=lambda x: x.mH,
aliases=('adjoint',),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=True,
skips=( # Lambda doesn't work in JIT test
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),),
sample_inputs_func=sample_inputs_adjoint),
OpInfo('tril',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('triu',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tril_triu),
OpInfo('kron',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_inplace_autograd=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_kron),
OpInfo('inner',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
sample_inputs_func=sample_inputs_inner,
),
OpInfo('tensordot',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_tensordot,
skips=(
# Skip operator schema test because this is a functional and not an operator.
# Reference: https://github.com/pytorch/pytorch/issues/54574
DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)
),
OpInfo('to_sparse',
op=lambda x, *args: x.to_sparse(*args),
sample_inputs_func=sample_inputs_to_sparse,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
backward_dtypes=floating_types(),
backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
check_batched_grad=False,
check_batched_gradgrad=False,
skips=(
# NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend
DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'),
# TODO: FIXME: complex inputs requiring grad error in forward
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# JIT has issue when op is passed as lambda
# NotImplementedError: Cannot access storage of SparseTensorImpl
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# Allowed exception: sparse tensors don't have strides
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
)
),
OpInfo('logcumsumexp',
dtypes=floating_types_and(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
backward_dtypesIfCUDA=floating_types_and(),
skips=(
# AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'),
),
sample_inputs_func=sample_inputs_logcumsumexp),
UnaryUfuncInfo('sigmoid',
aliases=('special.expit', 'nn.functional.sigmoid'),
ref=reference_sigmoid if TEST_SCIPY else _NOTHING,
decorators=(precisionOverride({torch.float16: 1e-2,
torch.complex64: 1e-1,
torch.bfloat16: 1e-2}),),
skips=(
# TODO: FIXME: sigmoid fails on complex inputs that require grad
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# Reference: https://github.com/pytorch/pytorch/issues/56012
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cuda', dtypes=[torch.complex64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cuda', dtypes=[torch.complex64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
                       # the alias nn.functional.sigmoid will produce (because the warning string is saved):
# "RuntimeError: Expected to not find "sigmoid" but found it"
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping')),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
supports_forward_ad=True,
assert_autodiffed=True,
# sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero
reference_numerics_filter=NumericsFilter(
condition=lambda x: (close_to_int(x / (math.pi * 1j))
if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
safe_val=0)),
UnaryUfuncInfo('digamma',
ref=scipy.special.digamma if TEST_SCIPY else _NOTHING,
aliases=('special.psi', 'special.digamma',),
decorators=(precisionOverride({torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
safe_casts_outputs=True),
UnaryUfuncInfo('special.entr',
ref=scipy.special.entr if TEST_SCIPY else _NOTHING,
aten_name='special_entr',
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-1,
torch.bfloat16: 1e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.bfloat16, torch.float16]),
),
supports_inplace_autograd=False,
safe_casts_outputs=True,
sample_inputs_func=sample_inputs_entr),
UnaryUfuncInfo('special.ndtri',
ref=scipy.special.ndtri if TEST_SCIPY else _NOTHING,
domain=(0, 1),
aten_name='special_ndtri',
dtypes=all_types_and(torch.bool),
safe_casts_outputs=True),
UnaryUfuncInfo('erf',
ref=scipy.special.erf if TEST_SCIPY else _NOTHING,
aliases=('special.erf', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
skips=(
DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
assert_jit_shape_analysis=True,
supports_sparse=True,
supports_sparse_csr=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfc',
ref=scipy.special.erfc if TEST_SCIPY else _NOTHING,
aliases=('special.erfc', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
assert_autodiffed=True,
safe_casts_outputs=True),
UnaryUfuncInfo('erfinv',
ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING,
aliases=('special.erfinv', ),
decorators=(precisionOverride({torch.float16: 1e-2,
torch.bfloat16: 1e-2,
torch.float32: 1e-4}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
safe_casts_outputs=True,
supports_sparse_csr=True,
domain=(-1, 1),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
)),
UnaryUfuncInfo('lgamma',
ref=reference_lgamma if TEST_SCIPY else _NOTHING,
aliases=('special.gammaln', ),
decorators=(precisionOverride({torch.float16: 7e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
device_type='cpu', dtypes=[torch.bfloat16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
device_type='cpu', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
),
safe_casts_outputs=True,
                   # lgamma has multiple singularities at x <= 0
reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
OpInfo(
'logdet',
dtypes=floating_types(),
supports_out=False,
sample_inputs_func=sample_inputs_logdet,
decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma, skipCUDAIfRocm)),
    # `log_softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
OpInfo(
'log_softmax',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=False,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_softmax_variant,
assert_autodiffed=True),
OpInfo(
'log_softmax',
variant_test_name='dtype',
aliases=('special.log_softmax', 'nn.functional.log_softmax'),
supports_out=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
assert_autodiffed=True),
UnaryUfuncInfo('logit',
ref=scipy.special.logit if TEST_SCIPY else _NOTHING,
domain=(0, 1),
aliases=('special.logit', ),
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-1,
torch.float16: 5e-1}),),
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_logit,
safe_casts_outputs=True),
OpInfo('where',
# Currently only the `input` is tested in gradcheck.
           # If we passed `condition` first, none of the inputs that support
           # autograd would be tested. Hence the following lambda.
op=lambda self, condition, other: torch.where(condition, self, other),
sample_inputs_func=sample_inputs_where,
supports_out=False,
skips=(
# test does not work with passing lambda for op
# AssertionError: False is not true :
# Failure in testing nodes' autodifferentiation.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)),
OpInfo('nonzero',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_nonzero,
supports_autograd=False,
skips=(
# https://github.com/pytorch/pytorch/issues/67458
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# nonzero is not raising a warning when the out is resized
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
# Can't find schemas for this operator for some reason
DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)),
    # `torch.norm` has multiple code paths depending on the value of `p`.
    # These paths have different dtype support, and JIT supports most variants
    # but not all of them. So we split the OpInfo entries for `norm` based on
    # the code paths and JIT support.
OpInfo('norm',
sample_inputs_func=sample_inputs_norm,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16)),
OpInfo('norm',
variant_test_name='nuc',
sample_inputs_func=sample_inputs_norm_nuc,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types(),
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950
)
),
OpInfo('norm',
variant_test_name='fro',
sample_inputs_func=sample_inputs_norm_fro,
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# Pre-existing condition; Needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
# Arguments for call are not valid.
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950
)),
OpInfo('norm',
variant_test_name='inf',
sample_inputs_func=sample_inputs_norm_inf,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
backward_dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# https://github.com/pytorch/pytorch/issues/67517
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
               # the following 2 tests failed intermittently
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', device_type='cpu', dtypes=(torch.complex128,)), # noqa: B950
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad', device_type='cpu', dtypes=(torch.complex128,)), # noqa: B950
)
),
OpInfo('t',
sample_inputs_func=sample_inputs_t,
supports_out=False,
supports_forward_ad=True,
autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused
autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
assert_autodiffed=True,),
UnaryUfuncInfo('special.erfcx',
ref=scipy.special.erfcx if TEST_SCIPY else _NOTHING,
aten_name='special_erfcx',
decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),),
dtypes=all_types_and(torch.bool),
safe_casts_outputs=True),
OpInfo(
"nn.functional.dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs),
ref=_NOTHING,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# Probably because we have used lambda for the op here
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
            # the inplace variant dispatches to the dropout kernel, while on CUDA
            # the op dispatches to _fused_dropout (under a few more conditions),
            # hence the different values and this skip
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),
            # On CUDA (under a few more conditions), the op is dispatched to
            # _fused_dropout, which doesn't support forward AD
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', device_type='cuda'),),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
# https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
supports_out=False,
sample_inputs_func=sample_inputs_dropout,
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.feature_alpha_dropout",
op=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs),
ref=_NOTHING,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# Probably because we have used lambda for the op here
# AssertionError: JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),
gradcheck_wrapper=wrapper_set_seed,
supports_forward_ad=True,
supports_out=False,
sample_inputs_func=sample_inputs_dropout,
inplace_variant=lambda input, *args, **kwargs:
wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)),
OpInfo(
"nn.functional.one_hot",
ref=reference_one_hot,
supports_out=False,
dtypes=_dispatch_dtypes((torch.int64,)),
sample_inputs_func=sample_inputs_one_hot,
),
OpInfo(
"nn.functional.embedding",
# We use lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_embedding,
skips=(
# Does not work with lambda
# Raises : JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
# Reference: https://github.com/pytorch/pytorch/issues/67084
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),
),
supports_out=False,
),
OpInfo(
"nn.functional.embedding_bag",
# We use lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
# backward is not supported for mode `max` and dtype `bfloat16`
backward_dtypesIfCUDA=floating_types_and(torch.float16),
sample_inputs_func=sample_inputs_embedding_bag,
skips=(
# Does not work with lambda
# Raises : JIT Test does not execute any logic
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False,
supports_gradgrad=False,
),
OpInfo(
"nn.functional.softplus",
ref=reference_softplus,
sample_inputs_func=sample_inputs_softplus,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
),
OpInfo(
"linalg.tensorinv",
ref=np.linalg.tensorinv,
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_tensorinv,
supports_forward_ad=True,
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
),
OpInfo(
"linalg.tensorsolve",
ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims),
dtypes=floating_and_complex_types(),
sample_inputs_func=sample_inputs_tensorsolve,
supports_forward_ad=True,
decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver],
),
OpInfo(
"nn.functional.mse_loss",
ref=reference_mse_loss,
sample_inputs_func=sample_inputs_mse_loss,
supports_out=False,
dtypes=floating_types_and(torch.float16),
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.grid_sample",
ref=_NOTHING,
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.float16),
supports_out=False,
sample_inputs_func=sample_inputs_grid_sample,
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15),
OpInfo(
"argwhere",
ref=np.argwhere,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
sample_inputs_func=sample_inputs_argwhere,
),
ReductionOpInfo(
'all',
identity=True,
supports_multiple_dims=False,
supports_out=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.all),
skips=(
# FIXME: does not support passing keepdim without dim
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'any',
identity=False,
supports_multiple_dims=False,
supports_out=False,
supports_autograd=False,
result_dtype=torch.bool,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.any),
skips=(
# FIXME: does not support passing keepdim without dim
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
'amax',
nan_policy='propagate',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=reference_reduction_numpy(np.amax),
skips=(
            # FIXME: amax reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'amin',
nan_policy='propagate',
dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
ref=reference_reduction_numpy(np.amin),
skips=(
            # FIXME: amin reduces all dimensions when dim=[]
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
'argmax',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'argmin',
supports_multiple_dims=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
'count_nonzero',
identity=0,
supports_out=False,
supports_autograd=False,
result_dtype=torch.int64,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_reduction_count_nonzero,
ref=reference_reduction_numpy(np.count_nonzero),
skips=(
# FIXME: count_nonzero does not accept keepdim kwarg
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
),
),
ReductionOpInfo(
'mean',
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
assert_autodiffed=True,
assert_jit_shape_analysis=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.mean),
skips=(
# FIXME: mean does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: mean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: mean does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
'nanmean',
nan_policy='omit',
assert_autodiffed=True,
promotes_int_to_float=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),
ref=reference_reduction_numpy(np.nanmean),
skips=(
# AssertionError: False is not true :
# Failure in testing nodes' autodifferentiation.
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
            # FIXME: nanmean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
device_type='cuda', dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
'std',
nan_policy='propagate',
supports_out=False,
assert_autodiffed=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
ref=reference_std_var(np.std),
generate_args_kwargs=generate_std_var_kwargs,
skips=(
# FIXME: cannot specify keepdim without dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: dim=None not supported
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
            # TODO(@heitorschueroff) std returns float for complex types
            # need to find a better way to model the result dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_result_dtype'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'),
# NumPy is giving NaN for this
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'),
),
),
ReductionOpInfo(
'var',
nan_policy='propagate',
supports_out=False,
assert_autodiffed=True,
promotes_int_to_float=True,
dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_std_var,
ref=reference_std_var(np.var),
generate_args_kwargs=generate_std_var_kwargs,
skips=(
# FIXME: cannot specify keepdim without dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: dim=None not supported
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: dim=[] reduces all dimensions
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
            # TODO(@heitorschueroff) var returns float for complex types
            # need to find a better way to model the result dtype
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_result_dtype'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'),
# NumPy is giving NaN for this
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'),
),
),
ReductionOpInfo(
'prod',
identity=1,
nan_policy='propagate',
supports_multiple_dims=False,
supports_out=False,
promotes_int_to_int64=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_prod,
ref=reference_reduction_numpy(np.prod),
skips=(
# FIXME: prod does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: prod reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: prod does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16, torch.complex64]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.uint8, torch.float16, torch.complex64]),
),
),
ReductionOpInfo(
'sum',
identity=0,
nan_policy='propagate',
supports_out=False,
supports_forward_ad=True,
promotes_int_to_int64=True,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.sum),
skips=(
# FIXME: sum does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: sum does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.float16]),
),
),
ReductionOpInfo(
'nansum',
identity=0,
nan_policy='omit',
supports_out=False,
promotes_int_to_int64=True,
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.nansum),
skips=(
# FIXME: nansum does not support passing keepdim without passing dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: nansum reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: nansum does not support passing None to dim
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
dtypes=[torch.float16]),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
dtypes=[torch.float16]),
),
),
ReductionOpInfo(
'_masked.sum',
ref=reference_reduction_numpy(np.sum),
method_variant=None,
identity=0,
nan_policy='propagate',
supports_out=False,
promotes_int_to_int64=False,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# FIXME: sum reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}),
'TestReductions', 'test_ref_small_input'),
],
sample_inputs_func=sample_inputs_masked_reduction
),
ReductionOpInfo(
'_masked.prod',
ref=reference_reduction_numpy(np.prod),
method_variant=None,
identity=1,
nan_policy='propagate',
supports_out=False,
promotes_int_to_int64=True,
# FIXME: "prod_cpu" not implemented for 'BFloat16'
# FIXME: "prod_cpu" not implemented for 'Half'
dtypes=all_types_and_complex_and(torch.bool),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
skips=(
# NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_ref_duplicate_values'),
],
sample_inputs_func=sample_inputs_masked_reduction
),
ReductionOpInfo(
'_masked.amax',
nan_policy='propagate',
supports_out=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.amax),
skips=(
# FIXME: amax reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: Unknown builtin op: aten::iinfo
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.amin',
nan_policy='propagate',
supports_out=False,
dtypes=all_types_and(torch.float16, torch.bfloat16),
ref=reference_reduction_numpy(np.amin),
skips=(
            # FIXME: amin reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: Unknown builtin op: aten::iinfo
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.mean',
ref=reference_reduction_numpy(np.mean) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,
method_variant=None,
nan_policy='propagate',
supports_out=False,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),
skips=(
            # FIXME: mean reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}),
'TestReductions', 'test_reference_masked'),
],
sample_inputs_func=sample_inputs_masked_reduction,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.norm',
identity=0,
method_variant=None,
nan_policy='propagate',
supports_out=False,
promotes_int_to_float=True,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
            # FIXME: norm reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# torch.jit.frontend.NotSupportedError: Compiled functions
# can't take variable number of arguments or use
# keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_masked_norm,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
ReductionOpInfo(
'_masked.var',
ref=reference_reduction_numpy(np.var) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None,
method_variant=None,
nan_policy='propagate',
supports_out=False,
promotes_int_to_float=True,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
skips=(
            # FIXME: var reduces all dimensions when dim=[]
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# RuntimeError: undefined value tensor
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestReductions', 'test_reference_masked'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestReductions', 'test_ref_small_input'),
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
'TestMasked', 'test_reference_masked'),
],
sample_inputs_func=sample_inputs_masked_var,
gradcheck_wrapper=gradcheck_wrapper_masked_operation
),
OpInfo(
'_masked.softmax',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
# torch.jit.frontend.NotSupportedError: Compiled
# functions can't take variable number of arguments or
# use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_out=False),
OpInfo(
'_masked.log_softmax',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
# torch.jit.frontend.NotSupportedError: Compiled
# functions can't take variable number of arguments or
# use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
decorators=[
DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}),
'TestMasked', 'test_reference_masked'),
],
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_out=False),
OpInfo(
'_masked.softmin',
method_variant=None,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_softmax,
skips=(
# torch.jit.frontend.NotSupportedError: Compiled
# functions can't take variable number of arguments or
# use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_out=False),
OpInfo(
'_masked.normalize',
method_variant=None,
dtypes=floating_types_and(torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_masked_normalize,
skips=(
# torch.jit.frontend.NotSupportedError: Compiled
# functions can't take variable number of arguments or
# use keyword-only arguments with defaults
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# RuntimeError: "clamp_min_cpu" not implemented for 'Half'
DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_reference_masked',
device_type='cpu', dtypes=[torch.half]),
),
gradcheck_wrapper=gradcheck_wrapper_masked_operation,
supports_out=False),
OpInfo(
"nn.functional.ctc_loss",
ref=_NOTHING,
dtypes=floating_types(),
supports_out=False,
sample_inputs_func=sample_inputs_ctc_loss,
skips=(
# https://github.com/pytorch/pytorch/issues/67462
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_grad",
dtypes=(torch.float64,),
),
# RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_gradgrad",
dtypes=(torch.float64,),
),
# RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
# Operation calls data_ptr() somewhere; needs to be fixed
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_composite_compliance'),
),
),
OpInfo(
"nn.functional.cosine_embedding_loss",
ref=_NOTHING,
dtypes=all_types_and(torch.bfloat16, torch.bool),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16, torch.bool),
supports_out=False,
sample_inputs_func=sample_inputs_cosine_embedding_loss,
),
OpInfo(
"nn.functional.nll_loss",
ref=_NOTHING,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_nll_loss,
skips=(
# RuntimeError:
# undefined value tensor:
# File "<string>", line 3
# def the_method(i0, i1):
# return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32))
# ~~~~~~ <--- HERE
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.gaussian_nll_loss",
ref=_NOTHING,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_gaussian_nll_loss,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
),
),
OpInfo(
"nn.functional.hinge_embedding_loss",
ref=_NOTHING,
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_hinge_embedding_loss,
),
OpInfo(
"nn.functional.huber_loss",
ref=_NOTHING,
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_huber_loss,
skips=(
# JIT does not support variadic tensors.
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
# please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
)
),
OpInfo(
"nn.functional.poisson_nll_loss",
ref=_NOTHING,
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_poisson_nll_loss,
skips=(
# https://github.com/pytorch/pytorch/issues/67461
# torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_grad",
dtypes=(torch.float64,),
),
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_fn_gradgrad",
dtypes=(torch.float64,),
),
DecorateInfo(
unittest.expectedFailure,
"TestGradients",
"test_forward_mode_AD",
dtypes=(torch.float64,),
),
),
),
OpInfo(
"argsort",
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_argsort,
supports_out=False,
supports_autograd=False,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32,),
),
),
),
OpInfo(
"repeat_interleave",
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_repeat_interleave,
supports_out=False,
supports_forward_ad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pairwise_distance",
ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: (
np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p)
),
sample_inputs_func=sample_inputs_pairwise_distance,
dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
supports_out=False,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pixel_shuffle",
sample_inputs_func=sample_inputs_pixel_shuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
),
OpInfo(
"nn.functional.pixel_unshuffle",
sample_inputs_func=sample_inputs_pixel_unshuffle,
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
skips=(
DecorateInfo(
unittest.skip("Skipped!"),
"TestJit",
"test_variant_consistency_jit",
dtypes=(torch.float32, torch.complex64),
),
),
)
]
# Common operator groupings
unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)]
binary_ufuncs = [op for op in op_db if isinstance(op, BinaryUfuncInfo)]
spectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)]
sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse]
sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr]
shape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)]
reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo)]
reference_filtered_ops = [op for op in reduction_ops if op.ref not in (_NOTHING, None)]
reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('_masked.')]
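# Illustrative sketch (not part of the original suite): downstream tests typically use
# these groupings by iterating an OpInfo list, drawing its sample inputs and, where a
# NumPy reference is defined, comparing the two results. The helper below is hypothetical
# and only shows how the groupings above are intended to be consumed; the real reference
# tests live in TestReductions and friends and carry per-op skips and tolerance overrides.
def _example_compare_sum_against_numpy_reference(device='cpu', dtype=torch.float64):
    sum_op = next(op for op in reduction_ops if op.name == 'sum')
    for sample in sum_op.sample_inputs(device, dtype):
        actual = sum_op(sample.input, *sample.args, **sample.kwargs)
        expected = sum_op.ref(sample.input.cpu().numpy(), *sample.args, **sample.kwargs)
        np.testing.assert_allclose(actual.cpu().numpy(), expected, rtol=1e-6, atol=1e-6)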
# TODO: review porting these to make_tensor
def index_variable(shape, max_indices, device=torch.device('cpu')):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()
return index
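# Illustrative usage sketch (assumption, not part of the original file): index_variable
# builds a random integer index tensor with values in [0, max_indices), e.g. suitable for
# torch.index_select along a dimension of matching size.
def _example_index_variable_usage():
    src = torch.randn(5, 4)
    idx = index_variable(3, max_indices=5)   # shape (3,), values in [0, 5)
    return torch.index_select(src, 0, idx)   # shape (3, 4)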
def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):
assert len(shape) == 2
assert index_dim < 2
batch_dim = 1 - index_dim
index = torch.zeros(*shape, dtype=torch.long, device=device)
for i in range(shape[index_dim]):
index.select(index_dim, i).copy_(
torch.randperm(max_indices, device=device)[:shape[batch_dim]])
if duplicate:
index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
return index
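# Illustrative usage sketch (assumption, not part of the original file): gather_variable
# produces a 2-D integer index tensor whose values lie in [0, max_indices), so it can be
# passed directly to torch.gather along `index_dim`.
def _example_gather_variable_usage():
    src = torch.randn(4, 5)
    idx = gather_variable((4, 3), index_dim=1, max_indices=5)  # shape (4, 3)
    return torch.gather(src, 1, idx)                           # shape (4, 3)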
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.bool).bernoulli_()
def mask_not_all_zeros(shape):
assert len(shape) > 0
while True:
result = torch.randn(shape).gt(0)
if result.sum() > 0:
return result
# TODO: move all tri/tril/triu testing to tensor creation op test suite and remove
# these from here
def _compare_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
if row == 0 or col == 0:
        # have to handle this separately as tril and triu do not take
        # an empty matrix as input
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
else:
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.tril(offset).nonzero().to(dtype).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.ones(row, col, device='cpu')
.triu(offset).nonzero().to(dtype).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device))
def _compare_large_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device='cpu'):
l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \
.nonzero()[-100:-1, :].transpose(0, 1).to(device)
torch.cuda.empty_cache()
r = torch.tril_indices(
row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \
.nonzero()[-100:-1, :].transpose(0, 1).to(device)
torch.cuda.empty_cache()
r = torch.triu_indices(
row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
# Each entry below is (row, col, offset (optional), dtype (optional)); see the
# illustrative sketch after this list for how the tuples are consumed.
tri_tests_args = [
(1, 1),
(3, 3),
(3, 3, 1),
(3, 3, 2),
(3, 3, 200),
(3, 3, -1),
(3, 3, -2),
(3, 3, -200),
(0, 3, 0),
(0, 3, 1),
(0, 3, -1),
(3, 0, 0),
(3, 0, 1),
(3, 0, -1),
(0, 0, 0),
(0, 0, 1),
(0, 0, -1),
(3, 6, 0),
(3, 6, 1),
(3, 6, 3),
(3, 6, 9),
(3, 6, -1),
(3, 6, -3),
(3, 6, -9),
(6, 3, 0),
(6, 3, 1),
(6, 3, 3),
(6, 3, 9),
(6, 3, -1),
(6, 3, -3),
(6, 3, -9),
(258, 253, 1, torch.float32),
(257, 258, 1, torch.float64),
(258, 258, 1, torch.short),
(3, 513, 1, torch.long),
(513, 3, 1, torch.int),
(513, 0, 1, torch.double),
(1024, 1024),
(1024, 1024, 500, torch.float32),
(1024, 1024, 1023),
(1024, 1024, -500),
(1023, 1025),
(1025, 1023, 1022),
(1024, 1024, -500),
(3, 2028),
(3, 2028, 1),
(3, 2028, -1),
(2028, 3),
(2028, 1),
(2028, 1, -1)
]
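# Illustrative sketch (assumption, not part of the original file): each tuple in
# tri_tests_args is splatted into torch.tril_indices / torch.triu_indices via the
# comparison helpers above, e.g. (3, 3, 1) requests the indices of the triangle of a
# 3x3 matrix shifted one diagonal above the main diagonal.
def _example_tri_args_usage():
    row, col, offset = 3, 3, 1
    lower = torch.tril_indices(row, col, offset)  # 2 x nnz tensor of (row, col) index pairs
    upper = torch.triu_indices(row, col, offset)
    return lower, upper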
tri_large_tests_args: List[Tuple[int, ...]] = [
# Large test cases below are deliberately commented out to speed up CI
    # tests and to avoid OOM errors. When modifying implementations of
# tril_indices and triu_indices, please enable these tests and make sure
# they pass.
#
# (1, 268435455),
# (5000, 5000),
# (10000, 10000),
# (268435455, 1),
# (134217727, 2, 1),
# (2, 134217727, 1),
# (536870901, 1),
# (1, 536870901),
# (268435455, 2, 1),
# (2, 268435455, 1)
]
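# Usage sketch (illustrative only; assumes a unittest.TestCase-like instance `self`
# providing assertEqual/assertEqualIgnoreType as used by the helpers above). Each
# tuple unpacks positionally into (row, col[, offset[, dtype]]):
#
#     for test_args in tri_tests_args:
#         _compare_trilu_indices(self, *test_args)
#     for test_args in tri_large_tests_args:
#         _compare_large_trilu_indices(self, *test_args)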
def run_additional_tri_tests(self, device):
x = torch.ones(
3, 3, dtype=torch.long, device=device, layout=torch.strided)
l = x.tril(0).nonzero().transpose(0, 1)
u = x.triu(0).nonzero().transpose(0, 1)
self.assertEqual(l, torch.tril_indices(3, 3, device=device))
self.assertEqual(
l, torch.tril_indices(3, 3, device=device, layout=torch.strided))
self.assertEqual(u, torch.triu_indices(3, 3, device=device))
self.assertEqual(
u, torch.triu_indices(3, 3, device=device, layout=torch.strided))
self.assertRaises(
RuntimeError,
lambda: torch.triu_indices(
1, 1, device=device, layout=torch.sparse_coo))
self.assertRaises(
RuntimeError,
lambda: torch.tril_indices(
1, 1, device=device, layout=torch.sparse_coo))
# TODO: move into common_utils.py or the test suite(s) that use this
def unpack_variables(args):
if isinstance(args, tuple):
return tuple(unpack_variables(elem) for elem in args)
else:
return args
class dont_convert(tuple):
pass
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
# TODO: move into common_utils.py or the test suite(s) that use this
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):
if not isinstance(call_args, tuple):
call_args = (call_args,)
def map_arg(arg):
def maybe_non_contig(tensor):
return tensor if not non_contiguous else make_non_contiguous(tensor)
def conjugate(tensor):
return tensor.conj()
if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):
return arg
elif isinstance(arg, tuple) and len(arg) == 0:
var = conjugate(torch.randn((), dtype=dtype, device=device))
var.requires_grad = requires_grad
return var
elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
# double check casting
elif isinstance(arg, non_differentiable):
if isinstance(arg.tensor, torch.Tensor):
if arg.tensor.dtype == torch.float:
return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))
if arg.tensor.dtype == torch.cfloat:
return conjugate(maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
elif isinstance(arg, torch.Tensor):
if arg.dtype == torch.float:
arg = arg.double()
if arg.dtype == torch.cfloat:
arg = arg.to(torch.cdouble)
if arg.is_complex() != dtype.is_complex:
raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ",
"which is not supported for now")
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
return v
elif callable(arg):
return map_arg(arg(dtype=dtype, device=device))
else:
return arg
args_out = tuple(map_arg(arg) for arg in call_args)
kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
return args_out, kwargs_out
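# Illustrative sketch (not part of the original utilities): tuple shapes are expanded
# into random double-precision tensors with requires_grad set, while plain scalars
# pass through unchanged, so
#
#     args, kwargs = create_input(((3, 4), 2.5), requires_grad=True)
#
# would be expected to yield args == (a 3x4 double tensor with requires_grad=True, 2.5)
# and kwargs == {}.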
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Initialization for Jupyter Notebooks."""
import importlib
import io
import os
import sys
import traceback
import warnings
from contextlib import redirect_stdout
from functools import wraps
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import ipywidgets as widgets
import pandas as pd
import yaml
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import HTML, display
from matplotlib import MatplotlibDeprecationWarning
try:
import seaborn as sns
except ImportError:
sns = None
from .._version import VERSION
from ..common.azure_auth_core import check_cli_credentials, AzureCliStatus
from ..common.check_version import check_version
from ..common.exceptions import MsticpyException, MsticpyUserError
from ..common.pkg_config import get_config, validate_config
from ..common.utility import (
check_and_install_missing_packages,
check_kwargs,
is_ipython,
md,
search_for_file,
unit_testing,
)
from ..config import MpConfigFile
from ..datamodel.pivot import Pivot
from .azure_ml_tools import check_versions as check_versions_aml
from .azure_ml_tools import is_in_aml
from .user_config import load_user_defaults
__version__ = VERSION
__author__ = "Ian Hellen"
_IMPORT_ERR_MSSG = """
<h2><font color='red'>One or more missing packages detected</h2>
Please correct these by installing the required packages, restart
the kernel and re-run the notebook.</font>
<i>Package error: {err}</i><br>
"""
_IMPORT_MODULE_MSSG = """
<font color='red'>Error importing module {module}</font>
"""
_MISSING_PKG_WARN = """
<h3><font color='orange'>Warning {package} is not installed or has an
incorrect version</h3></font>
"""
_HELP_URIS = [
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/'
'A%20Getting%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Getting Started (notebook)</a></li>"
),
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/'
'ConfiguringNotebookEnvironment.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Configuring your Notebook environment (notebook)</a></li>"
),
(
'<li><a href="https://msticpy.readthedocs.io/en/latest/getting_started/'
'msticpyconfig.html"'
'target="_blank" rel="noopener noreferrer">'
"Configuring MSTICPy settings (doc)</a></li>"
),
(
'<li><a href="https://msticpy.readthedocs.io/en/latest/getting_started/'
'SettingsEditor.html"'
'target="_blank" rel="noopener noreferrer">'
"MSTICPy settings editor (doc)</a></li>"
),
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/'
'master/TroubleShootingNotebooks.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Trouble-Shooting Notebooks (notebook)</a></li>"
),
]
_MISSING_MPCONFIG_ENV_ERR = f"""
<h3><font color='orange'>Warning: no <i>msticpyconfig.yaml</i> found</h3></font>
The MSTICPYCONFIG environment variable is set but does not point
to a valid file.<br>
Some functionality (such as Threat Intel lookups) will not function without
valid configuration settings.<br>
The following resources will help you set up your configuration:
<ul>{"".join(_HELP_URIS)}</ul>
<br>You can load and run the first two of these from the Microsoft Sentinel
<b>Notebooks</b> tab
"""
_PANDAS_REQ_VERSION = (0, 25, 0)
def _get_verbosity_setting() -> Callable[[Optional[int]], int]:
"""Closure for holding trace setting."""
_verbosity = 1
def _verbose(verbosity: Optional[int] = None) -> int:
nonlocal _verbosity
if verbosity is not None:
_verbosity = verbosity
return _verbosity
return _verbose
_VERBOSITY: Callable[[Optional[int]], int] = _get_verbosity_setting()
_NB_IMPORTS = [
dict(pkg="pandas", alias="pd"),
dict(pkg="IPython", tgt="get_ipython"),
dict(pkg="IPython.display", tgt="display"),
dict(pkg="IPython.display", tgt="HTML"),
dict(pkg="IPython.display", tgt="Markdown"),
dict(pkg="ipywidgets", alias="widgets"),
dict(pkg="pathlib", tgt="Path"),
dict(pkg="matplotlib.pyplot", alias="plt"),
dict(pkg="matplotlib", tgt="MatplotlibDeprecationWarning"),
dict(pkg="numpy", alias="np"),
]
if sns is not None:
_NB_IMPORTS.append(dict(pkg="seaborn", alias="sns"))
_MP_IMPORTS = [
dict(pkg="msticpy"),
dict(pkg="msticpy.data", tgt="QueryProvider"),
dict(pkg="msticpy.nbtools.foliummap", tgt="FoliumMap"),
dict(pkg="msticpy.common.utility", tgt="md"),
dict(pkg="msticpy.common.utility", tgt="md_warn"),
dict(pkg="msticpy.common.wsconfig", tgt="WorkspaceConfig"),
dict(pkg="msticpy.datamodel.pivot", tgt="Pivot"),
dict(pkg="msticpy.datamodel", tgt="entities"),
dict(pkg="msticpy.vis", tgt="mp_pandas_plot"),
]
_MP_IMPORT_ALL = [
dict(module_name="msticpy.nbtools"),
dict(module_name="msticpy.sectools"),
]
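# Illustrative note: each entry above describes one import to perform into the
# notebook namespace, for example
#
#     dict(pkg="IPython.display", tgt="HTML")   # -> from IPython.display import HTML
#     dict(pkg="pandas", alias="pd")            # -> import pandas as pd
#     dict(module_name="msticpy.nbtools")       # -> from msticpy.nbtools import *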
_CONF_URI = (
"https://msticpy.readthedocs.io/en/latest/getting_started/msticpyconfig.html"
)
_AZNB_GUIDE = (
"Please run the <i>Getting Started Guide for Azure Sentinel "
+ "ML Notebooks</i> notebook."
)
_AZ_CLI_WIKI_URI = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/wiki/"
"Caching-credentials-with-Azure-CLI"
)
_CLI_WIKI_MSSG_GEN = (
f"For more information see <a href='{_AZ_CLI_WIKI_URI}'>"
"Caching credentials with Azure CLI</>"
)
_CLI_WIKI_MSSG_SHORT = (
f"see <a href='{_AZ_CLI_WIKI_URI}'>Caching credentials with Azure CLI</>"
)
current_providers: Dict[str, Any] = {} # pylint: disable=invalid-name
def _pr_output(*args):
"""Output to IPython display or print."""
if not _VERBOSITY():
return
if is_ipython():
display(HTML(" ".join([*args, "<br>"]).replace("\n", "<br>")))
else:
print(*args)
def _err_output(*args):
"""Output to IPython display or print - always output regardless of verbosity."""
if is_ipython():
display(HTML(" ".join([*args, "<br>"]).replace("\n", "<br>")))
else:
print(*args)
def init_notebook(
namespace: Dict[str, Any],
def_imports: str = "all",
additional_packages: List[str] = None,
extra_imports: List[str] = None,
**kwargs,
) -> bool:
"""
Initialize the notebook environment.
Parameters
----------
namespace : Dict[str, Any]
Namespace (usually globals()) into which imports
are to be populated.
def_imports : str, optional
Import default packages. By default "all".
Possible values are:
- "all" - import all packages
- "nb" - import common notebook packages
- "msticpy" - import msticpy packages
- "none" (or any other value) don't load any default packages.
additional_packages : List[str], optional
Additional packages to be pip installed,
by default None.
Packages are specified by name only or version
specification (e.g. "pandas>=0.25")
user_install : bool, optional
Install packages in the "user" rather than system site-packages.
Use this option if you cannot or do not want to update the system
packages.
You should usually avoid using this option with standard Conda environments.
extra_imports : List[str], optional
Additional import definitions, by default None.
Imports are specified as up to 3 comma-delimited values
in a string:
"{source_pkg}, [{import_tgt}], [{alias}]"
`source_pkg` is mandatory - equivalent to a simple "import xyz"
statement.
`{import_tgt}` specifies an object to import from the package
equivalent to "from source_pkg import import_tgt"
`alias` allows renaming of the imported object - equivalent to
the "as alias" part of the import statement.
If you want to provide just `source_pkg` and `alias` include
an additional placeholder comma: e.g. "pandas, , pd"
    friendly_exceptions : Optional[bool]
        Setting this to True causes msticpy to hook the notebook
        exception handler. Any exceptions derived from MsticpyUserError
        are displayed but do not produce a stack trace, etc.
        Defaults to system/user settings if no value is supplied.
    verbose : Union[int, bool], optional
        Controls the amount of status output, by default 1
        0 = No output
        1 or False = Brief output (default)
        2 or True = Detailed output
    no_config_check : bool, optional
        Skip the check for valid configuration. Default is False.
    verbosity : int, optional
        Explicit verbosity level (0, 1 or 2); used if `verbose` is not supplied.
Returns
-------
bool
True if successful
Raises
------
MsticpyException
If extra_imports data format is incorrect.
If package with required version check has no version
information.
"""
global current_providers # pylint: disable=global-statement, invalid-name
check_kwargs(
kwargs,
[
"user_install",
"friendly_exceptions",
"no_config_check",
"verbosity",
"verbose",
],
)
user_install: bool = kwargs.pop("user_install", False)
friendly_exceptions: Optional[bool] = kwargs.pop("friendly_exceptions", None)
no_config_check: bool = kwargs.pop("no_config_check", False)
_set_verbosity(**kwargs)
_pr_output("<hr><h4>Starting Notebook initialization...</h4>")
# Check Azure ML environment
if is_in_aml():
check_versions_aml(*_get_aml_globals(namespace))
else:
# If not in AML check and print version status
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
check_version()
_pr_output(stdout_cap.getvalue())
# Handle required packages and imports
_pr_output("Processing imports....")
imp_ok = _global_imports(
namespace, additional_packages, user_install, extra_imports, def_imports
)
# Configuration check
if no_config_check:
conf_ok = True
else:
_pr_output("Checking configuration....")
conf_ok = _get_or_create_config()
_check_azure_cli_status()
# Notebook options
_pr_output("Setting notebook options....")
_set_nb_options(namespace)
# Set friendly exceptions
if friendly_exceptions is None:
friendly_exceptions = get_config("msticpy.FriendlyExceptions")
if friendly_exceptions:
if _VERBOSITY() == 2: # type: ignore
_pr_output("Friendly exceptions enabled.")
InteractiveShell.showtraceback = _hook_ipython_exceptions(
InteractiveShell.showtraceback
)
# load pivots
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
_load_pivots(namespace=namespace)
_pr_output(stdout_cap.getvalue())
# User defaults
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
prov_dict = load_user_defaults()
_pr_output(stdout_cap.getvalue())
if prov_dict:
namespace.update(prov_dict)
current_providers = prov_dict
_pr_output("Autoloaded components:", ", ".join(prov_dict.keys()))
# show any warnings
init_status = _show_init_warnings(imp_ok, conf_ok)
_pr_output("<h4>Notebook initialization complete</h4>")
return init_status
def _show_init_warnings(imp_ok, conf_ok):
if imp_ok and conf_ok:
return True
md("<font color='orange'><h3>Notebook setup completed with some warnings.</h3>")
if not imp_ok:
md("One or more libraries did not import successfully.")
md(_AZNB_GUIDE)
if not conf_ok:
md("One or more configuration items were missing or set incorrectly.")
md(
_AZNB_GUIDE
+ f" and the <a href='{_CONF_URI}'>msticpy configuration guide</a>."
)
md("This notebook may still run but with reduced functionality.")
return False
def _set_verbosity(**kwargs):
"""Set verbosity of output from boolean or int `verbose` param."""
verbosity = 1
verb_param = kwargs.pop("verbose", kwargs.pop("verbosity", 1))
if isinstance(verb_param, bool):
verbosity = 2 if verb_param else 1
elif isinstance(verb_param, int):
verbosity = min(2, max(0, verb_param))
_VERBOSITY(verbosity)
def list_default_imports():
"""List the default imports for `init_notebook`."""
for imp_group in (_NB_IMPORTS, _MP_IMPORTS):
for imp_item in imp_group:
if "tgt" in imp_item:
import_line = f"from {imp_item["pkg"]} import {imp_item["tgt"]}"
else:
import_line = f"import {imp_item["pkg"]}"
if "alias" in imp_item:
import_line += f" as {imp_item["alias"]}"
_pr_output(import_line)
for imp_item in _MP_IMPORT_ALL:
_pr_output(f"from {imp_item["module_name"]} import *")
def _extract_pkg_name(
imp_pkg: Optional[Dict[str, str]] = None,
pkg: str = None,
tgt: str = None,
alias: str = None,
) -> str:
"""Return string representation of package import."""
if imp_pkg:
pkg = imp_pkg.get("pkg")
tgt = imp_pkg.get("tgt")
alias = imp_pkg.get("alias")
import_item = f"{pkg}.{tgt}" if tgt else pkg
if alias:
import_item = f"{alias} ({import_item})"
return import_item # type: ignore
PY_VER_VAR = "REQ_PYTHON_VER"
MP_VER_VAR = "REQ_MSTICPY_VER"
MP_EXTRAS = "REQ_MP_EXTRAS"
def _get_aml_globals(namespace: Dict[str, Any]):
"""Return global values if found."""
py_ver = namespace.get(PY_VER_VAR, "3.6")
mp_ver = namespace.get(MP_VER_VAR, __version__)
extras = namespace.get(MP_EXTRAS)
return py_ver, mp_ver, extras
def _global_imports(
namespace: Dict[str, Any],
additional_packages: List[str] = None,
user_install: bool = False,
extra_imports: List[str] = None,
def_imports: str = "all",
):
import_list = []
imports = _build_import_list(def_imports)
try:
for imp_pkg in imports:
_imp_from_package(nm_spc=namespace, **imp_pkg)
import_list.append(_extract_pkg_name(imp_pkg))
_check_and_reload_pkg(namespace, pd, _PANDAS_REQ_VERSION, "pd")
if additional_packages:
pkg_success = check_and_install_missing_packages(
additional_packages, user=user_install
)
if not pkg_success:
_err_output("One or more packages failed to install.")
_err_output(
"Please re-run init_notebook() with the parameter user_install=True."
)
# We want to force import lib to see anything that we've
# just installed.
importlib.invalidate_caches()
if extra_imports:
import_list.extend(
_import_extras(nm_spc=namespace, extra_imports=extra_imports)
)
_pr_output("Imported:", ", ".join(imp for imp in import_list if imp))
return True
except ImportError as imp_err:
display(HTML(_IMPORT_ERR_MSSG.format(err=imp_err)))
return False
def _build_import_list(def_imports: str) -> List[Dict[str, str]]:
imports = []
if def_imports.casefold() in ["all", "nb"]:
imports.extend(_NB_IMPORTS)
if def_imports.casefold() in ["all", "msticpy"]:
imports.extend(_MP_IMPORTS)
imports.extend(_MP_IMPORT_ALL)
return imports
_AZ_SENT_ERRS = [
"Missing or empty 'AzureSentinel' section",
"Missing or empty 'Workspaces' key in 'AzureSentinel' section",
]
def _verify_no_azs_errors(errs):
"""Verify none of the Microsoft Sentinel errors appear in `errs`."""
return all(az_err not in errs for az_err in _AZ_SENT_ERRS)
def _get_or_create_config() -> bool:
# Cases
# 1. Env var set and mpconfig exists -> goto 4
# 2. Env var set and mpconfig file not exists - warn and continue
# 3. search_for_file finds mpconfig -> goto 4
# 4. if file and check_file_contents -> return ok
# 5. search_for_file(config.json)
# 6. If config.json -> import into mpconfig and save
# 7. Error - no Microsoft Sentinel config
mp_path = os.environ.get("MSTICPYCONFIG")
if mp_path and not Path(mp_path).is_file():
_err_output(_MISSING_MPCONFIG_ENV_ERR)
if not mp_path or not Path(mp_path).is_file():
mp_path = search_for_file("msticpyconfig.yaml", paths=[".", ".."])
if mp_path:
errs: List[str] = []
try:
std_out_cap = io.StringIO()
with redirect_stdout(std_out_cap):
errs, _ = validate_config(config_file=mp_path)
if errs:
_pr_output(std_out_cap.getvalue())
if _verify_no_azs_errors(errs):
# If the mpconfig has a Microsoft Sentinel config, return here
return True
# pylint: disable=broad-except
except Exception as err:
errs.append(f"Exception while checking configuration:\n{err}")
_pr_output(f"Exception while checking configuration:\n{type(err)} - {err}")
_pr_output("\n".join(traceback.format_tb(err.__traceback__)))
_pr_output("Please report this to msticpy@microsoft.com")
# pylint: enable=broad-except
# Look for a config.json
config_json = search_for_file("config.json", paths=[".", ".."])
if config_json:
# if we found one, use it to populate msticpyconfig.yaml
_populate_config_to_mp_config(mp_path, config_json)
return True
_pr_output("No valid configuration for Microsoft Sentinel found.")
return False
def _populate_config_to_mp_config(mp_path, config_json):
"""Populate new or existing msticpyconfig with settings from config.json."""
mp_path = mp_path or "./msticpyconfig.yaml"
mp_config_convert = MpConfigFile(file=config_json)
azs_settings = mp_config_convert.map_json_to_mp_ws()
def_azs_settings = next(
iter(azs_settings.get("AzureSentinel", {}).get("Workspaces", {}).values())
)
if def_azs_settings:
mp_config_convert.settings["AzureSentinel"]["Workspaces"][
"Default"
] = def_azs_settings.copy()
mssg = f"Created '{mp_path}'' with Microsoft Sentinel settings."
if Path(mp_path).exists():
# If there is an existing file read it in
mp_config_text = Path(mp_path).read_text(encoding="utf-8")
mp_config_settings = yaml.safe_load(mp_config_text)
# update exist settings with the AzSent settings from config.json
mp_config_settings.update(mp_config_convert.settings)
# update MpConfigFile with the merged settings
mp_config_convert.settings = mp_config_settings
mssg = f"Updated '{mp_path}'' with Microsoft Sentinel settings."
# Save the file
mp_config_convert.save_to_file(mp_path, backup=True)
_pr_output(mssg)
def _set_nb_options(namespace):
namespace["WIDGET_DEFAULTS"] = {
"layout": widgets.Layout(width="95%"),
"style": {"description_width": "initial"},
}
# Some of our dependencies (networkx) still use deprecated Matplotlib
# APIs - we can't do anything about it, so suppress them from view
warnings.simplefilter("ignore", category=MatplotlibDeprecationWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
if sns:
sns.set()
pd.set_option("display.max_rows", 100)
pd.set_option("display.max_columns", 50)
pd.set_option("display.max_colwidth", 100)
os.environ["KQLMAGIC_LOAD_MODE"] = "silent"
# Kqlmagic config will use AZ CLI login if available
kql_config = os.environ.get("KQLMAGIC_CONFIGURATION", "")
if "try_azcli_login" not in kql_config:
kql_config = ";".join([kql_config, "try_azcli_login=True"])
os.environ["KQLMAGIC_CONFIGURATION"] = kql_config
def _load_pivots(namespace):
"""Load pivot functions."""
if not Pivot.current:
pivot = Pivot()
namespace["pivot"] = pivot
vt_pivot = None
try:
get_config("TIProviders.VirusTotal")
try:
vt_pivot = importlib.import_module("msticpy.sectools.vtlookupv3.vt_pivot")
namespace["vt_pivot"] = vt_pivot
except ImportError:
# Importing Vt3 libraries failed.
pass
except KeyError:
# No VT settings detected
pass
if vt_pivot:
vt_pivot.add_pivot_functions()
def _import_extras(nm_spc: Dict[str, Any], extra_imports: List[str]):
added_imports = []
if isinstance(extra_imports, str):
extra_imports = [extra_imports]
for imp_spec in extra_imports:
params: List[Optional[str]] = [None, None, None]
for idx, param in enumerate(imp_spec.split(",")):
params[idx] = param.strip() or None
if params[0] is None:
raise MsticpyException(
f"First parameter in extra_imports is mandatory: {imp_spec}"
)
_imp_from_package(nm_spc=nm_spc, pkg=params[0], tgt=params[1], alias=params[2])
added_imports.append(
_extract_pkg_name(pkg=params[0], tgt=params[1], alias=params[2])
)
return added_imports
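# Illustrative note for _import_extras above: an import spec is split on commas into
# up to three parts (pkg, tgt, alias), so
#
#     "pandas, DataFrame, df"   # -> from pandas import DataFrame as df
#     "pandas, , pd"            # -> import pandas as pd
#     "numpy"                   # -> import numpy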
def _imp_module(nm_spc: Dict[str, Any], module_name: str, alias: str = None):
"""Import named module and assign to global alias."""
try:
mod = importlib.import_module(module_name)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=module_name))
return None
if alias:
nm_spc[alias] = mod
else:
nm_spc[module_name] = mod
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{module_name} imported (alias={alias})")
return mod
def _imp_module_all(nm_spc: Dict[str, Any], module_name):
"""Import all from named module add to globals."""
try:
imported_mod = importlib.import_module(module_name)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=module_name))
return
for item in dir(imported_mod):
if item.startswith("_"):
continue
nm_spc[item] = getattr(imported_mod, item)
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"All items imported from {module_name}")
def _imp_from_package(
nm_spc: Dict[str, Any], pkg: str, tgt: str = None, alias: str = None
):
"""Import object or submodule from `pkg`."""
if not tgt:
return _imp_module(nm_spc=nm_spc, module_name=pkg, alias=alias)
try:
# target could be a module
obj = importlib.import_module(f".{tgt}", pkg)
except ImportError:
# if not, it must be an attribute (class, func, etc.)
try:
mod = importlib.import_module(pkg)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=pkg))
return None
obj = getattr(mod, tgt)
if alias:
nm_spc[alias] = obj
else:
nm_spc[tgt] = obj
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{tgt} imported from {pkg} (alias={alias})")
return obj
def _check_and_reload_pkg(
nm_spc: Dict[str, Any], pkg: Any, req_version: Tuple[int, ...], alias: str = None
):
"""Check package version matches required version and reload."""
warn_mssg = []
pkg_name = pkg.__name__
if not hasattr(pkg, "__version__"):
raise MsticpyException(f"Package {pkg_name} has no version data.")
pkg_version = tuple(int(v) for v in pkg.__version__.split("."))
if pkg_version < req_version:
_err_output(_MISSING_PKG_WARN.format(package=pkg_name))
resp = (
input("Install the package now? (y/n)") if not unit_testing() else "y"
) # nosec
if resp.casefold().startswith("y"):
warn_mssg.append(f"{pkg_name} was installed or upgraded.")
pip_ver = ".".join(str(elem) for elem in req_version)
pkg_spec = f"{pkg_name}>={pip_ver}"
check_and_install_missing_packages(required_packages=[pkg_spec], user=True)
if pkg_name in sys.modules:
importlib.reload(pkg)
else:
_imp_module(nm_spc, pkg_name, alias=alias)
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{pkg_name} imported version {pkg.__version__}")
return warn_mssg
def _hook_ipython_exceptions(func):
"""Hooks the `func` and bypasses it if exception is MsticpyUserException."""
@wraps(func)
def showtraceback(*args, **kwargs):
"""Replace IPython showtraceback."""
# extract exception type, value and traceback
e_type, _, _ = sys.exc_info()
if e_type is not None and issubclass(e_type, MsticpyUserError):
return None
# otherwise run the original hook
return func(*args, **kwargs)
return showtraceback
def _check_azure_cli_status():
"""Check for Azure CLI credentials."""
if not unit_testing():
status, message = check_cli_credentials()
if status == AzureCliStatus.CLI_OK:
_pr_output(message)
elif status == AzureCliStatus.CLI_NOT_INSTALLED:
_pr_output(
"Azure CLI credentials not detected." f" ({_CLI_WIKI_MSSG_SHORT})"
)
elif message:
_pr_output("\n".join([message, _CLI_WIKI_MSSG_GEN]))
| # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Initialization for Jupyter Notebooks."""
import importlib
import io
import os
import sys
import traceback
import warnings
from contextlib import redirect_stdout
from functools import wraps
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import ipywidgets as widgets
import pandas as pd
import yaml
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import HTML, display
from matplotlib import MatplotlibDeprecationWarning
try:
import seaborn as sns
except ImportError:
sns = None
from .._version import VERSION
from ..common.azure_auth_core import check_cli_credentials, AzureCliStatus
from ..common.check_version import check_version
from ..common.exceptions import MsticpyException, MsticpyUserError
from ..common.pkg_config import get_config, validate_config
from ..common.utility import (
check_and_install_missing_packages,
check_kwargs,
is_ipython,
md,
search_for_file,
unit_testing,
)
from ..config import MpConfigFile
from ..datamodel.pivot import Pivot
from .azure_ml_tools import check_versions as check_versions_aml
from .azure_ml_tools import is_in_aml
from .user_config import load_user_defaults
__version__ = VERSION
__author__ = "Ian Hellen"
_IMPORT_ERR_MSSG = """
<h2><font color='red'>One or more missing packages detected</h2>
Please correct these by installing the required packages, restart
the kernel and re-run the notebook.</font>
<i>Package error: {err}</i><br>
"""
_IMPORT_MODULE_MSSG = """
<font color='red'>Error import module {module}</font>
"""
_MISSING_PKG_WARN = """
<h3><font color='orange'>Warning {package} is not installed or has an
incorrect version</h3></font>
"""
_HELP_URIS = [
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/'
'A%20Getting%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Getting Started (notebook)</a></li>"
),
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/'
'ConfiguringNotebookEnvironment.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Configuring your Notebook environment (notebook)</a></li>"
),
(
'<li><a href="https://msticpy.readthedocs.io/en/latest/getting_started/'
'msticpyconfig.html"'
'target="_blank" rel="noopener noreferrer">'
"Configuring MSTICPy settings (doc)</a></li>"
),
(
'<li><a href="https://msticpy.readthedocs.io/en/latest/getting_started/'
'SettingsEditor.html"'
'target="_blank" rel="noopener noreferrer">'
"MSTICPy settings editor (doc)</a></li>"
),
(
'<li><a href="https://github.com/Azure/Azure-Sentinel-Notebooks/blob/'
'master/TroubleShootingNotebooks.ipynb"'
'target="_blank" rel="noopener noreferrer">'
"Trouble-Shooting Notebooks (notebook)</a></li>"
),
]
_MISSING_MPCONFIG_ENV_ERR = f"""
<h3><font color='orange'>Warning: no <i>msticpyconfig.yaml</i> found</h3></font>
The MSTICPYCONFIG environment variable is set but does not point
to a valid file.<br>
Some functionality (such as Threat Intel lookups) will not function without
valid configuration settings.<br>
The following resources will help you set up your configuration:
<ul>{"".join(_HELP_URIS)}</ul>
<br>You can load and run the first two of these from the Microsoft Sentinel
<b>Notebooks</b> tab
"""
_PANDAS_REQ_VERSION = (0, 25, 0)
def _get_verbosity_setting() -> Callable[[Optional[int]], int]:
"""Closure for holding trace setting."""
_verbosity = 1
def _verbose(verbosity: Optional[int] = None) -> int:
nonlocal _verbosity
if verbosity is not None:
_verbosity = verbosity
return _verbosity
return _verbose
_VERBOSITY: Callable[[Optional[int]], int] = _get_verbosity_setting()
_NB_IMPORTS = [
dict(pkg="pandas", alias="pd"),
dict(pkg="IPython", tgt="get_ipython"),
dict(pkg="IPython.display", tgt="display"),
dict(pkg="IPython.display", tgt="HTML"),
dict(pkg="IPython.display", tgt="Markdown"),
dict(pkg="ipywidgets", alias="widgets"),
dict(pkg="pathlib", tgt="Path"),
dict(pkg="matplotlib.pyplot", alias="plt"),
dict(pkg="matplotlib", tgt="MatplotlibDeprecationWarning"),
dict(pkg="numpy", alias="np"),
]
if sns is not None:
_NB_IMPORTS.append(dict(pkg="seaborn", alias="sns"))
_MP_IMPORTS = [
dict(pkg="msticpy"),
dict(pkg="msticpy.data", tgt="QueryProvider"),
dict(pkg="msticpy.nbtools.foliummap", tgt="FoliumMap"),
dict(pkg="msticpy.common.utility", tgt="md"),
dict(pkg="msticpy.common.utility", tgt="md_warn"),
dict(pkg="msticpy.common.wsconfig", tgt="WorkspaceConfig"),
dict(pkg="msticpy.datamodel.pivot", tgt="Pivot"),
dict(pkg="msticpy.datamodel", tgt="entities"),
dict(pkg="msticpy.vis", tgt="mp_pandas_plot"),
]
_MP_IMPORT_ALL = [
dict(module_name="msticpy.nbtools"),
dict(module_name="msticpy.sectools"),
]
_CONF_URI = (
"https://msticpy.readthedocs.io/en/latest/getting_started/msticpyconfig.html"
)
_AZNB_GUIDE = (
"Please run the <i>Getting Started Guide for Azure Sentinel "
+ "ML Notebooks</i> notebook."
)
_AZ_CLI_WIKI_URI = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/wiki/"
"Caching-credentials-with-Azure-CLI"
)
_CLI_WIKI_MSSG_GEN = (
f"For more information see <a href='{_AZ_CLI_WIKI_URI}'>"
"Caching credentials with Azure CLI</>"
)
_CLI_WIKI_MSSG_SHORT = (
f"see <a href='{_AZ_CLI_WIKI_URI}'>Caching credentials with Azure CLI</>"
)
current_providers: Dict[str, Any] = {} # pylint: disable=invalid-name
def _pr_output(*args):
"""Output to IPython display or print."""
if not _VERBOSITY():
return
if is_ipython():
display(HTML(" ".join([*args, "<br>"]).replace("\n", "<br>")))
else:
print(*args)
def _err_output(*args):
"""Output to IPython display or print - always output regardless of verbosity."""
if is_ipython():
display(HTML(" ".join([*args, "<br>"]).replace("\n", "<br>")))
else:
print(*args)
def init_notebook(
namespace: Dict[str, Any],
def_imports: str = "all",
additional_packages: List[str] = None,
extra_imports: List[str] = None,
**kwargs,
) -> bool:
"""
Initialize the notebook environment.
Parameters
----------
namespace : Dict[str, Any]
Namespace (usually globals()) into which imports
are to be populated.
def_imports : str, optional
Import default packages. By default "all".
Possible values are:
- "all" - import all packages
- "nb" - import common notebook packages
- "msticpy" - import msticpy packages
- "none" (or any other value) don't load any default packages.
additional_packages : List[str], optional
Additional packages to be pip installed,
by default None.
Packages are specified by name only or version
specification (e.g. "pandas>=0.25")
user_install : bool, optional
Install packages in the "user" rather than system site-packages.
Use this option if you cannot or do not want to update the system
packages.
You should usually avoid using this option with standard Conda environments.
extra_imports : List[str], optional
Additional import definitions, by default None.
Imports are specified as up to 3 comma-delimited values
in a string:
"{source_pkg}, [{import_tgt}], [{alias}]"
`source_pkg` is mandatory - equivalent to a simple "import xyz"
statement.
`{import_tgt}` specifies an object to import from the package
equivalent to "from source_pkg import import_tgt"
`alias` allows renaming of the imported object - equivalent to
the "as alias" part of the import statement.
If you want to provide just `source_pkg` and `alias` include
an additional placeholder comma: e.g. "pandas, , pd"
friendly_exceptions : Optional[bool]
Setting this to True causes msticpy to hook the notebook
exception hander. Any exceptions derived from MsticpyUserException
are displayed but do not produce a stack trace, etc.
Defaults to system/user settings if no value is supplied.
verbose : Union[int, bool], optional
Controls amount if status output, by default 1
0 = No output
1 or False = Brief output (default)
2 or True = Detailed output
no_config_check : bool, optional
Skip the check for valid configuration. Default is False.
verbosity : int, optional
Returns
-------
bool
True if successful
Raises
------
MsticpyException
If extra_imports data format is incorrect.
If package with required version check has no version
information.
"""
global current_providers # pylint: disable=global-statement, invalid-name
check_kwargs(
kwargs,
[
"user_install",
"friendly_exceptions",
"no_config_check",
"verbosity",
"verbose",
],
)
user_install: bool = kwargs.pop("user_install", False)
friendly_exceptions: Optional[bool] = kwargs.pop("friendly_exceptions", None)
no_config_check: bool = kwargs.pop("no_config_check", False)
_set_verbosity(**kwargs)
_pr_output("<hr><h4>Starting Notebook initialization...</h4>")
# Check Azure ML environment
if is_in_aml():
check_versions_aml(*_get_aml_globals(namespace))
else:
# If not in AML check and print version status
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
check_version()
_pr_output(stdout_cap.getvalue())
# Handle required packages and imports
_pr_output("Processing imports....")
imp_ok = _global_imports(
namespace, additional_packages, user_install, extra_imports, def_imports
)
# Configuration check
if no_config_check:
conf_ok = True
else:
_pr_output("Checking configuration....")
conf_ok = _get_or_create_config()
_check_azure_cli_status()
# Notebook options
_pr_output("Setting notebook options....")
_set_nb_options(namespace)
# Set friendly exceptions
if friendly_exceptions is None:
friendly_exceptions = get_config("msticpy.FriendlyExceptions")
if friendly_exceptions:
if _VERBOSITY() == 2: # type: ignore
_pr_output("Friendly exceptions enabled.")
InteractiveShell.showtraceback = _hook_ipython_exceptions(
InteractiveShell.showtraceback
)
# load pivots
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
_load_pivots(namespace=namespace)
_pr_output(stdout_cap.getvalue())
# User defaults
stdout_cap = io.StringIO()
with redirect_stdout(stdout_cap):
prov_dict = load_user_defaults()
_pr_output(stdout_cap.getvalue())
if prov_dict:
namespace.update(prov_dict)
current_providers = prov_dict
_pr_output("Autoloaded components:", ", ".join(prov_dict.keys()))
# show any warnings
init_status = _show_init_warnings(imp_ok, conf_ok)
_pr_output("<h4>Notebook initialization complete</h4>")
return init_status
def _show_init_warnings(imp_ok, conf_ok):
if imp_ok and conf_ok:
return True
md("<font color='orange'><h3>Notebook setup completed with some warnings.</h3>")
if not imp_ok:
md("One or more libraries did not import successfully.")
md(_AZNB_GUIDE)
if not conf_ok:
md("One or more configuration items were missing or set incorrectly.")
md(
_AZNB_GUIDE
+ f" and the <a href='{_CONF_URI}'>msticpy configuration guide</a>."
)
md("This notebook may still run but with reduced functionality.")
return False
def _set_verbosity(**kwargs):
"""Set verbosity of output from boolean or int `verbose` param."""
verbosity = 1
verb_param = kwargs.pop("verbose", kwargs.pop("verbosity", 1))
if isinstance(verb_param, bool):
verbosity = 2 if verb_param else 1
elif isinstance(verb_param, int):
verbosity = min(2, max(0, verb_param))
_VERBOSITY(verbosity)
def list_default_imports():
"""List the default imports for `init_notebook`."""
for imp_group in (_NB_IMPORTS, _MP_IMPORTS):
for imp_item in imp_group:
if "tgt" in imp_item:
import_line = f"from {imp_item['pkg']} import {imp_item['tgt']}"
else:
import_line = f"import {imp_item['pkg']}"
if "alias" in imp_item:
import_line += f" as {imp_item['alias']}"
_pr_output(import_line)
for imp_item in _MP_IMPORT_ALL:
_pr_output(f"from {imp_item['module_name']} import *")
def _extract_pkg_name(
imp_pkg: Optional[Dict[str, str]] = None,
pkg: str = None,
tgt: str = None,
alias: str = None,
) -> str:
"""Return string representation of package import."""
if imp_pkg:
pkg = imp_pkg.get("pkg")
tgt = imp_pkg.get("tgt")
alias = imp_pkg.get("alias")
import_item = f"{pkg}.{tgt}" if tgt else pkg
if alias:
import_item = f"{alias} ({import_item})"
return import_item # type: ignore
PY_VER_VAR = "REQ_PYTHON_VER"
MP_VER_VAR = "REQ_MSTICPY_VER"
MP_EXTRAS = "REQ_MP_EXTRAS"
def _get_aml_globals(namespace: Dict[str, Any]):
"""Return global values if found."""
py_ver = namespace.get(PY_VER_VAR, "3.6")
mp_ver = namespace.get(MP_VER_VAR, __version__)
extras = namespace.get(MP_EXTRAS)
return py_ver, mp_ver, extras
def _global_imports(
namespace: Dict[str, Any],
additional_packages: List[str] = None,
user_install: bool = False,
extra_imports: List[str] = None,
def_imports: str = "all",
):
import_list = []
imports = _build_import_list(def_imports)
try:
for imp_pkg in imports:
_imp_from_package(nm_spc=namespace, **imp_pkg)
import_list.append(_extract_pkg_name(imp_pkg))
_check_and_reload_pkg(namespace, pd, _PANDAS_REQ_VERSION, "pd")
if additional_packages:
pkg_success = check_and_install_missing_packages(
additional_packages, user=user_install
)
if not pkg_success:
_err_output("One or more packages failed to install.")
_err_output(
"Please re-run init_notebook() with the parameter user_install=True."
)
# We want to force import lib to see anything that we've
# just installed.
importlib.invalidate_caches()
if extra_imports:
import_list.extend(
_import_extras(nm_spc=namespace, extra_imports=extra_imports)
)
_pr_output("Imported:", ", ".join(imp for imp in import_list if imp))
return True
except ImportError as imp_err:
display(HTML(_IMPORT_ERR_MSSG.format(err=imp_err)))
return False
def _build_import_list(def_imports: str) -> List[Dict[str, str]]:
imports = []
if def_imports.casefold() in ["all", "nb"]:
imports.extend(_NB_IMPORTS)
if def_imports.casefold() in ["all", "msticpy"]:
imports.extend(_MP_IMPORTS)
imports.extend(_MP_IMPORT_ALL)
return imports
_AZ_SENT_ERRS = [
"Missing or empty 'AzureSentinel' section",
"Missing or empty 'Workspaces' key in 'AzureSentinel' section",
]
def _verify_no_azs_errors(errs):
"""Verify none of the Microsoft Sentinel errors appear in `errs`."""
return all(az_err not in errs for az_err in _AZ_SENT_ERRS)
def _get_or_create_config() -> bool:
# Cases
# 1. Env var set and mpconfig exists -> goto 4
# 2. Env var set and mpconfig file not exists - warn and continue
# 3. search_for_file finds mpconfig -> goto 4
# 4. if file and check_file_contents -> return ok
# 5. search_for_file(config.json)
# 6. If config.json -> import into mpconfig and save
# 7. Error - no Microsoft Sentinel config
mp_path = os.environ.get("MSTICPYCONFIG")
if mp_path and not Path(mp_path).is_file():
_err_output(_MISSING_MPCONFIG_ENV_ERR)
if not mp_path or not Path(mp_path).is_file():
mp_path = search_for_file("msticpyconfig.yaml", paths=[".", ".."])
if mp_path:
errs: List[str] = []
try:
std_out_cap = io.StringIO()
with redirect_stdout(std_out_cap):
errs, _ = validate_config(config_file=mp_path)
if errs:
_pr_output(std_out_cap.getvalue())
if _verify_no_azs_errors(errs):
# If the mpconfig has a Microsoft Sentinel config, return here
return True
# pylint: disable=broad-except
except Exception as err:
errs.append(f"Exception while checking configuration:\n{err}")
_pr_output(f"Exception while checking configuration:\n{type(err)} - {err}")
_pr_output("\n".join(traceback.format_tb(err.__traceback__)))
_pr_output("Please report this to msticpy@microsoft.com")
# pylint: enable=broad-except
# Look for a config.json
config_json = search_for_file("config.json", paths=[".", ".."])
if config_json:
# if we found one, use it to populate msticpyconfig.yaml
_populate_config_to_mp_config(mp_path, config_json)
return True
_pr_output("No valid configuration for Microsoft Sentinel found.")
return False
def _populate_config_to_mp_config(mp_path, config_json):
"""Populate new or existing msticpyconfig with settings from config.json."""
mp_path = mp_path or "./msticpyconfig.yaml"
mp_config_convert = MpConfigFile(file=config_json)
azs_settings = mp_config_convert.map_json_to_mp_ws()
def_azs_settings = next(
iter(azs_settings.get("AzureSentinel", {}).get("Workspaces", {}).values())
)
if def_azs_settings:
mp_config_convert.settings["AzureSentinel"]["Workspaces"][
"Default"
] = def_azs_settings.copy()
mssg = f"Created '{mp_path}'' with Microsoft Sentinel settings."
if Path(mp_path).exists():
# If there is an existing file read it in
mp_config_text = Path(mp_path).read_text(encoding="utf-8")
mp_config_settings = yaml.safe_load(mp_config_text)
# update exist settings with the AzSent settings from config.json
mp_config_settings.update(mp_config_convert.settings)
# update MpConfigFile with the merged settings
mp_config_convert.settings = mp_config_settings
mssg = f"Updated '{mp_path}'' with Microsoft Sentinel settings."
# Save the file
mp_config_convert.save_to_file(mp_path, backup=True)
_pr_output(mssg)
def _set_nb_options(namespace):
namespace["WIDGET_DEFAULTS"] = {
"layout": widgets.Layout(width="95%"),
"style": {"description_width": "initial"},
}
# Some of our dependencies (networkx) still use deprecated Matplotlib
# APIs - we can't do anything about it, so suppress them from view
warnings.simplefilter("ignore", category=MatplotlibDeprecationWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
if sns:
sns.set()
pd.set_option("display.max_rows", 100)
pd.set_option("display.max_columns", 50)
pd.set_option("display.max_colwidth", 100)
os.environ["KQLMAGIC_LOAD_MODE"] = "silent"
# Kqlmagic config will use AZ CLI login if available
kql_config = os.environ.get("KQLMAGIC_CONFIGURATION", "")
if "try_azcli_login" not in kql_config:
kql_config = ";".join([kql_config, "try_azcli_login=True"])
os.environ["KQLMAGIC_CONFIGURATION"] = kql_config
def _load_pivots(namespace):
"""Load pivot functions."""
if not Pivot.current:
pivot = Pivot()
namespace["pivot"] = pivot
vt_pivot = None
try:
get_config("TIProviders.VirusTotal")
try:
vt_pivot = importlib.import_module("msticpy.sectools.vtlookupv3.vt_pivot")
namespace["vt_pivot"] = vt_pivot
except ImportError:
# Importing Vt3 libraries failed.
pass
except KeyError:
# No VT settings detected
pass
if vt_pivot:
vt_pivot.add_pivot_functions()
def _import_extras(nm_spc: Dict[str, Any], extra_imports: List[str]):
added_imports = []
if isinstance(extra_imports, str):
extra_imports = [extra_imports]
for imp_spec in extra_imports:
params: List[Optional[str]] = [None, None, None]
for idx, param in enumerate(imp_spec.split(",")):
params[idx] = param.strip() or None
if params[0] is None:
raise MsticpyException(
f"First parameter in extra_imports is mandatory: {imp_spec}"
)
_imp_from_package(nm_spc=nm_spc, pkg=params[0], tgt=params[1], alias=params[2])
added_imports.append(
_extract_pkg_name(pkg=params[0], tgt=params[1], alias=params[2])
)
return added_imports
def _imp_module(nm_spc: Dict[str, Any], module_name: str, alias: str = None):
"""Import named module and assign to global alias."""
try:
mod = importlib.import_module(module_name)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=module_name))
return None
if alias:
nm_spc[alias] = mod
else:
nm_spc[module_name] = mod
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{module_name} imported (alias={alias})")
return mod
def _imp_module_all(nm_spc: Dict[str, Any], module_name):
"""Import all from named module add to globals."""
try:
imported_mod = importlib.import_module(module_name)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=module_name))
return
for item in dir(imported_mod):
if item.startswith("_"):
continue
nm_spc[item] = getattr(imported_mod, item)
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"All items imported from {module_name}")
def _imp_from_package(
nm_spc: Dict[str, Any], pkg: str, tgt: str = None, alias: str = None
):
"""Import object or submodule from `pkg`."""
if not tgt:
return _imp_module(nm_spc=nm_spc, module_name=pkg, alias=alias)
try:
# target could be a module
obj = importlib.import_module(f".{tgt}", pkg)
except ImportError:
# if not, it must be an attribute (class, func, etc.)
try:
mod = importlib.import_module(pkg)
except ImportError:
_err_output(_IMPORT_MODULE_MSSG.format(module=pkg))
return None
obj = getattr(mod, tgt)
if alias:
nm_spc[alias] = obj
else:
nm_spc[tgt] = obj
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{tgt} imported from {pkg} (alias={alias})")
return obj
def _check_and_reload_pkg(
nm_spc: Dict[str, Any], pkg: Any, req_version: Tuple[int, ...], alias: str = None
):
"""Check package version matches required version and reload."""
warn_mssg = []
pkg_name = pkg.__name__
if not hasattr(pkg, "__version__"):
raise MsticpyException(f"Package {pkg_name} has no version data.")
pkg_version = tuple(int(v) for v in pkg.__version__.split("."))
if pkg_version < req_version:
_err_output(_MISSING_PKG_WARN.format(package=pkg_name))
resp = (
input("Install the package now? (y/n)") if not unit_testing() else "y"
) # nosec
if resp.casefold().startswith("y"):
warn_mssg.append(f"{pkg_name} was installed or upgraded.")
pip_ver = ".".join(str(elem) for elem in req_version)
pkg_spec = f"{pkg_name}>={pip_ver}"
check_and_install_missing_packages(required_packages=[pkg_spec], user=True)
if pkg_name in sys.modules:
importlib.reload(pkg)
else:
_imp_module(nm_spc, pkg_name, alias=alias)
if _VERBOSITY() == 2: # type: ignore
_pr_output(f"{pkg_name} imported version {pkg.__version__}")
return warn_mssg
def _hook_ipython_exceptions(func):
"""Hooks the `func` and bypasses it if exception is MsticpyUserException."""
@wraps(func)
def showtraceback(*args, **kwargs):
"""Replace IPython showtraceback."""
# extract exception type, value and traceback
e_type, _, _ = sys.exc_info()
if e_type is not None and issubclass(e_type, MsticpyUserError):
return None
# otherwise run the original hook
return func(*args, **kwargs)
return showtraceback
def _check_azure_cli_status():
"""Check for Azure CLI credentials."""
if not unit_testing():
status, message = check_cli_credentials()
if status == AzureCliStatus.CLI_OK:
_pr_output(message)
elif status == AzureCliStatus.CLI_NOT_INSTALLED:
_pr_output(
"Azure CLI credentials not detected." f" ({_CLI_WIKI_MSSG_SHORT})"
)
elif message:
_pr_output("\n".join([message, _CLI_WIKI_MSSG_GEN]))
|
"""
Combine, reformat and trim single simulation trajectories.
Output: tracjectoriesDat including all outcome channels, and trajectoriesDat_trim including key channels only
If number of trajectories exceeds a specified limit, multiple trajectories in chunks will be returned.
"""
import argparse
import subprocess
import pandas as pd
import os
import shutil
import sys
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-exp",
"--exp_name",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default = "Local"
)
parser.add_argument(
"--time_start",
type=int,
help="Lower limit of time steps to keep",
default=1
)
parser.add_argument(
"--time_stop",
type=int,
help="Upper limit of time steps to keep",
default=1000
)
parser.add_argument(
"-limit",
"--scen_limit",
type=int,
help="Number of simulations to combine",
default = 700
)
parser.add_argument(
"--additional_sample_param",
type=str,
nargs='+',
help="""Name of additional sample parameters to keep, reduced to minimum to reduce file size
format: --additional_sample_param time_to_infectious time_to_death (no quotes)
Note: sample parameters can also always be added from the sample_parameters.csv if required in the postprocessing""",
default = ''
)
parser.add_argument(
"--delete_trajectories",
action='store_true',
help="If specified, single trajectories will be deleted after postprocessing.",
)
return parser.parse_args()
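# Illustrative invocation (script file name and experiment name are placeholders):
#     python combine_and_trim.py -exp <experiment_name> --time_start 1 --time_stop 400 \
#         --additional_sample_param time_to_infectious time_to_death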
def reprocess(input_fname='trajectories.csv'):
fname = os.path.join(git_dir, input_fname)
row_df = pd.read_csv(fname, skiprows=1)
df = row_df.set_index('sampletimes').transpose()
run_time = len([x for x in df.columns.values if '{0}' in x])
num_runs = int((len(row_df)) / run_time)
df = df.reset_index(drop=False)
df = df.rename(columns={'index': 'time'})
df['time'] = df['time'].astype(float)
adf = pd.DataFrame()
for run_num in range(num_runs):
channels = [x for x in df.columns.values if '{%d}' % run_num in x]
sdf = df[['time'] + channels]
sdf = sdf.rename(columns={
x: x.split('{')[0] for x in channels
})
sdf['run_num'] = run_num
adf = pd.concat([adf, sdf])
adf = adf.reset_index()
del adf['index']
return adf
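# Illustrative note (assumed raw layout): the raw trajectories file is expected to
# hold one column per channel and run, e.g. "infected{0}", "infected{1}", ..., keyed
# by 'sampletimes'; reprocess() reshapes this into long format, so
#
#     adf = reprocess('trajectories_scen0.csv')
#
# would be expected to return columns ['time', <channel names>, 'run_num'].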
def trim_trajectories(df, fname,sample_param_to_keep, time_start=1, time_stop=1000,
time_varying_params=None, grpnames=None):
"""Generate a subset of the trajectoriesDat dataframe
The new csv file is saved under trajectoriesDat_trim.csv, no dataframe is returned
"""
channels = ['susceptible', 'infected', 'recovered', 'infected_cumul', 'detected_cumul',
'asymp_cumul', 'asymp', 'asymp_det_cumul',
'symp_mild_cumul', 'symp_mild', 'symp_mild_det_cumul',
'symp_severe_cumul','symp_severe', 'symp_severe_det_cumul',
'hosp_det_cumul', 'hosp_cumul', 'hosp_det', 'hospitalized',
'crit_cumul','crit_det_cumul', 'crit_det', 'critical',
'deaths_det_cumul', 'deaths']
    if time_varying_params is None:
time_varying_params = ['Ki_t']
column_list = ['time', 'run_num'] + sample_param_to_keep
if grpnames is not None:
if len(grpnames) >1:
for grp in grpnames:
grp_ch = str(grp.replace('_', '-'))
[column_list.append(f'{channel}_{str(grp_ch)}') for channel in channels]
                if grp_ch != "All" and 'age' not in grp_ch:
[column_list.append(f'{time_varying_param}_{str(grp_ch)}') for time_varying_param in time_varying_params]
del grp, grp_ch
column_list = column_list + ['N_All']
else:
grp = grpnames[0]
grp_ch = str(grp.replace('_', '-'))
[column_list.append(f'{channel}_{str(grp_ch)}') for channel in channels]
            if 'age' not in grp_ch:
[column_list.append(f'{time_varying_param}_{str(grp_ch)}') for time_varying_param in time_varying_params]
else:
column_list = column_list + channels + time_varying_params
"""Trim df and save"""
df = df[column_list]
df = df[df['time'] > time_start]
df = df[df['time'] < time_stop]
df.to_csv(os.path.join(exp_path, fname + '_trim.csv'), index=False, date_format='%Y-%m-%d')
def combine_trajectories(sampledf, Nscenarios_start=0, Nscenarios_stop=1000, fname='trajectoriesDat.csv',SAVE=True):
df_list = []
n_errors = 0
for scen_i in range(Nscenarios_start, Nscenarios_stop):
input_name = "trajectories_scen" + str(scen_i) + ".csv"
try:
df_i = reprocess(os.path.join(trajectories_path, input_name))
df_i['scen_num'] = scen_i
df_i = df_i.merge(sampledf, on=['scen_num'])
df_list.append(df_i)
except:
n_errors += 1
continue
print("Number of errors:" + str(n_errors))
try:
dfc = pd.concat(df_list)
dfc = dfc.dropna()
if SAVE:
dfc.to_csv(os.path.join(exp_path, fname), index=False, date_format='%Y-%m-%d')
except ValueError:
print('WARNING: No objects to concatenate - either no trajectories or n_scen_limit size is too small')
dfc = pd.DataFrame()
return dfc
def combine_trajectories_chunks(grp_list, useTrim=True):
"""workaround for using EMS vs region in filename for spatial model and keep suffix also for 'All'"""
if len(grp_list) == 1:
grp_save_suffix = grp_list[0][:3]
else:
grp_save_suffix = [grp for grp in grp_list[1:]][0][:3]
    if grp_save_suffix == 'EMS': grp_save_suffix = 'region'  # use 'region' rather than 'EMS' in filenames (unlike e.g. the age model)
files = os.listdir(exp_path)
files = [file for file in files if '.csv' in file ]
files = [file for file in files if not grp_save_suffix in file ]
files = [file for file in files if 'trajectories' in file]
files_not_trim = [file for file in files if not 'trim' in file]
files_trim = [file for file in files if 'trim' in file]
if useTrim:
files = files_trim
[os.unlink(os.path.join(exp_path, file)) for file in files_not_trim]
del files_trim, files_not_trim
else:
files = files_not_trim
[os.unlink(os.path.join(exp_path, file)) for file in files_trim]
del files_trim, files_not_trim
for i, grp in enumerate(grp_list):
print(f'Combine trajectories for {grp}')
"""extract grp suffix, might need to be applicable for age model or any other grp"""
grp_suffix = grp[:3]
df_all = pd.DataFrame()
for file in files:
df_f = pd.read_csv(os.path.join(exp_path, file))
df_cols = df_f.columns
outcome_cols = [df_col for df_col in df_cols if grp_suffix in df_col or 'All' in df_col ]
outcomeVars_to_drop = [outcome_col for outcome_col in outcome_cols if not grp in outcome_col]
outcomeVars_to_drop = [outcome_col for outcome_col in outcomeVars_to_drop if not grp.replace(f'{grp_suffix}_',f'{grp_suffix}-') in outcome_col]
df_f = df_f.drop(outcomeVars_to_drop, axis=1)
if df_all.empty:
df_all = df_f
else:
                df_all = df_all.append(df_f)
del df_f
        fname = f"trajectoriesDat_{grp_save_suffix}_{grp.replace(f'{grp_suffix}-', '')}"
if useTrim: fname = f'{fname}_trim'
df_all.to_csv(os.path.join(exp_path, f'{fname}.csv'), index=False, date_format='%Y-%m-%d')
if i ==0:
write_report(nscenarios_processed= len(df_all['scen_num'].unique()))
[os.unlink(os.path.join(exp_path,file)) for file in files]
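# Illustrative note for combine_trajectories_chunks above (assumed group naming):
# for the spatial model the groups are typically region channels such as 'EMS-1' ...
# 'EMS-11' (plus 'All'), so grp_save_suffix becomes 'region' and one combined
# trajectoriesDat file per group is written before the per-chunk files are deleted.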
def write_report(nscenarios_processed):
    trackScen = f'Number of scenarios processed n= {str(nscenarios_processed)} out of total ' \
                f'N= {str(Nscenario)} ({nscenarios_processed / Nscenario * 100:.1f} %)'
file = open(os.path.join(exp_path, "Simulation_report.txt"), 'w')
file.write(trackScen)
file.close()
if __name__ == '__main__':
args = parse_args()
exp_name = args.exp_name
time_start = args.time_start
time_stop = args.time_stop
Location = args.Location
additional_sample_param = args.additional_sample_param
Scenario_save_limit = args.scen_limit
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
sim_out_dir = os.path.join(wdir, "simulation_output")
if not os.path.exists(os.path.join(sim_out_dir,exp_name)):
sim_out_dir = os.path.join(git_dir, "_temp")
print(f'Processing trajectories from {sim_out_dir}')
exp_path = os.path.join(sim_out_dir, exp_name)
trajectories_path = os.path.join(exp_path, 'trajectories')
"""Define model type and grp suffix of parameters and outcome channels"""
traj = [t for t in os.listdir(os.path.join(exp_path,"trajectories")) if ".csv" in t][0]
trajdf = pd.read_csv(os.path.join(exp_path,"trajectories", traj), skiprows=1)
channels = [ch for ch in trajdf['sampletimes'].values if "Ki_t" in ch]
grp_list = [ch.replace("{0}","").replace("Ki_t_","") for ch in channels]
grp_suffix = grp_list[0][:3]
"""Define parameters to keep"""
sample_param_to_keep = ['startdate', 'scen_num', 'sample_num']
if isinstance(additional_sample_param, list): sample_param_to_keep = sample_param_to_keep + additional_sample_param
try:
sampledf = pd.read_csv(os.path.join(exp_path, "sampled_parameters.csv"), usecols= sample_param_to_keep)
except:
"""when running from input csv sample_num might be missing"""
sample_param_to_keep = ['startdate', 'scen_num']
if isinstance(additional_sample_param, list): sample_param_to_keep = sample_param_to_keep + additional_sample_param
sampledf = pd.read_csv(os.path.join(exp_path, "sampled_parameters.csv"), usecols= sample_param_to_keep)
sample_param_to_keep = sample_param_to_keep + ['sample_num']
sampledf['sample_num'] = 0
Nscenario = max(sampledf['scen_num'])
if Nscenario <= Scenario_save_limit:
fname = "trajectoriesDat.csv"
if not os.path.exists(os.path.join(exp_path, fname)):
dfc = combine_trajectories(sampledf=sampledf,
Nscenarios_start=0,
Nscenarios_stop=Nscenario + 1,
fname=fname)
else:
dfc = pd.read_csv(os.path.join(exp_path, fname))
"""Update group names"""
grp_list, grp_suffix,grp_numbers = get_group_names(exp_path=exp_path)
trim_trajectories(df=dfc,
sample_param_to_keep = sample_param_to_keep,
time_start=time_start,
time_stop=time_stop,
grpnames = grp_list ,
fname=fname.split(".csv")[0])
write_report(nscenarios_processed= len(dfc['scen_num'].unique()))
if Nscenario > Scenario_save_limit:
n_subsets = int(Nscenario/Scenario_save_limit)
"""Combine trajectories in specified chunks for n subsets"""
for i in range(1,n_subsets+2):
if i ==1 : Nscenario_stop=Scenario_save_limit
if i > 1 : Nscenario_stop = Nscenario_stop + Scenario_save_limit
print(Nscenario_stop)
Nscenarios_start = Nscenario_stop-Scenario_save_limit
fname = 'trajectoriesDat_'+str(Nscenario_stop)+'.csv'
if not os.path.exists(os.path.join(exp_path, fname)):
dfc = combine_trajectories(sampledf=sampledf,
Nscenarios_start=Nscenarios_start,
Nscenarios_stop=Nscenario_stop,
fname=fname)
else:
dfc = pd.read_csv(os.path.join(exp_path, fname))
"""Trim trajectories"""
if not dfc.empty:
trim_trajectories(df=dfc,
sample_param_to_keep=sample_param_to_keep,
time_start=time_start,
time_stop=time_stop,
grpnames=grp_list,
fname=fname.split(".csv")[0])
del dfc
else:
print(f'WARNING: No trajectories found for scenarios {Nscenarios_start} to {Nscenario_stop}')
continue
"""Combine trajectory scenario batches per grp,
if grpList not specified default to spatial (EMS) model,
deletes the trajectory chunks when done"""
combine_trajectories_chunks(grp_list= grp_list)
if args.delete_trajectories:
"""THIS WILL DELETE ALL SINGLE TRAJECTORIES!"""
shutil.rmtree(trajectories_path, ignore_errors=True)
print(f'Single trajectories deleted')
""" Start parallel rt estimation per trajectory """
# FIXME permission denied
#if Location == "NUCLUSTER" :
# exp_dir = os.path.join(sim_out_dir, exp_name)
# p = os.path.join(exp_dir, 'submit_runRtEstimation_trajectories.sh')
# subprocess.call([p]) | """
Combine, reformat and trim single simulation trajectories.
Output: trajectoriesDat including all outcome channels, and trajectoriesDat_trim including key channels only.
If the number of trajectories exceeds a specified limit, the combined trajectories are written out in multiple chunks.
"""
import argparse
import subprocess
import pandas as pd
import os
import shutil
import sys
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-exp",
"--exp_name",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default = "Local"
)
parser.add_argument(
"--time_start",
type=int,
help="Lower limit of time steps to keep",
default=1
)
parser.add_argument(
"--time_stop",
type=int,
help="Upper limit of time steps to keep",
default=1000
)
parser.add_argument(
"-limit",
"--scen_limit",
type=int,
help="Number of simulations to combine",
default = 700
)
parser.add_argument(
"--additional_sample_param",
type=str,
nargs='+',
help="""Name of additional sample parameters to keep, reduced to minimum to reduce file size
format: --additional_sample_param time_to_infectious time_to_death (no quotes)
        Note: sample parameters can always be added later from sampled_parameters.csv if required in the postprocessing""",
default = ''
)
parser.add_argument(
"--delete_trajectories",
action='store_true',
help="If specified, single trajectories will be deleted after postprocessing.",
)
return parser.parse_args()
def reprocess(input_fname='trajectories.csv'):
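    """Reshape a single raw trajectories file into long format.

    In the raw CSV every (channel, run) pair is stored as its own series with a '{run_num}'
    suffix; the returned dataframe has one row per time step and run, plain channel names as
    columns, and an added 'run_num' column.
    """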
fname = os.path.join(git_dir, input_fname)
row_df = pd.read_csv(fname, skiprows=1)
df = row_df.set_index('sampletimes').transpose()
run_time = len([x for x in df.columns.values if '{0}' in x])
num_runs = int((len(row_df)) / run_time)
df = df.reset_index(drop=False)
df = df.rename(columns={'index': 'time'})
df['time'] = df['time'].astype(float)
adf = pd.DataFrame()
for run_num in range(num_runs):
channels = [x for x in df.columns.values if '{%d}' % run_num in x]
sdf = df[['time'] + channels]
sdf = sdf.rename(columns={
x: x.split('{')[0] for x in channels
})
sdf['run_num'] = run_num
adf = pd.concat([adf, sdf])
adf = adf.reset_index()
del adf['index']
return adf
def trim_trajectories(df, fname,sample_param_to_keep, time_start=1, time_stop=1000,
time_varying_params=None, grpnames=None):
"""Generate a subset of the trajectoriesDat dataframe
The new csv file is saved under trajectoriesDat_trim.csv, no dataframe is returned
"""
channels = ['susceptible', 'infected', 'recovered', 'infected_cumul', 'detected_cumul',
'asymp_cumul', 'asymp', 'asymp_det_cumul',
'symp_mild_cumul', 'symp_mild', 'symp_mild_det_cumul',
'symp_severe_cumul','symp_severe', 'symp_severe_det_cumul',
'hosp_det_cumul', 'hosp_cumul', 'hosp_det', 'hospitalized',
'crit_cumul','crit_det_cumul', 'crit_det', 'critical',
'deaths_det_cumul', 'deaths']
if time_varying_params == None:
time_varying_params = ['Ki_t']
column_list = ['time', 'run_num'] + sample_param_to_keep
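    # For grouped models, append one '<channel>_<grp>' column per outcome channel and, for
    # groups other than 'All' and age groups, the time-varying parameters (e.g. Ki_t) as well.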
if grpnames is not None:
if len(grpnames) >1:
for grp in grpnames:
grp_ch = str(grp.replace('_', '-'))
[column_list.append(f'{channel}_{str(grp_ch)}') for channel in channels]
if grp_ch !="All" and not 'age' in grp_ch :
[column_list.append(f'{time_varying_param}_{str(grp_ch)}') for time_varying_param in time_varying_params]
del grp, grp_ch
column_list = column_list + ['N_All']
else:
grp = grpnames[0]
grp_ch = str(grp.replace('_', '-'))
[column_list.append(f'{channel}_{str(grp_ch)}') for channel in channels]
if not 'age' in grp_ch :
[column_list.append(f'{time_varying_param}_{str(grp_ch)}') for time_varying_param in time_varying_params]
else:
column_list = column_list + channels + time_varying_params
"""Trim df and save"""
df = df[column_list]
df = df[df['time'] > time_start]
df = df[df['time'] < time_stop]
df.to_csv(os.path.join(exp_path, fname + '_trim.csv'), index=False, date_format='%Y-%m-%d')
def combine_trajectories(sampledf, Nscenarios_start=0, Nscenarios_stop=1000, fname='trajectoriesDat.csv',SAVE=True):
df_list = []
n_errors = 0
for scen_i in range(Nscenarios_start, Nscenarios_stop):
input_name = "trajectories_scen" + str(scen_i) + ".csv"
try:
df_i = reprocess(os.path.join(trajectories_path, input_name))
df_i['scen_num'] = scen_i
df_i = df_i.merge(sampledf, on=['scen_num'])
df_list.append(df_i)
        except Exception:  # reading or merging this scenario failed; count it and move on
n_errors += 1
continue
print("Number of errors:" + str(n_errors))
try:
dfc = pd.concat(df_list)
dfc = dfc.dropna()
if SAVE:
dfc.to_csv(os.path.join(exp_path, fname), index=False, date_format='%Y-%m-%d')
except ValueError:
print('WARNING: No objects to concatenate - either no trajectories or n_scen_limit size is too small')
dfc = pd.DataFrame()
return dfc
def combine_trajectories_chunks(grp_list, useTrim=True):
"""workaround for using EMS vs region in filename for spatial model and keep suffix also for 'All'"""
if len(grp_list) == 1:
grp_save_suffix = grp_list[0][:3]
else:
grp_save_suffix = [grp for grp in grp_list[1:]][0][:3]
if grp_save_suffix == 'EMS': grp_save_suffix = 'region' # opposite to i.e. age model
files = os.listdir(exp_path)
files = [file for file in files if '.csv' in file ]
files = [file for file in files if not grp_save_suffix in file ]
files = [file for file in files if 'trajectories' in file]
files_not_trim = [file for file in files if not 'trim' in file]
files_trim = [file for file in files if 'trim' in file]
if useTrim:
files = files_trim
[os.unlink(os.path.join(exp_path, file)) for file in files_not_trim]
del files_trim, files_not_trim
else:
files = files_not_trim
[os.unlink(os.path.join(exp_path, file)) for file in files_trim]
del files_trim, files_not_trim
for i, grp in enumerate(grp_list):
print(f'Combine trajectories for {grp}')
"""extract grp suffix, might need to be applicable for age model or any other grp"""
grp_suffix = grp[:3]
df_all = pd.DataFrame()
for file in files:
df_f = pd.read_csv(os.path.join(exp_path, file))
df_cols = df_f.columns
outcome_cols = [df_col for df_col in df_cols if grp_suffix in df_col or 'All' in df_col ]
outcomeVars_to_drop = [outcome_col for outcome_col in outcome_cols if not grp in outcome_col]
outcomeVars_to_drop = [outcome_col for outcome_col in outcomeVars_to_drop if not grp.replace(f'{grp_suffix}_',f'{grp_suffix}-') in outcome_col]
df_f = df_f.drop(outcomeVars_to_drop, axis=1)
if df_all.empty:
df_all = df_f
else:
                df_all = pd.concat([df_all, df_f])
del df_f
fname = f'trajectoriesDat_{grp_save_suffix}_{grp.replace(f"{grp_suffix}-","")}'
if useTrim: fname = f'{fname}_trim'
df_all.to_csv(os.path.join(exp_path, f'{fname}.csv'), index=False, date_format='%Y-%m-%d')
if i ==0:
write_report(nscenarios_processed= len(df_all['scen_num'].unique()))
[os.unlink(os.path.join(exp_path,file)) for file in files]
def write_report(nscenarios_processed):
    trackScen = f'Number of scenarios processed n= {nscenarios_processed} out of total ' \
                f'N= {Nscenario} ({nscenarios_processed / Nscenario * 100:.1f} %)'
    with open(os.path.join(exp_path, "Simulation_report.txt"), 'w') as report_file:
        report_file.write(trackScen)
if __name__ == '__main__':
args = parse_args()
exp_name = args.exp_name
time_start = args.time_start
time_stop = args.time_stop
Location = args.Location
additional_sample_param = args.additional_sample_param
Scenario_save_limit = args.scen_limit
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
sim_out_dir = os.path.join(wdir, "simulation_output")
if not os.path.exists(os.path.join(sim_out_dir,exp_name)):
sim_out_dir = os.path.join(git_dir, "_temp")
print(f'Processing trajectories from {sim_out_dir}')
exp_path = os.path.join(sim_out_dir, exp_name)
trajectories_path = os.path.join(exp_path, 'trajectories')
"""Define model type and grp suffix of parameters and outcome channels"""
traj = [t for t in os.listdir(os.path.join(exp_path,"trajectories")) if ".csv" in t][0]
trajdf = pd.read_csv(os.path.join(exp_path,"trajectories", traj), skiprows=1)
channels = [ch for ch in trajdf['sampletimes'].values if "Ki_t" in ch]
grp_list = [ch.replace("{0}","").replace("Ki_t_","") for ch in channels]
grp_suffix = grp_list[0][:3]
"""Define parameters to keep"""
sample_param_to_keep = ['startdate', 'scen_num', 'sample_num']
if isinstance(additional_sample_param, list): sample_param_to_keep = sample_param_to_keep + additional_sample_param
try:
sampledf = pd.read_csv(os.path.join(exp_path, "sampled_parameters.csv"), usecols= sample_param_to_keep)
    except ValueError:
"""when running from input csv sample_num might be missing"""
sample_param_to_keep = ['startdate', 'scen_num']
if isinstance(additional_sample_param, list): sample_param_to_keep = sample_param_to_keep + additional_sample_param
sampledf = pd.read_csv(os.path.join(exp_path, "sampled_parameters.csv"), usecols= sample_param_to_keep)
sample_param_to_keep = sample_param_to_keep + ['sample_num']
sampledf['sample_num'] = 0
Nscenario = max(sampledf['scen_num'])
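    # Small experiments are combined into a single trajectoriesDat.csv; larger ones are processed
    # in chunks of Scenario_save_limit scenarios and re-combined per group afterwards.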
if Nscenario <= Scenario_save_limit:
fname = "trajectoriesDat.csv"
if not os.path.exists(os.path.join(exp_path, fname)):
dfc = combine_trajectories(sampledf=sampledf,
Nscenarios_start=0,
Nscenarios_stop=Nscenario + 1,
fname=fname)
else:
dfc = pd.read_csv(os.path.join(exp_path, fname))
"""Update group names"""
grp_list, grp_suffix,grp_numbers = get_group_names(exp_path=exp_path)
trim_trajectories(df=dfc,
sample_param_to_keep = sample_param_to_keep,
time_start=time_start,
time_stop=time_stop,
grpnames = grp_list ,
fname=fname.split(".csv")[0])
write_report(nscenarios_processed= len(dfc['scen_num'].unique()))
if Nscenario > Scenario_save_limit:
n_subsets = int(Nscenario/Scenario_save_limit)
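        # range(1, n_subsets + 2) gives one extra iteration so the remainder beyond the last full
        # chunk of Scenario_save_limit scenarios is processed as well.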
"""Combine trajectories in specified chunks for n subsets"""
for i in range(1,n_subsets+2):
if i ==1 : Nscenario_stop=Scenario_save_limit
if i > 1 : Nscenario_stop = Nscenario_stop + Scenario_save_limit
print(Nscenario_stop)
Nscenarios_start = Nscenario_stop-Scenario_save_limit
fname = 'trajectoriesDat_'+str(Nscenario_stop)+'.csv'
if not os.path.exists(os.path.join(exp_path, fname)):
dfc = combine_trajectories(sampledf=sampledf,
Nscenarios_start=Nscenarios_start,
Nscenarios_stop=Nscenario_stop,
fname=fname)
else:
dfc = pd.read_csv(os.path.join(exp_path, fname))
"""Trim trajectories"""
if not dfc.empty:
trim_trajectories(df=dfc,
sample_param_to_keep=sample_param_to_keep,
time_start=time_start,
time_stop=time_stop,
grpnames=grp_list,
fname=fname.split(".csv")[0])
del dfc
else:
print(f'WARNING: No trajectories found for scenarios {Nscenarios_start} to {Nscenario_stop}')
continue
"""Combine trajectory scenario batches per grp,
if grpList not specified default to spatial (EMS) model,
deletes the trajectory chunks when done"""
combine_trajectories_chunks(grp_list= grp_list)
if args.delete_trajectories:
"""THIS WILL DELETE ALL SINGLE TRAJECTORIES!"""
shutil.rmtree(trajectories_path, ignore_errors=True)
print(f'Single trajectories deleted')
""" Start parallel rt estimation per trajectory """
# FIXME permission denied
#if Location == "NUCLUSTER" :
# exp_dir = os.path.join(sim_out_dir, exp_name)
# p = os.path.join(exp_dir, 'submit_runRtEstimation_trajectories.sh')
# subprocess.call([p]) |
import errno
import glob
import json
import os
from queue import Queue
import shlex
import shutil
import stat
import subprocess
import threading
import time
from typing import Callable
import webbrowser
import bpy
import arm.assets as assets
from arm.exporter import ArmoryExporter
import arm.lib.make_datas
import arm.lib.server
import arm.live_patch as live_patch
import arm.log as log
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_state as state
import arm.make_world as make_world
import arm.utils
import arm.write_data as write_data
if arm.is_reload(__name__):
assets = arm.reload_module(assets)
arm.exporter = arm.reload_module(arm.exporter)
from arm.exporter import ArmoryExporter
arm.lib.make_datas = arm.reload_module(arm.lib.make_datas)
arm.lib.server = arm.reload_module(arm.lib.server)
live_patch = arm.reload_module(live_patch)
log = arm.reload_module(log)
make_logic = arm.reload_module(make_logic)
make_renderpath = arm.reload_module(make_renderpath)
state = arm.reload_module(state)
make_world = arm.reload_module(make_world)
arm.utils = arm.reload_module(arm.utils)
write_data = arm.reload_module(write_data)
else:
arm.enable_reload(__name__)
scripts_mtime = 0 # Monitor source changes
profile_time = 0
# Queue of threads and their done callbacks. Item format: [thread, done]
thread_callback_queue = Queue(maxsize=0)
def run_proc(cmd, done: Callable) -> subprocess.Popen:
"""Creates a subprocess with the given command and returns it.
If Blender is not running in background mode, a thread is spawned
that waits until the subprocess has finished executing to not freeze
the UI, otherwise (in background mode) execution is blocked until
the subprocess has finished.
If `done` is not `None`, it is called afterwards in the main thread.
"""
use_thread = not bpy.app.background
def wait_for_proc(proc: subprocess.Popen):
proc.wait()
if use_thread:
# Put the done callback into the callback queue so that it
# can be received by a polling function in the main thread
thread_callback_queue.put([threading.current_thread(), done], block=True)
else:
            if done is not None:
                done()
p = subprocess.Popen(cmd)
if use_thread:
threading.Thread(target=wait_for_proc, args=(p,)).start()
else:
wait_for_proc(p)
return p
def compile_shader_pass(res, raw_shaders_path, shader_name, defs, make_variants):
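    """Compile a single raw shader pass: load '<shader_name>.json', let arm.lib.make_datas
    generate its shader data entries into `res`, and copy the referenced GLSL stage sources
    into the project's compiled/Shaders folder."""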
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs, make_variants)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
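    """`shutil.rmtree` onerror handler: clear the read-only flag and retry the failed operation."""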
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path):
wrd = bpy.data.worlds['Arm']
print('Armory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
if wrd.arm_verbose_output:
print(f'Blender: {bpy.app.version_string}, Target: {state.target}, GAPI: {arm.utils.get_gapi()}')
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if not wrd.arm_cache_build:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
raw_shaders_path = sdk_path + '/armory/Shaders/'
assets_path = sdk_path + '/armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
if not os.path.exists(build_dir + '/compiled/Assets'):
os.makedirs(build_dir + '/compiled/Assets')
    # Temporarily link mesh/empty objects from all other scenes into a "zoo" collection in the
    # current scene so the evaluated depsgraph below covers them; the collection is removed
    # again right after the depsgraph is fetched.
export_coll = bpy.data.collections.new("export_coll")
bpy.context.scene.collection.children.link(export_coll)
for scene in bpy.data.scenes:
if scene == bpy.context.scene: continue
for o in scene.collection.all_objects:
if o.type == "MESH" or o.type == "EMPTY":
if o.name not in export_coll.all_objects.keys():
export_coll.objects.link(o)
depsgraph = bpy.context.evaluated_depsgraph_get()
bpy.data.collections.remove(export_coll) # destroy "zoo" collection
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.lz4' if ArmoryExporter.compress_enabled else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
ArmoryExporter.export_scene(bpy.context, asset_path, scene=scene, depsgraph=depsgraph)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
if physics_found == False: # Disable physics if no rigid body is exported
export_physics = False
if navigation_found == False:
export_navigation = False
if ui_found == False:
export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if wrd.arm_audio == 'Enabled':
modules.append('audio')
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
if wrd.arm_verbose_output:
print('Exported modules:', ', '.join(modules))
print('Shader flags:', ' '.join(defs))
print('Compositor flags:', ' '.join(cdefs))
print('Khafile flags:', ' '.join(assets.khafile_defs))
# Render path is configurable at runtime
has_config = wrd.arm_write_config or os.path.exists(arm.utils.get_fp() + '/Bundled/config.arm')
# Write compiled.inc
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs, make_variants=has_config)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {'shader_datas': []}
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs, make_variants=has_config)
# Workaround to also export non-material world shaders
res['shader_datas'] += make_world.shader_datas
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write config.arm
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
if wrd.arm_write_config:
write_data.write_config(resx, resy)
# Change project version (Build, Publish)
if (not state.is_play) and (wrd.arm_project_version_autoinc):
        wrd.arm_project_version = arm.utils.change_version_project(wrd.arm_project_version)
# Write khafile.js
write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, ArmoryExporter.import_traits)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile(assets_only=False):
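    """Assemble the khamake build command for the current target and launch it via run_proc,
    optionally starting the Haxe compilation server for incremental krom builds."""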
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
target_name = state.target
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path]
kha_target_name = arm.utils.get_kha_target(target_name)
if kha_target_name != '':
cmd.append(kha_target_name)
# Custom exporter
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
for s in item.arm_project_khamake.split(' '):
cmd.append(s)
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
state.export_gapi = arm.utils.get_gapi()
cmd.append('-g')
cmd.append(state.export_gapi)
# Windows - Set Visual Studio Version
if state.target.startswith('windows'):
cmd.append('-visualstudio')
vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
cmd.append(vs_id)
if arm.utils.get_legacy_shaders() or 'ios' in state.target:
if 'html5' in state.target or 'ios' in state.target:
pass
else:
cmd.append('--shaderversion')
cmd.append('110')
elif 'android' in state.target or 'html5' in state.target:
cmd.append('--shaderversion')
cmd.append('300')
else:
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
if arm.utils.get_pref_or_default('khamake_debug', False):
cmd.append('--debug')
if arm.utils.get_rp().rp_renderer == 'Raytracer':
cmd.append('--raytrace')
cmd.append('dxr')
dxc_path = fp + '/HlslShaders/dxc.exe'
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/raytrace.cso', '-T', 'lib_6_3', fp + '/HlslShaders/raytrace.hlsl']).wait()
if arm.utils.get_khamake_threads() > 1:
cmd.append('--parallelAssetConversion')
cmd.append(str(arm.utils.get_khamake_threads()))
compilation_server = False
cmd.append('--to')
if (kha_target_name == 'krom' and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
# Start compilation server
if kha_target_name == 'krom' and arm.utils.get_compilation_server() and not assets_only and wrd.arm_cache_build:
compilation_server = True
arm.lib.server.run_haxe(arm.utils.get_haxe_path())
else:
cmd.append(arm.utils.build_dir())
if not wrd.arm_verbose_output:
cmd.append("--quiet")
else:
print("Using project from " + arm.utils.get_fp())
print("Running: ", *cmd)
#Project needs to be compiled at least once
#before compilation server can work
if not os.path.exists(arm.utils.build_dir() + '/debug/krom/krom.js') and not state.is_publish:
state.proc_build = run_proc(cmd, build_done)
else:
if assets_only or compilation_server:
cmd.append('--nohaxe')
cmd.append('--noproject')
state.proc_build = run_proc(cmd, assets_done if compilation_server else build_done)
def build(target, is_play=False, is_publish=False, is_export=False):
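    """Export the project data for the given target and prepare the build state; the actual
    khamake compilation is started separately (see compile() and play())."""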
global profile_time
profile_time = time.time()
state.target = target
state.is_play = is_play
state.is_publish = is_publish
state.is_export = is_export
# Save blend
if arm.utils.get_save_on_build():
bpy.ops.wm.save_mainfile()
log.clear(clear_warnings=True, clear_errors=True)
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
if active_scene.camera == None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
wrd = bpy.data.worlds['Arm']
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
if text.filepath == '' and text.name[-3:] == '.hx':
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
"""Called if the player was stopped/terminated."""
state.proc_play = None
state.redraw_ui = True
log.clear()
live_patch.stop()
def assets_done():
if state.proc_build == None:
return
result = state.proc_build.poll()
if result == 0:
# Connect to the compilation server
os.chdir(arm.utils.build_dir() + '/debug/')
cmd = [arm.utils.get_haxe_path(), '--connect', '6000', 'project-krom.hxml']
state.proc_build = run_proc(cmd, compilation_server_done)
else:
state.proc_build = None
state.redraw_ui = True
log.error('Build failed, check console')
def compilation_server_done():
if state.proc_build == None:
return
result = state.proc_build.poll()
if result == 0:
if os.path.exists('krom/krom.js.temp'):
os.chmod('krom/krom.js', stat.S_IWRITE)
os.remove('krom/krom.js')
os.rename('krom/krom.js.temp', 'krom/krom.js')
build_done()
else:
state.proc_build = None
state.redraw_ui = True
log.error('Build failed, check console')
def build_done():
print('Finished in {:0.3f}s'.format(time.time() - profile_time))
if log.num_warnings > 0:
        log.print_warn(f'{log.num_warnings} warning{"s" if log.num_warnings > 1 else ""} occurred during compilation')
if state.proc_build is None:
return
result = state.proc_build.poll()
state.proc_build = None
state.redraw_ui = True
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
build_success()
else:
log.error('Build failed, check console')
def runtime_to_target():
wrd = bpy.data.worlds['Arm']
if wrd.arm_runtime == 'Krom':
return 'krom'
else:
return 'html5'
def get_khajs_path(target):
if target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play():
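    """Build the project for the configured runtime (Krom or browser), detect modified trait
    scripts, and trigger compilation (assets only when nothing needs recompiling)."""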
global scripts_mtime
wrd = bpy.data.worlds['Arm']
build(target=runtime_to_target(), is_play=True)
khajs_path = get_khajs_path(state.target)
if not wrd.arm_cache_build or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target:
wrd.arm_recompile = True
state.last_target = state.target
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
compile(assets_only=(not wrd.arm_recompile))
def build_success():
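    """Handle a successful build: launch the player for play builds, or post-process the
    published package (minify, copy runtimes, open folders, start follow-up builds)."""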
log.clear()
wrd = bpy.data.worlds['Arm']
if state.is_play:
if wrd.arm_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run_tcp)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_runtime == 'Krom':
if wrd.arm_live_patch:
live_patch.start()
open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w').close()
krom_location, krom_path = arm.utils.krom_paths()
os.chdir(krom_location)
cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
if arm.utils.get_os() == 'win':
cmd.append('--consolepid')
cmd.append(str(os.getpid()))
if wrd.arm_audio == 'Disabled':
cmd.append('--nosound')
if wrd.arm_verbose_output:
print("Running: ", *cmd)
state.proc_play = run_proc(cmd, play_done)
elif state.is_publish:
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = os.path.join(arm.utils.get_fp_build(), target_name)
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Copy Krom binaries
if state.target == 'krom-windows':
gapi = state.export_gapi
ext = '' if gapi == 'direct3d11' else '_' + gapi
krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
shutil.copy(krom_location, files_path + '/Krom.exe')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.exe'
os.rename(files_path + '/Krom.exe', files_path + '/' + krom_exe)
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/Krom'
shutil.copy(krom_location, files_path)
krom_exe = arm.utils.safestr(wrd.arm_project_name)
os.rename(files_path + '/Krom', files_path + '/' + krom_exe)
krom_exe = './' + krom_exe
else:
krom_location = sdk_path + '/Krom/Krom.app'
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.app'
os.rename(files_path + '/Krom.app', files_path + '/' + krom_exe)
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
project_path = files_path
print('Exported HTML5 package to ' + project_path)
elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
project_path = files_path + '-build'
print('Exported XCode project to ' + project_path)
elif target_name.startswith('windows'):
project_path = files_path + '-build'
vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
print('Exported '+ vs_name +' project to ' + project_path)
elif target_name.startswith('android'):
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
project_path = os.path.join(files_path + '-build', project_name)
print('Exported Android Studio project to ' + project_path)
elif target_name.startswith('krom'):
project_path = files_path
print('Exported Krom package to ' + project_path)
else:
project_path = files_path + '-build'
print('Exported makefiles to ' + project_path)
if not bpy.app.background and arm.utils.get_arm_preferences().open_build_directory:
arm.utils.open_folder(project_path)
# Android build APK
if target_name.startswith('android'):
if (arm.utils.get_project_android_build_apk()) and (len(arm.utils.get_android_sdk_root_path()) > 0):
print("\nBuilding APK")
# Check settings
path_sdk = arm.utils.get_android_sdk_root_path()
if len(path_sdk) > 0:
# Check Environment Variables - ANDROID_SDK_ROOT
if os.getenv('ANDROID_SDK_ROOT') == None:
# Set value from settings
os.environ['ANDROID_SDK_ROOT'] = path_sdk
else:
project_path = ''
# Build start
if len(project_path) > 0:
os.chdir(project_path) # set work folder
if arm.utils.get_os_is_windows():
state.proc_publish_build = run_proc(os.path.join(project_path, "gradlew.bat assembleDebug"), done_gradlew_build)
else:
cmd = shlex.split(os.path.join(project_path, "gradlew assembleDebug"))
state.proc_publish_build = run_proc(cmd, done_gradlew_build)
else:
print('\nBuilding APK Warning: ANDROID_SDK_ROOT is not specified in environment variables and "Android SDK Path" setting is not specified in preferences: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path" in the preferences, then repeat operation "Publish"')
# HTML5 After Publish
if target_name.startswith('html5'):
if len(arm.utils.get_html5_copy_path()) > 0 and (wrd.arm_project_html5_copy):
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
dst = os.path.join(arm.utils.get_html5_copy_path(), project_name)
if os.path.exists(dst):
shutil.rmtree(dst)
try:
shutil.copytree(project_path, dst)
print("Copied files to " + dst)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(project_path, dst)
else: raise
if len(arm.utils.get_link_web_server()) and (wrd.arm_project_html5_start_browser):
link_html5_app = arm.utils.get_link_web_server() +'/'+ project_name
print("Running a browser with a link " + link_html5_app)
webbrowser.open(link_html5_app)
# Windows After Publish
if target_name.startswith('windows'):
list_vs = []
err = ''
# Print message
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
if int(wrd.arm_project_win_build) == 1:
                print('\nOpen in Visual Studio ' + os.path.join(project_path, project_name + '.sln'))
if int(wrd.arm_project_win_build) == 2:
print('\nCompile project ' + os.path.join(project_path, project_name + '.vcxproj'))
if int(wrd.arm_project_win_build) == 3:
print('\nCompile and run project ' + os.path.join(project_path, project_name + '.vcxproj'))
if int(wrd.arm_project_win_build) > 0:
# Check Visual Studio
list_vs, err = arm.utils.get_list_installed_vs(True, True, True)
if len(err) > 0:
print(err)
return
if len(list_vs) == 0:
print('No Visual Studio found')
return
is_check = False
for vs in list_vs:
if vs[0] == wrd.arm_project_win_list_vs:
is_check = True
break
if not is_check:
vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
print(vs_name + ' not found.')
print('The following are installed on the PC:')
for vs in list_vs:
print('- ' + vs[1] + ' (version ' + vs[3] +')')
return
# Current VS
vs_path = ''
for vs in list_vs:
if vs[0] == wrd.arm_project_win_list_vs:
vs_path = vs[2]
break
# Open in Visual Studio
if int(wrd.arm_project_win_build) == 1:
cmd = os.path.join('start "' + vs_path, 'Common7', 'IDE', 'devenv.exe" "' + os.path.join(project_path, project_name + '.sln"'))
subprocess.Popen(cmd, shell=True)
# Compile
if int(wrd.arm_project_win_build) > 1:
bits = '64' if wrd.arm_project_win_build_arch == 'x64' else '32'
# vcvars
cmd = os.path.join(vs_path, 'VC', 'Auxiliary', 'Build', 'vcvars' + bits + '.bat')
if not os.path.isfile(cmd):
print('File "'+ cmd +'" not found. Verify ' + vs_name + ' was installed correctly')
log.error('Compile failed, check console')
return
state.proc_publish_build = run_proc(cmd, done_vs_vars)
def done_gradlew_build():
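    """Callback for 'gradlew assembleDebug': rename/copy the generated APK, optionally open the
    output folders and start the configured Android emulator."""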
if state.proc_publish_build == None:
return
result = state.proc_publish_build.poll()
if result == 0:
state.proc_publish_build = None
wrd = bpy.data.worlds['Arm']
path_apk = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target))
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
path_apk = os.path.join(path_apk + '-build', project_name, 'app', 'build', 'outputs', 'apk', 'debug')
print("\nBuild APK to " + path_apk)
# Rename APK
apk_name = 'app-debug.apk'
file_name = os.path.join(path_apk, apk_name)
if wrd.arm_project_android_rename_apk:
apk_name = project_name + '.apk'
os.rename(file_name, os.path.join(path_apk, apk_name))
file_name = os.path.join(path_apk, apk_name)
print("\nRename APK to " + apk_name)
# Copy APK
if wrd.arm_project_android_copy_apk:
shutil.copyfile(file_name, os.path.join(arm.utils.get_android_apk_copy_path(), apk_name))
print("Copy APK to " + arm.utils.get_android_apk_copy_path())
# Open directory with APK
if arm.utils.get_android_open_build_apk_directory():
arm.utils.open_folder(path_apk)
# Open directory after copy APK
if arm.utils.get_android_apk_copy_open_directory():
arm.utils.open_folder(arm.utils.get_android_apk_copy_path())
# Running emulator
if wrd.arm_project_android_run_avd:
run_android_emulators(arm.utils.get_android_emulator_name())
state.redraw_ui = True
else:
state.proc_publish_build = None
state.redraw_ui = True
os.environ['ANDROID_SDK_ROOT'] = ''
log.error('Building the APK failed, check console')
def run_android_emulators(avd_name):
if len(avd_name.strip()) == 0:
return
print('\nRunning Emulator "'+ avd_name +'"')
path_file = arm.utils.get_android_emulator_file()
if len(path_file) > 0:
if arm.utils.get_os_is_windows():
run_proc(path_file + " -avd "+ avd_name, None)
else:
cmd = shlex.split(path_file + " -avd "+ avd_name)
run_proc(cmd, None)
else:
print('Update List Emulators Warning: File "'+ path_file +'" not found. Check that the variable ANDROID_SDK_ROOT is correct in environment variables or in "Android SDK Path" setting: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path", then repeat operation "Publish"')
def done_vs_vars():
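    """Callback after vcvars: locate MSBuild for the selected Visual Studio installation and
    start compiling the generated .vcxproj."""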
if state.proc_publish_build == None:
return
result = state.proc_publish_build.poll()
if result == 0:
state.proc_publish_build = None
# MSBuild
wrd = bpy.data.worlds['Arm']
list_vs, err = arm.utils.get_list_installed_vs(True, True, True)
# Current VS
vs_path = ''
vs_name = ''
for vs in list_vs:
if vs[0] == wrd.arm_project_win_list_vs:
vs_name = vs[1]
vs_path = vs[2]
break
msbuild = os.path.join(vs_path, 'MSBuild', 'Current', 'Bin', 'MSBuild.exe')
if not os.path.isfile(msbuild):
print('File "'+ msbuild +'" not found. Verify ' + vs_name + ' was installed correctly')
log.error('Compile failed, check console')
state.redraw_ui = True
return
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
project_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target)) + '-build'
cmd = '"' + msbuild + '" "' + os.path.join(project_path, project_name + '.vcxproj"')
# Arguments
platform = 'x64' if wrd.arm_project_win_build_arch == 'x64' else 'win32'
log_param = wrd.arm_project_win_build_log
if log_param == 'WarningsAndErrorsOnly':
log_param = 'WarningsOnly;ErrorsOnly'
cmd = cmd + ' -m:' + str(wrd.arm_project_win_build_cpu) + ' -clp:'+ log_param +' /p:Configuration='+ wrd.arm_project_win_build_mode +' /p:Platform=' + platform
        print('\nCompiling the project ' + os.path.join(project_path, project_name + '.vcxproj'))
state.proc_publish_build = run_proc(cmd, done_vs_build)
state.redraw_ui = True
else:
state.proc_publish_build = None
state.redraw_ui = True
log.error('\nCompile failed, check console')
def done_vs_build():
if state.proc_publish_build == None:
return
result = state.proc_publish_build.poll()
if result == 0:
state.proc_publish_build = None
wrd = bpy.data.worlds['Arm']
project_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target)) + '-build'
if wrd.arm_project_win_build_arch == 'x64':
path = os.path.join(project_path, 'x64', wrd.arm_project_win_build_mode)
else:
path = os.path.join(project_path, wrd.arm_project_win_build_mode)
print('\nCompilation completed in ' + path)
# Run
if int(wrd.arm_project_win_build) == 3:
# Copying the executable file
res_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target))
file_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version) + '.exe'
print('\nCopy the executable file from ' + path + ' to ' + res_path)
shutil.copyfile(os.path.join(path, file_name), os.path.join(res_path, file_name))
path = res_path
# Run project
cmd = os.path.join('"' + res_path, file_name + '"')
print('Run the executable file to ' + cmd)
os.chdir(res_path) # set work folder
subprocess.Popen(cmd, shell=True)
# Open Build Directory
if wrd.arm_project_win_build_open:
arm.utils.open_folder(path)
state.redraw_ui = True
else:
state.proc_publish_build = None
state.redraw_ui = True
log.error('Compile failed, check console')
def clean():
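    """Remove build output, generated sources/khafile and cached material signatures for the
    current project, and stop the Haxe compilation server if one is configured."""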
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
try:
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
except:
print('Armory Warning: Some files in the build folder are locked')
# Remove compiled nodes
pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
nodes_path = 'Sources/' + pkg_dir + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# Remove Sources/ dir if empty
if os.path.exists('Sources/' + pkg_dir) and os.listdir('Sources/' + pkg_dir) == []:
shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
if os.path.exists('Sources') and os.listdir('Sources') == []:
shutil.rmtree('Sources/', onerror=remove_readonly)
# Remove Shape key Textures
if os.path.exists('MorphTargets/'):
shutil.rmtree('MorphTargets/', onerror=remove_readonly)
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.arm_cached = False
# Restart compilation server
if arm.utils.get_compilation_server():
arm.lib.server.kill_haxe()
print('Project cleaned')
| import errno
import glob
import json
import os
from queue import Queue
import shlex
import shutil
import stat
import subprocess
import threading
import time
from typing import Callable
import webbrowser
import bpy
import arm.assets as assets
from arm.exporter import ArmoryExporter
import arm.lib.make_datas
import arm.lib.server
import arm.live_patch as live_patch
import arm.log as log
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_state as state
import arm.make_world as make_world
import arm.utils
import arm.write_data as write_data
if arm.is_reload(__name__):
assets = arm.reload_module(assets)
arm.exporter = arm.reload_module(arm.exporter)
from arm.exporter import ArmoryExporter
arm.lib.make_datas = arm.reload_module(arm.lib.make_datas)
arm.lib.server = arm.reload_module(arm.lib.server)
live_patch = arm.reload_module(live_patch)
log = arm.reload_module(log)
make_logic = arm.reload_module(make_logic)
make_renderpath = arm.reload_module(make_renderpath)
state = arm.reload_module(state)
make_world = arm.reload_module(make_world)
arm.utils = arm.reload_module(arm.utils)
write_data = arm.reload_module(write_data)
else:
arm.enable_reload(__name__)
scripts_mtime = 0 # Monitor source changes
profile_time = 0
# Queue of threads and their done callbacks. Item format: [thread, done]
thread_callback_queue = Queue(maxsize=0)
def run_proc(cmd, done: Callable) -> subprocess.Popen:
"""Creates a subprocess with the given command and returns it.
If Blender is not running in background mode, a thread is spawned
that waits until the subprocess has finished executing to not freeze
the UI, otherwise (in background mode) execution is blocked until
the subprocess has finished.
If `done` is not `None`, it is called afterwards in the main thread.
"""
use_thread = not bpy.app.background
def wait_for_proc(proc: subprocess.Popen):
proc.wait()
if use_thread:
# Put the done callback into the callback queue so that it
# can be received by a polling function in the main thread
thread_callback_queue.put([threading.current_thread(), done], block=True)
else:
            if done is not None:
                done()
p = subprocess.Popen(cmd)
if use_thread:
threading.Thread(target=wait_for_proc, args=(p,)).start()
else:
wait_for_proc(p)
return p
def compile_shader_pass(res, raw_shaders_path, shader_name, defs, make_variants):
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs, make_variants)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path):
wrd = bpy.data.worlds['Arm']
print('Armory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
if wrd.arm_verbose_output:
print(f'Blender: {bpy.app.version_string}, Target: {state.target}, GAPI: {arm.utils.get_gapi()}')
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if not wrd.arm_cache_build:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
raw_shaders_path = sdk_path + '/armory/Shaders/'
assets_path = sdk_path + '/armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
if not os.path.exists(build_dir + '/compiled/Assets'):
os.makedirs(build_dir + '/compiled/Assets')
# have a "zoo" collection in the current scene
export_coll = bpy.data.collections.new("export_coll")
bpy.context.scene.collection.children.link(export_coll)
for scene in bpy.data.scenes:
if scene == bpy.context.scene: continue
for o in scene.collection.all_objects:
if o.type == "MESH" or o.type == "EMPTY":
if o.name not in export_coll.all_objects.keys():
export_coll.objects.link(o)
depsgraph = bpy.context.evaluated_depsgraph_get()
bpy.data.collections.remove(export_coll) # destroy "zoo" collection
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.lz4' if ArmoryExporter.compress_enabled else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
ArmoryExporter.export_scene(bpy.context, asset_path, scene=scene, depsgraph=depsgraph)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
if physics_found == False: # Disable physics if no rigid body is exported
export_physics = False
if navigation_found == False:
export_navigation = False
if ui_found == False:
export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if wrd.arm_audio == 'Enabled':
modules.append('audio')
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
if wrd.arm_verbose_output:
print('Exported modules:', ', '.join(modules))
print('Shader flags:', ' '.join(defs))
print('Compositor flags:', ' '.join(cdefs))
print('Khafile flags:', ' '.join(assets.khafile_defs))
# Render path is configurable at runtime
has_config = wrd.arm_write_config or os.path.exists(arm.utils.get_fp() + '/Bundled/config.arm')
# Write compiled.inc
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs, make_variants=has_config)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {'shader_datas': []}
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs, make_variants=has_config)
# Workaround to also export non-material world shaders
res['shader_datas'] += make_world.shader_datas
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write config.arm
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
if wrd.arm_write_config:
write_data.write_config(resx, resy)
# Change project version (Build, Publish)
if (not state.is_play) and (wrd.arm_project_version_autoinc):
        wrd.arm_project_version = arm.utils.change_version_project(wrd.arm_project_version)
# Write khafile.js
write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, ArmoryExporter.import_traits)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile(assets_only=False):
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
target_name = state.target
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path]
kha_target_name = arm.utils.get_kha_target(target_name)
if kha_target_name != '':
cmd.append(kha_target_name)
# Custom exporter
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
for s in item.arm_project_khamake.split(' '):
cmd.append(s)
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
state.export_gapi = arm.utils.get_gapi()
cmd.append('-g')
cmd.append(state.export_gapi)
# Windows - Set Visual Studio Version
if state.target.startswith('windows'):
cmd.append('-visualstudio')
vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
cmd.append(vs_id)
if arm.utils.get_legacy_shaders() or 'ios' in state.target:
if 'html5' in state.target or 'ios' in state.target:
pass
else:
cmd.append('--shaderversion')
cmd.append('110')
elif 'android' in state.target or 'html5' in state.target:
cmd.append('--shaderversion')
cmd.append('300')
else:
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
if arm.utils.get_pref_or_default('khamake_debug', False):
cmd.append('--debug')
if arm.utils.get_rp().rp_renderer == 'Raytracer':
cmd.append('--raytrace')
cmd.append('dxr')
dxc_path = fp + '/HlslShaders/dxc.exe'
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/raytrace.cso', '-T', 'lib_6_3', fp + '/HlslShaders/raytrace.hlsl']).wait()
if arm.utils.get_khamake_threads() > 1:
cmd.append('--parallelAssetConversion')
cmd.append(str(arm.utils.get_khamake_threads()))
compilation_server = False
cmd.append('--to')
if (kha_target_name == 'krom' and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
# Start compilation server
if kha_target_name == 'krom' and arm.utils.get_compilation_server() and not assets_only and wrd.arm_cache_build:
compilation_server = True
arm.lib.server.run_haxe(arm.utils.get_haxe_path())
else:
cmd.append(arm.utils.build_dir())
if not wrd.arm_verbose_output:
cmd.append("--quiet")
else:
print("Using project from " + arm.utils.get_fp())
print("Running: ", *cmd)
#Project needs to be compiled at least once
#before compilation server can work
if not os.path.exists(arm.utils.build_dir() + '/debug/krom/krom.js') and not state.is_publish:
state.proc_build = run_proc(cmd, build_done)
else:
if assets_only or compilation_server:
cmd.append('--nohaxe')
cmd.append('--noproject')
state.proc_build = run_proc(cmd, assets_done if compilation_server else build_done)
def build(target, is_play=False, is_publish=False, is_export=False):
global profile_time
profile_time = time.time()
state.target = target
state.is_play = is_play
state.is_publish = is_publish
state.is_export = is_export
# Save blend
if arm.utils.get_save_on_build():
bpy.ops.wm.save_mainfile()
log.clear(clear_warnings=True, clear_errors=True)
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
if active_scene.camera == None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
wrd = bpy.data.worlds['Arm']
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
if text.filepath == '' and text.name[-3:] == '.hx':
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
"""Called if the player was stopped/terminated."""
state.proc_play = None
state.redraw_ui = True
log.clear()
live_patch.stop()
def assets_done():
if state.proc_build is None:
return
result = state.proc_build.poll()
if result == 0:
# Connect to the compilation server
os.chdir(arm.utils.build_dir() + '/debug/')
cmd = [arm.utils.get_haxe_path(), '--connect', '6000', 'project-krom.hxml']
state.proc_build = run_proc(cmd, compilation_server_done)
else:
state.proc_build = None
state.redraw_ui = True
log.error('Build failed, check console')
def compilation_server_done():
if state.proc_build is None:
return
result = state.proc_build.poll()
if result == 0:
if os.path.exists('krom/krom.js.temp'):
os.chmod('krom/krom.js', stat.S_IWRITE)
os.remove('krom/krom.js')
os.rename('krom/krom.js.temp', 'krom/krom.js')
build_done()
else:
state.proc_build = None
state.redraw_ui = True
log.error('Build failed, check console')
def build_done():
print('Finished in {:0.3f}s'.format(time.time() - profile_time))
if log.num_warnings > 0:
log.print_warn(f'{log.num_warnings} warning{"s" if log.num_warnings > 1 else ""} occurred during compilation')
if state.proc_build is None:
return
result = state.proc_build.poll()
state.proc_build = None
state.redraw_ui = True
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
build_success()
else:
log.error('Build failed, check console')
def runtime_to_target():
wrd = bpy.data.worlds['Arm']
if wrd.arm_runtime == 'Krom':
return 'krom'
else:
return 'html5'
def get_khajs_path(target):
if target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play():
global scripts_mtime
wrd = bpy.data.worlds['Arm']
build(target=runtime_to_target(), is_play=True)
khajs_path = get_khajs_path(state.target)
if not wrd.arm_cache_build or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target:
wrd.arm_recompile = True
state.last_target = state.target
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
compile(assets_only=(not wrd.arm_recompile))
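# Illustration of the trait bookkeeping in play() above, using a made-up file
# name: a modified source like <project>/Sources/arm/PlayerController.hx is
# split on 'Sources/', stripped of its '.hx' suffix and dotted, so it lands in
# state.mod_scripts as 'arm.PlayerController'.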
def build_success():
log.clear()
wrd = bpy.data.worlds['Arm']
if state.is_play:
if wrd.arm_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run_tcp)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_runtime == 'Krom':
if wrd.arm_live_patch:
live_patch.start()
open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w').close()
krom_location, krom_path = arm.utils.krom_paths()
os.chdir(krom_location)
cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
if arm.utils.get_os() == 'win':
cmd.append('--consolepid')
cmd.append(str(os.getpid()))
if wrd.arm_audio == 'Disabled':
cmd.append('--nosound')
if wrd.arm_verbose_output:
print("Running: ", *cmd)
state.proc_play = run_proc(cmd, play_done)
elif state.is_publish:
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = os.path.join(arm.utils.get_fp_build(), target_name)
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Copy Krom binaries
if state.target == 'krom-windows':
gapi = state.export_gapi
ext = '' if gapi == 'direct3d11' else '_' + gapi
krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
shutil.copy(krom_location, files_path + '/Krom.exe')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.exe'
os.rename(files_path + '/Krom.exe', files_path + '/' + krom_exe)
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/Krom'
shutil.copy(krom_location, files_path)
krom_exe = arm.utils.safestr(wrd.arm_project_name)
os.rename(files_path + '/Krom', files_path + '/' + krom_exe)
krom_exe = './' + krom_exe
else:
krom_location = sdk_path + '/Krom/Krom.app'
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.app'
os.rename(files_path + '/Krom.app', files_path + '/' + krom_exe)
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
project_path = files_path
print('Exported HTML5 package to ' + project_path)
elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
project_path = files_path + '-build'
print('Exported XCode project to ' + project_path)
elif target_name.startswith('windows'):
project_path = files_path + '-build'
vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
print('Exported '+ vs_name +' project to ' + project_path)
elif target_name.startswith('android'):
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
project_path = os.path.join(files_path + '-build', project_name)
print('Exported Android Studio project to ' + project_path)
elif target_name.startswith('krom'):
project_path = files_path
print('Exported Krom package to ' + project_path)
else:
project_path = files_path + '-build'
print('Exported makefiles to ' + project_path)
if not bpy.app.background and arm.utils.get_arm_preferences().open_build_directory:
arm.utils.open_folder(project_path)
# Android build APK
if target_name.startswith('android'):
if (arm.utils.get_project_android_build_apk()) and (len(arm.utils.get_android_sdk_root_path()) > 0):
print("\nBuilding APK")
# Check settings
path_sdk = arm.utils.get_android_sdk_root_path()
if len(path_sdk) > 0:
# Check Environment Variables - ANDROID_SDK_ROOT
if os.getenv('ANDROID_SDK_ROOT') is None:
# Set value from settings
os.environ['ANDROID_SDK_ROOT'] = path_sdk
else:
project_path = ''
# Build start
if len(project_path) > 0:
os.chdir(project_path) # set work folder
if arm.utils.get_os_is_windows():
state.proc_publish_build = run_proc(os.path.join(project_path, "gradlew.bat assembleDebug"), done_gradlew_build)
else:
cmd = shlex.split(os.path.join(project_path, "gradlew assembleDebug"))
state.proc_publish_build = run_proc(cmd, done_gradlew_build)
else:
print('\nBuilding APK Warning: ANDROID_SDK_ROOT is not specified in environment variables and "Android SDK Path" setting is not specified in preferences: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path" in the preferences, then repeat operation "Publish"')
# HTML5 After Publish
if target_name.startswith('html5'):
if len(arm.utils.get_html5_copy_path()) > 0 and (wrd.arm_project_html5_copy):
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
dst = os.path.join(arm.utils.get_html5_copy_path(), project_name)
if os.path.exists(dst):
shutil.rmtree(dst)
try:
shutil.copytree(project_path, dst)
print("Copied files to " + dst)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(project_path, dst)
else: raise
if len(arm.utils.get_link_web_server()) and (wrd.arm_project_html5_start_browser):
link_html5_app = arm.utils.get_link_web_server() +'/'+ project_name
print("Running a browser with a link " + link_html5_app)
webbrowser.open(link_html5_app)
# Windows After Publish
if target_name.startswith('windows'):
list_vs = []
err = ''
# Print message
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
if int(wrd.arm_project_win_build) == 1:
print('\nOpen in Visual Studio ' + os.path.join(project_path, project_name + '.sln'))
if int(wrd.arm_project_win_build) == 2:
print('\nCompile project ' + os.path.join(project_path, project_name + '.vcxproj'))
if int(wrd.arm_project_win_build) == 3:
print('\nCompile and run project ' + os.path.join(project_path, project_name + '.vcxproj'))
if int(wrd.arm_project_win_build) > 0:
# Check Visual Studio
list_vs, err = arm.utils.get_list_installed_vs(True, True, True)
if len(err) > 0:
print(err)
return
if len(list_vs) == 0:
print('No Visual Studio found')
return
is_check = False
for vs in list_vs:
if vs[0] == wrd.arm_project_win_list_vs:
is_check = True
break
if not is_check:
vs_ver, vs_year, vs_name, vs_id = arm.utils.get_visual_studio_from_version(wrd.arm_project_win_list_vs)
print(vs_name + ' not found.')
print('The following are installed on the PC:')
for vs in list_vs:
print('- ' + vs[1] + ' (version ' + vs[3] +')')
return
# Current VS
vs_path = ''
for vs in list_vs:
if vs[0] == wrd.arm_project_win_list_vs:
vs_path = vs[2]
break
# Open in Visual Studio
if int(wrd.arm_project_win_build) == 1:
cmd = os.path.join('start "' + vs_path, 'Common7', 'IDE', 'devenv.exe" "' + os.path.join(project_path, project_name + '.sln"'))
subprocess.Popen(cmd, shell=True)
# Compile
if int(wrd.arm_project_win_build) > 1:
bits = '64' if wrd.arm_project_win_build_arch == 'x64' else '32'
# vcvars
cmd = os.path.join(vs_path, 'VC', 'Auxiliary', 'Build', 'vcvars' + bits + '.bat')
if not os.path.isfile(cmd):
print('File "'+ cmd +'" not found. Verify ' + vs_name + ' was installed correctly')
log.error('Compile failed, check console')
return
state.proc_publish_build = run_proc(cmd, done_vs_vars)
def done_gradlew_build():
if state.proc_publish_build is None:
return
result = state.proc_publish_build.poll()
if result == 0:
state.proc_publish_build = None
wrd = bpy.data.worlds['Arm']
path_apk = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target))
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
path_apk = os.path.join(path_apk + '-build', project_name, 'app', 'build', 'outputs', 'apk', 'debug')
print("\nBuild APK to " + path_apk)
# Rename APK
apk_name = 'app-debug.apk'
file_name = os.path.join(path_apk, apk_name)
if wrd.arm_project_android_rename_apk:
apk_name = project_name + '.apk'
os.rename(file_name, os.path.join(path_apk, apk_name))
file_name = os.path.join(path_apk, apk_name)
print("\nRename APK to " + apk_name)
# Copy APK
if wrd.arm_project_android_copy_apk:
shutil.copyfile(file_name, os.path.join(arm.utils.get_android_apk_copy_path(), apk_name))
print("Copy APK to " + arm.utils.get_android_apk_copy_path())
# Open directory with APK
if arm.utils.get_android_open_build_apk_directory():
arm.utils.open_folder(path_apk)
# Open directory after copy APK
if arm.utils.get_android_apk_copy_open_directory():
arm.utils.open_folder(arm.utils.get_android_apk_copy_path())
# Running emulator
if wrd.arm_project_android_run_avd:
run_android_emulators(arm.utils.get_android_emulator_name())
state.redraw_ui = True
else:
state.proc_publish_build = None
state.redraw_ui = True
os.environ['ANDROID_SDK_ROOT'] = ''
log.error('Building the APK failed, check console')
def run_android_emulators(avd_name):
if len(avd_name.strip()) == 0:
return
print('\nRunning Emulator "'+ avd_name +'"')
path_file = arm.utils.get_android_emulator_file()
if len(path_file) > 0:
if arm.utils.get_os_is_windows():
run_proc(path_file + " -avd "+ avd_name, None)
else:
cmd = shlex.split(path_file + " -avd "+ avd_name)
run_proc(cmd, None)
else:
print('Update List Emulators Warning: File "'+ path_file +'" not found. Check that the variable ANDROID_SDK_ROOT is correct in environment variables or in "Android SDK Path" setting: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path", then repeat operation "Publish"')
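# Example of the emulator command assembled above (the SDK path and AVD name
# are hypothetical):
#   /home/user/Android/Sdk/emulator/emulator -avd Pixel_4_API_30
# On Windows the command is passed to run_proc as a single string; elsewhere
# it is split with shlex first.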
def done_vs_vars():
if state.proc_publish_build is None:
return
result = state.proc_publish_build.poll()
if result == 0:
state.proc_publish_build = None
# MSBuild
wrd = bpy.data.worlds['Arm']
list_vs, err = arm.utils.get_list_installed_vs(True, True, True)
# Current VS
vs_path = ''
vs_name = ''
for vs in list_vs:
if vs[0] == wrd.arm_project_win_list_vs:
vs_name = vs[1]
vs_path = vs[2]
break
msbuild = os.path.join(vs_path, 'MSBuild', 'Current', 'Bin', 'MSBuild.exe')
if not os.path.isfile(msbuild):
print('File "'+ msbuild +'" not found. Verify ' + vs_name + ' was installed correctly')
log.error('Compile failed, check console')
state.redraw_ui = True
return
project_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version)
project_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target)) + '-build'
cmd = '"' + msbuild + '" "' + os.path.join(project_path, project_name + '.vcxproj"')
# Arguments
platform = 'x64' if wrd.arm_project_win_build_arch == 'x64' else 'win32'
log_param = wrd.arm_project_win_build_log
if log_param == 'WarningsAndErrorsOnly':
log_param = 'WarningsOnly;ErrorsOnly'
cmd = cmd + ' -m:' + str(wrd.arm_project_win_build_cpu) + ' -clp:'+ log_param +' /p:Configuration='+ wrd.arm_project_win_build_mode +' /p:Platform=' + platform
print('\nCompiling the project ' + os.path.join(project_path, project_name + '.vcxproj'))
state.proc_publish_build = run_proc(cmd, done_vs_build)
state.redraw_ui = True
else:
state.proc_publish_build = None
state.redraw_ui = True
log.error('\nCompile failed, check console')
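# A sketch of the MSBuild command line produced in done_vs_vars, with
# hypothetical paths and settings filled in:
#   "<vs_path>\MSBuild\Current\Bin\MSBuild.exe" "<build>\mygame-1_0.vcxproj"
#       -m:4 -clp:ErrorsOnly /p:Configuration=Release /p:Platform=x64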
def done_vs_build():
if state.proc_publish_build is None:
return
result = state.proc_publish_build.poll()
if result == 0:
state.proc_publish_build = None
wrd = bpy.data.worlds['Arm']
project_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target)) + '-build'
if wrd.arm_project_win_build_arch == 'x64':
path = os.path.join(project_path, 'x64', wrd.arm_project_win_build_mode)
else:
path = os.path.join(project_path, wrd.arm_project_win_build_mode)
print('\nCompilation completed in ' + path)
# Run
if int(wrd.arm_project_win_build) == 3:
# Copying the executable file
res_path = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target))
file_name = arm.utils.safesrc(wrd.arm_project_name +'-'+ wrd.arm_project_version) + '.exe'
print('\nCopying the executable file from ' + path + ' to ' + res_path)
shutil.copyfile(os.path.join(path, file_name), os.path.join(res_path, file_name))
path = res_path
# Run project
cmd = os.path.join('"' + res_path, file_name + '"')
print('Running the executable file ' + cmd)
os.chdir(res_path) # set work folder
subprocess.Popen(cmd, shell=True)
# Open Build Directory
if wrd.arm_project_win_build_open:
arm.utils.open_folder(path)
state.redraw_ui = True
else:
state.proc_publish_build = None
state.redraw_ui = True
log.error('Compile failed, check console')
def clean():
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
try:
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
except:
print('Armory Warning: Some files in the build folder are locked')
# Remove compiled nodes
pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
nodes_path = 'Sources/' + pkg_dir + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# Remove Sources/ dir if empty
if os.path.exists('Sources/' + pkg_dir) and os.listdir('Sources/' + pkg_dir) == []:
shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
if os.path.exists('Sources') and os.listdir('Sources') == []:
shutil.rmtree('Sources/', onerror=remove_readonly)
# Remove Shape key Textures
if os.path.exists('MorphTargets/'):
shutil.rmtree('MorphTargets/', onerror=remove_readonly)
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.arm_cached = False
# Restart compilation server
if arm.utils.get_compilation_server():
arm.lib.server.kill_haxe()
print('Project cleaned')
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os.path
from abc import ABCMeta, abstractmethod
from collections.abc import MutableSequence, MutableSet
from dataclasses import dataclass
from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Type, Union
from pants.build_graph.address import BuildFileAddress
from pants.build_graph.target import Target
from pants.engine.addressable import addressable_list
from pants.engine.fs import GlobExpansionConjunction, PathGlobs
from pants.engine.objects import Locatable, union
from pants.engine.rules import UnionRule
from pants.engine.struct import Struct, StructWithDeps
from pants.source import wrapped_globs
from pants.util.contextutil import exception_logging
from pants.util.meta import classproperty
from pants.util.objects import Exactly
logger = logging.getLogger(__name__)
class TargetAdaptor(StructWithDeps):
"""A Struct to imitate the existing Target.
Extends StructWithDeps to add a `dependencies` field marked Addressable.
"""
def get_sources(self) -> Optional["GlobsWithConjunction"]:
"""Returns target's non-deferred sources if exists or the default sources if defined.
NB: once ivy is implemented in the engine, we can fetch sources natively here, and/or
refactor how deferred sources are implemented.
see: https://github.com/pantsbuild/pants/issues/2997
"""
source = getattr(self, 'source', None)
sources = getattr(self, 'sources', None)
if source is not None and sources is not None:
raise Target.IllegalArgument(
self.address.spec,
'Cannot specify both source and sources attribute.'
)
if source is not None:
if not isinstance(source, str):
raise Target.IllegalArgument(
self.address.spec,
f"source must be a str containing a path relative to the target, but got {source} of "
f"type {type(source)}"
)
sources = [source]
# N.B. Here we check specifically for `sources is None`, as it's possible for sources
# to be e.g. an explicit empty list (sources=[]).
if sources is None:
if self.default_sources_globs is None:
return None
default_globs = Files(
*(
*self.default_sources_globs,
*(f"!{glob}" for glob in self.default_sources_exclude_globs or []),
),
spec_path=self.address.spec_path,
)
return GlobsWithConjunction(default_globs, GlobExpansionConjunction.any_match)
globs = BaseGlobs.from_sources_field(sources, self.address.spec_path)
return GlobsWithConjunction(globs, GlobExpansionConjunction.all_match)
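# Illustration of the precedence implemented in get_sources() above; the field
# values are only examples:
#   source='util.py'                 -> Files('util.py', ...), all_match
#   sources=['a.py', 'b.py']         -> Files('a.py', 'b.py', ...), all_match
#   sources=[]                       -> empty Files, all_match
#   neither given, defaults defined  -> default globs, any_match
#   neither given, no defaults       -> None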
@property
def field_adaptors(self) -> Tuple:
"""Returns a tuple of Fields for captured fields which need additional treatment."""
with exception_logging(logger, 'Exception in `field_adaptors` property'):
conjunction_globs = self.get_sources()
if conjunction_globs is None:
return tuple()
sources = conjunction_globs.non_path_globs
if not sources:
return tuple()
base_globs = BaseGlobs.from_sources_field(sources, self.address.spec_path)
path_globs = base_globs.to_path_globs(self.address.spec_path, conjunction_globs.conjunction)
sources_field = SourcesField(
self.address,
'sources',
base_globs.filespecs,
base_globs,
path_globs,
self.validate_sources,
)
return sources_field,
@classproperty
def default_sources_globs(cls):
return None
@classproperty
def default_sources_exclude_globs(cls):
return None
def validate_sources(self, sources):
""""
Validate that the sources argument is allowed.
Examples may be to check that the number of sources is correct, that file extensions are as
expected, etc.
TODO: Replace this with some kind of field subclassing, as per
https://github.com/pantsbuild/pants/issues/4535
:param sources: EagerFilesetWithSpec resolved sources.
"""
@union
class HydrateableField:
"""A marker for Target(Adaptor) fields for which the engine might perform extra construction."""
@dataclass(frozen=True)
class SourcesField:
"""Represents the `sources` argument for a particular Target.
Sources are currently eagerly computed in-engine in order to provide the `BuildGraph`
API efficiently; once tasks are explicitly requesting particular Products for Targets,
lazy construction will be more natural.
see https://github.com/pantsbuild/pants/issues/3560
:param address: The BuildFileAddress of the TargetAdaptor for which this field is an argument.
:param arg: The name of this argument: usually 'sources', but occasionally also 'resources' in the
case of python resource globs.
:param filespecs: The merged filespecs dict that describes the paths captured by this field.
:param path_globs: A PathGlobs describing included files.
:param validate_fn: A function which takes an EagerFilesetWithSpec and throws if it's not
acceptable. This API will almost certainly change in the near future.
"""
address: BuildFileAddress
arg: str
filespecs: wrapped_globs.Filespec
base_globs: "BaseGlobs"
path_globs: PathGlobs
validate_fn: Callable
def __hash__(self):
return hash((self.address, self.arg))
def __repr__(self):
return '{}(address={}, input_globs={}, arg={}, filespecs={!r})'.format(
type(self).__name__, self.address, self.base_globs, self.arg, self.filespecs)
class JvmBinaryAdaptor(TargetAdaptor):
def validate_sources(self, sources):
if len(sources.files) > 1:
raise Target.IllegalArgument(self.address.spec,
'jvm_binary must have exactly 0 or 1 sources (typically used to specify the class '
'containing the main method). '
'Other sources should instead be placed in a java_library, which '
'should be referenced in the jvm_binary\'s dependencies.'
)
class PageAdaptor(TargetAdaptor):
def validate_sources(self, sources):
if len(sources.files) != 1:
raise Target.IllegalArgument(
self.address.spec,
'page targets must have exactly 1 source, but found {} ({})'.format(
len(sources.files),
', '.join(sources.files),
)
)
@dataclass(frozen=True)
class BundlesField:
"""Represents the `bundles` argument, each of which has a PathGlobs to represent its `fileset`."""
address: BuildFileAddress
bundles: Any
filespecs_list: List[wrapped_globs.Filespec]
path_globs_list: List[PathGlobs]
def __hash__(self):
return hash(self.address)
class BundleAdaptor(Struct):
"""A Struct to capture the args for the `bundle` object.
Bundles have filesets which we need to capture in order to execute them in the engine.
TODO: Bundles should arguably be Targets, but that distinction blurs in the `exp` examples
package, where a Target is just a collection of configuration.
"""
class AppAdaptor(TargetAdaptor):
def __init__(self, bundles=None, **kwargs):
"""
:param list bundles: A list of `BundleAdaptor` objects
"""
super().__init__(**kwargs)
self.bundles = bundles
@addressable_list(Exactly(BundleAdaptor))
def bundles(self):
"""The BundleAdaptors for this JvmApp."""
return self.bundles
@property
def field_adaptors(self) -> Tuple:
with exception_logging(logger, 'Exception in `field_adaptors` property'):
field_adaptors = super().field_adaptors
if getattr(self, 'bundles', None) is None:
return field_adaptors
bundles_field = self._construct_bundles_field()
return (*field_adaptors, bundles_field)
def _construct_bundles_field(self) -> BundlesField:
filespecs_list: List[wrapped_globs.Filespec] = []
path_globs_list: List[PathGlobs] = []
for bundle in self.bundles:
# NB: if a bundle has a rel_path, then the rel_root of the resulting file globs must be
# set to that rel_path.
rel_root = getattr(bundle, 'rel_path', self.address.spec_path)
base_globs = BaseGlobs.from_sources_field(bundle.fileset, rel_root)
path_globs = base_globs.to_path_globs(rel_root, GlobExpansionConjunction.all_match)
filespecs_list.append(base_globs.filespecs)
path_globs_list.append(path_globs)
return BundlesField(
self.address, self.bundles, filespecs_list, path_globs_list,
)
class JvmAppAdaptor(AppAdaptor): pass
class PythonAppAdaptor(AppAdaptor): pass
class ResourcesAdaptor(TargetAdaptor): pass
class RemoteSourcesAdaptor(TargetAdaptor):
def __init__(self, dest=None, **kwargs):
"""
:param dest: A target constructor.
"""
if not isinstance(dest, str):
dest = dest._type_alias
super().__init__(dest=dest, **kwargs)
class PythonTargetAdaptor(TargetAdaptor):
@property
def field_adaptors(self) -> Tuple:
with exception_logging(logger, 'Exception in `field_adaptors` property'):
field_adaptors = super().field_adaptors
if getattr(self, 'resources', None) is None:
return field_adaptors
base_globs = BaseGlobs.from_sources_field(self.resources, self.address.spec_path)
path_globs = base_globs.to_path_globs(self.address.spec_path, GlobExpansionConjunction.all_match)
sources_field = SourcesField(self.address,
'resources',
base_globs.filespecs,
base_globs,
path_globs,
lambda _: None)
return (*field_adaptors, sources_field)
class PythonBinaryAdaptor(PythonTargetAdaptor):
def validate_sources(self, sources):
if len(sources.files) > 1:
raise Target.IllegalArgument(self.address.spec,
'python_binary must have exactly 0 or 1 sources (typically used to specify the file '
'containing the entry point). '
'Other sources should instead be placed in a python_library, which '
'should be referenced in the python_binary\'s dependencies.'
)
class PythonTestsAdaptor(PythonTargetAdaptor): pass
class PythonAWSLambdaAdaptor(TargetAdaptor): pass
class PythonRequirementLibraryAdaptor(TargetAdaptor): pass
class PantsPluginAdaptor(PythonTargetAdaptor):
def get_sources(self) -> "GlobsWithConjunction":
return GlobsWithConjunction.for_literal_files(['register.py'], self.address.spec_path)
# TODO: Remove all the subclasses once we remove globs et al. The only remaining subclass would be
# Files, which should simply be unified into BaseGlobs.
class BaseGlobs(Locatable, metaclass=ABCMeta):
"""An adaptor class to allow BUILD file parsing from ContextAwareObjectFactories."""
@staticmethod
def from_sources_field(
sources: Union[None, str, Iterable[str], "BaseGlobs"], spec_path: str,
) -> "BaseGlobs":
"""Return a BaseGlobs for the given sources field."""
if sources is None:
return Files(spec_path=spec_path)
if isinstance(sources, BaseGlobs):
return sources
if isinstance(sources, str):
return Files(sources, spec_path=spec_path)
if (
isinstance(sources, (MutableSet, MutableSequence, tuple))
and all(isinstance(s, str) for s in sources)
):
return Files(*sources, spec_path=spec_path)
raise ValueError(f'Expected either a glob or list of literal sources. Got: {sources}')
@property
@abstractmethod
def path_globs_kwarg(self) -> str:
"""The name of the `PathGlobs` parameter corresponding to this BaseGlobs instance."""
@property
@abstractmethod
def legacy_globs_class(self) -> Type[wrapped_globs.FilesetRelPathWrapper]:
"""The corresponding `wrapped_globs` class for this BaseGlobs."""
# TODO: stop accepting an `exclude` argument once we remove `globs` et al.
def __init__(
self, *patterns: str, spec_path: str, exclude: Optional[List[str]] = None, **kwargs,
) -> None:
self._patterns = patterns
self._spec_path = spec_path
self._raw_exclude = exclude
if isinstance(exclude, str):
raise ValueError(f'Excludes should be a list of strings. Got: {exclude!r}')
if kwargs:
raise ValueError(f'kwargs not supported. Got: {kwargs}')
# TODO: once we remove `globs`, `rglobs`, and `zglobs`, we should change as follows:
# * Stop setting `self._parsed_include` and `self._parsed_exclude`. Only save `self._patterns`.
# All the below code should be deleted. For now, we must have these values to ensure that we
# properly parse the `globs()` function.
# * `to_path_globs()` will still need to strip the leading `!` from the exclude pattern, call
# `os.path.join`, and then prepend it back with `!`. But, it will do that when traversing
# over `self._patterns`, rather than `self._parsed_exclude`. We have a new unit test to
# ensure that we don't break this.
# * `filespecs()` must still need to split out the includes from excludes to maintain backwards
# compatibility. The below for loop splitting out the `self._patterns` should be moved
# into `filespecs()`. We have a new unit test to ensure that we don't break this.
self._parsed_include: List[str] = []
self._parsed_exclude: List[str] = []
if isinstance(self, Files):
for glob in self._patterns:
if glob.startswith("!"):
self._parsed_exclude.append(glob[1:])
else:
self._parsed_include.append(glob)
else:
self._parsed_include = self.legacy_globs_class.to_filespec(patterns)['globs']
self._parsed_exclude = self._parse_exclude(exclude or [])
@property
def filespecs(self) -> wrapped_globs.Filespec:
"""Return a filespecs dict representing both globs and excludes."""
filespecs: wrapped_globs.Filespec = {'globs': self._parsed_include}
if self._parsed_exclude:
filespecs['exclude'] = [{'globs': self._parsed_exclude}]
return filespecs
def to_path_globs(self, relpath: str, conjunction: GlobExpansionConjunction) -> PathGlobs:
"""Return a PathGlobs representing the included and excluded Files for these patterns."""
return PathGlobs(
globs=(
*(os.path.join(relpath, glob) for glob in self._parsed_include),
*(f"!{os.path.join(relpath, glob)}" for glob in self._parsed_exclude)
),
conjunction=conjunction,
)
def _parse_exclude(self, raw_exclude: List[str]) -> List[str]:
excluded_patterns: List[str] = []
for raw_element in raw_exclude:
exclude_filespecs = BaseGlobs.from_sources_field(raw_element, self._spec_path).filespecs
if exclude_filespecs.get('exclude'):
raise ValueError('Nested excludes are not supported: got {}'.format(raw_element))
excluded_patterns.extend(exclude_filespecs['globs'])
return excluded_patterns
def _gen_init_args_str(self) -> str:
all_arg_strs = []
positional_args = ', '.join(repr(p) for p in self._patterns)
if positional_args:
all_arg_strs.append(positional_args)
all_arg_strs.append(f"spec_path={self._spec_path}")
if self._raw_exclude:
all_arg_strs.append(f"exclude={self._raw_exclude}")
return ', '.join(all_arg_strs)
def __repr__(self) -> str:
# TODO: remove this once we finish deprecating `globs` et al. Use the __str__ implementation.
return f'{type(self).__name__}({self._gen_init_args_str()})'
def __str__(self) -> str:
return f'{self.path_globs_kwarg}({self._gen_init_args_str()})'
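# A small usage sketch for the BaseGlobs machinery above; the file names are
# made up:
#
#   globs = Files('*.py', '!conftest.py', spec_path='src/python/app')
#   globs.filespecs
#   # -> {'globs': ['*.py'], 'exclude': [{'globs': ['conftest.py']}]}
#   globs.to_path_globs('src/python/app', GlobExpansionConjunction.all_match)
#   # -> PathGlobs(globs=('src/python/app/*.py', '!src/python/app/conftest.py'),
#   #              conjunction=GlobExpansionConjunction.all_match)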
class Files(BaseGlobs):
path_globs_kwarg = 'files'
legacy_globs_class = wrapped_globs.Globs
def __str__(self) -> str:
return f"[{", ".join(repr(p) for p in self._patterns)}]"
class Globs(BaseGlobs):
path_globs_kwarg = 'globs'
legacy_globs_class = wrapped_globs.Globs
class RGlobs(BaseGlobs):
path_globs_kwarg = 'rglobs'
legacy_globs_class = wrapped_globs.RGlobs
class ZGlobs(BaseGlobs):
path_globs_kwarg = 'zglobs'
legacy_globs_class = wrapped_globs.ZGlobs
@dataclass(frozen=True)
class GlobsWithConjunction:
non_path_globs: BaseGlobs
conjunction: GlobExpansionConjunction
@classmethod
def for_literal_files(cls, file_paths: Sequence[str], spec_path: str) -> "GlobsWithConjunction":
return cls(Files(*file_paths, spec_path=spec_path), GlobExpansionConjunction.all_match)
def rules():
return [
UnionRule(HydrateableField, SourcesField),
UnionRule(HydrateableField, BundlesField),
]
| # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os.path
from abc import ABCMeta, abstractmethod
from collections.abc import MutableSequence, MutableSet
from dataclasses import dataclass
from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Type, Union
from pants.build_graph.address import BuildFileAddress
from pants.build_graph.target import Target
from pants.engine.addressable import addressable_list
from pants.engine.fs import GlobExpansionConjunction, PathGlobs
from pants.engine.objects import Locatable, union
from pants.engine.rules import UnionRule
from pants.engine.struct import Struct, StructWithDeps
from pants.source import wrapped_globs
from pants.util.contextutil import exception_logging
from pants.util.meta import classproperty
from pants.util.objects import Exactly
logger = logging.getLogger(__name__)
class TargetAdaptor(StructWithDeps):
"""A Struct to imitate the existing Target.
Extends StructWithDeps to add a `dependencies` field marked Addressable.
"""
def get_sources(self) -> Optional["GlobsWithConjunction"]:
"""Returns target's non-deferred sources if exists or the default sources if defined.
NB: once ivy is implemented in the engine, we can fetch sources natively here, and/or
refactor how deferred sources are implemented.
see: https://github.com/pantsbuild/pants/issues/2997
"""
source = getattr(self, 'source', None)
sources = getattr(self, 'sources', None)
if source is not None and sources is not None:
raise Target.IllegalArgument(
self.address.spec,
'Cannot specify both source and sources attribute.'
)
if source is not None:
if not isinstance(source, str):
raise Target.IllegalArgument(
self.address.spec,
f"source must be a str containing a path relative to the target, but got {source} of "
f"type {type(source)}"
)
sources = [source]
# N.B. Here we check specifically for `sources is None`, as it's possible for sources
# to be e.g. an explicit empty list (sources=[]).
if sources is None:
if self.default_sources_globs is None:
return None
default_globs = Files(
*(
*self.default_sources_globs,
*(f"!{glob}" for glob in self.default_sources_exclude_globs or []),
),
spec_path=self.address.spec_path,
)
return GlobsWithConjunction(default_globs, GlobExpansionConjunction.any_match)
globs = BaseGlobs.from_sources_field(sources, self.address.spec_path)
return GlobsWithConjunction(globs, GlobExpansionConjunction.all_match)
@property
def field_adaptors(self) -> Tuple:
"""Returns a tuple of Fields for captured fields which need additional treatment."""
with exception_logging(logger, 'Exception in `field_adaptors` property'):
conjunction_globs = self.get_sources()
if conjunction_globs is None:
return tuple()
sources = conjunction_globs.non_path_globs
if not sources:
return tuple()
base_globs = BaseGlobs.from_sources_field(sources, self.address.spec_path)
path_globs = base_globs.to_path_globs(self.address.spec_path, conjunction_globs.conjunction)
sources_field = SourcesField(
self.address,
'sources',
base_globs.filespecs,
base_globs,
path_globs,
self.validate_sources,
)
return sources_field,
@classproperty
def default_sources_globs(cls):
return None
@classproperty
def default_sources_exclude_globs(cls):
return None
def validate_sources(self, sources):
""""
Validate that the sources argument is allowed.
Examples may be to check that the number of sources is correct, that file extensions are as
expected, etc.
TODO: Replace this with some kind of field subclassing, as per
https://github.com/pantsbuild/pants/issues/4535
:param sources: EagerFilesetWithSpec resolved sources.
"""
@union
class HydrateableField:
"""A marker for Target(Adaptor) fields for which the engine might perform extra construction."""
@dataclass(frozen=True)
class SourcesField:
"""Represents the `sources` argument for a particular Target.
Sources are currently eagerly computed in-engine in order to provide the `BuildGraph`
API efficiently; once tasks are explicitly requesting particular Products for Targets,
lazy construction will be more natural.
see https://github.com/pantsbuild/pants/issues/3560
:param address: The BuildFileAddress of the TargetAdaptor for which this field is an argument.
:param arg: The name of this argument: usually 'sources', but occasionally also 'resources' in the
case of python resource globs.
:param filespecs: The merged filespecs dict that describes the paths captured by this field.
:param path_globs: A PathGlobs describing included files.
:param validate_fn: A function which takes an EagerFilesetWithSpec and throws if it's not
acceptable. This API will almost certainly change in the near future.
"""
address: BuildFileAddress
arg: str
filespecs: wrapped_globs.Filespec
base_globs: "BaseGlobs"
path_globs: PathGlobs
validate_fn: Callable
def __hash__(self):
return hash((self.address, self.arg))
def __repr__(self):
return '{}(address={}, input_globs={}, arg={}, filespecs={!r})'.format(
type(self).__name__, self.address, self.base_globs, self.arg, self.filespecs)
class JvmBinaryAdaptor(TargetAdaptor):
def validate_sources(self, sources):
if len(sources.files) > 1:
raise Target.IllegalArgument(self.address.spec,
'jvm_binary must have exactly 0 or 1 sources (typically used to specify the class '
'containing the main method). '
'Other sources should instead be placed in a java_library, which '
'should be referenced in the jvm_binary\'s dependencies.'
)
class PageAdaptor(TargetAdaptor):
def validate_sources(self, sources):
if len(sources.files) != 1:
raise Target.IllegalArgument(
self.address.spec,
'page targets must have exactly 1 source, but found {} ({})'.format(
len(sources.files),
', '.join(sources.files),
)
)
@dataclass(frozen=True)
class BundlesField:
"""Represents the `bundles` argument, each of which has a PathGlobs to represent its `fileset`."""
address: BuildFileAddress
bundles: Any
filespecs_list: List[wrapped_globs.Filespec]
path_globs_list: List[PathGlobs]
def __hash__(self):
return hash(self.address)
class BundleAdaptor(Struct):
"""A Struct to capture the args for the `bundle` object.
Bundles have filesets which we need to capture in order to execute them in the engine.
TODO: Bundles should arguably be Targets, but that distinction blurs in the `exp` examples
package, where a Target is just a collection of configuration.
"""
class AppAdaptor(TargetAdaptor):
def __init__(self, bundles=None, **kwargs):
"""
:param list bundles: A list of `BundleAdaptor` objects
"""
super().__init__(**kwargs)
self.bundles = bundles
@addressable_list(Exactly(BundleAdaptor))
def bundles(self):
"""The BundleAdaptors for this JvmApp."""
return self.bundles
@property
def field_adaptors(self) -> Tuple:
with exception_logging(logger, 'Exception in `field_adaptors` property'):
field_adaptors = super().field_adaptors
if getattr(self, 'bundles', None) is None:
return field_adaptors
bundles_field = self._construct_bundles_field()
return (*field_adaptors, bundles_field)
def _construct_bundles_field(self) -> BundlesField:
filespecs_list: List[wrapped_globs.Filespec] = []
path_globs_list: List[PathGlobs] = []
for bundle in self.bundles:
# NB: if a bundle has a rel_path, then the rel_root of the resulting file globs must be
# set to that rel_path.
rel_root = getattr(bundle, 'rel_path', self.address.spec_path)
base_globs = BaseGlobs.from_sources_field(bundle.fileset, rel_root)
path_globs = base_globs.to_path_globs(rel_root, GlobExpansionConjunction.all_match)
filespecs_list.append(base_globs.filespecs)
path_globs_list.append(path_globs)
return BundlesField(
self.address, self.bundles, filespecs_list, path_globs_list,
)
class JvmAppAdaptor(AppAdaptor): pass
class PythonAppAdaptor(AppAdaptor): pass
class ResourcesAdaptor(TargetAdaptor): pass
class RemoteSourcesAdaptor(TargetAdaptor):
def __init__(self, dest=None, **kwargs):
"""
:param dest: A target constructor.
"""
if not isinstance(dest, str):
dest = dest._type_alias
super().__init__(dest=dest, **kwargs)
class PythonTargetAdaptor(TargetAdaptor):
@property
def field_adaptors(self) -> Tuple:
with exception_logging(logger, 'Exception in `field_adaptors` property'):
field_adaptors = super().field_adaptors
if getattr(self, 'resources', None) is None:
return field_adaptors
base_globs = BaseGlobs.from_sources_field(self.resources, self.address.spec_path)
path_globs = base_globs.to_path_globs(self.address.spec_path, GlobExpansionConjunction.all_match)
sources_field = SourcesField(self.address,
'resources',
base_globs.filespecs,
base_globs,
path_globs,
lambda _: None)
return (*field_adaptors, sources_field)
class PythonBinaryAdaptor(PythonTargetAdaptor):
def validate_sources(self, sources):
if len(sources.files) > 1:
raise Target.IllegalArgument(self.address.spec,
'python_binary must have exactly 0 or 1 sources (typically used to specify the file '
'containing the entry point). '
'Other sources should instead be placed in a python_library, which '
'should be referenced in the python_binary\'s dependencies.'
)
class PythonTestsAdaptor(PythonTargetAdaptor): pass
class PythonAWSLambdaAdaptor(TargetAdaptor): pass
class PythonRequirementLibraryAdaptor(TargetAdaptor): pass
class PantsPluginAdaptor(PythonTargetAdaptor):
def get_sources(self) -> "GlobsWithConjunction":
return GlobsWithConjunction.for_literal_files(['register.py'], self.address.spec_path)
# TODO: Remove all the subclasses once we remove globs et al. The only remaining subclass would be
# Files, which should simply be unified into BaseGlobs.
class BaseGlobs(Locatable, metaclass=ABCMeta):
"""An adaptor class to allow BUILD file parsing from ContextAwareObjectFactories."""
@staticmethod
def from_sources_field(
sources: Union[None, str, Iterable[str], "BaseGlobs"], spec_path: str,
) -> "BaseGlobs":
"""Return a BaseGlobs for the given sources field."""
if sources is None:
return Files(spec_path=spec_path)
if isinstance(sources, BaseGlobs):
return sources
if isinstance(sources, str):
return Files(sources, spec_path=spec_path)
if (
isinstance(sources, (MutableSet, MutableSequence, tuple))
and all(isinstance(s, str) for s in sources)
):
return Files(*sources, spec_path=spec_path)
raise ValueError(f'Expected either a glob or list of literal sources. Got: {sources}')
@property
@abstractmethod
def path_globs_kwarg(self) -> str:
"""The name of the `PathGlobs` parameter corresponding to this BaseGlobs instance."""
@property
@abstractmethod
def legacy_globs_class(self) -> Type[wrapped_globs.FilesetRelPathWrapper]:
"""The corresponding `wrapped_globs` class for this BaseGlobs."""
# TODO: stop accepting an `exclude` argument once we remove `globs` et al.
def __init__(
self, *patterns: str, spec_path: str, exclude: Optional[List[str]] = None, **kwargs,
) -> None:
self._patterns = patterns
self._spec_path = spec_path
self._raw_exclude = exclude
if isinstance(exclude, str):
raise ValueError(f'Excludes should be a list of strings. Got: {exclude!r}')
if kwargs:
raise ValueError(f'kwargs not supported. Got: {kwargs}')
# TODO: once we remove `globs`, `rglobs`, and `zglobs`, we should change as follows:
# * Stop setting `self._parsed_include` and `self._parsed_exclude`. Only save `self._patterns`.
# All the below code should be deleted. For now, we must have these values to ensure that we
# properly parse the `globs()` function.
# * `to_path_globs()` will still need to strip the leading `!` from the exclude pattern, call
# `os.path.join`, and then prepend it back with `!`. But, it will do that when traversing
# over `self._patterns`, rather than `self._parsed_exclude`. We have a new unit test to
# ensure that we don't break this.
# * `filespecs()` must still need to split out the includes from excludes to maintain backwards
# compatibility. The below for loop splitting out the `self._patterns` should be moved
# into `filespecs()`. We have a new unit test to ensure that we don't break this.
self._parsed_include: List[str] = []
self._parsed_exclude: List[str] = []
if isinstance(self, Files):
for glob in self._patterns:
if glob.startswith("!"):
self._parsed_exclude.append(glob[1:])
else:
self._parsed_include.append(glob)
else:
self._parsed_include = self.legacy_globs_class.to_filespec(patterns)['globs']
self._parsed_exclude = self._parse_exclude(exclude or [])
@property
def filespecs(self) -> wrapped_globs.Filespec:
"""Return a filespecs dict representing both globs and excludes."""
filespecs: wrapped_globs.Filespec = {'globs': self._parsed_include}
if self._parsed_exclude:
filespecs['exclude'] = [{'globs': self._parsed_exclude}]
return filespecs
def to_path_globs(self, relpath: str, conjunction: GlobExpansionConjunction) -> PathGlobs:
"""Return a PathGlobs representing the included and excluded Files for these patterns."""
return PathGlobs(
globs=(
*(os.path.join(relpath, glob) for glob in self._parsed_include),
*(f"!{os.path.join(relpath, glob)}" for glob in self._parsed_exclude)
),
conjunction=conjunction,
)
def _parse_exclude(self, raw_exclude: List[str]) -> List[str]:
excluded_patterns: List[str] = []
for raw_element in raw_exclude:
exclude_filespecs = BaseGlobs.from_sources_field(raw_element, self._spec_path).filespecs
if exclude_filespecs.get('exclude'):
raise ValueError('Nested excludes are not supported: got {}'.format(raw_element))
excluded_patterns.extend(exclude_filespecs['globs'])
return excluded_patterns
def _gen_init_args_str(self) -> str:
all_arg_strs = []
positional_args = ', '.join(repr(p) for p in self._patterns)
if positional_args:
all_arg_strs.append(positional_args)
all_arg_strs.append(f"spec_path={self._spec_path}")
if self._raw_exclude:
all_arg_strs.append(f"exclude={self._raw_exclude}")
return ', '.join(all_arg_strs)
def __repr__(self) -> str:
# TODO: remove this once we finish deprecating `globs` et al. Use the __str__ implementation.
return f'{type(self).__name__}({self._gen_init_args_str()})'
def __str__(self) -> str:
return f'{self.path_globs_kwarg}({self._gen_init_args_str()})'
class Files(BaseGlobs):
path_globs_kwarg = 'files'
legacy_globs_class = wrapped_globs.Globs
def __str__(self) -> str:
return f"[{', '.join(repr(p) for p in self._patterns)}]"
class Globs(BaseGlobs):
path_globs_kwarg = 'globs'
legacy_globs_class = wrapped_globs.Globs
class RGlobs(BaseGlobs):
path_globs_kwarg = 'rglobs'
legacy_globs_class = wrapped_globs.RGlobs
class ZGlobs(BaseGlobs):
path_globs_kwarg = 'zglobs'
legacy_globs_class = wrapped_globs.ZGlobs
@dataclass(frozen=True)
class GlobsWithConjunction:
non_path_globs: BaseGlobs
conjunction: GlobExpansionConjunction
@classmethod
def for_literal_files(cls, file_paths: Sequence[str], spec_path: str) -> "GlobsWithConjunction":
return cls(Files(*file_paths, spec_path=spec_path), GlobExpansionConjunction.all_match)
def rules():
return [
UnionRule(HydrateableField, SourcesField),
UnionRule(HydrateableField, BundlesField),
]
|
import os
from typing import Any
from search_run.context import Context
from search_run.exceptions import CommandDoNotMatchException
from search_run.interpreter.base import BaseInterpreter
from search_run.interpreter.cmd import CmdInterpreter
class FileInterpreter(BaseInterpreter):
def __init__(self, cmd: Any, context: Context):
self.context = context
self.cmd = {}
if type(cmd) is dict and "file" in cmd:
self.cmd = cmd
return
if type(cmd) is str and (os.path.isfile(cmd) or os.path.isdir(cmd)):
self.cmd["file"] = cmd
return
raise CommandDoNotMatchException(
f"Not Valid {self.__class__.__name__} command {cmd}"
)
def get_executable(self):
if os.path.isdir(self.cmd["file"]):
return "nautilus"
executable = "vim"
filename, file_extension = os.path.splitext(self.cmd["file"])
if file_extension == ".php":
executable = "phpstormn"
elif file_extension == ".pdf":
# executable = "okular"
executable = "zathura"
elif file_extension == ".py":
executable = "vim"
elif file_extension == ".ipynb":
executable = "pycharm"
return executable
def interpret_default(self):
executable = self.get_executable()
cmd = f'{executable} "{self.cmd['file']}"'
final_cmd = self.cmd
if executable == "vim":
final_cmd["cli_cmd"] = cmd
else:
final_cmd["cmd"] = cmd
return CmdInterpreter(final_cmd, self.context).interpret_default()
def copiable_part(self):
return self.cmd["file"]
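# Rough usage sketch; the path and the Context instance are hypothetical and
# constructed elsewhere:
#
#   interpreter = FileInterpreter("/home/user/notes.pdf", context)
#   interpreter.get_executable()     # -> "zathura" for a .pdf file
#   interpreter.interpret_default()  # delegates to CmdInterpreter with the
#                                    #    assembled command string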
| import os
from typing import Any
from search_run.context import Context
from search_run.exceptions import CommandDoNotMatchException
from search_run.interpreter.base import BaseInterpreter
from search_run.interpreter.cmd import CmdInterpreter
class FileInterpreter(BaseInterpreter):
def __init__(self, cmd: Any, context: Context):
self.context = context
self.cmd = {}
if type(cmd) is dict and "file" in cmd:
self.cmd = cmd
return
if type(cmd) is str and (os.path.isfile(cmd) or os.path.isdir(cmd)):
self.cmd["file"] = cmd
return
raise CommandDoNotMatchException(
f"Not Valid {self.__class__.__name__} command {cmd}"
)
def get_executable(self):
if os.path.isdir(self.cmd["file"]):
return "nautilus"
executable = "vim"
filename, file_extension = os.path.splitext(self.cmd["file"])
if file_extension == ".php":
executable = "phpstormn"
elif file_extension == ".pdf":
# executable = "okular"
executable = "zathura"
elif file_extension == ".py":
executable = "vim"
elif file_extension == ".ipynb":
executable = "pycharm"
return executable
def interpret_default(self):
executable = self.get_executable()
cmd = f'{executable} "{self.cmd["file"]}"'
final_cmd = self.cmd
if executable == "vim":
final_cmd["cli_cmd"] = cmd
else:
final_cmd["cmd"] = cmd
return CmdInterpreter(final_cmd, self.context).interpret_default()
def copiable_part(self):
return self.cmd["file"]
|
# -*- coding: utf-8 -*-
"""Non-graphical part of the Loop step in a SEAMM flowchart"""
import logging
from pathlib import Path
import re
import sys
import traceback
import psutil
import pprint
import loop_step
import seamm
import seamm_util
import seamm_util.printing as printing
from seamm_util.printing import FormattedText as __
logger = logging.getLogger(__name__)
job = printing.getPrinter()
printer = printing.getPrinter("loop")
class Loop(seamm.Node):
def __init__(self, flowchart=None, extension=None):
"""Setup the non-graphical part of the Loop step in a
SEAMM flowchart.
Keyword arguments:
"""
logger.debug("Creating Loop {}".format(self))
self.table_handle = None
self.table = None
self._loop_value = None
self._loop_length = None
self._file_handler = None
super().__init__(
flowchart=flowchart, title="Loop", extension=extension, logger=logger
)
# This needs to be after initializing subclasses...
self.parameters = loop_step.LoopParameters()
@property
def version(self):
"""The semantic version of this module."""
return loop_step.__version__
@property
def iter_format(self):
if self._loop_length is None:
return "07"
else:
n = len(str(self._loop_length))
return f"0{n}"
@property
def git_revision(self):
"""The git version of this module."""
return loop_step.__git_revision__
@property
def working_path(self):
return Path(self.directory) / f"iter_{self._loop_value:{self.iter_format}}"
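# Example of the iteration bookkeeping above (numbers are illustrative): with
# _loop_length = 250, iter_format is "03", so iteration 42 works in
# <step directory>/iter_042; before the length is known, the default "07"
# format yields names like iter_0000042.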
def description_text(self, P=None):
"""Return a short description of this step.
Return a nicely formatted string describing what this step will
do.
Keyword arguments:
P: a dictionary of parameter values, which may be variables
or final values. If None, then the parameter values will
be used as is.
"""
if not P:
P = self.parameters.values_to_dict()
text = ""
if P["type"] == "For":
subtext = "For {variable} from {start} to {end} by {step}\n"
elif P["type"] == "Foreach":
subtext = "Foreach {variable} in {values}\n"
elif P["type"] == "For rows in table":
subtext = "For rows in table {table}\n"
else:
subtext = "Loop type defined by {type}\n"
text += self.header + "\n" + __(subtext, **P, indent=4 * " ").__str__()
# Print the body of the loop
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "loop":
self.logger.debug("Loop, first node of loop is: {}".format(edge.node2))
next_node = edge.node2
while next_node and not next_node.visited:
next_node.visited = True
text += "\n\n"
text += __(next_node.description_text(), indent=4 * " ").__str__()
next_node = next_node.next()
return text
def describe(self):
"""Write out information about what this node will do"""
self.visited = True
# The description
job.job(__(self.description_text(), indent=self.indent))
return self.exit_node()
def run(self):
"""Run a Loop step."""
# If the loop is empty, just go on
if self.loop_node() is None:
return self.exit_node()
# Set up the directory, etc.
super().run()
P = self.parameters.current_values_to_dict(
context=seamm.flowchart_variables._data
)
# Print out header to the main output
printer.important(self.description_text(P))
# Remove any redirection of printing.
if self._file_handler is not None:
job.removeHandler(self._file_handler)
self._file_handler = None
# Find the handler for job.out and set the level up
job_handler = None
out_handler = None
for handler in job.handlers:
if (
isinstance(handler, logging.FileHandler)
and "job.out" in handler.baseFilename
):
job_handler = handler
job_level = job_handler.level
job_handler.setLevel(printing.JOB)
elif isinstance(handler, logging.StreamHandler):
out_handler = handler
out_level = out_handler.level
out_handler.setLevel(printing.JOB)
# Set up some unchanging variables
if P["type"] == "For rows in table":
if self._loop_value is None:
self.table_handle = self.get_variable(P["table"])
self.table = self.table_handle["table"]
self.table_handle["loop index"] = True
self.logger.info(
"Initialize loop over {} rows in table {}".format(
self.table.shape[0], P["table"]
)
)
self._loop_value = -1
self._loop_length = self.table.shape[0]
printer.job(f" The loop will have {self._loop_length} iterations.")
if self.variable_exists("_loop_indices"):
tmp = self.get_variable("_loop_indices")
self.set_variable(
"_loop_indices",
(
*tmp,
None,
),
)
else:
self.set_variable("_loop_indices", (None,))
where = P["where"]
if where == "Use all rows":
pass
elif where == "Select rows where column":
column = P["query-column"]
op = P["query-op"]
value = P["query-value"]
if self.table.shape[0] > 0:
row = self.table.iloc[0]
tmp = pprint.pformat(row)
self.logger.debug(f"Row is\n{tmp}")
if column not in row:
for key in row.keys():
if column.lower() == key.lower():
column = key
break
if column not in row:
raise ValueError(
f"Looping over table with criterion on column '{column}': "
"that column does not exist."
)
else:
raise NotImplementedError(f"Loop cannot handle '{where}'")
# Cycle through the iterations, setting up the first time.
next_node = self
while next_node is not None:
if next_node is self:
next_node = self.loop_node()
if P["type"] == "For":
if self._loop_value is None:
self.logger.info(
"For {} from {} to {} by {}".format(
P["variable"], P["start"], P["end"], P["step"]
)
)
# See if loop variables are all integers
start = P["start"]
if isinstance(start, str):
start = float(start)
if isinstance(start, float) and start.is_integer():
start = int(start)
step = P["step"]
if isinstance(step, str):
step = float(step)
if isinstance(step, float) and step.is_integer():
step = int(step)
end = P["end"]
if isinstance(end, str):
end = float(end)
if isinstance(end, float) and end.is_integer():
end = int(end)
self.logger.info("Initializing loop")
self._loop_value = start
self.set_variable(P["variable"], self._loop_value)
# Loop to get length... range doesn't work for nonintegers
count = 0
tmp = start
while tmp <= end:
count += 1
tmp += step
self._loop_length = count
printer.job(
f" The loop will have {self._loop_length} iterations."
)
if self.variable_exists("_loop_indices"):
tmp = self.get_variable("_loop_indices")
self.set_variable("_loop_indices", (*tmp, self._loop_value))
else:
self.set_variable("_loop_indices", (self._loop_value,))
self.set_variable("_loop_index", self._loop_value)
else:
self.write_final_structure()
self._loop_value += step
self.set_variable(P["variable"], self._loop_value)
# Set up the index variables
tmp = self.get_variable("_loop_indices")
self.set_variable(
"_loop_indices",
(
*tmp[0:-1],
self._loop_value,
),
)
self.set_variable("_loop_index", self._loop_value)
# See if we are at the end of loop
if self._loop_value > end:
self._loop_value = None
# Revert the loop index variables to the next outer loop
# if there is one, or remove them.
tmp = self.get_variable("_loop_indices")
if len(tmp) <= 1:
self.delete_variable("_loop_indices")
self.delete_variable("_loop_index")
else:
self.set_variable("_loop_indices", tmp[0:-1])
self.set_variable("_loop_index", tmp[-2])
self.logger.info(
f"The loop over {P["variable"]} from {start} to "
f"{end} by {step} finished successfully"
)
break
self.logger.info(" Loop value = {}".format(self._loop_value))
elif P["type"] == "Foreach":
self.logger.info(f"Foreach {P["variable"]} in {P["values"]}")
if self._loop_value is None:
self._loop_value = -1
self._loop_length = len(P["values"])
printer.job(
f" The loop will have {self._loop_length} iterations."
)
if self.variable_exists("_loop_indices"):
tmp = self.get_variable("_loop_indices")
self.set_variable(
"_loop_indices",
(
*tmp,
None,
),
)
else:
self.set_variable("_loop_indices", (None,))
if self._loop_value >= 0:
self.write_final_structure()
self._loop_value += 1
if self._loop_value >= self._loop_length:
self._loop_value = None
self._loop_length = None
# Revert the loop index variables to the next outer loop
# if there is one, or remove them.
tmp = self.get_variable("_loop_indices")
if len(tmp) <= 1:
self.delete_variable("_loop_indices")
self.delete_variable("_loop_index")
else:
self.set_variable("_loop_indices", tmp[0:-1])
self.set_variable("_loop_index", tmp[-2])
self.logger.info("The loop over value finished successfully")
# return the next node after the loop
break
value = P["values"][self._loop_value]
self.set_variable(P["variable"], value)
# Set up the index variables
tmp = self.get_variable("_loop_indices")
self.set_variable(
"_loop_indices",
(
*tmp[0:-1],
self._loop_value,
),
)
self.set_variable("_loop_index", self._loop_value)
self.logger.info(" Loop value = {}".format(value))
elif P["type"] == "For rows in table":
if self._loop_value >= 0:
self.write_final_structure()
# Loop until query is satisfied
while True:
self._loop_value += 1
if self._loop_value >= self.table.shape[0]:
break
if where == "Use all rows":
break
row = self.table.iloc[self._loop_value]
self.logger.debug(f"Query {row[column]} {op} {value}")
if op == "==":
if row[column] == value:
break
elif op == "!=":
if row[column] != value:
break
elif op == ">":
if row[column] > value:
break
elif op == ">=":
if row[column] >= value:
break
elif op == "<":
if row[column] < value:
break
elif op == "<=":
if row[column] <= value:
break
elif op == "contains":
if value in row[column]:
break
elif op == "does not contain":
if value not in row[column]:
break
elif op == "contains regexp":
if re.search(value, row[column]) is not None:
break
elif op == "does not contain regexp":
if re.search(value, row[column]) is None:
break
elif op == "is empty":
# Might be numpy.nan, and NaN != NaN hence odd test.
if row[column] == "" or row[column] != row[column]:
break
elif op == "is not empty":
if row[column] != "" and row[column] == row[column]:
break
else:
raise NotImplementedError(
f"Loop query '{op}' not implemented"
)
if self._loop_value >= self.table.shape[0]:
self._loop_value = None
self.delete_variable("_row")
# Revert the loop index variables to the next outer loop
# if there is one, or remove them.
tmp = self.get_variable("_loop_indices")
if len(tmp) <= 1:
self.delete_variable("_loop_indices")
self.delete_variable("_loop_index")
else:
self.set_variable("_loop_indices", tmp[0:-1])
self.set_variable("_loop_index", tmp[-2])
# and the other info in the table handle
self.table_handle["loop index"] = False
self.table = None
self.table_handle = None
self.logger.info(
"The loop over table "
+ self.parameters["table"].value
+ " finished successfully"
)
# return the next node after the loop
break
# Set up the index variables
self.logger.debug(" _loop_value = {}".format(self._loop_value))
tmp = self.get_variable("_loop_indices")
self.logger.debug(" _loop_indices = {}".format(tmp))
self.set_variable(
"_loop_indices",
(*tmp[0:-1], self.table.index[self._loop_value]),
)
self.logger.debug(
" --> {}".format(self.get_variable("_loop_indices"))
)
self.set_variable("_loop_index", self.table.index[self._loop_value])
self.table_handle["current index"] = self.table.index[
self._loop_value
]
row = self.table.iloc[self._loop_value]
self.set_variable("_row", row)
self.logger.debug(" _row = {}".format(row))
# Direct most output to iteration.out
# A handler for the file
iter_dir = self.working_path
iter_dir.mkdir(parents=True, exist_ok=True)
if self._file_handler is not None:
self._file_handler.close()
job.removeHandler(self._file_handler)
self._file_handler = logging.FileHandler(iter_dir / "iteration.out")
self._file_handler.setLevel(printing.NORMAL)
formatter = logging.Formatter(fmt="{message:s}", style="{")
self._file_handler.setFormatter(formatter)
job.addHandler(self._file_handler)
# Add the iteration to the ids so the directory structure is
# reasonable
self.flowchart.reset_visited()
self.set_subids(
(*self._id, f"iter_{self._loop_value:{self.iter_format}}")
)
# Run through the steps in the loop body
try:
next_node = next_node.run()
except DeprecationWarning as e:
printer.normal("\nDeprecation warning: " + str(e))
traceback.print_exc(file=sys.stderr)
traceback.print_exc(file=sys.stdout)
except Exception as e:
printer.job(
f"Caught exception in loop iteration {self._loop_value}: {str(e)}"
)
with open(iter_dir / "stderr.out", "a") as fd:
traceback.print_exc(file=fd)
if "continue" in P["errors"]:
next_node = self
elif "exit" in P["errors"]:
break
else:
raise
if self.logger.isEnabledFor(logging.DEBUG):
p = psutil.Process()
self.logger.debug(pprint.pformat(p.open_files()))
self.logger.debug(f"Bottom of loop {next_node}")
# Return to the normally scheduled step, i.e. fall out of the loop.
# Remove any redirection of printing.
if self._file_handler is not None:
self._file_handler.close()
job.removeHandler(self._file_handler)
self._file_handler = None
if job_handler is not None:
job_handler.setLevel(job_level)
if out_handler is not None:
out_handler.setLevel(out_level)
return self.exit_node()
def write_final_structure(self):
"""Write the final structure"""
system_db = self.get_variable("_system_db")
system = system_db.system
if system is None:
return
configuration = system.configuration
if configuration is None:
return
if configuration.n_atoms > 0:
# MMCIF file has bonds
filename = self.working_path / "final_structure.mmcif"
text = None
try:
text = configuration.to_mmcif_text()
except Exception:
message = (
"Error creating the mmcif file at the end of the loop\n\n"
+ traceback.format_exc()
)
self.logger.critical(message)
if text is not None:
with open(filename, "w") as fd:
print(text, file=fd)
# CIF file has cell
if configuration.periodicity == 3:
filename = self.working_path / "final_structure.cif"
text = None
try:
text = configuration.to_cif_text()
except Exception:
message = (
"Error creating the cif file at the end of the loop"
"\n\n" + traceback.format_exc()
)
self.logger.critical(message)
if text is not None:
with open(filename, "w") as fd:
print(text, file=fd)
def default_edge_subtype(self):
"""Return the default subtype of the edge. Usually this is 'next'
but for nodes with two or more edges leaving them, such as a loop, this
method will return an appropriate default for the current edge. For
example, by default the first edge emanating from a loop-node is the
'loop' edge; the second, the 'exit' edge.
A return value of 'too many' indicates that the node exceeds the number
of allowed exit edges.
"""
# how many outgoing edges are there?
n_edges = len(self.flowchart.edges(self, direction="out"))
self.logger.debug(f"loop.default_edge_subtype, n_edges = {n_edges}")
if n_edges == 0:
return "loop"
elif n_edges == 1:
return "exit"
else:
return "too many"
def create_parser(self):
"""Setup the command-line / config file parser"""
parser_name = "loop-step"
parser = seamm_util.getParser()
# Remember if the parser exists ... this type of step may have been
# found before
parser_exists = parser.exists(parser_name)
# Create the standard options, e.g. log-level
super().create_parser(name=parser_name)
if not parser_exists:
# Any options for loop itself
pass
# Now need to walk through the steps in the loop...
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "loop":
self.logger.debug("Loop, first node of loop is: {}".format(edge.node2))
next_node = edge.node2
while next_node and next_node != self:
next_node = next_node.create_parser()
return self.exit_node()
def set_id(self, node_id=()):
"""Sequentially number the loop subnodes"""
self.logger.debug("Setting ids for loop {}".format(self))
if self.visited:
return None
else:
self.visited = True
self._id = node_id
self.set_subids(self._id)
return self.exit_node()
def set_subids(self, node_id=()):
"""Set the ids of the nodes in the loop"""
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "loop":
self.logger.debug("Loop, first node of loop is: {}".format(edge.node2))
next_node = edge.node2
n = 0
while next_node and next_node != self:
next_node = next_node.set_id((*node_id, str(n)))
n += 1
def exit_node(self):
"""The next node after the loop, if any"""
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "exit":
self.logger.debug(f"Loop, node after loop is: {edge.node2}")
return edge.node2
# loop is the last node in the flowchart
self.logger.debug("There is no node after the loop")
return None
def loop_node(self):
"""The first node in the loop body"""
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "loop":
self.logger.debug(f"Loop, first node in loop is: {edge.node2}")
return edge.node2
# There is no body of the loop!
self.logger.debug("There is no loop body")
return None
| # -*- coding: utf-8 -*-
"""Non-graphical part of the Loop step in a SEAMM flowchart"""
import logging
from pathlib import Path
import re
import sys
import traceback
import psutil
import pprint
import loop_step
import seamm
import seamm_util
import seamm_util.printing as printing
from seamm_util.printing import FormattedText as __
logger = logging.getLogger(__name__)
job = printing.getPrinter()
printer = printing.getPrinter("loop")
class Loop(seamm.Node):
def __init__(self, flowchart=None, extension=None):
"""Setup the non-graphical part of the Loop step in a
SEAMM flowchart.
Keyword arguments:
"""
logger.debug("Creating Loop {}".format(self))
self.table_handle = None
self.table = None
self._loop_value = None
self._loop_length = None
self._file_handler = None
super().__init__(
flowchart=flowchart, title="Loop", extension=extension, logger=logger
)
# This needs to be after initializing subclasses...
self.parameters = loop_step.LoopParameters()
@property
def version(self):
"""The semantic version of this module."""
return loop_step.__version__
@property
def iter_format(self):
if self._loop_length is None:
return "07"
else:
n = len(str(self._loop_length))
return f"0{n}"
@property
def git_revision(self):
"""The git version of this module."""
return loop_step.__git_revision__
@property
def working_path(self):
return Path(self.directory) / f"iter_{self._loop_value:{self.iter_format}}"
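# Note added for clarity (not part of the original source): iter_format pads the
# iteration counter to the width of the loop length, so the per-iteration
# directories sort naturally. For example, a hypothetical Foreach loop with 120
# values gives iter_format == "03" and working paths such as
# <run-dir>/iter_000 ... <run-dir>/iter_119.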
def description_text(self, P=None):
"""Return a short description of this step.
Return a nicely formatted string describing what this step will
do.
Keyword arguments:
P: a dictionary of parameter values, which may be variables
or final values. If None, then the parameters values will
be used as is.
"""
if not P:
P = self.parameters.values_to_dict()
text = ""
if P["type"] == "For":
subtext = "For {variable} from {start} to {end} by {step}\n"
elif P["type"] == "Foreach":
subtext = "Foreach {variable} in {values}\n"
elif P["type"] == "For rows in table":
subtext = "For rows in table {table}\n"
else:
subtext = "Loop type defined by {type}\n"
text += self.header + "\n" + __(subtext, **P, indent=4 * " ").__str__()
# Print the body of the loop
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "loop":
self.logger.debug("Loop, first node of loop is: {}".format(edge.node2))
next_node = edge.node2
while next_node and not next_node.visited:
next_node.visited = True
text += "\n\n"
text += __(next_node.description_text(), indent=4 * " ").__str__()
next_node = next_node.next()
return text
def describe(self):
"""Write out information about what this node will do"""
self.visited = True
# The description
job.job(__(self.description_text(), indent=self.indent))
return self.exit_node()
def run(self):
"""Run a Loop step."""
# If the loop is empty, just go on
if self.loop_node() is None:
return self.exit_node()
# Set up the directory, etc.
super().run()
P = self.parameters.current_values_to_dict(
context=seamm.flowchart_variables._data
)
# Print out header to the main output
printer.important(self.description_text(P))
# Remove any redirection of printing.
if self._file_handler is not None:
job.removeHandler(self._file_handler)
self._file_handler = None
# Find the handler for job.out and set the level up
job_handler = None
out_handler = None
for handler in job.handlers:
if (
isinstance(handler, logging.FileHandler)
and "job.out" in handler.baseFilename
):
job_handler = handler
job_level = job_handler.level
job_handler.setLevel(printing.JOB)
elif isinstance(handler, logging.StreamHandler):
out_handler = handler
out_level = out_handler.level
out_handler.setLevel(printing.JOB)
# Set up some unchanging variables
if P["type"] == "For rows in table":
if self._loop_value is None:
self.table_handle = self.get_variable(P["table"])
self.table = self.table_handle["table"]
self.table_handle["loop index"] = True
self.logger.info(
"Initialize loop over {} rows in table {}".format(
self.table.shape[0], P["table"]
)
)
self._loop_value = -1
self._loop_length = self.table.shape[0]
printer.job(f" The loop will have {self._loop_length} iterations.")
if self.variable_exists("_loop_indices"):
tmp = self.get_variable("_loop_indices")
self.set_variable(
"_loop_indices",
(
*tmp,
None,
),
)
else:
self.set_variable("_loop_indices", (None,))
where = P["where"]
if where == "Use all rows":
pass
elif where == "Select rows where column":
column = P["query-column"]
op = P["query-op"]
value = P["query-value"]
if self.table.shape[0] > 0:
row = self.table.iloc[0]
tmp = pprint.pformat(row)
self.logger.debug(f"Row is\n{tmp}")
if column not in row:
for key in row.keys():
if column.lower() == key.lower():
column = key
break
if column not in row:
raise ValueError(
f"Looping over table with criterion on column '{column}': "
"that column does not exist."
)
else:
raise NotImplementedError(f"Loop cannot handle '{where}'")
# Cycle through the iterations, setting up the first time.
next_node = self
while next_node is not None:
if next_node is self:
next_node = self.loop_node()
if P["type"] == "For":
if self._loop_value is None:
self.logger.info(
"For {} from {} to {} by {}".format(
P["variable"], P["start"], P["end"], P["step"]
)
)
# See if loop variables are all integers
start = P["start"]
if isinstance(start, str):
start = float(start)
if isinstance(start, float) and start.is_integer():
start = int(start)
step = P["step"]
if isinstance(step, str):
step = float(step)
if isinstance(step, float) and step.is_integer():
step = int(step)
end = P["end"]
if isinstance(end, str):
end = float(end)
if isinstance(end, float) and end.is_integer():
end = int(end)
self.logger.info("Initializing loop")
self._loop_value = start
self.set_variable(P["variable"], self._loop_value)
# Loop to get length... range doesn't work for nonintegers
count = 0
tmp = start
while tmp <= end:
count += 1
tmp += step
self._loop_length = count
printer.job(
f" The loop will have {self._loop_length} iterations."
)
if self.variable_exists("_loop_indices"):
tmp = self.get_variable("_loop_indices")
self.set_variable("_loop_indices", (*tmp, self._loop_value))
else:
self.set_variable("_loop_indices", (self._loop_value,))
self.set_variable("_loop_index", self._loop_value)
else:
self.write_final_structure()
self._loop_value += step
self.set_variable(P["variable"], self._loop_value)
# Set up the index variables
tmp = self.get_variable("_loop_indices")
self.set_variable(
"_loop_indices",
(
*tmp[0:-1],
self._loop_value,
),
)
self.set_variable("_loop_index", self._loop_value)
# See if we are at the end of loop
if self._loop_value > end:
self._loop_value = None
# Revert the loop index variables to the next outer loop
# if there is one, or remove them.
tmp = self.get_variable("_loop_indices")
if len(tmp) <= 1:
self.delete_variable("_loop_indices")
self.delete_variable("_loop_index")
else:
self.set_variable("_loop_indices", tmp[0:-1])
self.set_variable("_loop_index", tmp[-2])
self.logger.info(
f"The loop over {P['variable']} from {start} to "
f"{end} by {step} finished successfully"
)
break
self.logger.info(" Loop value = {}".format(self._loop_value))
elif P["type"] == "Foreach":
self.logger.info(f"Foreach {P['variable']} in {P['values']}")
if self._loop_value is None:
self._loop_value = -1
self._loop_length = len(P["values"])
printer.job(
f" The loop will have {self._loop_length} iterations."
)
if self.variable_exists("_loop_indices"):
tmp = self.get_variable("_loop_indices")
self.set_variable(
"_loop_indices",
(
*tmp,
None,
),
)
else:
self.set_variable("_loop_indices", (None,))
if self._loop_value >= 0:
self.write_final_structure()
self._loop_value += 1
if self._loop_value >= self._loop_length:
self._loop_value = None
self._loop_length = None
# Revert the loop index variables to the next outer loop
# if there is one, or remove them.
tmp = self.get_variable("_loop_indices")
if len(tmp) <= 1:
self.delete_variable("_loop_indices")
self.delete_variable("_loop_index")
else:
self.set_variable("_loop_indices", tmp[0:-1])
self.set_variable("_loop_index", tmp[-2])
self.logger.info("The loop over value finished successfully")
# return the next node after the loop
break
value = P["values"][self._loop_value]
self.set_variable(P["variable"], value)
# Set up the index variables
tmp = self.get_variable("_loop_indices")
self.set_variable(
"_loop_indices",
(
*tmp[0:-1],
self._loop_value,
),
)
self.set_variable("_loop_index", self._loop_value)
self.logger.info(" Loop value = {}".format(value))
elif P["type"] == "For rows in table":
if self._loop_value >= 0:
self.write_final_structure()
# Loop until query is satisfied
while True:
self._loop_value += 1
if self._loop_value >= self.table.shape[0]:
break
if where == "Use all rows":
break
row = self.table.iloc[self._loop_value]
self.logger.debug(f"Query {row[column]} {op} {value}")
if op == "==":
if row[column] == value:
break
elif op == "!=":
if row[column] != value:
break
elif op == ">":
if row[column] > value:
break
elif op == ">=":
if row[column] >= value:
break
elif op == "<":
if row[column] < value:
break
elif op == "<=":
if row[column] <= value:
break
elif op == "contains":
if value in row[column]:
break
elif op == "does not contain":
if value not in row[column]:
break
elif op == "contains regexp":
if re.search(value, row[column]) is not None:
break
elif op == "does not contain regexp":
if re.search(value, row[column]) is None:
break
elif op == "is empty":
# Might be numpy.nan, and NaN != NaN hence odd test.
if row[column] == "" or row[column] != row[column]:
break
elif op == "is not empty":
if row[column] != "" and row[column] == row[column]:
break
else:
raise NotImplementedError(
f"Loop query '{op}' not implemented"
)
if self._loop_value >= self.table.shape[0]:
self._loop_value = None
self.delete_variable("_row")
# Revert the loop index variables to the next outer loop
# if there is one, or remove them.
tmp = self.get_variable("_loop_indices")
if len(tmp) <= 1:
self.delete_variable("_loop_indices")
self.delete_variable("_loop_index")
else:
self.set_variable("_loop_indices", tmp[0:-1])
self.set_variable("_loop_index", tmp[-2])
# and the other info in the table handle
self.table_handle["loop index"] = False
self.table = None
self.table_handle = None
self.logger.info(
"The loop over table "
+ self.parameters["table"].value
+ " finished successfully"
)
# return the next node after the loop
break
# Set up the index variables
self.logger.debug(" _loop_value = {}".format(self._loop_value))
tmp = self.get_variable("_loop_indices")
self.logger.debug(" _loop_indices = {}".format(tmp))
self.set_variable(
"_loop_indices",
(*tmp[0:-1], self.table.index[self._loop_value]),
)
self.logger.debug(
" --> {}".format(self.get_variable("_loop_indices"))
)
self.set_variable("_loop_index", self.table.index[self._loop_value])
self.table_handle["current index"] = self.table.index[
self._loop_value
]
row = self.table.iloc[self._loop_value]
self.set_variable("_row", row)
self.logger.debug(" _row = {}".format(row))
# Direct most output to iteration.out
# A handler for the file
iter_dir = self.working_path
iter_dir.mkdir(parents=True, exist_ok=True)
if self._file_handler is not None:
self._file_handler.close()
job.removeHandler(self._file_handler)
self._file_handler = logging.FileHandler(iter_dir / "iteration.out")
self._file_handler.setLevel(printing.NORMAL)
formatter = logging.Formatter(fmt="{message:s}", style="{")
self._file_handler.setFormatter(formatter)
job.addHandler(self._file_handler)
# Add the iteration to the ids so the directory structure is
# reasonable
self.flowchart.reset_visited()
self.set_subids(
(*self._id, f"iter_{self._loop_value:{self.iter_format}}")
)
# Run through the steps in the loop body
try:
next_node = next_node.run()
except DeprecationWarning as e:
printer.normal("\nDeprecation warning: " + str(e))
traceback.print_exc(file=sys.stderr)
traceback.print_exc(file=sys.stdout)
except Exception as e:
printer.job(
f"Caught exception in loop iteration {self._loop_value}: {str(e)}"
)
with open(iter_dir / "stderr.out", "a") as fd:
traceback.print_exc(file=fd)
if "continue" in P["errors"]:
next_node = self
elif "exit" in P["errors"]:
break
else:
raise
if self.logger.isEnabledFor(logging.DEBUG):
p = psutil.Process()
self.logger.debug(pprint.pformat(p.open_files()))
self.logger.debug(f"Bottom of loop {next_node}")
# Return to the normally scheduled step, i.e. fall out of the loop.
# Remove any redirection of printing.
if self._file_handler is not None:
self._file_handler.close()
job.removeHandler(self._file_handler)
self._file_handler = None
if job_handler is not None:
job_handler.setLevel(job_level)
if out_handler is not None:
out_handler.setLevel(out_level)
return self.exit_node()
def write_final_structure(self):
"""Write the final structure"""
system_db = self.get_variable("_system_db")
system = system_db.system
if system is None:
return
configuration = system.configuration
if configuration is None:
return
if configuration.n_atoms > 0:
# MMCIF file has bonds
filename = self.working_path / "final_structure.mmcif"
text = None
try:
text = configuration.to_mmcif_text()
except Exception:
message = (
"Error creating the mmcif file at the end of the loop\n\n"
+ traceback.format_exc()
)
self.logger.critical(message)
if text is not None:
with open(filename, "w") as fd:
print(text, file=fd)
# CIF file has cell
if configuration.periodicity == 3:
filename = self.working_path / "final_structure.cif"
text = None
try:
text = configuration.to_cif_text()
except Exception:
message = (
"Error creating the cif file at the end of the loop"
"\n\n" + traceback.format_exc()
)
self.logger.critical(message)
if text is not None:
with open(filename, "w") as fd:
print(text, file=fd)
def default_edge_subtype(self):
"""Return the default subtype of the edge. Usually this is 'next'
but for nodes with two or more edges leaving them, such as a loop, this
method will return an appropriate default for the current edge. For
example, by default the first edge emanating from a loop-node is the
'loop' edge; the second, the 'exit' edge.
A return value of 'too many' indicates that the node exceeds the number
of allowed exit edges.
"""
# how many outgoing edges are there?
n_edges = len(self.flowchart.edges(self, direction="out"))
self.logger.debug(f"loop.default_edge_subtype, n_edges = {n_edges}")
if n_edges == 0:
return "loop"
elif n_edges == 1:
return "exit"
else:
return "too many"
def create_parser(self):
"""Setup the command-line / config file parser"""
parser_name = "loop-step"
parser = seamm_util.getParser()
# Remember if the parser exists ... this type of step may have been
# found before
parser_exists = parser.exists(parser_name)
# Create the standard options, e.g. log-level
super().create_parser(name=parser_name)
if not parser_exists:
# Any options for loop itself
pass
# Now need to walk through the steps in the loop...
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "loop":
self.logger.debug("Loop, first node of loop is: {}".format(edge.node2))
next_node = edge.node2
while next_node and next_node != self:
next_node = next_node.create_parser()
return self.exit_node()
def set_id(self, node_id=()):
"""Sequentially number the loop subnodes"""
self.logger.debug("Setting ids for loop {}".format(self))
if self.visited:
return None
else:
self.visited = True
self._id = node_id
self.set_subids(self._id)
return self.exit_node()
def set_subids(self, node_id=()):
"""Set the ids of the nodes in the loop"""
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "loop":
self.logger.debug("Loop, first node of loop is: {}".format(edge.node2))
next_node = edge.node2
n = 0
while next_node and next_node != self:
next_node = next_node.set_id((*node_id, str(n)))
n += 1
def exit_node(self):
"""The next node after the loop, if any"""
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "exit":
self.logger.debug(f"Loop, node after loop is: {edge.node2}")
return edge.node2
# loop is the last node in the flowchart
self.logger.debug("There is no node after the loop")
return None
def loop_node(self):
"""The first node in the loop body"""
for edge in self.flowchart.edges(self, direction="out"):
if edge.edge_subtype == "loop":
self.logger.debug(f"Loop, first node in loop is: {edge.node2}")
return edge.node2
# There is no body of the loop!
self.logger.debug("There is no loop body")
return None
|
import glob
import json
import copy
import importlib
import threading
import logging
import pytz
#for tables
import numpy
import numpy as np
import datetime
import dateutil.parser
import sys
import os
import time
import uuid
import hashlib
import random
import traceback
from dates import *
# type hints
from typing import List
import modeltemplates
# for Observer
from queue import Queue
from queue import Empty
import utils
from timeseries import TimeSeriesTable
from dates import *
import inspect
from utils import str_lim
"""
next Todo
-
- execute: problem in the thread with the execution
- code documentation
- google document
-
"""
sys.path.append("./plugins") #for the importlib loader, doesn't understand relative paths
#sys.path.append("./private") #for the importlib loader, doesn't understand relative paths
myGlobalDir = os.path.dirname(os.path.realpath(__file__)) # holds the directory of this script
def getRandomId():
return '%08x' % random.randrange(16 ** 8)
#used as an OOP wrapper for the flat and procedural style of the model class
class Node():
""" used as an OOP wrapper for the flat and procedural style of the model class
it is a convenient way to access nodes and their hierarchy and internals
"""
def __init__(self,myModel,myId):
""" a node can be created by calling the
mynode = model.get_node("root.mynode") or
mynode = Node(mymodel,"123")
Returns:
a node object for further access to values, hierarchy etc.
"""
self.model = myModel # this is not a copy!!
self.id = myId
def __repr__(self):
return 'Node(id={:}, value={:})'.format(self.id, self.get_value())
def get_value(self):
""" Returns:
the "value" property of the node
None if node has no "value"
"""
return self.model.get_value(self.id)
#####################
# time series node API
def get_time_series(self, start=None,
end=None,
noBins=None,
includeIntervalLimits=False,
resampleTimes=None,
format="default",
toList = False,
resampleMethod = None):
"""
Returns
dict with ["__time":[...],"values":[...]
"""
browsePath = self.model.get_browse_path(self.id)
data = self.model.time_series_get_table(variables = [browsePath],
tableDescriptor=None,
start=start,
end=end,
noBins=noBins,
includeIntervalLimits=includeIntervalLimits,
resampleTimes=resampleTimes,
format=format,
toList=toList,
resampleMethod=resampleMethod)
if data !={} :
return data[browsePath]
else:
return None
def get_raw_time_series(self,start=None,end=None):
return self.model.time_series_get_raw(self.id,start=start,end=end)
def add_references(self,targetNodes,deleteAll=False):
"""
add references from the node to the targets
Args:
targetNodes: node or list of nodes to reference to
deleteAll: if set true, we delete all existing references before creating the new ones
Returns
True/False for success/error
"""
if deleteAll:
self.model.remove_forward_refs(self.id)#this deletes all existing
if type(targetNodes) is not list:
targetNodes = [targetNodes]
targetIds = [node.get_id() for node in targetNodes]
return self.model.add_forward_refs(self.id,targetIds)
def set_value(self,value):
"""
special support for "column" types: if a scalar is given, we make a "full" array
"""
if self.get_properties()["type"] == "column":
if type(value) != numpy.ndarray and type(value) != list:
#we have a scalar, so we set it
#get the len of the table
timeNode = self.get_table_time_node()
length = len(timeNode.get_value())
value = numpy.full(length,value,dtype=numpy.float64)
return self.model.set_value(self.id,value)
def set_time_series(self,values=None,times=None):
"""
replaces the time series with the given values and times; the existing data is deleted
"""
return self.model.time_series_set(self.id,values=values,times=times)
def insert_time_series(self,values=None,times=None,allowDuplicates = False):
"""
insert data, if the time stamp exists already, we replace it
"""
return self.model.time_series_insert(self.id,values=values, times=times, allowDuplicates=allowDuplicates)
def merge_time_series(self,values=None, times=None):
""" merge the times series of mergeNode into this node"""
return self.model.time_series_merge(self.id,values = values,times=times)
def delete_time_series(self,start=None,end=None):
return self.model.time_series_delete_area(self.id, start=start, end=end)
#####################
# event series node API
def get_event_series(self, start=None, end=None, format="default",eventFilter = None):
return self.model.event_series_get(self.id,start=start,end=end,format=format,eventFilter=eventFilter)
def set_event_series(self, values=None, times=None):
"""
replaces the event series with the given values and times; the existing data is deleted
"""
return self.model.event_series_set(self.id,values=values,times=times)
def insert_event_series(self,values=None,times=None,allowEventDuplicates = False):
return self.model.event_series_insert(self.id,values,times,allowEventDuplicates=allowEventDuplicates)
def delete_event_series(self,start=None, end = None, eventsToDelete=[]):
return self.model.event_series_delete(desc=self.id,start=start,end=end,eventsToDelete=eventsToDelete)
def get_parent(self):
""" Returns:
a Node()-instance of the parent of the current node,
None if no parent available
"""
nodeInfo = self.model.get_node_info(self.id)
if nodeInfo:
return self.model.get_node(nodeInfo["parent"])
else:
return None
def get_child(self,childName):
"""
Args:
childName(nodedescription):
Returns:
a Node() instance of the child holding the childName
None if the current node does not have a child with the name childName
"""
nodeInfo = self.model.get_node_info(self.id)
if nodeInfo:
for childId in nodeInfo['children']:
childInfo = self.model.get_node_info(childId)
if childInfo["name"] == childName:
return self.model.get_node(childId)
return None
def delete(self):
"""
delete this node from the model; note that the object itself is not destroyed, but it is disconnected from the model
and should not be used anymore afterwards
:return:
"""
return self.model.delete_node(self.id)
def create_child(self,name=None,type="folder",value=None,properties={}):
"""
create a node under the current node, if the node exists already, we get the node
Args:
name [string] the child name
type [string] the type of the node
value [any] direct assignment of values
properties [dict] a dict with further settings of properties like value, type etc
Returns:
the node objects or none if not available
"""
if name is None:
name = '%08x' % random.randrange(16 ** 8)
id = self.model.create_node(parent=self.id,name=name,type=type,value=value,properties=properties)
if id:
return self.model.get_node(id)
else:
#we try to get it anyways
return self.get_child(name)
def get_children(self, deepLevel=1):
""" Returns:
a list of Node()-objects which are the children of the current node
args:
deepLevel: set >1 to get children and children's children
"""
nodeInfo = self.model.get_node_info(self.id)
children = []
if nodeInfo["children"]:
children=[self.model.get_node(id) for id in nodeInfo['children'] ]
while deepLevel>1:
deepLevel -=1
childrenOld = children.copy()
for child in childrenOld:
children.extend(child.get_children())
#remove duplicates via id:
childDict = {child.get_id():child for child in children} # same keys(id) will only be there once
children = list(childDict.values())
return children
def get_properties(self):
""" Returns:
a dictionary holding the properties of the node like {"value":123,"name":"myVariable","children":...}
"""
nodeInfo = self.model.get_node_info(self.id)
return copy.deepcopy(nodeInfo)
def get_type(self):
"""
Returns:
the type of the node
"""
return self.get_property("type")
def get_property(self,property):
"""
Args:
property: the property name asked for
Returns:
the value of the property behind the property given
None if the property does not exist
"""
nodeDict =self.get_properties()
if property in nodeDict:
return self.get_properties()[property]
else:
return None
def set_properties(self,properties):
"""
add or modify properties of a node
Args:
properties [dict] holding key,value for the properties
Returns
True for ok, False for not done
"""
return self.model.set_properties(properties,nodeDesc=self.id)
def get_model(self):
""" this function should only be used for testing, we should never be in the need to access the model inside
Returns:
the underlying model of type Model() class
"""
return self.model
def get_target_ids(self):
""" this function returns the target ids of a referencer as a list, not resolving the leaves"""
if self.get_properties()["type"] != "referencer":
return None
return self.get_properties()["forwardRefs"]
def get_target(self):
""" this function returns the first direct taret node of a referencer not resolving the leaves"""
if self.get_properties()["type"] == "referencer":
targets = self.get_properties()["forwardRefs"]
if targets:
return Node(self.model,targets[0])
return None
def get_targets(self):
""" this function returns the target Nodes of a referencer as a list, not resolving the leaves"""
if self.get_properties()["type"] != "referencer":
return None
targets = []
for nodeid in self.get_properties()["forwardRefs"]:
targets.append(Node(self.model,nodeid))
return targets
def get_leaves(self):
""" this function returns a list of Nodes containing the leaves where this referencer points to
this function works only for nodes of type "referencer", as we are following the forward references
leaves are defined as following:
1) all nodes that are listed under the forward references and which are not of type referencer or folder
2) if nodes pointed to are referencer, the targets are again analyzed
3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves
folders and referencers inside the folder are not taken into account
doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1
Returns:
all nodes which are considered leaves as a list of Node() objects
"""
leaves = self.model.get_leaves(self.id) # a list of node dicts
leaveNodes = []
for leave in leaves:
leaveNodes.append(Node(self.model,leave["id"]))
return leaveNodes
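# Hypothetical usage sketch (node names are illustrative, not from the original source):
#   ref = model.get_node("root.myReferencer")
#   for leaf in ref.get_leaves():
#       print(leaf.get_browse_path(), leaf.get_value())
# Folder targets contribute their non-folder, non-referencer children; referencer
# targets are followed recursively, as described in the docstring above.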
def get_leaves_ids(self):
"""
get the list of ids of the leaves, see get_leaves()
Returns:
a list of ids of the leaves
"""
return self.model.get_leaves_ids(self.id)
def get_id(self):
""" Returns: the nodeid (which is generated by the system) """
return self.id
def get_browse_path(self):
""" Returns: the browsepath along the style "root.myfolder.myvariable..." """
return self.model.get_browse_path(self.id)
def get_name(self):
""" Returns: the name of the node without the path """
return self.model.get_node_info(self.id)["name"]
def get_node(self,desc):
return self.model.get_node(desc)
def get_table_time_node(self):
""" if the current node belongs to a table, then we can get the time node
a node
Returns:
(obj Node()) the node of type
"""
timeNode = self.model.find_table_time_node(self.id)
if timeNode:
return Node(self.model,timeNode)
else:
return None
def get_table_len(self):
"""
if the current node is a type "table", we get the current len
Return:
the len of the columns of the table
"""
return self.model.get_table_len(self.id)
def get_table_node(self):
"""
if the current node is a column of a time series table, we get the corresponding table node of type "table"
Return:
a Node() of type "table" which is the table of the current node
"""
tableId = self.model.find_table_node(self.id)
if tableId:
return self.model.get_node(tableId)
else:
return None
def get_time_indices(self,startTime,endTime):
""" works only for the time node, it looks to find the timeField node of the table to which the node belongs
then tries to find start and end time inside the timeField column and returns the index (rownumber) which are
INSIDE the given startTime, endTime
Args:
startTime: the startTime to look up ,supported formats: epoch seconds, datetime object, iso string
endTime: the startTime to look up ,supported formats: epoch seconds, datetime object, iso string
Returns:
(numpy array) indexnumbers containing the rows of the table that fall inside the given [startTime, endTime] intervall
None for not finding table, timeField, start-endTimes whatsoever
"""
try:
startTime = date2secs(startTime)
endTime = date2secs(endTime)
times = numpy.asarray(self.get_value())
indices = numpy.where((times >= startTime) & (times <= endTime))[0]
return indices
except Exception:
return None
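# Hypothetical usage sketch (paths are illustrative): called on the time node of a
# table, get_time_indices() returns the row numbers falling inside the interval.
#   timeNode = model.get_node("root.myTable.time")
#   rows = timeNode.get_time_indices("2020-01-01T00:00:00Z", "2020-01-02T00:00:00Z")
#   # rows is a numpy array of row indices, or None if the lookup fails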
def execute(self):
return self.model.execute_function(self.id)
def execute_synchronous(self):
return self.model.execute_synchronous(self.id)
def instantiate(self):
return self.model.instantiate_object(self.id)
def get_object(self):
return self.model.get_object(self.id)
def get_logger(self):
return self.model.logger
def connect_to_table(self,tableNode):
"""
connect a node to a table; the node must be of type "column"
the node itself will be reset and filled with numpy.inf and prepared to work with the table:
an array will be created with np.inf of the current table size
and the column will be hooked to the table referencer
Returns:
True on success
"""
if self.get_property("type") != "column":
return False
#now make an array of np.inf of the current table size and apply the value
timeNode = tableNode.get_table_time_node()
if not timeNode:
return False
tableLen = len(timeNode.get_value())
self.set_value(numpy.full(tableLen,numpy.inf,dtype=numpy.float64))
#now hook it as column to the table
#check if we are part of it already
for column in tableNode.get_child("columns").get_leaves():
if column.get_id() == self.get_id():
return True
#now connect it to the table
return self.model.add_forward_refs(tableNode.get_child("columns").get_id(), [self.id],allowDuplicates=False)
def get_columns(self):
"""
get the column nodes of a table without the time node
can be executed on the table node
Returns:
list of node objects which are the columns of the table without the time node
"""
if self.get_properties()["type"] != "table":
return None
nodes = self.get_child("columns").get_leaves()
timeNode = self.get_table_time_node()
return [node for node in self.get_child("columns").get_leaves() if node.get_id() != timeNode.get_id()]
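# Hypothetical usage sketch (node names are illustrative): hook a fresh column into
# an existing table, then list the table's data columns without the time node.
#   col = model.get_node("root.newColumn")    # node of type "column"
#   table = model.get_node("root.myTable")    # node of type "table"
#   col.connect_to_table(table)               # fills col with numpy.inf of table length
#   dataColumns = table.get_columns()         # all columns except the time node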
class Observer:
# The observer needs a reference to the model, because the rest service is not able to detect
# when the client connection is closed, but the observer message handling loop can detect it
# this way the observer can detach itself from the model, when the client is disconnected
# there are two queues involved: the updateQueue holding events pushed into the observer by the model
# and the eventQueues, which are the filtered updateQueue (filtering avoids sending multiple identical events in a short time)
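# Sketch of the event flow, added for clarity (the notify call on the model side is
# assumed, it is not shown in this file):
#   model (e.g. notify_observers) --> Observer.update(event) --> self.updateQueue
#   get_event() drains updateQueue, groups events by an identification string built
#   from the event name and its data, and yields at most one event per
#   identification within self.minWaitTime seconds.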
def __init__(self, model):
self.model = model
# Message queues to store the new events and last time stamps
self.updateQueue = Queue()
self.eventQueues = {} # k,v = event:{"lasttimestamp":datetime,"queue":Queue()
self.minWaitTime = 0.500 #in seconds float
# use the logger of the model
self.logger = self.model.logger
self.lock = threading.RLock()
#preload queue: this is a workaround as the browser does not get the first 2 events immediately
# it actually doesn't help ..?
for i in range(2):
self.updateQueue.put({"event":"_preload","id":"","data":{"xy":str(i)}})
def update(self, event):
"""
inform the observer about the occurrence of an event
Args:
event (dict): the event; missing "data", "id" and "event" keys are filled with empty defaults
"""
defaultEvent = {"data":"","id":"","event":""}
defaultEvent.update(event)
self.updateQueue.put(defaultEvent)
#self.logger.debug(f"Qup {id(self)} {defaultEvent["event"]}, {defaultEvent["id"]}")
def get_event(self):
"""
get the next event from the observer class, this is used as a generator for the webserver
we also filter out events to avoid a train of identical events
the filtering uses self.minWaitTime: within that period we don't send identical events;
events are "identical" if they have the same "event" and "data"
"""
self.logger.debug(f"Observer {id(self)} get_event()")
stop_event_processing = False # This flag shows when to stop the event processing
while not stop_event_processing:
try:
# Try to retrieve an item from the update queue
event = self.updateQueue.get(block=True,timeout=self.minWaitTime)
#self.logger.debug(f"event pick {event}")
#create an eventIdentification, this is used to filter out repeated events
# we build the eventIdentification such that events carrying unique information are kept
# we take all information from the event.data field, so only the events WITHOUT unique data will be removed
# those are typically the tree.update events
eventIdentification = event["event"] #the event name itself
for key in event["data"]:
eventIdentification = eventIdentification+str(key)+str(event["data"][key])
#now sort this event into the queues of eventids
if eventIdentification not in self.eventQueues:
# this is a new type/identification of event, create an entry in the event queue
# put the event in the queue and make the last timestamp so that we send it out now
self.eventQueues[eventIdentification]={"lastTimeStamp":0,"queue":Queue()}
self.eventQueues[eventIdentification]["queue"].put(event)
except Exception as ex:
# this happens if we time out the queue get, no problem, just continue
#self.logger.error(f"Exception observer {id(self)} thread self.updateQueue.get: {ex},{str(sys.exc_info()[0])}")
pass
#now go over all the sorted event queues and check what to send out:
if 0:
#show the queues
for k,v in self.eventQueues.items():
q = v["queue"]
qLen = q.qsize()
#self.logger.debug(f"Queue {k}: len {qLen} {[q.queue[id] for id in range(qLen)]}")
try:
now = time.time()
for eventIdentification,entry in self.eventQueues.items(): # entry is {"lastTimeStamp": ..., "queue": ...}
#self.logger.debug(f"observer {id(self)} check queue of {eventIdentification} size: {entry["queue"].qsize()},last:{entry["lastTimeStamp"]}, now:{now}, ready: {now > (entry["lastTimeStamp"]+self.minWaitTime)}")
if (not entry["queue"].empty()) and (now > (entry["lastTimeStamp"]+self.minWaitTime)):
#send this event, the timeout was met, we pull the first event from the queue, trash the remaining ones
"""
old code
self.eventQueues[eventIdentification]["lastTimeStamp"]=now
#send out this event
myEvent = self.eventQueues[eventIdentification]["queue"].get()
event_string = f'id:{myEvent['id']}\nevent: {myEvent['event']}\ndata: {myEvent['data']}\n\n'
self.logger.debug(f'Observer {id(self)} sending event: {event_string}')
#pull empty the queue
if self.eventQueues[eventIdentification]['queue'].qsize():
self.logger.debug(f"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]["queue"].qsize()}")
while not self.eventQueues[eventIdentification]["queue"].empty():
self.eventQueues[eventIdentification]["queue"].get(False)
self.logger.debug(f"Qyield {id(self)} : {myEvent}")
yield event_string
"""
self.eventQueues[eventIdentification]["lastTimeStamp"]=now
#send out this event
#pull empty the queue
if self.eventQueues[eventIdentification]['queue'].qsize():
#self.logger.debug(f"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]["queue"].qsize()}")
while not self.eventQueues[eventIdentification]["queue"].empty():
myEvent = self.eventQueues[eventIdentification]["queue"].get(False)
event_string = f'id:{myEvent["id"]}\nevent: {myEvent["event"]}\ndata: {json.dumps(myEvent["data"])}\n\n'
#self.logger.debug(f"Qyield {id(self)} : {myEvent}")
yield event_string
# This exception is raised when the generator function is exited, which means that the
# client side connection to the SSE stream was closed, thus the observer can be removed
except GeneratorExit:
self.logger.warning(f"Observer {id(self)} connection closed.")
stop_event_processing = True
self.logger.warning(f"Observer {id(self)} exiting event processing.")
# Detach this observer from the model
self.model.detach_observer(self)
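# Hypothetical consumption sketch (the web-framework glue and the registration call
# are assumptions, they are not part of this file): get_event() yields
# server-sent-event strings, so a streaming endpoint could look roughly like:
#   observer = Observer(model)
#   # model.attach_observer(observer)                     # assumed registration call
#   # return Response(observer.get_event(), mimetype="text/event-stream")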
class Model:
nodeTemplate = {"id": None, "name": None, "type": "folder", "parent": None, "children": [], "backRefs": [],"forwardRefs":[],"value":None}
def __init__(self):
"""
initialize an empty Model object, it will contain the root Node as folder with Id "0"
during the initialization, the plug-ins (all files in the ./plugins folder) are also loaded:
all templates and functions are imported
a model holds all modelling information and data to work on
"""
self.version = 0.1
self.model = {"1":{
"name":"root",
"type":"folder",
"children":[],
"parent":"0",
"id":"1",
"backRefs":[],
"forwardRefs":[],
"version":self.version
}}
self.disableObserverCounter = 0 # a counting semaphore (under manual lock) for the disabling: if zero, notify_observers is active, otherwise not
self.__init_logger(logging.DEBUG)
self.globalIdCounter=1 # increased on every creation of a node, it holds the last inserted node id
self.idCreationHash = True # if this is true, we create the id per hash, not per counter
self.ts = TimeSeriesTable()
self.functions={} # a dictionary holding all functions from ./plugins
self.templates={} # holding all templates from ./plugins
self.lock = threading.RLock()
self.executeFunctionRunning = False # set to true, makes sure only one function runs at a time
self.objectClasses = {} # a dictionary holding all object classes from the ./plugins
self.import_default_plugins()
self.differentialHandles ={} # containing model_copy entries to support differential queries
self.diffHandleCounter = 0 # used only for debugging
self.differentialHandlesMaxPerUser = 10
self.currentModelName = "emptyModel" # the current name of the model
self.modelUpdateCounter = 0 #this is for the tree observer, on any change, we update the counter
self.observerStatus = {} # a dict holding the key = observerid and value : the needed status of an observer processing
self.executionQueue = Queue()
self.observers = []
self.sse_event_id = 1
self.start_function_execution_thread()
def __del__(self):
self.functionExecutionRunning = False # stop the execution thread of functions
def __init_logger(self, level):
"""setup the logger object"""
self.logger = logging.getLogger("Model-"+'%08x' % random.randrange(16 ** 8))
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
logfile = logging.FileHandler("./log/model.log")
logfile.setFormatter(formatter)
self.logger.addHandler(logfile)
self.logger.setLevel(level)
def __get_id(self, id):
"""
Args:
id (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
or a "fancy" path mixed like "1000.min" where 1000 is a node id, only the first is allowed as Nodeid, the followings are names
Returns:
(string): the node id as string
None if not found
"""
if id in self.model:
return id
#maybe a browsepath?
try:
names = id.split('.')
if names[0]=="root":
names = names[1:]
actualSearchId = "1"
elif names[0] in self.model:
#self.logger.debug(f"fancy browsepath {names}")
actualSearchId = names[0]
names = names[1:]
else:
return None
except:
return None
#now we start at root
for name in names:
nextSearchId = None
for childId in self.model[actualSearchId]["children"]:
if self.model[childId]["name"] == name:
#this is a match
nextSearchId = childId
break
if not nextSearchId:
return None
#we found it, go deeper now
actualSearchId = nextSearchId
return actualSearchId
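# Examples of descriptors __get_id resolves (values are illustrative):
#   "1"                   -> "1"   (already a node id)
#   "root.myfolder.myvar" -> the id found by walking the children from root
#   "1000.min"            -> the child named "min" under the node with id "1000"
# Returns None when the descriptor cannot be resolved.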
def get_node(self,desc):
""" instantiate a Node() object on the node given as desc
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
Returns:
(Node()): a node object of the given node
None if not found
"""
with self.lock:
id = self.__get_id(desc)
if id:
return Node(self,id)
def find_node(self,search,matchProperty={}):
"""
the search is a substring match pattern for the browse path; we return the first match,
optionally requiring the key/value pairs in matchProperty to match the node's properties
"""
with self.lock:
for id in self.model:
if search in self.get_browse_path(id):
if matchProperty!={}:
for k,v in matchProperty.items():
if k not in self.model[id]:
continue
if self.model[id][k]!=v:
continue
return Node(self,id)
return Node(self,id)
return None
def find_nodes(self,search,matchProperty={}):
"""
the search is a substring match pattern for the browse path; we return all matches as nodes
"""
found = []
with self.lock:
for id in self.model:
if search in self.get_browse_path(id):
if matchProperty!={}:
for k,v in matchProperty.items():
if k not in self.model[id]:
break
if self.model[id][k]!=v:
break
found.append(Node(self,id))
return found
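# Hypothetical usage sketch (search string and property are illustrative):
#   hits = model.find_nodes("myfolder", matchProperty={"type": "variable"})
#   paths = [n.get_browse_path() for n in hits]
# The search string is a substring match on the browse path; matchProperty further
# restricts the hits by node properties.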
def get_node_info(self,desc,includeLongValues=True):
"""
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
includeLongValues: if true, we include values for columns, files and time series
Returns:
(dict): a dictionary holding all properties of the node including references and children
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
#optionally exclude the values of columns, files and time series
if self.model[id]["type"] in ["column","file","timeseries"]:
if includeLongValues:
return copy.deepcopy(self.model[id])
else:
return {k:v for k,v in self.model[id].items() if k!="value"}
elif self.model[id]["type"]== "object":
return {k: v for k, v in self.model[id].items() if k != "object"} # don't take the "object" key
else:
#take all
return copy.deepcopy(self.model[id])
def __get_node_with_children(self,id,nodes,includeForwardRefs=True):
"""
recursive helper for get_branch
"""
if self.model[id]["type"] in ["file","column","timeseries"]:
#we do not take these values
nodes[id]={k:v for k,v in self.model[id].items() if k!="value"} # copy the whole but leave out the value
elif self.model[id]["type"] == "referencer":
nodes[id] = self.model[id]
if includeForwardRefs:
#for referencers, we take the direct targets
for targetId in self.model[id]["forwardRefs"]:
if self.model[targetId]["type"] in ["file", "column","timeseries"]:
# we do not take these values
target = {k: v for k, v in self.model[targetId].items() if k != "value"} # copy the whole target node but leave out the value
else:
target = copy.deepcopy(self.model[targetId])
#xxx todo, we might take the wrong backrefs with us, also these target nodes might not have their parent here
nodes[targetId]=target
else:
nodes[id]=self.model[id]
for child in self.model[id]["children"]:
nodes.update(self.__get_node_with_children(child,nodes,includeForwardRefs))
return nodes
def get_branch(self,desc,includeRoot=True,includeForwardRefs=True):
"""
get a branch of the model starting from desc including all children excluding:
columns
files
for referencers, we do not follow deep search for leaves, we just include the first level referenced nodes
referencers pointing to nodes that are not part of the branch will also be included
Returns:
a dict of {id: nodedict} that can be used as a full valid model again
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
nodes = {}
nodes.update(self.__get_node_with_children(id,nodes,includeForwardRefs))
#now we also need all nodes to the desc
if includeRoot:
while self.model[id]["parent"]!="0":
#the parent is valid, so take it; we don't make further checks for files and others
parentId = self.model[id]["parent"]
parentNode = copy.deepcopy(self.model[parentId])
parentNode["children"]=[id] # the other side-children are not of interest
nodes.update({parentId:parentNode})
id = self.model[id]["parent"]
return copy.deepcopy(nodes)
def __get_node_with_children_pretty(self,id,depth = None,ignore = []):
"""
recursive helper for get_branch_pretty
args:
nodes: the nodes so far
"""
#t=utils.Profiling(f"id {self.get_browse_path(id)}, ignore = {ignore}")
result = {}
node = self.model[id]
#create my properties
props = {k: copy.deepcopy(v) for k, v in node.items() if k not in ["value", "backRefs", "children"]}
if node["type"] not in ["file", "column","timeseries"]:
# we also take the value then
props["value"] = copy.deepcopy(node["value"])
if node["type"] == "referencer" and (depth is None or depth>0):
#tt = utils.Profiling("get leaves")
leaves = self.get_leaves_ids(id)
#print(tt)
#tt.start("get leaves data")
forwards = [self.get_browse_path(leaf) for leaf in leaves]
props["leaves"]=forwards
#tt.lap("1")
props["targets"] = [self.get_browse_path(id) for id in self.model[id]["forwardRefs"]]
props["leavesIds"]=leaves
props["leavesValues"] = [self.get_value(id) if self.model[id]["type"] not in ["file","column","timeseries"] else None for id in leaves]
#tt.lap("2")
validation = []
props["leavesProperties"]={}
for id in leaves:
prop = self.get_node_info(id,includeLongValues=False)
if "validation" in prop:
validation.append(prop["validation"])
else:
validation.append(None)
props["leavesProperties"][id]=prop
props["leavesProperties"][id]["browsePath"]=self.get_browse_path(id)
#tt.lap("3")
props["leavesValidation"] = validation
#print(tt)
#make sure we have the browsepath on board
if "browsePath" not in props:
props["browsePath"]=self.get_browse_path(id)
result[".properties"]=props
if depth is None or depth>0:
#now the children
nextDepth = None
if depth is not None:
nextDepth = depth -1
for childId in node["children"]:
childPath = self.get_browse_path(childId)
if any([ignoreName in childPath for ignoreName in ignore]):
#self.logger.debug(f"ignore {childPath}")
pass
else:
result[self.model[childId]["name"]]=self.__get_node_with_children_pretty(childId,nextDepth,ignore)
#print(t)
return result
def get_branch_pretty(self,desc,depth=None,ignore = []):
"""
get a branch as a nested dict of the form
{"child1": {"child3": {...}, ".properties": {...}}, "child2": {...}, ".properties": {...}}
the node properties appear under the ".properties" key, the children are direct entries keyed by their names
for referencers, the ".properties" entry also holds the resolved leaves with full paths,
e.g. "leaves": ["root.folder1.target2", "root.variable.bare", ...]
Args:
desc [string] the root node to start from
depth [int] the depth to look into
"""
with self.lock:
#p=utils.Profiling("get_branch_pretty")
id = self.__get_id(desc)
if not id: return None
res = self.__get_node_with_children_pretty(id,depth,ignore)
#self.logger.debug(p)
return res
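# Illustrative shape of the returned structure (node names and values are made up):
#   {
#     ".properties": {"id": "1", "name": "root", "type": "folder", "browsePath": "root", ...},
#     "myfolder": {
#       ".properties": {...},
#       "myvar": {".properties": {"type": "variable", "value": 42, ...}}
#     }
#   }
# Referencer nodes additionally carry "leaves", "targets", "leavesValues" etc. inside
# their ".properties" entry.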
def get_node_with_children(self,desc):
""" retrieve node information including children of the first level
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
Returns:
(Node()): a node object of the given node including the browsepath
None if not found
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
response = copy.deepcopy(self.model[id])
response["browsePath"]=self.get_browse_path(id)
if response["children"]!=[]:
children =[]
for childId in response["children"]:
childInfo = copy.deepcopy(self.model[childId])
childInfo["browsePath"]=self.get_browse_path(childId)
children.append(childInfo)
response["children"]=children
return response
def get_models(self):
"""
get the available model files from the disk under /models
: Returns: a list of strings
"""
try:
mydir = myGlobalDir
os.chdir(mydir) # to enable import easily
files = os.listdir(mydir + '/models')
# take only the ones ending in '.json', but cut the '.model.json' extension
models = [f.split('.model')[0] for f in files if f.endswith(".json")]
return models
except Exception as ex:
self.logger.error("Model.get_models() failed "+str(ex))
return []
def get_info(self):
"""
get some information about the model
Returns: (dict) key value pairs on information of the model,
"""
return {"name":self.currentModelName}
def import_plugins_from_directory(self, plugin_directory: str, check_file_marker = True):
""" find all plugins from plugin_directory.
take from there the templates from the files and the functions
Args:
check_file_marker: if set to True, we expect a "#21datalabplugin" string in the first line
"""
if plugin_directory not in sys.path:
sys.path.append(plugin_directory) # for the importlib to find the stuff
plugin_filenames = glob.glob(os.path.join(plugin_directory, '**/*.py'), recursive=True)
for fileName in plugin_filenames:
if fileName.startswith('__'):
continue # avoid __pycache__ things
#we need to check if extra plugins have the "#21datalabplugin" marker in the first line
if check_file_marker:
absolutePath = os.path.join(myGlobalDir,fileName)
f = open(absolutePath,"r")
firstLine = f.readline()
f.close()
if firstLine != "#21datalabplugin\n":
continue
filename_relative = os.path.relpath(fileName, plugin_directory)
moduleName = os.path.splitext(filename_relative)[0].replace(os.path.sep, '.')
self.logger.info(f"import plugin lib {moduleName}")
module = importlib.import_module(moduleName)
module = importlib.reload(module) # if we change an already imported, python uses the cache, so to make sure we always get the latest, reimport here
#now analyze all objects in the module
for objName in dir(module):
if objName.startswith('__'):
continue # these are python generated info objects, we don't want them
element = getattr(module,objName)
if type(element) is dict:
#this is a template information
self.templates[moduleName+"."+objName]=copy.deepcopy(element)
elif (inspect.isclass(element)):
newClass = {"module":module,"class":element}
self.objectClasses[moduleName + "." + objName] = newClass
elif callable(element):
#this is a function, get more info
newFunction = {"module":module, "function":element}
self.functions[moduleName+"."+objName]=newFunction
def import_default_plugins(self):
""" find all plugins (= all .py files in the ./plugin folder
take from there the templates from the files and the functions
don't check them for #21datalabplugin marker
this function is executed on startup of the model
"""
self.import_plugins_from_directory(os.path.join(myGlobalDir, 'plugins'),check_file_marker=False)
def get_id(self,ids):
""" convert a descriptor or a list into only ids (which can be used as entry to the model dictionary
Args:
ids (string, list(string)): a single or list of strings containing either and id ("101") or browsepath ("root.myfolder.myvar")
Returns:
a list(id) or id as string
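Example (illustrative sketch, paths and ids are hypothetical):
    singleId = model.get_id("root.myfolder.myvar")        # -> e.g. "17"
    idList = model.get_id(["root.myfolder.myvar", "23"])   # -> e.g. ["17", "23"]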
"""
with self.lock:
if type(ids) == type(list()):
newList = []
for id in ids:
newList.append(self.__get_id(id))
return newList
elif type(ids) == type(dict()):
newDict = {}
for oldId in ids:
id = self.__get_id(oldId)
newDict[id]=ids[oldId] #also copy the value
return newDict
else:
#assume its scalar
return self.__get_id(ids)
def get_browse_path(self,desc):
"""
Args:
desc(string): a node id or browsepath
Returns:
(string) a browsepath
"""
with self.lock:
id = self.get_id(desc)
if not id in self.model:
return None
path = self.model[id]["name"]
while 1:
id = self.model[id]["parent"]
if id =="0":
break
else:
path = self.model[id]["name"]+"."+path
return path
def push_nodes(self,nodeDicts):
"""
push ready node dicts into the model
this is a dangerous function as it does not adjust references, parent/child relations whatsoever
you must take care of that yourself
"""
for nodeDict in nodeDicts:
self.logger.warning(f"pushing node {nodeDict["id"], nodeDict["name"]}")
self.model[nodeDict["id"]]=copy.deepcopy(nodeDict)
self.__notify_observers([],None) # just trigger the treeupdate for now
#xxx todo notify!
def create_node(self,parent="root",type="folder",value=None,name="newNode",properties={}):
"""
create a node inside the model by giving several infos
Args:
parent: a descriptor (browsepath or id) of the parent
type: the type of the node
value: (optional) give a value for the node
name(string): a name of the node, must be unique under the parent
properties (dict): a dictionary containing further key-values to be placed into the node as properties
Returns:
(string) nodeid,
None if there was a problem during creation
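Example (illustrative sketch, assuming root.myfolder exists):
    newId = model.create_node(parent="root.myfolder", type="variable", name="speed", value=0.0)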
"""
#check if parent exists
with self.lock:
parentId = self.get_id(parent)
if not parentId:
return None
#check if same name existst already
newpath = self.get_browse_path(parent)+"."+name
if self.get_id(newpath):
#we found it, it exists already, so we can't create it
return None
# we can create this node
if self.idCreationHash == True:
newId = str((random.randrange(2**64))) # a 64 bit random value
else:
self.globalIdCounter += 1
newId = str(self.globalIdCounter)
newNode = copy.deepcopy(self.nodeTemplate)
newNode.update({"id":newId,"name":name,"type":type,"parent":parentId})
if properties !={}:
newNode.update(properties)
if value != None:
newNode["value"]=value
self.model[parentId]["children"].append(newId)
self.model[newId] = newNode
if newNode["type"] == "timeseries":
self.time_series_create(newId)
if newNode["type"] == "eventseries":
self.event_series_create(newId)
if newNode["type"] == "object":
if "class" not in newNode:
newNode["class"]=None
if "autoReload" not in newNode:
newNode["autoReload"] = False # set this to true means: on a "instantiate object, we reload the module
self.__notify_observers(parentId,"children")
return newNode["id"]
def create_node_from_path(self,path,properties={"type":"variable"}):
"""
create a node from a path given; all intermediate nodes of the path that do not yet exist are also created as folder type
Args:
path(string): the path to the node to be created
properties(dict): the properties of the node
example:
create_node_from_path("root.myfolder.something.thisvar")
this will create myfolder as folder, something as folder, thisvar as variable and will also
set all hierarchies correctly
Returns:
(string) the nodeid created or
None if problem during creation
"""
currentNode = "root" #root
with self.lock:
for node in path.split('.')[1:-1]:
if not self.__get_id(currentNode+'.'+node):
#this one does not exist, so make it
self.create_node(currentNode,name=node)
currentNode += '.'+node
return self.create_node(parent=currentNode,name=path.split('.')[-1],properties=properties)
def create_nodes_from_template(self,parent="root",template=[]):
"""
deprecated!! this is the old style of templates as lists, now it's a dict
Create a node from a template; a template is a list of node-dicts,
Args:
parent(string): descriptor of the parent node under which the nodes of the template should be created
template: a list of node dicts of the nodes to be created, children are allowed as dict
Returns:
(boolean) True for created, False for error
Example:
create_nodes_from_template(parent="root.myfolder",[{"name":"myvariable1","type":"variable"},
{"name":"myfolder","type":"folder","children":[
{"name":"mysubvar","type":"variable"}]])
"""
with self.lock:
parentId = self.get_id(parent)
if not parentId:
return False
newNodeIds = [] #these must be corrected later
for node in template:
#we take all info from the nodes and insert it into the tree
nodeName = node["name"]
newNodeId = self.create_node(parentId,name=nodeName,properties=node)
newNodeIds.append(newNodeId)
#do we have "children per template syntax"?, then remove that property from the nodes and make more nodes
if "children" in self.model[newNodeId]:
savedChildren = copy.deepcopy(self.model[newNodeId]["children"])
self.model[newNodeId]["children"]=[] # empty out
for child in savedChildren:
newChildId = self.create_node(newNodeId,name=child["name"],properties=child)
newNodeIds.append(newChildId)
#now correct missing stuff
for nodeId in newNodeIds:
if self.model[nodeId]["type"]== "referencer":
# convert the path of references into an id: get the parent path, add the tail, convert to id
forwardReferences =self.model[nodeId]["forwardRefs"] #make a copy, we'll delete this
self.model[nodeId]["forwardRefs"]=[]
parentPath = self.get_browse_path(self.model[nodeId]["parent"])
for forwardRef in forwardReferences:
forwardPath = parentPath+forwardRef
self.add_forward_refs(nodeId,[forwardPath])
return True
def __create_nodes_from_path_with_children(self,parentPath,nodes):
"""
recursive helper function for create_template_from_path
we build all nodes under the parentPath on this level and then the children
we return a list of all created node ids
"""
createdNodes = []
for node in nodes:
newModelNode = {}
for k, v in node.items():
if k not in ["children", "parent", "id", "browsePath"]: # avoid stupid things
newModelNode[k] = v
newId = self.create_node_from_path(parentPath+'.'+newModelNode["name"],newModelNode)
if newId:
createdNodes.append(newId)
if "children" in node:
createdNodes.extend(self.__create_nodes_from_path_with_children(parentPath+'.'+newModelNode["name"],node["children"]))
return createdNodes
def create_template_from_path(self,path,template):
"""
Create a template from a path given, the template contains one or more nodes
the path must not yet exist!
Args:
path(string): the path under which the template will be placed. the template always contains
a root node, this will be renamed according to the path
Returns:
(boolean) True for created, False for error
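Example (illustrative sketch with a hypothetical template dict):
    template = {"name": "tpl", "type": "folder", "children": [{"name": "myvar", "type": "variable"}]}
    model.create_template_from_path("root.experiments.exp1", template)  # "tpl" is renamed to "exp1"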
"""
with self.lock:
#first create the template root node
#we rename the template according to the path requested
template["name"]=path.split('.')[-1]
parentPath = '.'.join(path.split('.')[:-1])
newNodeIds = self.__create_nodes_from_path_with_children(parentPath,[template])
self.logger.debug(f"create_template_from_path, new nodeids: {newNodeIds}")
#now adjust the references of new nodes and of the ones that were there
for newNodeId in newNodeIds:
if "references" in self.model[newNodeId]:
#we must create forward references
for ref in self.model[newNodeId]["references"]:
# now there are two options:
# the given path is of the form templatename.levelone.leveltwo inside the template
# we replace the "templatename" with the path name the template was given
# or the path is absolute id or browsepath, then we don't modify
splitted = ref.split('.')
if len(splitted) == 1 or splitted[0]=="root":
targetPath = ref
else:
targetPath = parentPath+'.'+template['name']+'.'+'.'.join(ref.split('.')[1:])
self.add_forward_refs(newNodeId,[targetPath])
del self.model[newNodeId]["references"] # we remove the reference information from the template
def get_templates(self):
"""
give all templates loaded
Returns: a dict with entries containing the full templates
"""
with self.lock:
return copy.deepcopy(self.templates)
def add_forward_refs(self,referencerDesc,targets,allowDuplicates = True):
"""
adding forward references from a referencer to other nodes, the forward references are appended at the list
of forward references of the referencer node
references to oneself are not allowed
Args:
referencerDesc (string): descriptor of the referencer node from which we want to add forward references
targets (list(descriptors)): list of node descriptors to which we want to add forward refs
Returns:
True/False for success
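Example (illustrative sketch, assuming root.myreferencer is of type referencer):
    model.add_forward_refs("root.myreferencer", ["root.folder1.var1", "root.folder1.var2"])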
"""
with self.lock:
fromId = self.get_id(referencerDesc)
if not fromId:
self.logger.error("can't set forward ref on "+str(referencerDesc))
return False
if type(targets) is not list:
targets = [targets]
if targets==[]:
return True
if not self.model[fromId]["type"]=="referencer":
self.logger.error("can't set forward ref on "+str(referencerDesc)+ "is not type referencer, is type"+self.model[fromId]["type"])
return False
for target in targets:
toId = self.get_id(target)
if not toId:
continue
if toId == fromId:
continue
if not allowDuplicates:
if toId in self.model[fromId]["forwardRefs"]:
continue # ignore this forwards ref, we have it already
self.model[toId]["backRefs"].append(fromId)
self.model[fromId]["forwardRefs"].append(toId)
self.__notify_observers(fromId,"forwardRefs")
return True
def lock_model(self):
self.lock.acquire()
def release_model(self):
self.lock.release()
def get_model(self):
"""
Returns: the full deepcopy of the internal model object (list of dictionaries of the nodes)
"""
with self.lock:
#also add the browsepath to all nodes
for nodeid in self.model:
self.model[nodeid]["browsePath"]=self.get_browse_path(nodeid)
return copy.deepcopy(self.model)
def get_model_for_web(self,getHash=False):
"""
Returns: the full deepcopy of the internal model object (list of dictionaries of the nodes)
but leaving out the column values (this can be a lot of data)
and the file values (files are binary or strings with big size, typically serialized ML-models)
for files and columns, we either return a string "len 12344" or a sha1 hash value 133344
"""
model = {}
p=utils.Profiling("get_model_for_web")
with self.lock:
for nodeId, nodeDict in self.model.items():
if nodeDict["type"] in ["column","file","timeseries","eventseries"]:
# with columns we filter out the values
node = {}
for nk, nv in nodeDict.items():
if nk == "value":
try:
if not getHash:
node[nk] = "len " + str(len(nv))
else:
start = datetime.datetime.now()
hash = hashlib.sha1(nv.tobytes())
node[nk] = hash.hexdigest()
self.logger.debug(f"hashed {nodeDict["name"]} in {(datetime.datetime.now()-start).total_seconds()} hash:{node[nk]}")
except:
node[nk] = "None"
else:
node[nk] = copy.deepcopy(nv) # values can be list, dict and deeper objects
model[nodeId] = node
elif nodeDict["type"]=="object":
node={k:v for k,v in nodeDict.items() if k!="object"}
model[nodeId]=node
else:
#this node is not a colum, can still hold huge data
model[nodeId] = copy.deepcopy(nodeDict) # values can be list, dict and deeper objects nodeDict
model[nodeId]["browsePath"] = self.get_browse_path(nodeId) #also add the browsepath
self.logger.debug(f"{p}")
return model
def remove_forward_refs(self,sourceDesc,targetDescriptors = [], deleteDuplicates=False):
"""
remove forward references from a referencer, this also removes the backreference from the target
Args:
sourceDesc: the descriptor of the referencer node
targets: a list of descriptors, if missing we delete all
deleteDuplicates: if set to true, we delete all references to a target if we have more than one reference
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(sourceDesc)
if not fromId:
return False
if not self.model[fromId]["type"] == "referencer":
return False # only for referencers
if targetDescriptors == []:
targets = self.model[fromId]["forwardRefs"].copy()
else:
targets = self.get_id(targetDescriptors)
if targets == []:
return True# nothing to do
for toId in targets:
if not toId:
continue # we skip Nones coming from the get_id
if deleteDuplicates:
# maybe multiple entries
while toId in self.model[fromId]["forwardRefs"]: # maybe multiple entries
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
else:
# we delete only one entry
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
self.__notify_observers(fromId,"forwardRefs")
return True
def remove_forward_ref(self,sourceDesc,targetDesc):
"""
remove a forward reference from a referencer, this also removes the backreference from the target
Args:
sourceDesc: the descriptor of the referencer node
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(sourceDesc)
toId = self.get_id(targetDesc)
if not fromId or not toId:
return False
if not self.model[fromId]["type"]=="referencer":
return False # only for referencers
try:
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
self.__notify_observers(fromId, "forwardRefs")
return True
except:
return False
def remove_back_ref(self,sourceDesc,targetDesc):
"""
remove a backwards reference from any node to a referencer, this also removes the forward reference from the target
actually, this function is just a helper. Normally, we only talk about "forward references";
each forward reference also creates a backwards reference in the model, but this is just for internal look up speed
the reference here is targetDesc -> (forwardRef) -> sourceDesc
Args:
sourceDesc: the descriptor of the node that holds a backwards reference
targetDesc: the descriptor of the node that holds the forward reference
Returns:
True/False for success
"""
with self.lock:
return self.remove_forward_ref(targetDesc,sourceDesc)
def add_property(self,nodeDesc,property,value):
"""
add a random property entry for a node, a node is a key-value store, a property is a key with a value
Args:
nodeDesc: the descriptor of the node
property: the key to be created on the node
value: the value to be stored for this property
Returns:
True for create
False for node not found or if the property already exists
"""
with self.lock:
id = self.get_id(nodeDesc)
if not id:
return False
if property in self.model[id]:
return False # have this property already
self.model[id][property]=value
self.__notify_observers(id, property)
return True
def set_properties(self,properties={},nodeDesc=None):
"""
changes an arbitrary set of properties given by the dict or adds them if not existent; some properties are not allowed here:
children, parent, forward and backward refs; allowed are all others including type, name, value
Args:
nodeDesc: the descriptor of the node, is optional, can also be given as browsePath or id in the properties dict
properties: the new properties or changed
Returns:
True for done
False for node not found
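Example (illustrative sketch, node path is hypothetical):
    model.set_properties({"value": 5, "unit": "m/s"}, nodeDesc="root.myfolder.speed")
    model.set_properties({"browsePath": "root.myfolder.speed", "name": "velocity"})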
"""
with self.lock:
if nodeDesc:
id = self.get_id(nodeDesc)
elif "id" in properties:
id = properties["id"]
elif "browsePath" in properties:
id = self.get_id(properties["browsePath"])
else:
self.logger.error("set properties is missing id ")
return False
if not id:
return False
notificationProperties = []
for k,v in properties.items():
if k in ["id","browsePath","children","parent","forwardRefs","backRefs"]:
continue # we ignore these entries
self.model[id][k]=v # overwrite or set new
notificationProperties.append(k)
self.__notify_observers(id,notificationProperties)
return True
def find_all_children_recursive(self,nodeIds):
""" find all children recursively, give a list of """
with self.lock:
children = []
for id in nodeIds:
if self.model[id]["children"]:
children.extend(self.find_all_children_recursive(self.model[id]["children"]))
children.append(id)
return children
#delete node and all subnodes
def delete_node(self,desc):
"""
delete a node and all its recursive children;
flow:
1) make a list of all nodes to be deleted
2) remove all references to/from the nodes to be deleted
3) delete all nodes
4) notify observers about the children change on the remaining parent nodes
desc(string): the descriptor of the node
Returns:
True for success
False for node not found
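Example (illustrative sketch, path is hypothetical):
    model.delete_node("root.myfolder")   # removes the folder and all nodes below it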
"""
with self.lock:
id = self.get_id(desc)
if not id:
return False
nodesToDelete = self.find_all_children_recursive([id])
self.logger.debug(f"delete nodes {nodesToDelete}")
childNotify = []
#first rip off all references
for id in nodesToDelete:
forwards = self.model[id]["forwardRefs"].copy()
backwards = self.model[id]["backRefs"].copy()
for forward in forwards:
self.remove_forward_ref(id,forward) # this will also trigger observers
for backward in backwards:
self.remove_back_ref(id,backward) # this will also trigger observers
#now delete the actual nodes
for id in nodesToDelete:
parentId = self.model[id]["parent"]
if parentId in self.model:
self.model[parentId]["children"].remove(id)
childNotify.append(parentId)
if self.model[id]["type"]=="timeseries":
self.time_series_delete(id)
del self.model[id]
#now notify only those who still exist
goodNotify=[]
for id in childNotify:
if id in self.model:
goodNotify.append(id)
if goodNotify:
self.__notify_observers(goodNotify, "children") # make ONE call for the observers
return True
# if desc.type is a var, function then we just set the value
# if it's a timeseries" then we set a column in a table, padded if needed
def set_value(self,desc,value):
"""
set the value property of a node, if the node does not have a value property yet, it is created here
Args:
desc(string): node descriptor
value (any): any value to be stored
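Example (illustrative sketch, paths are hypothetical):
    model.set_value("root.myfolder.speed", 3.14)
    current = model.get_value("root.myfolder.speed")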
"""
with self.lock:
id = self.get_id(desc)
if not id: return None
#convert if table:
if self.model[id]["type"] == "column":
value = numpy.asarray(value,dtype=numpy.float64)
self.model[id]["value"] = value
self.__notify_observers(id,"value")
return True
def get_value(self,desc):
"""
read out the "value" property of a node
Args:
desc(string): the node that holds the value
Returns:
the value
None if the node has no "value" property
"""
with self.lock:
id = self.get_id(desc)
if not id: return None
if self.model[id]["type"] == "timeseries":
values = self.time_series_get_table(id)
if values:
return self.time_series_get_table(id)[id]["values"]
else:
return None
if "value" in self.model[id]:
return copy.deepcopy(self.model[id]["value"])
else:
return None
def __copy_node(self,id,resolveChildren=False):
"""
get a copy of a node, we don't create a node in the model here!
copy node with all properties, if the node is a "column", we don't copy the value
if the resolveChildren is set to true, we also copy the direct children
the copied node can't be used to create a node, as it is the copy of an existing node!
Args:
id (string): the node id to be copied
resolveChildren (bool): False to not copy the children (the new node has no children)
True to copy-create also the children
Return:
(dict) the node
"""
newNode = {}
for key in self.model[id]:
if key == "value" and self.model[id]["type"]in ["column","file","timeseries"]:
newNode["value"]=None
elif key == "children" and resolveChildren:
#we also copy the children
newNode["children"]=[]
for childId in self.model[id]["children"]:
childNode = self.__copy_node(childId)
newNode["children"].append(childNode)
else:
newNode[key]=copy.deepcopy(self.model[id][key])
return newNode
def __get_targets(self,id):
"""
#this is a recursive helper function for the get_leaves function
"""
targets=[]
if self.model[id]["type"] == "referencer":
for targetId in self.model[id]["forwardRefs"]:
targets.extend(self.__get_targets(targetId))
elif self.model[id]["type"] == "folder":
for targetId in self.model[id]["children"]:
targets.extend(self.__get_targets(targetId))
else:
addNode = self.__copy_node(id,resolveChildren=True)
addNode["browsePath"]=self.get_browse_path(id)
targets = [addNode]
return targets
def get_leaves_ids(self,desc):
"""
get the list of ids of the leaves, see get_leaves()
Returns:
a list of ids of the leaves
"""
leaves = self.get_leaves(desc) # a list of node dicts
leaveIds = []
for leave in leaves:
leaveIds.append(leave["id"])
return leaveIds
def get_leaves(self,desc,allowDuplicates=False):
"""
this function returns a list of dicts containing the leaves where this referencer points to
this function works only for nodes of type "referencer", as we are following the forward references
leaves are defined as following:
1) all nodes that are listed under the forward references and which are not of type referencer or folder
2) if nodes pointed to are referencer, the targets are again analyzed
3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves
folders and referencers inside the folder are not taken into account
doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1
Returns:
all node dicts which are considered leaves as a list of node dicts
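Example (illustrative sketch, assuming root.myreferencer points to some variables):
    leaves = model.get_leaves("root.myreferencer")
    paths = [leaf["browsePath"] for leaf in leaves]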
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
targets=self.__get_targets(id)
if targets and targets[0]["id"] == id:
#this can happen if the node is not a folder or referencer and has no children
targets.pop(0)
#before we return, we remove duplicates if wanted
if targets and allowDuplicates == False:
reducedTargets = []
ids = []
for t in targets:
if t["id"] in ids:
continue
reducedTargets.append(t)
ids.append(t["id"])
return reducedTargets
else:
return targets
def __get_referencer_parents(self,ids):
backRefs = []
#we look back from this node
for id in ids:
if self.model[id]["type"] == "referencer":
#we take this one in
backRefs.append(id)
#plus we look further up
thisBackRefs = self.model[id]["backRefs"]
if thisBackRefs:
backRefs.extend(self.__get_referencer_parents(thisBackRefs))
return backRefs
def get_referencers_old(self,desc):
"""
find the referencers pointing to a node via the "leaves algorithm"
initially, we take the parent and the backref referencers
Args:
deep: we support the reverse leaves algorithm including any depth of children levels after the last referencer,
e.g. a leaves-path of referencer -> referencer -> node -> child -> child is a valid match
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
ids = [self.model[id]["parent"],id]
if "0" in ids:
ids.remove("0")
referencers = self.__get_referencer_parents(ids)
return referencers
def get_referencers(self,desc,deepLevel = 1):
"""
find the referencers pointing to a node via the "leaves algorithm"
initially, we take the parent and the backref referencers
Args:
deepLevel: we support the reverse leaves algorithm including any depth of children levels after the last referencer,
e.g. a leaves-path of referencer -> referencer -> node -> child -> child is a valid match
we give the number of parent levels to include in the search at the leaves
default is 1, so the node itself and its parent
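Example (illustrative sketch, path is hypothetical):
    referencerIds = model.get_referencers("root.myfolder.myvar", deepLevel=2)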
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
if not deepLevel:
ids = [self.model[id]["parent"],id]
else:
ids = self._get_parents(id,deepLevel)
if "0" in ids:
ids.remove("0")
referencers = self.__get_referencer_parents(ids)
return referencers
def _get_parents(self,id,deepLevel = -1):
ids = []
while id != "1" and deepLevel >= 0:
ids.append(id)
deepLevel -=1
id = self.model[id]["parent"]
return ids
#get a table with values like in the table stored, start and end times are optional
# if start, end not given, then we get the full table with no postprocessing at all
def get_timeseries_table_old(self,variables,startTime=None,endTime=None,noBins=None,agg="sample",includeTimeStamps=None,includeBackGround=None):
with self.lock:
variables = self.get_id(variables)
return self.timeSeriesTables.get_value_table(variables, startTime=startTime, endTime=endTime, noBins=noBins,
agg=agg,
includeTimeStamps=includeTimeStamps) # ,startTime,endTime)
'''
if startTime == None and endTime ==None:
#request the full table
variables = self.get_id(variables) # convert all to ids
return self.timeSeriesTables.get_value_table(variables,startTime=startTime,endTime=endTime,noBins=noBins,agg=agg,includeTimeStamps=includeTimeStamps)#,startTime,endTime)
else:
# this is a more details request, we will try to deliver the data in bins and with
# aggretation postprocessing
variables = self.get_id(variables) # convert all to ids, not browsepath
return self.timeSeriesTables.get_value_table(variables,startTime,endTime,noBins,agg,includeTimeStamps=includeTimeStamps)
'''
#used in the Node class, give a column variable or the table itself, return the nodeid of the time variable of that table
def find_table_time_node(self,desc):
with self.lock:
table = self.__find_table(self.get_id(desc))
if not table:
return None
pathToTimeIndex = self.get_browse_path(table)+".timeField"
timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id'] # this referencer must point to only one node
return timeColumnId
def find_table_node(self,desc):
"""
get the node id of a table giving a column node of the table as input
Args
desc[string]: a node descriptor of a column node belonging to the table
Returns:
the node id of the table node
"""
with self.lock:
return self.__find_table(desc)
def get_child(self,desc,childName):
"""
get a child based on the name given
Args:
desc: node descriptor of the node under which we look for children
name: the child name to look for
Returns:
a nodeid if we find the child with "name" under the desc or none if not found
:return:
"""
with self.lock:
nodeInfo = self.get_node_info(desc)
if nodeInfo:
for childId in nodeInfo['children']:
childInfo = self.get_node_info(childId)
if childInfo["name"] == childName:
return childId
return None
def get_children_dict(self,desc):
"""
create a dictionary with key= childName and value = nodedict
Args:
desc: the nodedescriptor
Returns:
a dict
"""
with self.lock:
childrenDic={}
id = self.get_id(desc)
if not id:
return None
for childId in self.model[id]["children"]:
child = self.get_node_info(childId)
childrenDic[child["name"]]=child
return childrenDic
def get_table_len(self,desc):
"""
get the current length of a table
Args:
desc: the node descriptor of type table
Returns:
the current length of the columns of the table, none if error
"""
with self.lock:
tableId = self.get_id(desc)
if not tableId: return None
if not self.model[tableId]["type"]=="table": return None
try:
columnid = self.get_child(tableId,"columns")
if not columnid: return None
columnIds = self.get_leaves_ids(columnid)
if columnIds:
return len(self.model[columnIds[0]]["value"])
except:
return None
def get_timeseries_table(self,variables,startTime=None,endTime=None,noBins=None,agg="sample",includeTimeStamps=None,format="array",includeBackGround=None):
"""
get a time series table from variables. The table is returned as a list[list] object
all variables requested must be of type "column" and must belong to the same table:
all columns requested here must have a direct backreference to the same node of type "columns"
todo: also allow "columns" to point to folders or multiple hierarchies of referencing/folders
Args:
variables (list(nodedescriptors)): nodes to be part the data table requested (ordered!)
startTime, endTime: the start and end time of the table given as seconds since epoch
#we also allow the special case of endTime = 0 and startTime = -interval
# we also allow the special case of startTime given and end time= 0
noBins(int): the number of samples to be returned inside the table between start end endtime,
if None is given, we return all samples (rows) we have in the table and do not aggregate
agg(string): the aggregation function to be used when we downsample the data,
"sample": this means, we just pick out values (we sample) the data set, this is actually not an aggregation
includeTimeStamps (bool): currently ignored
includeBackGround (bool): currently ignored
Returns(dict)
key : value
"__time" : list of timestamps for the returned table in epoch seconds
"variable1": the list of float values of one of the requested variables
"""
with self.lock:
#first check if all requested timeseries are columns from the same table
vars = self.get_id(variables)
table = []
for var in vars:
if self.model[var]["type"] != "column":
self.logger.warn("requested time series but not column type")
return False
table.append(self.__find_table(var))
if len(set(table)) != 1 or set(table)== {None}:
self.logger.warning("not the same table")
return False
#get the time field, and make fancy indexing via numpy arrays
pathToTimeIndex = self.get_browse_path(table[0])+".timeField"
timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id']
if startTime and endTime:
times = numpy.asarray(self.model[timeColumnId]["value"])
indices = numpy.where((times>=startTime) & (times<=endTime))[0]
#xxx todo find the right index
elif startTime and not endTime:
#special cases for [-startTime:] and [startTime:] requests
if startTime < 0:
#this is the special case that we take an interval from the end
endTime = self.model[timeColumnId]["value"][-1]# the last
startTime = endTime + startTime # as startTime is negative this is actually a subtraction
else:
#starttime is positive
pass
times = numpy.asarray(self.model[timeColumnId]["value"])
indices = numpy.where(times >= startTime)[0]
else:
indices = numpy.arange(0,len(self.model[timeColumnId]["value"])) ## all indices
#now resample the indices to have the right bins number
if noBins:
varIndices = np.linspace(indices[0], indices[-1], noBins, endpoint=False, dtype=int)
else:
varIndices = indices
if format=="array":
result = []
for var in variables:
original = np.asarray(self.model[self.get_id(var)]["value"])[varIndices] # fancy indexing
data=original.tolist() # apply the selection with the indices list
result.append(data)
else:
result = {}
for var in variables:
original = np.asarray(self.model[self.get_id(var)]["value"])[varIndices] # fancy indexing
data = original.tolist() # apply the selection with the indices list
result[var]=data
result["__time"]=np.asarray(self.model[timeColumnId]["value"])[varIndices].tolist()
return result
def add_timeseries(self,blob,fast=False):
"""
add a dictionary of variables to a table, we check if the variables belong to the same table
also, times that come in as datetime object are converted to epoch seconds
Args:
blob (dict): a dictionary containing keys (node descriptors) and values (scalars)
Returns:
True/False for success
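Example (illustrative sketch, assuming both columns belong to the same table):
    model.add_timeseries({"root.mytable.speed": 1.7, "root.mytable.time": datetime.datetime.now()})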
"""
with self.lock:
table = []
for key in blob:
id = self.get_id(key)
if not id:
self.logger.warn("add_timeseries count not find the variable:" + str(key))
return False
if self.model[id]["type"] != "column":
self.logger.warn("requested time series but not column type")
return False
table.append(self.__find_table(id))
if len(set(table)) != 1 or set(table) == {None}:
self.logger.warn("not the same table")
return False
#here, the request is parsed as ok, let's put the values
for key in blob:
id = self.get_id(key)
value = blob[key]
if type(self.model[id]["value"]) is not list:
self.model[id]["value"]=[]
#we auto-convert time stamps
if type(value) is datetime.datetime:
value = date2secs(value)
self.model[id]["value"].append(value)#finally put the value
#return the id of the table, give a column variable
def __find_table(self,desc):
"""
return the node id of the table, give a column variable
!! this has no lock, must be called under lock
Args:
desc(string): node descriptor of type column or the table itself
Returns:
the node id of the table to which the desc node belongs
"""
id = self.get_id(desc)
if not id: return False
if self.model[id]["type"] == "table":
return id
for ref in self.model[id]["backRefs"]:
if self.model[ref]["name"] == "columns":
return self.model[ref]["parent"]
return None
def ts_table_add_blob(self,dataBlob):
"""
this function adds a data blob to an existing table, it accepts multiple values at once to speed up internals
Args:
dataBlob (dict or list(dict)): containing key:value pair with key=a descriptor of a column of one table
value: a scalar or list or numpy array of values
"""
if type(dataBlob) is list:
self.logger.error("currently no support for list blobs")
return None
with self.lock:
#first find the table and decide for the type conversion
for key in dataBlob:
if key != '__time':
tableId = self.__find_table(key)
break
if not tableId:
self.logger.error("can't find the table of "+str(list(dataBlob.keys())[0]))
return False
tableNode =self.get_node(tableId)
columnsType = numpy.float64 # this is the default
# make sure the time is there and convert it: we accept datetime objects, iso strings or floats seconds
# plus, the key will be the time node id afterwards
timeNode = tableNode.get_child("timeField").get_leaves()[0]
#try to find the time entry in the dataBlob, rename it to the timenode id
timeKeyOptions = ['__time',timeNode.get_browse_path(),timeNode.get_id()]
for timeKeyOption in timeKeyOptions:
if timeKeyOption in dataBlob:
dataBlob[timeNode.get_id()] = dataBlob.pop(timeKeyOption) # from now on the time field is keyed by the time node id
break
if timeNode.get_id() not in dataBlob:
self.logger.error("time field entry missing")
return False
#now check if all are on the same table and convert the keys to node ids
variables = list(dataBlob.keys())
for var in variables:
if self.__find_table(var) != tableId:
self.logger.error("variables are not on the same table")
return False
id = self.get_id(var)
if id != var:
dataBlob[self.get_id(var)]=dataBlob.pop(var) # make new entry as nodeid
#now check the sizes of the incoming data and convert them to the requested type
inputSizes = set()
for key,value in dataBlob.items():
if key == timeNode.get_id():
#if we handle the time node, we might have to convert
if type(value) is list or type(value) is numpy.ndarray:
newValues = []
#newValues = numpy.asarray([],dtype=numpy.float64)
for val in value:
newValues.append(date2secs(val))
dataBlob[key] = numpy.asarray(newValues,dtype=numpy.float64) # write it back to the data
else:
#it is a scalar
dataBlob[key] = numpy.asarray([date2secs(value)],dtype=numpy.float64)
else:
if numpy.isscalar(dataBlob[key]):
dataBlob[key]=numpy.asarray([dataBlob[key]],dtype=columnsType) # make a list if it is scalar
else:
dataBlob[key]=numpy.asarray(dataBlob[key],dtype=columnsType) # if it is a numpy array already, numpy makes no copy
inputSizes.add(dataBlob[key].shape[0])
if len(inputSizes)!=1:
self.logger.error("incoming data has different lengths, can't handle as the padding is unclear")
return False
# when we are here, we have converted all incoming data ot numpy arrays, all belong to the same table
# and all have the same length, we are ready to put them inside
#print("through")
#now append them
return self.__ts_table_add_row(dataBlob,tableNodeId=tableId)
def __ts_table_add_row(self,dataBlob,tableNodeId=None,autoPad=True,pad=numpy.NaN):
"""
must be called under lock !!
this function accepts a dataBlob which is ready to be inserted, we don't make any more checks here
it must use variables from one table, it must contain data as numpyarrays
variables of the tables which are missing will be filled with pad if autoPad is true
"""
if not tableNodeId:
tableNode = self.get_node(self.__find_table(list(dataBlob.keys())[0]))
else:
tableNode = self.get_node(tableNodeId)
dataLen = dataBlob[list(dataBlob)[0]].shape[0]
columnNodes = tableNode.get_child("columns").get_leaves()
for columnNode in columnNodes:
id = columnNode.get_id()
if id in dataBlob:
#we add that one to the table
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = dataBlob[id]
else:
self.model[id]['value'] = numpy.append(self.model[id]['value'],dataBlob[id])
else:
#we must pad
self.loger.debug("we are padding "+id+" with % ",dataLen)
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]=numpy.full(dataLen,numpy.nan)
else:
self.model[id]['value'] = numpy.append(self.model[id]['value'],numpy.full(dataLen,numpy.nan))
return True
def append_table(self,blob,autocreate=True,autopad=True, timeSorted = False):
"""
this function accepts a dictionary containing paths and values and adds them as a row to a table
if autoPad is True: it is allowed to leave out columns, those will be padded with numpy.inf,
if autocreate is True: it is allowed to add unknown columns, those will be added automatically under the given name
Args:
blob(dict):
keys: node descriptors,
values: value to be appended to the table (scalar or list per variable is allowed)
the times should be given in a variable ending with ".time"
if the table exists already and has another node for the time-values, then we take the .time values and put them on the timenode
autocreate(bool): if set to true and the nodes or table in the dict do not exist yet, we autocreate a table
autopad(bool) if set to true, we automatically pad values in an existing table if variables of the table are not part of the blob
doing so, we keep a consistent length for all columns of a table
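Example (illustrative sketch; the ".time" entry is mandatory, omitted columns of the table are padded):
    model.append_table({
        "root.mytable.time": [1546300800.0, 1546300801.0],
        "root.mytable.speed": [1.1, 1.2]
    }, autocreate=True)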
"""
#first check if we need to autocreate something, also check if we have multiple tables in play
with self.lock:
autocreates = []
tableId = None
columnsId = None
numberOfRows = None
for key in blob:
id = self.__get_id(key)
if not id:
if not autocreate:
self.logger.warn("appending table with unknown variables")
return None
else:
#we create this thing later
autocreates.append(key)
else:
#the id was found, let's find the right table
for ref in self.model[id]["backRefs"]:
if self.model[ref]["name"] == "columns":
#this is our table
if not tableId:
tableId = self.model[ref]["parent"]
columnsId = ref
numberOfRows = len(self.model[id]["value"])
else:
if tableId != self.model[ref]["parent"]:
self.logger.warn("mixed tables request")
return None
self.logger.debug("append table "+str(self.get_browse_path(tableId)))
if autocreates and autocreate:
#do we even have to create our table?
if not tableId:
#make a table structure based on the names given
tableName = autocreates[1].split('.')[1]+"_autotable"
tableId = self.create_node(parent="root",name=tableName,properties={"type":"table"})
columnsId = self.create_node(parent=tableId,name="columns",properties={"type":"referencer"})
timeId = self.create_node(parent=tableId, name="timeField", properties={"type": "referencer"})
numberOfRows=0
else:
#if we don't create the table, here is our timeId
timeReferencer = self.get_child(tableId, "timeField")
timeId = self.get_leaves_ids(timeReferencer)[0]
#we also then don't create any new time-field
autocreates = [path for path in autocreates if path[-5:]!=".time"]
self.logger.debug(f"table var autocreates: {autocreates}")
for path in autocreates:
id = self.create_node_from_path(path,properties={"type":"column"})
self.model[id]["value"]=numpy.full(numberOfRows,numpy.inf)
self.add_forward_refs(columnsId,[id])
if path.split('.')[-1]=="time":
#we just created the time field, we must also give the table struct the info
self.add_forward_refs(timeId,[id])
tableColumnIds = self.get_leaves_ids(columnsId) # a list of the ids of the columns
timeReferencer = self.get_child(tableId,"timeField")
timeId = self.get_leaves_ids(timeReferencer)[0]
timePath = None
for path in blob:
if path[-5:] == ".time":
timePath = path
if not timePath:
self.logger.error("no time path given")
return False
#now make arrays of all values
for k,v in blob.items():
if type(v) is list or type(v) is numpy.ndarray:
blob[k]=numpy.asarray(v,dtype=numpy.float64)
else:
blob[k] = numpy.asarray([v], dtype=numpy.float64)
valuesLen = len( blob[list(blob.keys())[0]] )
tableLen = len ( self.get_value(timeId))
if not timeSorted:
#just append
for path in blob:
if path.split('.')[-1]=="time":
id = timeId # we take the existing time Node of the table instead of just the variable named "time"
else:
id = self.get_id(path) # here
self.model[id]["value"] = numpy.append(self.model[id]["value"],blob[path]) #todo: this is a very inefficient copy and reallocate
if id in tableColumnIds:
tableColumnIds.remove(id)
#append this value
for id in tableColumnIds:
self.model[id]["value"] = numpy.append(self.model[id]["value"],numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remainings with inf
#now trigger observser
self.__notify_observers(self.get_leaves_ids(columnsId),"value")
else:
#time sorted: find a place to insert the data in the times
currentTimes = numpy.asarray(self.get_value(timeId),dtype=numpy.float64)
startTime = blob[timePath][0]
endTime = blob[timePath][-1]
firstIndexGreaterStart, = numpy.where(currentTimes>startTime) #where returns tuple
if len(firstIndexGreaterStart) == 0:
firstIndexGreaterStart = tableLen
else:
firstIndexGreaterStart=firstIndexGreaterStart[0]
firstIndexGreaterEnd, = numpy.where(currentTimes > endTime)
if len(firstIndexGreaterEnd) == 0:
firstIndexGreaterEnd = tableLen
else:
firstIndexGreaterEnd=firstIndexGreaterEnd[0]
if firstIndexGreaterEnd != firstIndexGreaterStart:
self.logger.error("we can't insert the data in a row-wise time manner, only as block")
return False
startIndex = firstIndexGreaterStart # the position to insert the incoming data
self.logger.debug(f"insert data @{startIndex} of {tableLen}")
for path in blob:
if path.split('.')[-1]=="time":
id = timeId # we take the existing time Node of the table instead of just the variable named "time"
else:
id = self.get_id(path) # here
self.model[id]["value"] = numpy.insert(self.model[id]["value"],startIndex,blob[path]) #todo: this is a very inefficient copy and reallocate
if id in tableColumnIds:
tableColumnIds.remove(id)
#append this value
for id in tableColumnIds:
self.model[id]["value"] = numpy.insert(self.model[id]["value"],startIndex,numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remainings with inf
#
pass
return True
def __show_subtree(self,rootId):
currentBrowsePath = self.get_browse_path(rootId)
indentation = "| "*(len(currentBrowsePath.split('.'))-1)
print (indentation+"-",self.model[rootId]["name"],end="")
noShowProperties=["name","parent","children"]
for property in self.model[rootId]:
try:
if property=="value" and len(self.model[rootId]["value"])>10:
print(",len:"+str(len(self.model[rootId]["value"])),end="")
except:
pass
if not property in noShowProperties:
try:
#if this entry has a len and the len is larger then 20, show only a part of it
if len(self.model[rootId][property]) > 10:
print("," + property + "=" + str(self.model[rootId][property][0:10])+"...("+str(len(self.model[rootId][property]))+")", end="")
else:
print("," + property + "=" + str(self.model[rootId][property]), end="")
except:
print("," + property + "=" + str(self.model[rootId][property]), end="")
if self.model[rootId]["type"]=="timeseries":
print(","+self.time_series_get_info(rootId), end="")
print("")
for child in self.model[rootId]["children"]:
self.__show_subtree(child)
def execute_object_function(self,desc,functionName,parameter=None):
with self.lock:
id = self.get_id(desc)
object = self.get_object(id)
if not object:
return False
try:
functionPointer = getattr(object,functionName)
self.executionQueue.put({"functionPointer":functionPointer,"parameter":parameter,"id":id})
return True
except:
self.logger.error(f"function {functionName} not sttr of object {desc} {object}")
return False
def execute_function(self,desc,parameter = None):
"""
create a thread to execute a function there,
if the function has autoReload, we re-import the external
file
Args:
desc: node descriptor of the node (type "function") to be executed
Returns:
True if the execution thread was launched
"""
with self.lock:
id = self.get_id(desc)
if self.model[id]["type"]!= "function":
return False
functionName = self.model[id]["functionPointer"]
if not functionName in self.functions:
self.logger.error(f"can't find function {functionName} in global list")
return False
functionNode = self.get_node(id)
executionType = functionNode.get_child("control").get_child("executionType").get_value()
if executionType in ["async","sync"]:
self.executionQueue.put(id)
self.logger.info(f"function {desc} queued for execution")
return True
elif executionType =="threaded":
self.logger.info(f"function {desc} started in thread")
thread = threading.Thread(target=self.__execution_thread, args=[id])
thread.start()
return True
else:
self.logger.error(f"function {desc} cant be started, unknown execution type {executionType}")
return False
#check if function is interactive, then we reload it right now
if self.model[id]["autoReload"] == True and self.global_auto_reload_enabled():
#if self.functions[functionName]["isInteractive"]:
# must reload the module
module = importlib.reload(self.functions[functionName]["module"])
functionPointer = getattr(module,functionName.split('.',1).pop())
#now update our global list
self.functions[functionName]["module"] = module
self.functions[functionName]["function"] = functionPointer
#here, the lock is open again!
try:
if executionType == "async" or executionType == "threaded":
thread = threading.Thread(target=self.__execution_thread, args=[id])
thread.start()
return True
elif executionType == "sync":
self.__execution_thread(id) # call it sync here
return True
else:
self.logger.error("unsupported execution type"+str(executionType)+" in fuction"+str(id))
raise(Exception)
except:
return False
def start_function_execution_thread(self):
self.functionExecutionRunning = True
self.functionExecutionThread = threading.Thread(target=self._function_execution_thread)
self.functionExecutionThread.start()
def _function_execution_thread(self):
while self.functionExecutionRunning:
try:
nextId = self.executionQueue.get(timeout=1)
self.logger.info(f"now executing function {str_lim(nextId,300)}")
self.__execution_thread(nextId)
except:
pass
def delete(self):
self.functionExecutionRunning = False
def exit(self):
self.delete()
def close(self):
self.delete()
def __dispatch(self,function,timeout,param):
thread = threading.Thread(target=self.__dispatch_thread_function, args=[function,timeout,param])
thread.start()
def __dispatch_thread_function(self,function,timeout,param):
time.sleep(timeout)
function(param)
#exit thread
def reset_progress_bar(self,controlNode):
controlNode.get_child("progress").set_value(0)
def __clone_children(self,source,dest):
""" see def clone() for more info """
sourcePath = self.get_browse_path(source)
destPath = self.get_browse_path(dest)
for childName,childInfo in self.get_children_dict(source).items():
childId = childInfo["id"]
if childInfo["type"] in ["timeseries","file","column"]:
self.logger.debug(f"clone skip node {childInfo["name"]}")
continue
newProps = {k:v for k,v in childInfo.items() if k not in ["parent","children","backRefs","forwardRefs","browsePath","id","name"]}
cloneId = self.create_node_from_path(destPath+"."+childInfo["name"],properties=newProps)
grandChildren = self.get_children_dict(childId)
if grandChildren != {}:
self.__clone_children(childId,cloneId)
def __clone_referencer_targets(self,source,dest):
""" see def clone() for more info """
sourcePath = self.get_browse_path(source)
destPath = self.get_browse_path(dest)
childIds = self.get_node_info(sourcePath)["children"]
while childIds:
id = childIds.pop()
info = self.get_node_info(id)
if info["type"]=="referencer":
newreferencer = self.get_browse_path(id).replace(sourcePath, destPath)
#now check: if the referencers points to something inside, we do the same but in the target root, else we take it as it is
for targetId in info["forwardRefs"]:
targetPath = self.get_browse_path(targetId)
newTargetPath = targetPath.replace(sourcePath,destPath)# if not found, we get it unchanged
self.add_forward_refs(newreferencer,[newTargetPath])
childIds.extend(info["children"])
def clone(self,desc):
"""
clone a node and all its subnodes (a whole branch)
we will create all nodes which existed in the source branch; for the referencers we use this strategy:
references pointing to a node under the source branch will be translated to references in the target branch
pointing to the corresponding new node in the target branch
references pointing outside the source branch will also be created in the cloned branch pointing to
the same target
Args:
desc: the source node descriptor
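Example (illustrative sketch, path is hypothetical):
    model.clone("root.myfolder")   # creates e.g. root.myfolder_<randomId> with the same structure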
"""
sourcePath = self.get_browse_path(desc)
if not sourcePath:
return False
targetPath = sourcePath+"_"+getRandomId()
sourceInfo = self.get_node_info(desc)
transferRoot = self.create_node_from_path(targetPath,properties={"type":sourceInfo["type"]})
#now iterate over the nodes and children and create the same nodes
self.__clone_children(desc,transferRoot)
self.__clone_referencer_targets(sourcePath,transferRoot)
return True
def execute_synchronous(self,id):
"""
execute a function synchronously here (this can be useful when executing a function from within another function)
"""
return self.__execution_thread(id)
def __execution_thread(self,id):
"""
the thread function to execute functions
it currently uses the global lock so it will lock out any other work on the model during execution
all inputs and outputs are found in the model
we also set the status and result from here, not needed to do that in the function
Args:
id: the node id of the function to be executed or the dict for an object call
"""
try:
if type(id) is str:
if self.model[id]["type"] == "function":
isFunction = True
else:
isFunction = False
with self.lock:
if isFunction:
if self.model[id]["autoReload"] == True and self.global_auto_reload_enabled():
# must reload the module
functionName = self.model[id]["functionPointer"]
module = importlib.reload(self.functions[functionName]["module"])
functionPointer = getattr(module, functionName.split('.', 1).pop())
# now update our global list
self.functions[functionName]["module"] = module
self.functions[functionName]["function"] = functionPointer
#self.logger.info(f"in execution Thread {threading.get_ident()}, executing {id} {functionName}")
#check the function
functionName = self.model[id]["functionPointer"]
functionPointer = self.functions[functionName]['function']
self.logger.info(f"in execution Thread {threading.get_ident()}, executing {id} {functionName}")
else:
functionPointer = id["functionPointer"]
functionName = functionPointer.__name__
parameter = id["parameter"]
id = id["id"] #for deeper down
#now set some controls
try:
node = self.get_node(id)
controlNode = node.get_child("control")
targetId = self.get_id("root.system.progress.targets")
if targetId:
self.disable_observers()
self.remove_forward_refs(targetId)
self.add_forward_refs(targetId,[controlNode.get_child("progress").get_id()])
self.enable_observers()
# we don't signal these things
self.disable_observers()
controlNode.get_child("status").set_value("running")
controlNode.get_child("result")#.set_value("pending")
controlNode.get_child("progress").set_value(0)
#controlNode.get_child("signal").set_value("nosignal")
startTime = datetime.datetime.now()
controlNode.get_child("lastStartTime").set_value(startTime.isoformat())
self.enable_observers()
except:
self.logger.error("error during execution preparation, this can be critical, maybe disabled observers")
self.log_error()
pass
# model lock open: we execute without model lock
if isFunction:
result = functionPointer(node) # this is the actual execution
else:
result = functionPointer(parameter)
#now we are back, set the status to finished
duration = (datetime.datetime.now()-startTime).total_seconds()
with self.lock:
# this is a bit dangerous, maybe the node is not there anymore?, so the
# inner functions calls of node.xx() will return nothing, so we try, catch
try:
self.logger.debug(f"function {functionName} execution completed in {duration} ")
self.disable_observers() # we don't signal these
controlNode.get_child("lastExecutionDuration").set_value(duration)
controlNode.get_child("status").set_value("finished")
controlExecutionCounter = controlNode.get_child("executionCounter")
controlExecutionCounter.set_value(controlExecutionCounter.get_value() + 1)
controlProgress = controlNode.get_child("progress")#.set_value(0)
controlProgress.set_value(0)
self.enable_observers()
self.notify_observers([controlExecutionCounter.get_id(),controlProgress.get_id()],"value")
if not isFunction:
result = True # for execution of member function we don't have a general return code
if result == True:
controlNode.get_child("result").set_value("ok")
self.publish_event("result of " + str(functionName) + ": " + controlNode.get_child("result").get_value())
else:
if controlNode.get_child("result").get_value() == "pending":
#if the functions hasn't set anything else
controlNode.get_child("result").set_value("error")
#also publish this result
self.publish_event("error in " + str(functionName) + ": " + controlNode.get_child("result").get_value())
# except:
# self.logger.error("problem setting results from execution of #"+str(id))
except Exception as ex:
errorString = str(sys.exc_info()[1])
self.logger.error("error inside execution thread, id" +str(id)+" functionname"+str(functionName)+errorString+" "+str(ex)+" "+str(traceback.format_exc()))
pass
except Exception as ex:
errorString = str(sys.exc_info()[1])
self.logger.error("error inside execution thread, id " +str(id)+" functionname"+str(functionName)+errorString+" "+str(ex)+" "+str(traceback.format_exc()))
controlNode.get_child("status").set_value("interrupted")
controlNode.get_child("result").set_value("error:"+errorString)
controlNode.get_child("progress").set_value(0)
self.publish_event("error in "+str(functionName)+": "+errorString)
return
def get_error(self):
s=f"{sys.exc_info()[1]}, {traceback.format_exc()}"
return s
def log_error(self):
self.logger.error(self.get_error())
def show(self):
"""
show the current model as an ASCII tree on the console
"""
with self.lock:
self.__show_subtree("1")
def save_model(self):
return self.save(self.currentModelName,includeData=False)
# save model and data to files
def save(self, fileName, includeData = True):
"""
save the model to disk, save the tables separately
the model file will be saved as ./models/fileName.model.json and the tables will be saved under
./models/filename.tablePath.npy
Args:
fileName to store it under, please don't give extensions
includeData : if set to False, we do NOT store the values of nodes of type table or file to disk
"""
self.logger.debug(f"save model as {fileName} with data {includeData}")
self.publish_event(f"saving model {fileName}...")
with self.lock:
try:
m = self.get_model_for_web() # leave out the tables
model_directory = None
model_filename = None
if os.path.isabs(fileName):
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
else:
file_directory = os.path.dirname(fileName)
if len(file_directory) == 0:
# we are only given a filename, use 21datalab subfolder models as directory
model_directory = os.path.join(os.path.dirname(__file__), "models")
model_filename = fileName
else:
# we are given a relative path + filename
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
if includeData:
self.ts.save(os.path.join(model_directory, model_filename))
f = open(os.path.join(model_directory, model_filename)+ ".model.json", "w")
f.write(json.dumps(m, indent=4))
f.close()
self.currentModelName = fileName
self.publish_event(f"model {fileName} saved.")
return True
except Exception as e:
self.logger.error("problem sving "+str(e))
self.publish_event(f"saving model {fileName} error")
return False
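    # A minimal usage sketch for save(), assuming a Model instance `m` with some nodes already created
    # and (for the absolute-path case) an existing target folder; bare names resolve to the ./models
    # subfolder next to this file:
    #   m.create_node_from_path("root.folder.myvar", {"type": "variable", "value": 1})
    #   m.save("mymodel")                                   # writes ./models/mymodel.model.json (+ ts data)
    #   m.save("/tmp/backup/mymodel", includeData=False)    # absolute path, structure only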
def move(self, nodeList, newParent, newIndex=None):
"""
        move a list of nodes under a new parent at the child position newIndex
        if the newParent is a referencer, we create references instead and keep the nodes where they are
        Args:
            nodeList [string]: a list of node descriptors of the nodes to move, a scalar is also allowed
            newParent [string]: a node descriptor for the new parent under which the nodes should appear
            newIndex (int): the position in the children of newParent where the moved nodes should appear
Returns:
True
"""
with self.lock:
if not type(nodeList) is list:
nodeList = [nodeList]
nodeIds = self.get_id(nodeList)
parentId = self.get_id(newParent)
if not parentId: return False
#check the special case that the parent is a referencer:
if self.model[parentId]["type"] == "referencer":
self.add_forward_refs(parentId,nodeIds)
self.logger.info("moves nodes as references "+ parentId + str(nodeIds))
return True
#for all others, we start moving nodes
self.logger.debug(f"model.move():{nodeIds}=>{parentId}")
try:
for id in nodeIds:
if id == parentId or id == "1":
self.logger.error("cant move " +id + " to " + parentId)
continue
oldParent = self.model[id]["parent"]
self.model[oldParent]["children"].remove(id) # remove the child from the old parent
self.model[id]["parent"]=parentId
                    if newIndex is not None:  # index 0 is a valid position
self.model[parentId]["children"].insert(newIndex,id) # at specific index
else:
self.model[parentId]["children"].append(id) # at the end
self.__notify_observers(oldParent, "children")
self.__notify_observers(parentId, "children")
except Exception as ex:
self.logger.error(f"problem moving {nodeIds} to new parent {parentId} this is critical, the model can be messed up {ex}")
return True
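    # A minimal usage sketch for move(), assuming nodes "root.a.x", "root.b" and a referencer "root.myrefs"
    # exist in a Model `m`; moving onto a referencer only adds references, the node itself stays in place:
    #   m.move("root.a.x", "root.b")          # "x" becomes a child of "root.b"
    #   m.move(["root.a.x"], "root.myrefs")   # creates a forward reference instead of moving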
def clean_ts_entries(self):
"""
        remove timeseries data that has no node and remove nodes of type timeseries that have no timeseries data
"""
self.logger.debug("clean_ts_entries(): check consistency of model and timeseries table..")
deleteNodes = []
for id, node in self.model.items():
if node["type"] == "timeseries":
info = self.ts.get_info(id)
if "not found" in info:
self.logger.info(f" {node["name"]}: has no time series date entry in the ts table, remove node")
deleteNodes.append(id)
for id in deleteNodes:
self.delete_node(id)
deleteTs=[]
for id in self.ts.get_items():
if id not in self.model:
self.logger.info(f" timeseries data {id} has no corresponding node in model .. delete the ts-data")
self.ts.delete(id)
def load(self,fileName,includeData = True, update = False):
"""
replace the current model in memory with the model from disk
please give only a name without extensions
the filename must be in ./models
Args:
            fileName (string): the name of the file without extension; we also accept a dict here (a set of nodes)
            includeData (bool): if set to False, the values for tables and files will NOT be loaded
            update (bool): if set to True, auto-correct missing entries in known templates
"""
result = False
self.logger.info(f"load {fileName}, includeData {includeData}")
with self.lock:
self.publish_event(f"loading model {fileName}...")
self.disable_observers()
try:
if type(fileName) is str:
model_directory = None
model_filename = None
if os.path.isabs(fileName):
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
else:
file_directory = os.path.dirname(fileName)
if len(file_directory) == 0:
# we are only given a filename, use 21datalab subfolder models as directory
model_directory = os.path.join(os.path.dirname(__file__), "models")
model_filename = fileName
else:
# we are given a relative path + filename
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
#if os.path.dirname(fileName)
f = open(os.path.join(model_directory, model_filename) + ".model.json","r")
model = json.loads(f.read())
self.model = model
f.close()
self.currentModelName = fileName
elif type(fileName) is dict:
self.model = copy.deepcopy(fileName) # take over the nodes
self.currentModelName = "fromNodes"
#now also load the tables
self.globalIdCounter = 0 #reset the counter and recover it further down
for nodeId in self.model:
if not self.idCreationHash:
#we only recover the counter if necessary
if int(nodeId)>self.globalIdCounter:
self.globalIdCounter = int(nodeId) # here, we recover the global id counter
if includeData:
if "version" in self.model["1"] and self.model["1"]["version"]>=0.1:
#new loader
self.ts.load(os.path.join(model_directory, model_filename))
else:
self.logger.debug("time series compatibility loader")
#we assume data in file and use the standard inmemory table storage
for nodeId in self.model:
if self.get_node_info(nodeId)["type"] == "table":
table = self.get_browse_path(nodeId)
data = numpy.load(os.path.join(model_directory, model_filename) + "." + table + ".npy")
#now find the time data, apply it to all variables
timeId=self.find_table_time_node(table)
ids = self.get_leaves_ids(table+".columns")
for id, column in zip(ids, data):
if id==timeId:
times = column
else:
self.ts.create(id)
self.set_properties({"type":"timeseries"},id)
self.ts.set(id,values=column)
for id in ids:
if id == timeId:
continue
self.ts.set(id,times=times)
self.clean_ts_entries() # make sure the model and ts table is consistent
self.instantiate_all_objects()
self.reset_all_objects()
self.enable_observers()
self.publish_event(f"loading model {fileName} done.")
self.model["1"]["version"]=self.version #update the version
result = True
except Exception as e:
self.logger.error("problem loading"+str(e))
self.publish_event(f"loading model {fileName} error.")
self.enable_observers()
result = False
if update:
self.update() # automatically adjust all widgets and other known templates to the latest style
return result
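    # A minimal round-trip sketch for save()/load(), assuming the model was previously saved under "mymodel";
    # load() replaces the in-memory model, re-instantiates objects and re-enables observers:
    #   m = Model()
    #   if m.load("mymodel", includeData=True, update=True):
    #       print(m.get_model_for_web()["1"])   # root node of the restored tree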
def create_differential_handle(self, user = None):
"""
make a copy of the current model and keep it as copy, create a handle for it and return that handle
        this new handle is at the same time the id of the new "user"; all following requests for differential updates
will be referred to this user id
Returns:
a hash handle for the current model
"""
with self.lock:
#newHandle = str(uuid.uuid4().hex) # make a new unique handle
newHandle = str(self.diffHandleCounter)
self.diffHandleCounter += 1
if not user:
#also create a new user
user = newHandle
self.differentialHandles[newHandle]= {
"user":user,
"model":self.get_model_for_web(),
"time": int(time.time()),
"updateCounter": self.modelUpdateCounter
}# make an entry by copying the whole model
return newHandle
def get_differential_update(self,oldHandle,newHandle=None):
"""
this function takes the copy of the model (hopefully) held under handle and compares it to the current model:
        the differences are analyzed and returned
        to avoid endless storage of old references, we have a deletion strategy: for every "user" we keep a max of
self.differentialHandlesMaxPerUser, if we have more, we delete the oldest
Args:
oldHandle (string): the unique id of the old version of the model
newHandle (string): the unique id of the new version to compare to, if not given, we take the current
and will automatically make a new entry for the current
delOld: if set, we remove the old entry from the memorized models with a one step delay
Returns (dict):
containing information about the changes between and old and new version of the model
key values:
"handle":(string): the handle under which we find the new version of the model
"newNodes": (dict) nodes which are new to the tree in the form Nodeid:{properties}
"deletedNodeIds": (list) list of node ids which have been deleted
"modifiedNodes": (dict) nodes which have changed properties: if so, we give the full updated node back
"""
with self.lock:
diff={"handle":None,"newNodes":{},"deletedNodeIds":[],"modifiedNodes":{}} # the response for web
if oldHandle not in self.differentialHandles:
return None # the old handle does not exist, we can't handle this request
if newHandle is None:
# this is the standard case, we generate the new handle now
user = self.differentialHandles[oldHandle]["user"]
# we make a quick check if the model has changed at all, if not we simply return the old handle
if self.differentialHandles[oldHandle]["updateCounter"] == self.modelUpdateCounter:
self.logger.debug("get_differential_update: shortcut for no changes")
diff["handle"] = oldHandle
return diff
newHandle = self.create_differential_handle(user=user) # this function also makes a copy of the current tree and puts it in the self.differential handles list
newModel = self.differentialHandles[newHandle]["model"]
else:
if newHandle in self.differentialHandles:
                    newModel = self.differentialHandles[newHandle]["model"]
else:
return None # the newhandle did not exist
oldModel = self.differentialHandles[oldHandle]["model"]
# delete strategy: for every "user" we track a maximum of self.differentialHandlesMaxPerUser
users={}
for handle,entry in self.differentialHandles.items():
user = entry["user"]
if user not in users:
users[user]={}
users[ user][ handle ] = entry["time"]
for user,entries in users.items():
if len(entries)> self.differentialHandlesMaxPerUser:
#must clean up history of that user, entries is a dict of handle:time
sortedKeys =[key for key, value in sorted(entries.items(), key=lambda item: item[1])]
removeKeys = sortedKeys[:-self.differentialHandlesMaxPerUser]
self.logger.debug("remove handle"+str(removeKeys)+" of user"+user)
for key in removeKeys:
del self.differentialHandles[key]
#find the changes between the models
for newNodeId in newModel:
if newNodeId not in oldModel:
#this node is not found in the old model, so it is new
diff["newNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
else:
                    #this node is in both models, check if there was a change inside the nodes
#for a deep comparison, serialize them
newNodeSerialized = json.dumps(newModel[newNodeId],sort_keys=True)
oldNodeSerialized = json.dumps(oldModel[newNodeId],sort_keys=True)
if newNodeSerialized != oldNodeSerialized:
#something is different, so return that node
diff["modifiedNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
            #now check for deleted ones, these appear in the old but not in the new
diff["deletedNodeIds"]=list(set(oldModel.keys())-set(newModel.keys()))
diff["handle"]=newHandle
return diff
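    # A minimal polling sketch for the differential update API, assuming a Model instance `m`;
    # a UI would keep the returned handle and only ask for changes since then:
    #   handle = m.create_differential_handle()     # snapshot of the current tree
    #   ...                                         # model changes happen elsewhere
    #   diff = m.get_differential_update(handle)
    #   handle = diff["handle"]                     # continue polling with the new handle
    #   print(diff["newNodes"], diff["modifiedNodes"], diff["deletedNodeIds"])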
def publish_event(self, event):
"""
        send out an event, e.g. for status information
        an event to send looks like
        event = { "id": 1123,
                  "event": "system.status",
                  "data": {"nodeId":xx, "value":..., "function":...}
                }
        Args:
            event [string or dict]
"""
self.logger.debug(f"publish_event ({event})")
self.modelUpdateCounter += 1
if type(event) is str:
#make sure the formatting is json compatible
event = event.replace("'",'"')# ' => "
event={"event":"system.status","data":{"text":event}}
event["id"]=self.modelUpdateCounter
for observerObject in self.observers:
observerObject.update(event)
def disable_observers(self):
self.lock_model()
#with self.lock:
self.disableObserverCounter += 1
#self.logger.debug(f"disable_observers() {self.disableObserverCounter}")
def enable_observers(self):
self.release_model()
if self.disableObserverCounter >0:
self.disableObserverCounter -=1
else:
self.logger.error("enable_observers without disable observers")
#self.logger.debug(f"enable_observers() {self.disableObserverCounter}")
def notify_observers(self, nodeIds, properties, eventInfo={}):
"""
        public wrapper for __notify_observers, expert use only!
"""
#self.logger.info(f"notify observses(), {str_lim(nodeIds,50)}, {properties}")
return self.__notify_observers(nodeIds,properties,eventInfo)
def get_referencers(self,descList,deepLevel = 0):
"""
get the references to this node via backtraversing the leaves algorithm
we look for parents through deepLevel levels and from there on we look back for referencers
        deepLevel is the number of extra parent levels: 1 means one extra level, 2 means two extra levels
Returns:
a list of referencers ids that point to the given descList nodes
"""
#convert all to nodes to ids
if type(descList) is not list:
descList = [descList]
startList = set([self.__get_id(node) for node in descList])
startList =set([node for node in startList if node]) #remove None and duplicates
referencers = set() #we collect the parents here and avoid duplicates
#in this first iteration we take the referencers pointing directly to the nodes or their parents
workList = startList.copy()
for level in range(deepLevel+1):
#from this level we take the backrefs
for id in workList:
referencers.update(self.model[id]["backRefs"])
#prepare parents for next round
parents=set()
for id in workList:
myParent=self.model[id]["parent"]
if myParent not in ["0","1"]: #root
parents.update([myParent]) #!use list to avoid break into chars
#now take the parents as currentList
workList = parents.copy()
            if not workList:
break #avoid turning cycles for nothing
#second step:
# now we take all final referencers and all referencers to those referencers with no limit
# (go back the leaves algorithm)
collectedReferencers = referencers.copy() # we take all we have so far
while True:
workList=set()
for id in referencers:
workList.update(self.model[id]["backRefs"])
collectedReferencers.update(workList)
if not workList:
break
else:
#one more round
referencers = workList.copy()
return list(collectedReferencers)
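    # A minimal sketch for get_referencers(), assuming "root.vars.a" is pointed to by a referencer
    # "root.widget.selectedVariables" in Model `m`; deepLevel also catches referencers on parent folders:
    #   ids = m.get_referencers("root.vars.a", deepLevel=1)
    #   print([m.get_browse_path(i) for i in ids])   # e.g. ['root.widget.selectedVariables']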
def __notify_observers(self, nodeIds, properties, eventInfo={} ):
"""
this function is called internally when nodes or properties have changed. Then, we look if any
observer has to be triggered
we also increase the counter and time on the root.observers.modelObserver
Args:
            nodeIds: the node ids where a change occurred
properties: the property or list of properties of the node that has changed
"""
#exception for the progress node
if type(properties) is not list:
properties = [properties]
if type(nodeIds) is not list:
nodeIds = [nodeIds]
if self.disableObserverCounter>0:
#only one exception: progress works always
mustReturn = True
with self.lock:
for nodeId in nodeIds:
if self.model[nodeId]["name"] == "progress":
mustReturn = False
break
if mustReturn:
#self.logger.info(f"__notify_observers disable return {nodeIds} {properties}")
return
with self.lock:
# this is for the tree updates, any change is taken
self.modelUpdateCounter = self.modelUpdateCounter + 1 #this is used by the diff update function and model copies
collectedEvents=[]
enableTree = self.get_node("root.system.enableTreeUpdateEvents")
if enableTree and enableTree.get_value()==False:
pass
else:
# Notify all observers about the tree update, this is a standard
event = {
"id": self.modelUpdateCounter,
"event": "tree.update",
"data": ""}
collectedEvents.append(event) # send later
names =[self.model[id]["name"] for id in nodeIds]
self.logger.debug(f"__notify_observers {len(nodeIds)} ids:{str_lim(names,100)}: {properties}")
triggeredObservers=[] # we use this to suppress multiple triggers of the same observer, the list holds the observerIds to be triggered
#p=utils.Profiling("__notify.iterate_nodes")
            referencers = self.get_referencers(nodeIds,deepLevel=5) #deepLevel 5: nodes can be organized by the user in a hierarchy
nodeId = self.__get_id(nodeIds[0])#take the first for the event string,
#p.lap(f"get refs for {nodeId}")
self.logger.debug(f"__notify on {len(referencers)} referencers: {str_lim([self.get_browse_path(id) for id in referencers],200)}")
for id in referencers:
if self.model[id]["name"] == "targets" and self.model[self.model[id]["parent"]]["type"] == "observer":
# this referencers is an observer,
observerId = self.model[id]["parent"]
observer = self.get_children_dict(observerId)
# check if trigger
if observer["enabled"]["value"] == True:
#self.logger.debug(f"{self.model[nodeId]["name"]} is targeted by observer {self.get_browse_path(observerId)}")
if observerId in triggeredObservers:
self.logger.debug(f"we have triggered the observer {self.get_browse_path(observerId)} in this call already, pass")
continue
#self.logger.debug(f"check properties to triggered the observer {self.get_browse_path(observerId)}")
#check if any of the observed properties matches
propertyMatch = False
for property in properties:
if property in observer["properties"]["value"]:
propertyMatch=True
break
if not propertyMatch:
#self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} no property match ")
pass
else:
self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} for change in {property}")
self.model[observer["triggerCounter"]["id"]]["value"] = self.model[observer["triggerCounter"]["id"]]["value"]+1
self.model[observer["lastTriggerTime"]["id"]]["value"] = datetime.datetime.now().isoformat()
for funcNodeId in self.get_leaves_ids(observer["onTriggerFunction"]["id"]):
self.logger.debug(f"execute ontrigger function {funcNodeId}")
self.execute_function(funcNodeId)
if "triggerSourceId" in observer:
self.model[observer["triggerSourceId"]["id"]]["value"] = nodeId
if observer["hasEvent"]["value"] == True:
#self.logger.debug(f"send event {observer["eventString"]["value"]}")
#also send the real event
#self.modelUpdateCounter = self.modelUpdateCounter+1
event = {
"id": self.modelUpdateCounter,
"event": observer["eventString"]["value"],
"data": {"nodeId":observerId,"sourceId":nodeId,"sourcePath":self.get_browse_path(nodeId)}}
if self.model[nodeId]["type"] not in ["column","file","timeseries"]:
event["data"]["value"]=self.model[nodeId]["value"]
#some special handling
try:
if event["event"] == "system.progress":
progressNode = self.get_node(self.get_leaves_ids("root.system.progress.targets")[0])
event["data"]["value"] = progressNode.get_value()
event["data"]["function"] = progressNode.get_parent().get_parent().get_browse_path()
else:
eventNode = self.get_node(observerId)
extraInfoNode = eventNode.get_child("eventData")
if extraInfoNode:
extraInfo = extraInfoNode.get_value()
if type(extraInfo) is not dict:
extraInfo={"info":extraInfo}
event["data"].update(extraInfo)
if eventInfo:
event["data"]["_eventInfo"]=eventInfo #put this only if we have info
except Exception as ex:
self.logger.error(f"error getting extra info for event {ex}, {sys.exc_info()[0]}")
#for all other events, take the event data if there is one (as json)
self.logger.debug(f"generate event {event}")
collectedEvents.append(event)
triggeredObservers.append(observerId)# next time, we don't trigger
#p.lap("complete backrefs {nodeId}, {backrefs}")
#self.logger.debug(p)
#self.logger.debug("now send the events")
#event = copy.deepcopy(event)
for event in collectedEvents:
for observerObject in self.observers:
observerObject.update(event)
self.logger.debug(f"done sending {len(collectedEvents)} events")
def create_observer(self):
# Instantiate a new observer
observer = Observer(self)
# attach it to the model
self.attach_observer(observer)
# return the observer
return observer
def attach_observer(self, observer):
# Add a new observer
self.logger.debug(f"Adding new observer: {id(observer)}")
with self.lock:
self.observers.append(observer)
def detach_observer(self, observer):
with self.lock:
try:
self.observers.remove(observer)
self.logger.debug(f"Removing observer: {id(observer)}")
except ValueError:
self.logger.exception("Trying to remove an observer which doesn't exist in the list of observers.")
def set_column_len(self,nodeDescriptor,newLen):
"""
        adjust the length of a column, extensions are nan-padded
        Args: nodeDescriptor: the node
              newLen (int): the new length of the column
        Returns:
            the new length that was set or None on problems
"""
with self.lock:
id = self.get_id(nodeDescriptor)
if not id: return None
if self.model[id]["type"] != "column":
self.logger.error("set_column_len: not a column")
return None
#now make the adjustments
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = numpy.full(newLen, numpy.nan)
else:
#is already an array
if len(self.model[id]['value']) == newLen:
#nothing to do
pass
if len(self.model[id]['value']) > newLen:
self.model[id]['value'] = self.model[id]['value'][0:newLen]
elif len(self.model[id]['value']) < newLen:
                    self.model[id]['value'] = numpy.append(self.model[id]['value'], numpy.full(newLen-len(self.model[id]['value']), numpy.nan))
else:
#same len
pass
return newLen
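    # A minimal sketch for set_column_len(), assuming "root.table.data.var1" is a node of type "column"
    # in Model `m`; shrinking truncates, growing pads with nan:
    #   m.set_column_len("root.table.data.var1", 100)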
def get_upload_folder_files(self, matchFilter=None, blackList = []):
"""
Args:
            matchFilter: a string that must be contained in the files to deliver
blackList: a list of filenames which should not be delivered
Returns list of files with absolute file names, list of files with fileNames
"""
full_path = os.path.realpath(__file__) # returns a string representing the canonical path, argument file is a file system path
path, filename = os.path.split(full_path)
        folder = os.path.join(path, 'upload')
absFileNames = []
foundFileNames = []
#now iterate the uploaded files
fileNames = os.listdir(folder)
for idx,fileName in enumerate(fileNames):
if matchFilter:
if matchFilter not in fileName:
continue # this file will be ignored
if fileName in blackList:
continue
foundFileNames.append(fileName)
        absFileNames = [os.path.join(folder, fileName) for fileName in foundFileNames]
return foundFileNames,absFileNames
def update(self):
"""
update all known widgets to the latest template including complex backward compatibility changes
:return:
"""
self.logger.info("update() running...")
self.disable_observers()
try:
# the ts widgets:
            # now go through the widgets and update each according to the template
# now find all type widget
newNodes = {}
helperModel = Model()
helperModel.disable_observers()
helperModel.create_template_from_path("root.widget", self.get_templates()['templates.timeseriesWidget'])
widgets = []
for id, props in self.model.items():
if props["type"] == "widget":
widgetObject = self.get_node(id)
if widgetObject.get_child("widgetType").get_value() == "timeSeriesWidget":
widgets.append(id)
self.logger.debug(f"update():found widget {widgetObject.get_browse_path()}")
for id in widgets:
path = self.get_browse_path(id)
mirrorBefore = self.get_branch_pretty(path)
self.create_template_from_path(path,self.get_templates()['templates.timeseriesWidget']) # this will create all nodes which are not there yet
# now make specific updates e.g. linking of referencers, update of list to dicts etc.
# if colors is a list: make a dict out of it
colors = self.get_value(f"{id}.hasAnnotation.colors")
tags = self.get_value(f"{id}.hasAnnotation.tags")
if type(colors) is list:
colors = {v:{"color":colors[idx],"pattern":None} for idx,v in enumerate(tags)}
self.logger.debug(f"update(): set value{id}.hasAnnotation.colors := {colors} ")
self.set_value(f"{id}.hasAnnotation.colors",colors)
if not "visibleTags" in mirrorBefore["hasAnnotation"] or (self.get_value(f"{id}.hasAnnotation.visibleTags") != mirrorBefore["hasAnnotation"]["visibleTags"][".properties"]["value"]):
#it is different or new, so we created it now
visibleTags = {tag:True for tag in tags}
#make sure that from the colors, we take them as well
updateVisibleTags = {tag:True for tag in colors}
visibleTags.update(updateVisibleTags)
self.set_value(f"{id}.hasAnnotation.visibleTags",visibleTags)
self.logger.debug(f"update(): set value{id}.visibleTagss := {visibleTags} ")
#make sure the hasAnnotation.annotations referencer points to newannotations as well
self.add_forward_refs(f"{id}.hasAnnotation.annotations",[f"{id}.hasAnnotation.newAnnotations"],allowDuplicates=False)
#now make sure the observers have at least the required properties enabled
widget = self.get_node(id)
helperRoot = helperModel.get_node("root.widget")
template = self.get_templates()['templates.timeseriesWidget']
children = helperRoot.get_children(3)
print(f"2 level children {[node.get_browse_path() for node in children]}")
for child in helperRoot.get_children():
if child.get_properties()["type"] == "observer":
widgetNode = widget.get_child(child.get_name()).get_child("properties")
helperNode = child.get_child("properties")
for prop in helperNode.get_value():
current = widgetNode.get_value()
if prop not in current:
current.append(prop)
widgetNode.set_value(current)
for child in helperRoot.get_children(3):
if child.get_properties()["type"] == "referencer":
self.logger.debug(f"found referencer {child.get_name()}")
# now adjust the references of new nodes and of the ones that were there
targets = child.get_properties()["forwardRefs"]
if targets:
targets = [helperModel.get_browse_path(ref) for ref in targets]
requiredTargets = [widget.get_browse_path()+"."+".".join(ref.split(".")[2:]) for ref in targets]
self.logger.debug(f"required targets {requiredTargets}")
#now check in the model
widgetNodePath = widget.get_browse_path()+ child.get_browse_path()[len(helperRoot.get_browse_path()):]
widgetNode = self.get_node(widgetNodePath)
#now check if we have them
targetPaths = [tNode.get_browse_path() for tNode in widgetNode.get_targets()]
for target in requiredTargets:
if target not in targetPaths:
self.logger.debug(f"adding ref {widgetNode.get_browse_path()} => {target}")
self.add_forward_refs(widgetNode.get_id(),[target])
#now the system progress observer
if not self.get_node("root.system.progress"):
self.create_template_from_path("root.system.progress",self.get_templates()['system.observer'])
self.set_value("root.system.progress.hasEvent",True)
self.set_value("root.system.progress.eventString","system.progress")
self.set_value("root.system.progress.properties",["value"])
self.set_value("root.system.progress.enabled",True)
except Exception as ex:
self.logger.error(f" {ex} , {sys.exc_info()[0]}")
helperModel.delete()
helperModel.delete()
self.enable_observers()
# ########################################
# time series api
def time_series_create(self,desc):
id = self.get_id(desc)
return self.ts.create(id)
def time_series_delete(self,desc):
id = self.get_id(desc)
return self.ts.delete(id)
def time_series_insert(self, desc, values=None, times=None, allowDuplicates = False):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.insert(id,values, times,allowDuplicates=allowDuplicates)
self.__notify_observers(id, "value")
return result
def time_series_append(self, desc, values=None, times=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.append(id,values, times)
self.__notify_observers(id, "value")
return result
def time_series_delete_area(self,desc,start=None,end=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.delete_area(id,start=start,end=end)
self.__notify_observers(id, "value")
return result
def time_series_merge(self, desc, values = None, times = None):
id = self.get_id(desc)
if not id in self.model:
return False
return self.ts.merge(id,values=values,times=times)
def time_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
        with self.lock:
result = self.ts.set(id,values=values,times=times)
self.__notify_observers(id, "value")
return result
def time_series_get_table(self,
variables,
tableDescriptor = None,
start=None,
end=None,
noBins=None,
includeIntervalLimits=False,
resampleTimes=None,
format="default",
toList = False,
resampleMethod = None,
copy=True):
"""
get a time series table from variables (nodes of type "timeseries").
Args:
            variables [list of node descriptors]: nodes to be part of the requested data table (ordered!)
tableDescriptor : a desc for the table where the variables reside
possible addressing of te request nodes:
1) ids or browsepaths of nodes (no tableDescriptor needed)
2) names of nodes and tableDescriptor of the table (names must be unique in the columns of the table)
            start, end [float]:
                the start and end time of the table given as seconds since epoch
                we also allow the special case of end = 0 and start = -interval
                we also allow the special case of start given and end = 0
            noBins(int): the number of samples to be returned inside the table between start and end time,
                if None is given, we return all samples (rows) we have in the table and do not aggregate
includeIntervalLimits [bool]: if set to true, we will include one more data point each left and right of the requested time
format: [enum] "default", "flat", see return description
resampleMethod [enum]:
how to resample if we need to; options are:
None (if not specified): sample and hold
"linear": linear interpolation
"linearfill": linear interpolation and also interpolate "nan" or "inf" values in the original data
toList: (bool) True: return data as python list, False: return numpy arrays
examples:
- get all data of the variables
data = m.get_time_series_table(["root.mytable.variables.a","root.mytable.variables.b"]) # get all data
- request max 300 values of data (this is what the UI does)
data = m.get_time_series_table(["a","b"],"root.mytable",start=1581483065.323,end=1581483080.323,noBins=300,includeIntervalLimits=True)
- request data and resample to equiditant 25 sec spacing, also fill possible nan values with interpolation
times = list(range(1581483065,1581483065+100,25))
data = m.get_time_series_table(["a","b"],"root.mytable",resampleTimes = times,resampleMethod = "linearfill")
Returns(dict)
formatting depends on the "format" option
"defaut": return the result as {"var_a":{"values":[],"__time":[]}, "var_b":{"values":[],"__time":[]..}
"flat" return the result as {"var_a":[], "var_a__time":[],"var_b":[],"var_b__time":[]....}
the variable descriptor are the ones given in the request
"__time" : list of timestamps for the returned table in epoch seconds as float64
"values": the list of float values of one of the requested variables
"""
if tableDescriptor:
tableId = self.get_id(tableDescriptor)
tableVars = self.get_leaves(tableId+".columns")
else:
tableId = None
if type(start) is str:
start = date2secs(start)
if type(end) is str:
end = date2secs(end)
with self.lock:
#first check if all requested timeseries exist and have type time series
#vars = [] #self.get_id(variables)
if not type(variables) is list:
variables= [variables]
varIds = {} # NodeId: request descriptor
for var in variables:
varId = self.get_id(var)
if not varId:
#try to find per columns and table desc
found = False
if tableId:
for tableVar in tableVars:
if tableVar["name"] == var:
varId = tableVar["id"]
found = True
break
if not found:
self.logger.error(f"requested variable {var} does not exist")
return False
if self.model[varId]["type"]!="timeseries":
self.logger.error(f"requested variable {var} not timeseries, instead {self.model[varId]["type"]}")
return False
varIds[varId]=var #remeber it for later
table = self.ts.get_table(list(varIds.keys()), start=start, end=end, copy=copy, resampleTimes=resampleTimes, noBins = noBins, includeIntervalLimits=includeIntervalLimits,resampleMethod=resampleMethod)
            #now map the descriptors back to the request: if it was a browsepath, we return a browsepath; if it was an id, we return an id
# make some formatting
def convert(input,toList=toList):
if toList:
return list(input)
else:
return input
result = {}
for k,v in table.items():
if format=="flat":
result[varIds[k]]=convert(v["values"])
result[varIds[k]+"__time"]=convert(v["__time"])
else:
result[varIds[k]] = {"values":convert(v["values"]),"__time":convert(v["__time"])}
#if len(variables) == 1:
# #we only have one variable, so we return without descriptor
# result = result[list(result.keys())[0]]
return result
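    # A minimal usage sketch for time_series_get_table(), assuming two timeseries nodes under
    # "root.mytable.variables" in Model `m` (see the docstring above for the addressing variants):
    #   data = m.time_series_get_table(["root.mytable.variables.a", "root.mytable.variables.b"],
    #                                  start=1581483065.3, end=1581483080.3, noBins=300,
    #                                  includeIntervalLimits=True, toList=True)
    #   values = data["root.mytable.variables.a"]["values"]
    #   times  = data["root.mytable.variables.a"]["__time"]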
def time_series_get_info(self,name=None):
return self.ts.get_info(name)
def time_series_get_raw(self,id,start=None,end=None):
table = self.ts.get_table([id], start=start, end=end, copy=False, resampleTimes=None,
noBins=None, includeIntervalLimits=False,
resampleMethod=None)
result = table[id]
return result
def time_series_insert_blobs(self, tableDesc, blobs=[]):
""" blob is a dict or list of dicts of key and values containing one time base like
the descriptors of teh variables can be ids, browsepaths or just names (without dots)
if the descriptors are names, we try to find them in the model, they must exist there uniquely, otherwise
they cant be processed
we also autocreate the table or missing variables
the data will be put in a table:
- we try to find the table based on one of the variables, if not found, we create the table
{
"a": [1.5,1.6,1.7]m
"b": [2,3,4]
"__time" :[100001,100002,100003]
}
"""
if not type(blobs) is list:
blobs=[blobs]
#first, find the table
with self.lock:
tableId = self.get_id(tableDesc)
if not tableId:
#try to find the table from the first node
#table not found, create it
tableId = self.create_node_from_path(tableDesc,properties={"type":"table"})
if tableId:
columnsId = self.create_node(parent=tableId, name="columns", properties={"type": "referencer"})
variablesId = self.create_node(parent=tableId, name="variables", properties={"type": "folder"})
else:
self.logger.error(f"cant create table {tableDesc}")
return False
else:
columnsId = self.get_child(tableId,"columns")
variablesId = self.get_child(tableId, "variables")
#now we know the tableId, columnsId, variablesId
# iterate over all blobs and find the ids of the names in the blobs, if not found, create it
# exchange the descriptors to ids
desc2Id = {} # key: the descriptor from the input blob v: the id in the model
tableVars = self.get_leaves(columnsId)
desc2Id = {dic["name"]:dic["id"] for dic in tableVars} # key: the descriptor from the input blob v: the id in the model, preload with the names
#convert all to ids
newBlobs=[]
idsInBlobs=[]
for blob in blobs:
newBlob={}
for k,v in blob.items():
if k=="__time":
newBlob[k]=v
else:
#does this id already exist?
if k in desc2Id:
id = desc2Id[k]
else:
id = None
#try to find
for var in tableVars:
if var["name"] == k:
                                    id = var["id"]
break
if not id:
#still not found, we need to create it
id = self.create_node(parent=variablesId,name=k,properties={"type": "timeseries"})
if not id:
self.logger.error(f"cant find or create {name}")
continue
else:
self.add_forward_refs(columnsId,[id])
desc2Id[k]=id #remember to speed up next time
newBlob[id] = v
idsInBlobs.append(id)
newBlobs.append(newBlob)
self.logger.debug(f"inserting blobs {len(newBlobs)}")
self.__notify_observers(idsInBlobs, "value")
result = self.ts.insert_blobs(newBlobs)
return result
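    # A minimal usage sketch for time_series_insert_blobs(), assuming Model `m`; the table "root.mytable"
    # and the variables "a"/"b" are auto-created if they don't exist yet:
    #   m.time_series_insert_blobs("root.mytable", {"a": [1.5, 1.6, 1.7],
    #                                               "b": [2, 3, 4],
    #                                               "__time": [100001, 100002, 100003]})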
# ########################################
# event series api
def event_series_create(self,desc,map={}):
id = self.get_id(desc)
if "eventMap" in self.model[id]:
self.model[id]["eventMap"].update(map)
else:
self.model[id]["eventMap"]=map.copy()
return self.ts.create(id)
def event_series_get_new_number_entry(self,id):
eventMap = self.model[id]["eventMap"]
numbers = [v for k, v in eventMap.items()]
newNumber = max(numbers)+1
while newNumber in numbers:
newNumber = newNumber+1
return newNumber
def event_series_get_event_number(self, desc, event, autoCreate=True):
id = self.get_id(desc)
if not id:
return None
with self.lock:
eventMap = self.model[id]["eventMap"] # a dict like {"starting":1, "machineStop":2,...}
if type(event) in [str,numpy.str_]:
if event not in [k for k,v in eventMap.items()]:
if not autoCreate:
return None
# we must put a new eventString
if eventMap == {}:
newEventNumber = 1
else:
newEventNumber = self.event_series_get_new_number_entry(id)
self.model[id]["eventMap"][event] = newEventNumber
return newEventNumber
else:
#is a known event string, get the number
return eventMap[event]
else:
#this is a number already, check if it is in the map
eventNumbers = [v for k,v in eventMap.items()]
if event in eventNumbers:
return event
else:
if not autoCreate:
return None
#must create a new entry
try:
#to make sure we have only numbers there
newEventString = "event_"+str(int(event))
self.model[id]["eventMap"][newEventString]=int(event)
except:
self.log_error()
return None
return event
def event_series_insert(self, desc, values=None, times=None, allowEventDuplicates = False):
"""
Args:
values: list of events, where the event is either an eventString or an event number
if values is a scalar, we assume that for all times the same event will be inserted
            allowEventDuplicates: setting this to True allows the same event to appear multiple times at the same time
different events are always allowed on the same time
"""
id = self.get_id(desc)
if not id in self.model:
return None
if not values or not times:
return None
if not(type(values) is list or type(values) is numpy.ndarray):
values = [values]*len(times)
#convert the values to numbers and create new map entry if needed
numbers = numpy.asarray([self.event_series_get_event_number(id,event) for event in values],dtype=numpy.int)
#convert the times to epoch if not already done
epochs = numpy.asarray([t if type(t) is not str else date2secs(t) for t in times ],dtype=numpy.float64)
if not allowEventDuplicates:
# we must delete the events which exist already at the same time with the same event
data = self.event_series_get(desc)
takeIndices = numpy.full(len(times),True)
for idx,tim in enumerate(times):
duplicates = numpy.where(data["__time"]==tim)[0]
for pos in duplicates:
if numbers[idx] == data["values"][pos]:
takeIndices[idx] = False
numbers = numbers[takeIndices]
epochs = epochs[takeIndices]
with self.lock:
            #on the TimeSeries class, allowDuplicates means that the same time can appear multiple times
            # such that different or the same events can happen at the same time and thus produce the same
            # time stamp in the time series
result = self.ts.insert(id,numbers, epochs, allowDuplicates=True)# we allow 2 events to appear on the same time!
self.__notify_observers(id, "value")
return result
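    # A minimal usage sketch for event_series_insert(), assuming a node "root.events.machine" already exists
    # in Model `m` and was registered via event_series_create(); unknown event strings get new map entries:
    #   m.event_series_create("root.events.machine", map={"startMachine": 1, "stopMachine": 2})
    #   m.event_series_insert("root.events.machine", values=["startMachine", "stopMachine"],
    #                         times=[1546437120.2, 1546437125.7])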
def event_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
        with self.lock:
# now "refresh" the event map
#self.model[id]["eventMap"]={}
numbers = [self.event_series_get_event_number(id, event) for event in values]
result = self.ts.set(id,values=numbers,times=times)
self.__notify_observers(id, "value")
return result
def event_series_get(self,desc, start=None,end=None,format="default",eventFilter=None):
"""
get events from a event series
Args:
            desc: node descriptor
start , end [float]:
the start and endtime of the table given as seconds since epoch
we also allow the special case of endTime = 0 and startTime = -interval
we also allow the special case of startTime given and end time= 0
format: [enum] "default"
eventFilter : [string] a list of eventStrings as positive match filter
toList: (bool) True: return data as python list, False: return numpy arrays
examples:
- get all data of the variables
data = m.get_time_series_table(["root.mytable.variables.a","root.mytable.variables.b"]) # get all data
- request max 300 values of data (this is what the UI does)
data = m.get_time_series_table(["a","b"],"root.mytable",start=1581483065.323,end=1581483080.323,noBins=300,includeIntervalLimits=True)
- request data and resample to equiditant 25 sec spacing, also fill possible nan values with interpolation
times = list(range(1581483065,1581483065+100,25))
data = m.get_time_series_table(["a","b"],"root.mytable",resampleTimes = times,resampleMethod = "linearfill")
Returns(dict)
formatting depends on the "format" option
"defaut": return the result as {"values":[],"__time":[], "eventstrings": "map":{1:"myevent",2:"anotherevent"}
"""
id = self.get_id(desc)
if not id:
return None
data = self.ts.get_table([id], start=start, end=end)
if data == {}:
#this variable is not in the store
data = {id:{"values":numpy.asarray([]),"__time":numpy.asarray([])}}
eventMap = self.model[id]["eventMap"].copy()
reverseMap = {v:k for k,v in eventMap.items()}
values = data[id]["values"].astype(numpy.int)
times = data[id]["__time"]
#now filter
if eventFilter:
filter = []
if type(eventFilter) is not list:
eventFilter = [eventFilter]
for evString in eventFilter:
if evString in eventMap:
filter.append(eventMap[evString])
indices = [idx for idx,val in enumerate(values) if val in filter]
values = values[indices]
times = times[indices]
result = {
"values":values,
"__time":times,
"eventMap":eventMap,
"eventStrings":[reverseMap[v] for v in values]
}
if format == "iso":
#convert the timestamps to iso
result["__time"]=[epochToIsoString(t) for t in result["__time"]]
if format == "events":
existingEvents = set(result["values"])
events = {reverseMap[ev]:[] for ev in existingEvents}
for ev,ti in zip(result["values"],result["__time"]):
events[reverseMap[ev]].append(ti)
result["events"]=events
del result["values"]
del result["__time"]
del result["eventStrings"]
return result
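    # A minimal usage sketch for event_series_get(), assuming the node "root.events.machine" from above;
    # format="events" groups the timestamps per event string:
    #   data = m.event_series_get("root.events.machine", eventFilter="startMachine")
    #   grouped = m.event_series_get("root.events.machine", format="events")   # {"startMachine": [t1, t2, ...]}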
def event_series_insert_blob(self,blob):
"""
insert events in various blob syntax
Args:
desc: the node descriptor
blob: a dictionary in various styles
                a) {
                    "node": nodedescriptor,
                    "events": "startMachine",
                    "__time": ["2018.01.01T00:10:08.445+02:00",1546437120.2,1546437121.2,1546437122.2]  # allows iso or epoch
                   }
                b) {
                    "node": nodedescriptor,
                    "events": ["startMachine","stopMachine","startMachine","startMachine"],
                    "__time": ["2018.01.01T00:10:08.445+02:00",1546437120.2,1546437121.2,1546437122.2]  # allows iso or epoch
                   }
                c) {
                    "node": nodedescriptor,
                    "events": [
                        {"event": "startMachine", "__time": "2018.01.01T00:10:08.445+02:00"},
                        {"event": "stopMachine", "__time": "2018.01.01T00:10:08.445+02:00"}
                    ]
                   }
Returns
true/false for success
"""
if type(blob["events"]) is not list:
#style a)
events = blob["events"]
times = blob["__time"]
else:
#events is a list
if type(blob["events"][0]) is dict:
#style c)
events = []
times = []
for d in blob["events"]:
events.append(d["event"])
times.append(d["__time"])
else:
#style b)
events = blob["events"]
times = blob["__time"]
return self.event_series_insert(blob["node"],events,times)
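    # A minimal usage sketch for event_series_insert_blob(), assuming the node "root.events.machine" exists;
    # this uses style b) from the docstring above:
    #   m.event_series_insert_blob({"node": "root.events.machine",
    #                               "events": ["startMachine", "stopMachine"],
    #                               "__time": ["2018-01-01T00:10:08+02:00", "2018-01-01T00:20:00+02:00"]})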
def event_series_delete(self,desc,start=None, end = None, eventsToDelete=[]):
id = self.get_id(desc)
if not id:
return None
if start == None and end == None and eventsToDelete == []:
#delete all
with self.lock:
self.model[id]["eventMap"]={}
result = self.ts.set(id, values=[], times=[])
else:
#delete some events
with self.lock:
data = self.ts.get_table([id])
if not start:
start = 0
if not end:
end = numpy.inf
times = data[id]["__time"]
values = data[id]["values"]
over = times>=start
under = times<=end
deleteMaskTime = over & under
if eventsToDelete == []:
deleteMaskValues = numpy.full(len(deleteMaskTime),True)
else:
deleteMaskValues = numpy.full(len(deleteMaskTime),False)
for ev in eventsToDelete:
evNumber = self.model[id]["eventMap"][ev]
mask = values == evNumber
deleteMaskValues = deleteMaskValues | mask
deleteMask = deleteMaskTime & deleteMaskValues
times = times[~deleteMask]
values = values[~deleteMask]
self.event_series_set(id,values,times)
print(data)
def get_object(self,desc):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return None
if "object" not in self.model[id]:
return None
return self.model[id]["object"]
def instantiate_object(self,desc,writeToModel=True):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return False
try:
className = self.model[id]["class"]
if "autoReload" in self.model[id] and self.model[id]["autoReload"]==True and self.global_auto_reload_enabled():
# must reload the module
module = importlib.reload(self.objectClasses[className]["module"])
classDefinition = getattr(module, className.split('.', 1).pop())
# now update our global list
self.objectClasses[className]["module"] = module
self.objectClasses[className]["class"] = classDefinition
classDefinition = self.objectClasses[className]["class"]
object = classDefinition(self.get_node(id)) #instantiate the object
if writeToModel:
self.model[id]["object"]=object
return object
except:
self.log_error()
return None
def instantiate_all_objects(self):
with self.lock:
#make a list first for iteration, we can't iterate over the model,
# as the instantiation of object might produce new nodes while we iterate
objects = [k for k,v in self.model.items() if v["type"] == "object"]
for id in objects:
try:
self.instantiate_object(id)
except:
self.log_error()
def reset_all_objects(self):
with self.lock:
#make a list first for iteration, we can't iterate over the model,
# as the instantiation of object might produce new nodes while we iterate
objects = [k for k,v in self.model.items() if v["type"] == "object"]
for id in objects:
try:
self.get_object(id).reset(None)
except:
self.log_error()
def global_auto_reload_enabled(self):
if self.get_value("root.system.enableAutoReload") == False:
return False
else:
return True # this will also be the case if the node is not there, as the get_value return None then
def create_test(self,testNo=1):
"""
        this function creates tests for demonstration purposes
"""
if testNo == 1:
self.create_node("root",name="variables",type="folder")
for var in ["f0","f1","f2","f3","count","time","back"]:
self.create_node("root.variables",name=var,type="column")
self.create_node_from_path('root.folder2.myconst',{"type":"const","value":"21data"})
self.create_node_from_path('root.folder2.myfkt', {"type": "function"})
#for the visu
self.create_node_from_path('root.visualization.pipelines.occupancy.url',{"type":"const","value":"http://localhost:5006/bokeh_web"})
self.create_node_from_path('root.visualization.pipelines.demo2.url',{"type":"const","value":"http://21data.io"})
#create an official table
template = [
{
"name": "description",
"type": "const",
"value": "this is a great table"
},
{
"name": "columns",
"type": "referencer",
},
{
"name": "timeField",
"type": "referencer",
},
{
"name": "numberOfRows",
"type": "variable",
"value": 0
}
]
self.create_node("root", name="mytable", type="table")
self.create_nodes_from_template("root.mytable", template=template)
for var in ["f0","f1","f2","f3","time","back"]:
self.add_forward_refs("root.mytable.columns",["root.variables."+var])
self.add_forward_refs("root.mytable.timeField", ["root.variables.time"])
#add data
startTime=datetime.datetime(2018,1,1,0,0,0,tzinfo=pytz.UTC)
vars={"f0":0.01,"f1":0.02,"f2":0.04,"f3":0.1,"back":0.01}
SIZE = 10*60 # in seconds units
STEP = 0.1
#!!! we are producing size/step time points
""" for i in range(SIZE):
dataDict = {}
for var in vars:
value = numpy.cos(2*numpy.pi*vars[var]*i/SIZE*3)
dataDict["root.variables."+var]=value
mytime = startTime + datetime.timedelta(seconds = i)
dataDict["root.variables.time"] = mytime
#print(mytime)
self.add_timeseries(dataDict)
"""
startEpoch = date2secs(startTime)
times = numpy.arange(startEpoch,startEpoch+SIZE,STEP,dtype=numpy.float64)
print("we have time:",times.shape)
for var in vars:
values = numpy.cos(2*numpy.pi*vars[var]*times)
id=self.get_id("root.variables."+str(var))
if var =="back":
#we make -1,0,1 out of it
values = numpy.round(values)
self.model[id]["value"]=values.tolist()
id = self.get_id("root.variables.time")
self.model[id]["value"]=(times).tolist()
#now correct the background
#now make some widget stuff
self.create_node_from_path('root.visualization.widgets.timeseriesOne',{"type":"widget"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectableVariables',
{"type":"referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectedVariables',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.startTime',
{"type": "variable","value":None})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.endTime',
{"type": "variable","value":None})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.bins',
{"type": "const","value":300})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation',
{"type": "const", "value": True})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasSelection',
{"type": "const", "value": False})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.annotations',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations',
{"type": "folder"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.tags',
{"type": "const","value":["one","two"]})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.colors',
{"type": "const","value":["yellow","brown","greay","green","red"]})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.table',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.lineColors',
{"type": "const", "value": ["blue", "yellow", "brown", "grey", "red"]})
self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectedVariables',['root.variables.f0','root.variables.f1','root.variables.f3'])
self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectableVariables',['root.variables'])
self.add_forward_refs('root.visualization.widgets.timeseriesOne.table',['root.mytable'])
self.create_node_from_path('root.visualization.widgets.timeseriesOne.observer',{"type":"referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.observerUpdate', {"type": "const","value":["line","background","annotations"]})
#now the annotations
anno = [
{
"name": "tags",
"type": "const",
"value": ["one","two"]
},
{
"name": "startTime",
"type": "const",
"value": None
},
{
"name": "endTime",
"type": "const",
"value": None
},
{
"name": "text",
"type": "const",
"value": "this is a great annotation"
}
]
tags=["one","two","one","one","two","two","one","one","one","two","one","one"]
self.create_node_from_path("root.annotations",{"type":"folder"})
startTime = datetime.datetime(2018, 1, 1, 0, 0, 0, tzinfo=pytz.UTC)
for i in range(10):
newAnno = copy.deepcopy(anno)
newAnno[1]["value"] = (startTime + datetime.timedelta(minutes=(i*10))).isoformat()
newAnno[2]["value"] = (startTime + datetime.timedelta(minutes=(i*10+1))).isoformat()
newAnno[0]["value"] = [tags[i],tags[i+1]]
newAnnoPath = "root.annotations.anno"+str(i)
self.create_node_from_path(newAnnoPath,{"type":"annotation"})
self.create_nodes_from_template(newAnnoPath,newAnno)
#also add the annotations to the widget
self.add_forward_refs("root.visualization.widgets.timeseriesOne.hasAnnotation.annotations",["root.annotations","root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations"])
#make a real function
self.create_node_from_path("root.functions",{"type":"folder"})
self.create_nodes_from_template("root.functions",[self.templates["testfunction.delayFunctionTemplate"]])
#now make cutom function to trigger something
self.create_nodes_from_template("root.functions",[self.templates["counterfunction.counterFunctionTemplate"]])
#now hook the function output to the observer of the plot
self.add_forward_refs('root.visualization.widgets.timeseriesOne.observer',['root.functions.counterFunction.output'])
#now make custom buttons
buttons = [
{
"name":"button1",
"type":"folder",
"children":[
{"name":"caption","type":"const","value":"start learner"},
{"name":"counter", "type": "variable", "value":0},
{"name": "onClick", "type": "referencer"}
]
}
]
self.create_node_from_path("root.visualization.widgets.timeseriesOne.buttons",{"type":"folder"})
self.create_nodes_from_template("root.visualization.widgets.timeseriesOne.buttons",buttons)
self.add_forward_refs("root.visualization.widgets.timeseriesOne.buttons.button1.onClick",["root.functions.counterFunction"])
#now the backgrounds
self.create_node_from_path("root.visualization.widgets.timeseriesOne.hasBackground",{"type":"const","value":True})
self.create_node_from_path("root.visualization.widgets.timeseriesOne.background",{"type":"referencer"})
self.add_forward_refs("root.visualization.widgets.timeseriesOne.background",["root.variables.back"])
self.create_node_from_path("root.visualization.widgets.timeseriesOne.backgroundMap",{"type":"const","value":{"1":"red","0":"green","-1":"blue","default":"white"}})
self.show()
elif testNo == 2:
#we take the full test number 1 and rearrange some things
self.create_test(1)
self.currentModelName = "occupancydemo"
import data.occupancy_data.occupancy as occ
occData = occ.read_occupancy("./data/occupancy_data/datatest2.txt")
#create an official table
template = [
{
"name": "description",
"type": "const",
"value": "this is the occupancy data table"
},
{
"name": "columns",
"type": "referencer",
},
{
"name": "timeField",
"type": "referencer",
},
{
"name": "variables",
"type": "folder",
}
]
self.create_node("root", name="occupancy", type="table")
self.create_nodes_from_template("root.occupancy", template=template)
for var in occData:
path = "root.occupancy.variables."+var
self.create_node_from_path(path,{"type":"column"})
self.set_value(path,occData[var])
self.add_forward_refs("root.occupancy.columns",[path])
self.add_forward_refs("root.occupancy.timeField",["root.occupancy.variables.date"])
#now create the classification
self.create_node("root.occupancy", name="classification", type="column")
self.set_value("root.occupancy.classification", [0]*len(occData[list(occData.keys())[0]]))
self.add_forward_refs("root.occupancy.columns", ["root.occupancy.classification"])
#create another TS-widget
self.create_node_from_path('root.visualization.widgets.timeseriesOccupancy', {"type": "widget"})
self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy',modeltemplates.timeseriesWidget)
self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy.buttons.button1',modeltemplates.button)
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectedVariables',["root.occupancy.variables.Temperature"])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectableVariables',["root.occupancy.variables"])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.table',['root.occupancy'])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.background',['root.occupancy.classification'])
self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {"0": "brown", "1": "yellow", "-1": "blue", "default": "white"}) #match annotation colors
#self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {"0": "blue", "1": "black", "-1": "blue", "default": "white"}) #match annotation colors
self.set_value('root.visualization.widgets.timeseriesOccupancy.hasAnnotation.tags',["busy","free"])
#now create the logistic regression
self.create_nodes_from_template('root',[self.templates["logisticregression.logisticRegressionTemplate"]])
self.add_forward_refs('root.logisticRegression.input',['root.occupancy.variables.Temperature', 'root.occupancy.variables.Light','root.occupancy.variables.CO2'])
self.add_forward_refs('root.logisticRegression.output', ['root.occupancy.classification'])
self.add_forward_refs('root.logisticRegression.annotations',['root.visualization.widgets.timeseriesOccupancy.hasAnnotation.newAnnotations'])
self.set_value('root.logisticRegression.categoryMap', {"busy": 1, "free": 0})
#also hook the button on it
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.buttons.button1.onClick',['root.logisticRegression'])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.observer',['root.logisticRegression.executionCounter']) # observe the execution of the scorer
self.show()
elif testNo == 3:
# make some nodes
for id in range(10):
self.create_node_from_path("root.add.var"+str(id), {"type": "variable", "value": id+100})
for id in range(100):
self.create_node_from_path("root.remove.var"+str(id), {"type": "variable", "value": id+100})
self.create_node_from_path("root.change_name_one")
self.create_node_from_path("root.change_value")
self.create_node_from_path("root.move.first")
self.create_node_from_path("root.move.second")
self.create_node_from_path("root.refs",properties={"type":"referencer"})
self.add_forward_refs("root.refs",["root.move.first","root.move.second","root.move"])
#now start a thread that changes the tree periodically
def __update_tree():
while True:
time.sleep(3.0)
with self.lock:
self.logger.debug("__update_tree")
self.create_node_from_path("root.add.dyn"+str(uuid.uuid4()))
removeFolder = self.get_id("root.remove")
if self.model[removeFolder]["children"]:
self.delete_node(self.model[removeFolder]["children"][0])
id = self.get_id("root.change_name_one")
if id:
self.model[id]["name"]="change_name_two"
else:
id = self.get_id("root.change_name_two")
self.model[id]["name"]="change_name_one"
id = self.get_id("root.move")
self.model[id]["children"].reverse()
id=self.get_id("root.refs")
self.model[id]["forwardRefs"].reverse()
self.set_value("root.change_value",int(uuid.uuid4())%100)
self.testThread = threading.Thread(target=__update_tree)
self.testThread.start()
if __name__ == '__main__':
def test1():
m=Model()
m.create_node("root",name="folder1")
m.create_node("root.folder1",name="folder2")
m.create_node("2",name="second")
m.create_node("root",name="myreferencer",type="referencer")
m.create_node("root.folder1",name="myvar",type="variable")
m.set_value("root.folder1.myvar",44.5)
m.add_forward_refs("root.myreferencer",["root.folder1"])
m.add_property("root.folder1.folder2","uasource","192.168.5.6")
m.show()
m.get_model()
m.delete_node("root.myreferencer")
return m
def test_template():
m=Model()
template = {
"myfunction": {
"type": "function",
"value": "someValue",
"opcua":"opc.tcp://129.160.1.1:4880::n2=2;s=mystrin"
},
"myreferencer": {
"type": "referencer",
"forwardRefs": ['.myfolder.var1', '.myfolder.var2', '.myfolder.var3']
},
"myfolder": {
"type": "folder",
"children": {
"var1": {"type": "const", "value": "1"},
"var2": {"type": "variable"},
"var3": {"type": "timeseries"},
}
},
}
m.create_nodes_from_template(template=template)
m.show()
def save_test():
print("save and load test")
m=Model()
m.create_test()
m.save("savetest")
n=Model()
n.load("savetest")
if len(n.get_model())!= len(m.get_model()):
print("unequal size")
return False
#now compare
mModel = m.get_model()
nModel = n.get_model()
for nodeId in mModel:
#print("check",nodeId)
try:
if nModel[nodeId]!=mModel[nodeId]:
print("unequal before after ",nodeId,m[nodeId],n[nodeId])
return False
except:
print("cant find",nodeId)
return False
print("savetest passed")
return True
def plugintest():
m=Model()
m.create_node("root", name="folder1")
m.create_nodes_from_template("root.folder1",m.templates["testfunction.delayFunctionTemplate"])
m.show()
m.execute_function("root.folder1.delayFunction")
statusNode = m.get_node("root.folder1.delayFunction.status")
progressNode = m.get_node("root.folder1.delayFunction.progress")
while(statusNode.get_value()!="finished"):
print("progress is",progressNode.get_value())
time.sleep(0.3)
print("execution re===================")
m.show()
def getnodetest():
m=Model()
m.create_node("root", name="folder1")
m.create_node("root.folder1", name="folder2")
m.create_node("root.folder1", name="myvar", type="variable")
myvar = m.get_node("root.folder1.myvar")
myvar.set_value(33)
print("value",myvar.get_value())
def testfunctions_test():
m = Model()
m.create_test(1)
m.show()
table= m.get_timeseries_table(["root.variables.f0","root.variables.f1","root.variables.time"],noBins=25)
print("shape",table.shape)
for row in table.T:
for elem in row:
print(str("%3.7f"%elem)," ",end="")
print("")
def time_conver_test():
d1=datetime.datetime(2018,1,1,0,0,0,tzinfo = pytz.UTC)
print(d1)
s1 = date2secs(d1)
print(s1)
d2 = secs2date(s1)
print(d2)
d3 ="2018-01-01T00:10:08.445+02:00"
print(d3)
d4=dateutil.parser.parse(d3)
print(d4)
s4=date2secs(d4)
print(s4)
d5=secs2date(s4)
print(d5)
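    # Illustrative sketch (not part of the original tests, names per dates.py as used above):
    # the date helpers convert between datetime objects, ISO strings and epoch seconds, e.g.
    #   epoch = date2secs(datetime.datetime(2018, 1, 1, tzinfo=pytz.UTC))   # -> epoch seconds
    #   date  = secs2date(epoch)                                            # -> timezone-aware datetime
    #   date2secs("2018-01-01T00:10:08.445+02:00")                          # ISO strings are accepted as well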
def table_test():
m=Model()
print("this test creates a table and writes some data in")
template = [
{
"name": "type",
"type": "const",
"value": "timeSeriesTable"
},
{
"name":"description",
"type": "const",
"value": "this is a great table"
},
{
"name":"data",
"type":"folder",
"children":[
{"name":"var1","type": "column","value":[]},
{"name":"var2","type": "column","value":[]},
{"name":"var3","type": "column","value":[]},
{"name":"time","type": "column","value":[]}
]
},
{
"name":"columns",
"type": "referencer",
"forwardRefs": ['.data.var1', '.data.var2', '.data.var3',".data.time"]
},
{
"name":"timeField",
"type": "referencer",
"forwardRefs":['.data.time']
},
{
"name": "numberOfRows",
"type": "variable",
"value":0
}
]
m.create_node("root", name="mytable",type="table")
m.create_nodes_from_template("root.mytable",template=template)
m.show()
#now write some data with autocreates
mytime = datetime.datetime.now(pytz.timezone("CET"))
myepoch=date2secs(mytime)
blob = {"root.mytable.data.var1":1,"root.mytable.data.var2":2,"root.mytable.data.time":myepoch,"root.mytable.data.newvar":99}
m.append_table(blob)
m.show()
#now add more data but leave out var
blob = {"root.mytable.data.var1": 10, "root.mytable.data.var2": 20, "root.mytable.data.time": myepoch}
m.append_table(blob)
blob = {"root.mytable.data.var1": 10, "root.mytable.data.var2": 20, "root.mytable.data.var4": 4, "root.mytable.data.time": myepoch}
m.append_table(blob)
m.show()
def test_table_autocreate():
mytime = datetime.datetime.now(pytz.timezone("CET"))
myepoch=date2secs(mytime)
blob = {"root.data.var1":1,"root.data.var2":2,"root.folder.time":myepoch,"root.data.newvar":99}
m=Model()
m.append_table(blob)
m.show()
def test_create_from_path():
m=Model()
m.create_node_from_path("root.myfolder.myfolder2.var",{"type":"variable","value":33})
m.show()
def test_get_children():
m=Model()
m.create_test()
nodes = m.get_node_with_children('root.folder2')
#lastnode = '10'
#print(m.get_path(lastnode))
print(json.dumps(nodes,indent=4))
def test_create():
m=Model()
m.create_test(1)
m.show()
def test_get_forwards():#
#in this test, we check the forwards get results over folders, referencers etc.
m=Model()
m.create_node_from_path("root.folder.var1",{"type":"variable"})
m.create_node_from_path("root.folder.var2", {"type": "variable"})
m.create_node_from_path("root.folder.var3", {"type": "variable"})
m.create_node_from_path("root.ref1", {"type": "referencer"})
m.create_node_from_path("root.ref2", {"type": "referencer"})
m.add_forward_refs("root.ref1",["root.folder"])
m.add_forward_refs("root.ref2", ["root.ref1"])
m.show()
res=m.get_leaves("root.ref1")
print(res)
for k in res:
print(k["name"])
res = m.get_leaves("root.ref2")
for k in res:
print(k["name"])
def pickle_save():
import pickle
m=Model()
m.create_test(2)
# write python dict to a file
output = open('pickle_save.pkl', 'wb')
pickle.dump(m.get_model(), output)
output.close()
n=Model()
# read python dict back from the file
pkl_file = open('pickle_save.pkl', 'rb')
restore = pickle.load(pkl_file)
pkl_file.close()
print("compare after pickle restre",restore==m.get_model())
if __name__ == '__main__':
#############
#test1()
#ts_test1()
#test_template()
save_test()
pickle_save()
#plugintest()
#getnodetest()
#table_query_test()
#testfunctions_test()
#time_conver_test()
#test_create_from_path()
#table_test()
#test_table_autocreate()
#test_get_children()
#test_get_forwards()
#test_create()
    #read in the command line options:
# demo1: create the test for the demo1, and store it in file (option2)
#
if len(sys.argv) > 1:
if sys.argv[1] == "demo1":
fileName = sys.argv[2]
print("creating demo and save as ",fileName)
m = Model()
m.create_test()
m.show()
fileName = sys.argv[2]
m.save(fileName)
| import glob
import json
import copy
import importlib
import threading
import logging
import pytz
#for tables
import numpy
import numpy as np
import datetime
import dateutil.parser
import sys
import os
import time
import uuid
import hashlib
import random
import traceback
from dates import *
# type hints
from typing import List
import modeltemplates
# for Observer
from queue import Queue
from queue import Empty
import utils
from timeseries import TimeSeriesTable
from dates import *
import inspect
from utils import str_lim
"""
next Todo
-
 - execute: problem in the thread with the execution
- code documentation
- google document
-
"""
sys.path.append("./plugins") #for the importlib loader, doesn't understand relative paths
#sys.path.append("./private") #for the importlib loader, doesn't understand relative paths
myGlobalDir = os.path.dirname(os.path.realpath(__file__)) # holds the directory of this script
def getRandomId():
return '%08x' % random.randrange(16 ** 8)
#used as an OOP wrapper for the flat and procedural style of the model class
class Node():
""" used as an OOP wrapper for the flat and procedural style of the model class
it is a convenient way to access nodes and their hierarchy and internals
"""
def __init__(self,myModel,myId):
""" a node can be created by calling the
mynode = model.get_node("root.mynode") or
mynode = Node(mymodel,"123")
Returns:
a node object for further access to values, hierarchy etc.
"""
self.model = myModel # this is not a copy!!
self.id = myId
def __repr__(self):
return 'Node(id={:}, value={:})'.format(self.id, self.get_value())
def get_value(self):
""" Returns:
the "value" property of the node
None if node has no "value"
"""
return self.model.get_value(self.id)
#####################
# time series node API
def get_time_series(self, start=None,
end=None,
noBins=None,
includeIntervalLimits=False,
resampleTimes=None,
format="default",
toList = False,
resampleMethod = None):
"""
Returns
dict with ["__time":[...],"values":[...]
"""
browsePath = self.model.get_browse_path(self.id)
data = self.model.time_series_get_table(variables = [browsePath],
tableDescriptor=None,
start=start,
end=end,
noBins=noBins,
includeIntervalLimits=includeIntervalLimits,
resampleTimes=resampleTimes,
format=format,
toList=toList,
resampleMethod=resampleMethod)
if data !={} :
return data[browsePath]
else:
return None
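    # Illustrative usage sketch (hedged, node paths are assumptions); kept as comments so the module stays importable:
    #   signal = model.get_node("root.mymachine.temperature")
    #   data = signal.get_time_series(start="2018-01-01T00:00:00+00:00", noBins=300)
    #   if data is not None:
    #       times, values = data["__time"], data["values"]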
def get_raw_time_series(self,start=None,end=None):
return self.model.time_series_get_raw(self.id,start=start,end=end)
def add_references(self,targetNodes,deleteAll=False):
"""
add references from the node to the targets
Args:
targetNodes: node or list of nodes to reference to
deleteAll: if set true, we delete all existing references before creating the new
Returns
True/False for success/error
"""
if deleteAll:
self.model.remove_forward_refs(self.id)#this deletes all existing
if type(targetNodes) is not list:
targetNodes = [targetNodes]
targetIds = [node.get_id() for node in targetNodes]
return self.model.add_forward_refs(self.id,targetIds)
def set_value(self,value):
"""
special support for "column" types: if a scalar is given, we make a "full" array
"""
if self.get_properties()["type"] == "column":
if type(value) != numpy.ndarray and type(value) != list:
#we have a scalar, so we set it
#get the len of the table
timeNode = self.get_table_time_node()
length = len(timeNode.get_value())
value = numpy.full(length,value,dtype=numpy.float64)
return self.model.set_value(self.id,value)
def set_time_series(self,values=None,times=None):
"""
replaces the time series with value and times, it deletes the existing
"""
return self.model.time_series_set(self.id,values=values,times=times)
def insert_time_series(self,values=None,times=None,allowDuplicates = False):
"""
insert data, if the time stamp exists already, we replace it
"""
return self.model.time_series_insert(self.id,values=values, times=times, allowDuplicates=allowDuplicates)
def merge_time_series(self,values=None, times=None):
""" merge the times series of mergeNode into this node"""
return self.model.time_series_merge(self.id,values = values,times=times)
def delete_time_series(self,start=None,end=None):
return self.model.time_series_delete_area(self.id, start=start, end=end)
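    # Illustrative sketch (assumed node, values kept as comments): the three write helpers differ in behaviour:
    # set_time_series() replaces all existing data, insert_time_series() inserts/overwrites by time stamp,
    # merge_time_series() merges the given values/times into the existing series, e.g.
    #   node.set_time_series(values=[1.0, 2.0], times=[1514764800, 1514764860])
    #   node.insert_time_series(values=[3.0], times=[1514764920])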
#####################
# event series node API
def get_event_series(self, start=None, end=None, format="default",eventFilter = None):
return self.model.event_series_get(self.id,start=start,end=end,format=format,eventFilter=eventFilter)
def set_event_series(self, values=None, times=None):
"""
replaces the event series with value and times, it deletes the existing
"""
return self.model.event_series_set(self.id,values=values,times=times)
def insert_event_series(self,values=None,times=None,allowEventDuplicates = False):
return self.model.event_series_insert(self.id,values,times,allowEventDuplicates=allowEventDuplicates)
def delete_event_series(self,start=None, end = None, eventsToDelete=[]):
return self.model.event_series_delete(desc=self.id,start=start,end=end,eventsToDelete=eventsToDelete)
def get_parent(self):
""" Returns:
a Node()-instance of the parent of the current node,
None if no parent available
"""
nodeInfo = self.model.get_node_info(self.id)
if nodeInfo:
return self.model.get_node(nodeInfo["parent"])
else:
return None
def get_child(self,childName):
"""
Args:
childName(nodedescription):
Returns:
a Node() instance of the child holding the childName
None if the current node does not have a child with the name childName
"""
nodeInfo = self.model.get_node_info(self.id)
if nodeInfo:
for childId in nodeInfo['children']:
childInfo = self.model.get_node_info(childId)
if childInfo["name"] == childName:
return self.model.get_node(childId)
return None
def delete(self):
"""
        delete this node from the model; note that the Node object itself is not destroyed, but it is disconnected from the model
so should not be used anymore afterwards
:return:
"""
return self.model.delete_node(self.id)
def create_child(self,name=None,type="folder",value=None,properties={}):
"""
create a node under the current node, if the node exists already, we get the node
Args:
name [string] the child name
type [string] the type of the node
value [any] direct assignment of values
            properties [dict] a dict with further settings of properties like value, type etc
Returns:
the node objects or none if not available
"""
if name == None:
name = '%08x' % random.randrange(16 ** 8)
id = self.model.create_node(parent=self.id,name=name,type=type,value=value,properties=properties)
if id:
return self.model.get_node(id)
else:
#we try to get it anyways
return self.get_child(name)
def get_children(self, deepLevel=1):
""" Returns:
a list of Node()-objects which are the children of the current node
args:
deepLevel: set >1 to get children and childrens' children
"""
nodeInfo = self.model.get_node_info(self.id)
children = []
if nodeInfo["children"]:
children=[self.model.get_node(id) for id in nodeInfo['children'] ]
while deepLevel>1:
deepLevel -=1
childrenOld = children.copy()
for child in childrenOld:
children.extend(child.get_children())
            #remove duplicates via id:
childDict = {child.get_id():child for child in children} # same keys(id) will only be there once
children = list(childDict.values())
return children
def get_properties(self):
""" Returns:
a dictionary holding the properties of the node like {"value":123,"name":"myVariable","children":...}
"""
nodeInfo = self.model.get_node_info(self.id)
return copy.deepcopy(nodeInfo)
def get_type(self):
"""
            Returns:
the type of the node
"""
return self.get_property("type")
def get_property(self,property):
"""
Args:
property: the property name asked for
Returns:
the value of the property behind the property given
None if the property does not exist
"""
nodeDict =self.get_properties()
if property in nodeDict:
return self.get_properties()[property]
else:
return None
def set_properties(self,properties):
"""
add or modify properties of a node
Args:
properties [dict] holding key,value for the properties
Returns
True for ok, False for not done
"""
return self.model.set_properties(properties,nodeDesc=self.id)
def get_model(self):
""" this function should only be used for testing, we should never be in the need to access the model inside
Returns:
the underlying model of type Model() class
"""
return self.model
def get_target_ids(self):
""" this function returns the target ids of a referencer as a list, not resolving the leaves"""
if self.get_properties()["type"] != "referencer":
return None
return self.get_properties()["forwardRefs"]
def get_target(self):
""" this function returns the first direct taret node of a referencer not resolving the leaves"""
if self.get_properties()["type"] == "referencer":
targets = self.get_properties()["forwardRefs"]
if targets:
return Node(self.model,targets[0])
return None
def get_targets(self):
""" this function returns the target Nodes of a referencer as a list, not resolving the leaves"""
if self.get_properties()["type"] != "referencer":
return None
targets = []
for nodeid in self.get_properties()["forwardRefs"]:
targets.append(Node(self.model,nodeid))
return targets
def get_leaves(self):
""" this function returns a list of Nodes containing the leaves where this referencer points to
this functions works only for nodes of type "referencer", as we are following the forward references
leaves are defined as following:
1) all nodes that are listed under the forward references and which are not of type referencer or folder
2) if nodes pointed to are referencer, the targets are again analyzed
3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves
folders and referencers inside the folder are not taken into account
doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1
Returns:
all nodes which are considered leaves as a list of Node() objects
"""
leaves = self.model.get_leaves(self.id) # a list of node dicts
leaveNodes = []
for leave in leaves:
leaveNodes.append(Node(self.model,leave["id"]))
return leaveNodes
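    # Illustrative example of the leaf rules above (assumed tree, kept as comments): a referencer pointing
    # at a folder resolves to the folder's non-folder/non-referencer children, e.g.
    #   root.refs (referencer) --> root.folder (folder containing var1, var2 and a subfolder)
    #   Node(model, "root.refs").get_leaves()   # -> [var1, var2]; the subfolder itself is skipped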
def get_leaves_ids(self):
"""
get the list of ids of the leaves, see get_leaves()
Returns:
a list of ids of the leaves
"""
return self.model.get_leaves_ids(self.id)
def get_id(self):
""" Returns: the nodeid (which is generated by the system) """
return self.id
def get_browse_path(self):
""" Returns: the browsepath along the style "root.myfolder.myvariable..." """
return self.model.get_browse_path(self.id)
def get_name(self):
""" Returns: the name of the node without the path """
return self.model.get_node_info(self.id)["name"]
def get_node(self,desc):
return self.model.get_node(desc)
def get_table_time_node(self):
""" if the current node belongs to a table, then we can get the time node
            Returns:
                (obj Node()) the time node of the table the current node belongs to
                None if it cannot be found
"""
timeNode = self.model.find_table_time_node(self.id)
if timeNode:
return Node(self.model,timeNode)
else:
return None
def get_table_len(self):
"""
if the current node is a type "table", we get the current len
Return:
the len of the columns of the table
"""
return self.model.get_table_len(self.id)
def get_table_node(self):
"""
if the current node is a column of a time series table, we get the according table node of type "table"
Return:
a Node() of type "table" which is the table of the current node
"""
tableId = self.model.find_table_node(self.id)
if tableId:
return self.model.get_node(tableId)
else:
return None
def get_time_indices(self,startTime,endTime):
""" works only for the time node, it looks to find the timeField node of the table to which the node belongs
then tries to find start and end time inside the timeField column and returns the index (rownumber) which are
INSIDE the given startTime, endTime
Args:
startTime: the startTime to look up ,supported formats: epoch seconds, datetime object, iso string
endTime: the startTime to look up ,supported formats: epoch seconds, datetime object, iso string
Returns:
            (numpy array) index numbers containing the rows of the table that fall inside the given [startTime, endTime] interval
None for not finding table, timeField, start-endTimes whatsoever
"""
try:
startTime = date2secs(startTime)
endTime = date2secs(endTime)
times = numpy.asarray(self.get_value())
indices = numpy.where((times >= startTime) & (times <= endTime))[0]
return indices
except:
return None
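    # Illustrative usage sketch (assumed paths, kept as comments): select the table rows inside a time window;
    # the limits can be epoch seconds, datetime objects or ISO strings as described above.
    #   timeNode = model.get_node("root.mytable.data.time")
    #   rows = timeNode.get_time_indices("2018-01-01T00:00:00+00:00", "2018-01-02T00:00:00+00:00")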
def execute(self):
return self.model.execute_function(self.id)
def execute_synchronous(self):
return self.model.execute_synchronous(self.id)
def instantiate(self):
return self.model.instantiate_object(self.id)
def get_object(self):
return self.model.get_object(self.id)
def get_logger(self):
return self.model.logger
def connect_to_table(self,tableNode):
"""
connect a node to a table, it must be a column type
the node itself will be reset and filled with numpy.inf and prepared to work with the table:
an array will be created with np.inf of the current table size
and the column will be hooked to the table referencer
Returns:
True on success
"""
if self.get_property("type") != "column":
return False
#now make an array of np.inf of the current table size and apply the value
timeNode = tableNode.get_table_time_node()
if not timeNode:
return False
tableLen = len(timeNode.get_value())
self.set_value(numpy.full(tableLen,numpy.inf,dtype=numpy.float64))
#now hook it as column to the table
#check if we are part of it already
for column in tableNode.get_child("columns").get_leaves():
if column.get_id() == self.get_id():
return True
#now connect it to the table
return self.model.add_forward_refs(tableNode.get_child("columns").get_id(), [self.id],allowDuplicates=False)
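    # Illustrative sketch (assumed paths, kept as comments): attaching a freshly created column to an
    # existing table; the new column is padded with numpy.inf to the current table length as described above.
    #   newColId = model.create_node("root.mytable.data", name="var5", type="column")
    #   model.get_node(newColId).connect_to_table(model.get_node("root.mytable"))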
def get_columns(self):
"""
get the columns nodes of a table without the time node
can be executed on the table node
Returns:
list of node objects which are the columns of the table without the time node
"""
if self.get_properties()["type"] != "table":
return None
nodes = self.get_child("columns").get_leaves()
timeNode = self.get_table_time_node()
return [node for node in self.get_child("columns").get_leaves() if node.get_id() != timeNode.get_id()]
class Observer:
# The observer needs a reference to the model, because the rest service is not able to detect
# when the client connection is closed, but the observer message handling loop can detect it
# this way the observer can detach itself from the model, when the client is disconnected
# there are two queues involved: the updateQueue holding events pushed by the observers from the model
# and the eventQueues which is the filtered updateQueue (filtering avoids sending multiple identical events in short time
def __init__(self, model):
self.model = model
# Message queues to store the new events and last time stamps
self.updateQueue = Queue()
self.eventQueues = {} # k,v = event:{"lasttimestamp":datetime,"queue":Queue()
self.minWaitTime = 0.500 #in seconds float
# use the logger of th model
self.logger = self.model.logger
self.lock = threading.RLock()
        #preload queue: this is a workaround as the browser does not get the first 2 events immediately
# it actually doesn't help ..?
for i in range(2):
self.updateQueue.put({"event":"_preload","id":"","data":{"xy":str(i)}})
def update(self, event):
"""
        inform the observer about the occurrence of an event
        Args:
            event (dict): the event information, missing keys are filled from {"data": "", "id": "", "event": ""}
"""
defaultEvent = {"data":"","id":"","event":""}
defaultEvent.update(event)
self.updateQueue.put(defaultEvent)
#self.logger.debug(f"Qup {id(self)} {defaultEvent['event']}, {defaultEvent['id']}")
def get_event(self):
"""
        get the next event from the observer class, this is used as a generator for the webserver
        we also filter out events to avoid a train of identical events
        the filtering uses self.minWaitTime, within that period we don't send identical events;
events are "identical", if they have the same "event" and "data"
"""
self.logger.debug(f"Observer {id(self)} get_event()")
stop_event_processing = False # This flag shows when to stop the event processing
while not stop_event_processing:
try:
# Try to retrieve an item from the update queue
event = self.updateQueue.get(block=True,timeout=self.minWaitTime)
#self.logger.debug(f"event pick {event}")
#create an eventIdentification, this is used to filter out repeated events
                # we select the eventIdentification in a way that events carrying unique information keep it
# we take all information from the event.data field, so only the events WITHOUT unique data will be removed
# those are typically the tree.update events
eventIdentification = event["event"] #the event name itself
for key in event["data"]:
eventIdentification = eventIdentification+str(key)+str(event["data"][key])
#now sort this event into the queues of eventids
if eventIdentification not in self.eventQueues:
                    # this is a new type/identification of event, create an entry in the event queue
# put the event in the queue and make the last timestamp so that we send it out now
self.eventQueues[eventIdentification]={"lastTimeStamp":0,"queue":Queue()}
self.eventQueues[eventIdentification]["queue"].put(event)
except Exception as ex:
# this happens if we time out the queue get, no problem, just continue
#self.logger.error(f"Exception observer {id(self)} thread self.updateQueue.get: {ex},{str(sys.exc_info()[0])}")
pass
#now go over all the sorted event queues and check what to send out:
if 0:
#show the queues
for k,v in self.eventQueues.items():
q = v["queue"]
qLen = q.qsize()
#self.logger.debug(f"Queue {k}: len {qLen} {[q.queue[id] for id in range(qLen)]}")
try:
now = time.time()
for eventIdentification,entry in self.eventQueues.items(): # entry is {"lasttimestampe": "queue":
#self.logger.debug(f"observer {id(self)} check queue of {eventIdentification} size: {entry['queue'].qsize()},last:{entry['lastTimeStamp']}, now:{now}, ready: {now > (entry['lastTimeStamp']+self.minWaitTime)}")
if (not entry["queue"].empty()) and (now > (entry["lastTimeStamp"]+self.minWaitTime)):
#send this event, the timeout was met, we pull the first event from the queue, trash the remaining ones
"""
old code
self.eventQueues[eventIdentification]["lastTimeStamp"]=now
#send out this event
myEvent = self.eventQueues[eventIdentification]["queue"].get()
event_string = f'id:{myEvent["id"]}\nevent: {myEvent["event"]}\ndata: {myEvent["data"]}\n\n'
self.logger.debug(f'Observer {id(self)} sending event: {event_string}')
#pull empty the queue
if self.eventQueues[eventIdentification]['queue'].qsize():
self.logger.debug(f"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]['queue'].qsize()}")
while not self.eventQueues[eventIdentification]["queue"].empty():
self.eventQueues[eventIdentification]["queue"].get(False)
self.logger.debug(f"Qyield {id(self)} : {myEvent}")
yield event_string
"""
self.eventQueues[eventIdentification]["lastTimeStamp"]=now
#send out this event
#pull empty the queue
if self.eventQueues[eventIdentification]['queue'].qsize():
#self.logger.debug(f"Qtrash observerinstance{id(self)} eventident {eventIdentification} size {self.eventQueues[eventIdentification]['queue'].qsize()}")
while not self.eventQueues[eventIdentification]["queue"].empty():
myEvent = self.eventQueues[eventIdentification]["queue"].get(False)
event_string = f'id:{myEvent["id"]}\nevent: {myEvent["event"]}\ndata: {json.dumps(myEvent["data"])}\n\n'
#self.logger.debug(f"Qyield {id(self)} : {myEvent}")
yield event_string
# This exception is raised when the generator function is exited, which means that the
            # client side connection to the SSE stream was closed, thus the observer can be removed
except GeneratorExit:
self.logger.warning(f"Observer {id(self)} connection closed.")
stop_event_processing = True
self.logger.warning(f"Observer {id(self)} exiting event processing.")
# Detach this observer from the model
self.model.detach_observer(self)
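# Illustrative note (not part of the original code): each string yielded by Observer.get_event()
# already follows the server-sent-events wire format built above, e.g.
#   "id:42\nevent: tree.update\ndata: {\"xy\": \"1\"}\n\n"
# so a web layer can stream the generator directly to the client; the concrete framework is an assumption.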
class Model:
nodeTemplate = {"id": None, "name": None, "type": "folder", "parent": None, "children": [], "backRefs": [],"forwardRefs":[],"value":None}
def __init__(self):
"""
initialize an empty Model object, it will contain the root Node as folder with Id "0"
        during the initialization, also the plug-ins (all files in the ./plugins folder) are loaded:
all templates and functions are imported
a model holds all modelling information and data to work on
"""
self.version = 0.1
self.model = {"1":{
"name":"root",
"type":"folder",
"children":[],
"parent":"0",
"id":"1",
"backRefs":[],
"forwardRefs":[],
"version":self.version
}}
        self.disableObserverCounter = 0 # a counting semaphore (under manual lock) for disabling notifications: if zero, notify_observers is active, otherwise not
self.__init_logger(logging.DEBUG)
self.globalIdCounter=1 # increased on every creation of a node, it holds the last inserted node id
self.idCreationHash = True # if this is true, we create the id per hash, not per counter
self.ts = TimeSeriesTable()
self.functions={} # a dictionary holding all functions from ./plugins
self.templates={} # holding all templates from ./plugins
self.lock = threading.RLock()
self.executeFunctionRunning = False # set to true, makes sure only one functions runs at a time
        self.objectClasses = {} # a dictionary holding all object classes from ./plugins
self.import_default_plugins()
self.differentialHandles ={} # containing model_copy entries to support differential queries
self.diffHandleCounter = 0 # used only for debugging
self.differentialHandlesMaxPerUser = 10
self.currentModelName = "emptyModel" # the current name of the model
self.modelUpdateCounter = 0 #this is for the tree observer, on any change, we update the counter
self.observerStatus = {} # a dict holding the key = observerid and value : the needed status of an observer processing
self.executionQueue = Queue()
self.observers = []
self.sse_event_id = 1
self.start_function_execution_thread()
def __del__(self):
self.functionExecutionRunning = False # stop the execution thread of functions
def __init_logger(self, level):
"""setup the logger object"""
self.logger = logging.getLogger("Model-"+'%08x' % random.randrange(16 ** 8))
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
logfile = logging.FileHandler("./log/model.log")
logfile.setFormatter(formatter)
self.logger.addHandler(logfile)
self.logger.setLevel(level)
def __get_id(self, id):
"""
Args:
id (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
or a "fancy" path mixed like "1000.min" where 1000 is a node id, only the first is allowed as Nodeid, the followings are names
Returns:
(string): the node id as string
None if not found
"""
if id in self.model:
return id
#maybe a browsepath?
try:
names = id.split('.')
if names[0]=="root":
names = names[1:]
actualSearchId = "1"
elif names[0] in self.model:
#self.logger.debug(f"fancy browsepath {names}")
actualSearchId = names[0]
names = names[1:]
else:
return None
except:
return None
#now we start at root
for name in names:
nextSearchId = None
for childId in self.model[actualSearchId]["children"]:
if self.model[childId]["name"] == name:
#this is a match
nextSearchId = childId
break
if not nextSearchId:
return None
#we found it, go deeper now
actualSearchId = nextSearchId
return actualSearchId
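    # Illustrative examples for the descriptor forms accepted above (values are assumptions, kept as comments):
    #   "root.myfolder.myvariable"   # a browse path starting at root
    #   "1234"                        # a plain node id
    #   "1234.min"                    # a "fancy" path: a node id followed by child names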
def get_node(self,desc):
""" instantiate a Node() object on the node given as desc
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
Returns:
(Node()): a node object of the given node
None if not found
"""
with self.lock:
id = self.__get_id(desc)
if id:
return Node(self,id)
def find_node(self,search,matchProperty={}):
"""
        the search is a match pattern for the browse path, we return the first match;
        matchProperty optionally requires the given key/value pairs to be present on the node
"""
with self.lock:
for id in self.model:
if search in self.get_browse_path(id):
                    if matchProperty!={}:
                        #only return the node if all requested properties are present and match
                        if all(k in self.model[id] and self.model[id][k] == v for k, v in matchProperty.items()):
                            return Node(self,id)
                    else:
                        return Node(self,id)
return None
def find_nodes(self,search,matchProperty={}):
"""
the search is a match pattern for the path, we return all matches as nodes
"""
found = []
with self.lock:
for id in self.model:
if search in self.get_browse_path(id):
                    if matchProperty!={}:
                        #only collect the node if all requested properties are present and match
                        if all(k in self.model[id] and self.model[id][k] == v for k, v in matchProperty.items()):
                            found.append(Node(self,id))
                    else:
                        found.append(Node(self,id))
return found
def get_node_info(self,desc,includeLongValues=True):
"""
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
includeLongValue if true, we include values for columns and files
Returns:
(dict): a dictionary holding all properties of the node includin references and children
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
#we do not include values of columns and files
if self.model[id]["type"] in ["column","file","timeseries"]:
if includeLongValues:
return copy.deepcopy(self.model[id])
else:
return {k:v for k,v in self.model[id].items() if k!="value"}
elif self.model[id]["type"]== "object":
return {k: v for k, v in self.model[id].items() if k != "object"} # don't take the "object" key
else:
#take all
return copy.deepcopy(self.model[id])
def __get_node_with_children(self,id,nodes,includeForwardRefs=True):
"""
recursive helper for get_branch
"""
if self.model[id]["type"] in ["file","column","timeseries"]:
#we do not take these values
nodes[id]={k:v for k,v in self.model[id].items() if k!="value"} # copy the whole but leave out the value
elif self.model[id]["type"] == "referencer":
nodes[id] = self.model[id]
if includeForwardRefs:
#for referencers, we take the direct targets
for targetId in self.model[id]["forwardRefs"]:
if self.model[targetId]["type"] in ["file", "column","timeseries"]:
# we do not take these values
                        target = {k: v for k, v in self.model[targetId].items() if k != "value"} # copy the target node but leave out the value
else:
target = copy.deepcopy(self.model[targetId])
#xxx todo, we might take the wrong backrefs with us, also these target nodes might not have their parent here
nodes[targetId]=target
else:
nodes[id]=self.model[id]
for child in self.model[id]["children"]:
nodes.update(self.__get_node_with_children(child,nodes,includeForwardRefs))
return nodes
def get_branch(self,desc,includeRoot=True,includeForwardRefs=True):
"""
get a branch of the model starting from desc including all children excluding:
columns
files
for referencers, we do not follow deep search for leaves, we just include the first level referenced nodes
            referencers pointing to nodes that are not part of the branch will also be included
Returns:
a list of nodedicts that can be used as a full valid model again
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
nodes = {}
nodes.update(self.__get_node_with_children(id,nodes,includeForwardRefs))
#now we also need all nodes to the desc
if includeRoot:
while self.model[id]["parent"]!="0":
                    #the parent is valid so take it, we don't make further checks for files and others
parentId = self.model[id]["parent"]
parentNode = copy.deepcopy(self.model[parentId])
parentNode["children"]=[id] # the other side-children are not of interest
nodes.update({parentId:parentNode})
id = self.model[id]["parent"]
return copy.deepcopy(nodes)
def __get_node_with_children_pretty(self,id,depth = None,ignore = []):
"""
recursive helper for get_branch_pretty
args:
nodes: the nodes so far
"""
#t=utils.Profiling(f"id {self.get_browse_path(id)}, ignore = {ignore}")
result = {}
node = self.model[id]
#create my properties
props = {k: copy.deepcopy(v) for k, v in node.items() if k not in ["value", "backRefs", "children"]}
if node["type"] not in ["file", "column","timeseries"]:
# we also take the value then
props["value"] = copy.deepcopy(node["value"])
if node["type"] == "referencer" and (depth is None or depth>0):
#tt = utils.Profiling("get leaves")
leaves = self.get_leaves_ids(id)
#print(tt)
#tt.start("get leaves data")
forwards = [self.get_browse_path(leaf) for leaf in leaves]
props["leaves"]=forwards
#tt.lap("1")
props["targets"] = [self.get_browse_path(id) for id in self.model[id]["forwardRefs"]]
props["leavesIds"]=leaves
props["leavesValues"] = [self.get_value(id) if self.model[id]["type"] not in ["file","column","timeseries"] else None for id in leaves]
#tt.lap("2")
validation = []
props["leavesProperties"]={}
for id in leaves:
prop = self.get_node_info(id,includeLongValues=False)
if "validation" in prop:
validation.append(prop["validation"])
else:
validation.append(None)
props["leavesProperties"][id]=prop
props["leavesProperties"][id]["browsePath"]=self.get_browse_path(id)
#tt.lap("3")
props["leavesValidation"] = validation
#print(tt)
#make sure we have the browsepath on board
if "browsePath" not in props:
props["browsePath"]=self.get_browse_path(id)
result[".properties"]=props
if depth is None or depth>0:
#now the children
nextDepth = None
if depth is not None:
nextDepth = depth -1
for childId in node["children"]:
childPath = self.get_browse_path(childId)
if any([ignoreName in childPath for ignoreName in ignore]):
#self.logger.debug(f"ignore {childPath}")
pass
else:
result[self.model[childId]["name"]]=self.__get_node_with_children_pretty(childId,nextDepth,ignore)
#print(t)
return result
def get_branch_pretty(self,desc,depth=None,ignore = []):
"""
get a branch in the form
"child1":{"child3":... ".type":, ".value"
"child2":{
        the properties occur in ".property" style, the children are direct entries
        we only use names
        for the referencers, the ".leaves" are the resolved leaves with full path: ["root.folder1.target2","root.variable.bare", ...]
Args:
desc [string] the root node to start from
depth [int] the depth to look into
"""
with self.lock:
#p=utils.Profiling("get_branch_pretty")
id = self.__get_id(desc)
if not id: return None
res = self.__get_node_with_children_pretty(id,depth,ignore)
#self.logger.debug(p)
return res
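    # Illustrative shape of the returned structure (assumed names, kept as comments): children become plain keys,
    # a node's own attributes live under the ".properties" key, e.g.
    #   {"myfolder": {"myvar": {".properties": {...}}, ".properties": {...}}, ".properties": {...}}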
def get_node_with_children(self,desc):
""" retrieve node information including children of the first level
Args:
desc (string): give a browsepath ("root.myfolder.myvariable") or a nodeId ("10")
Returns:
(Node()): a node object of the given node including the browsepath
None if not found
"""
with self.lock:
id = self.__get_id(desc)
if not id: return None
response = copy.deepcopy(self.model[id])
response["browsePath"]=self.get_browse_path(id)
if response["children"]!=[]:
children =[]
for childId in response["children"]:
childInfo = copy.deepcopy(self.model[childId])
childInfo["browsePath"]=self.get_browse_path(childId)
children.append(childInfo)
response["children"]=children
return response
def get_models(self):
"""
get the available model files from the disk under /models
: Returns: a list of strings
"""
try:
mydir = myGlobalDir
os.chdir(mydir) # to enable import easily
files = os.listdir(mydir + '/models')
# take only the ones with '.json, but cut the '.model.json' extension
models = [f.split('.model')[0] for f in files if f.endswith(".json")]
return models
except Exception as ex:
self.logger.error("Model.get_models() failed "+str(ex))
return []
def get_info(self):
"""
get some information about the model
Returns: (dict) key value pairs on information of the model,
"""
return {"name":self.currentModelName}
def import_plugins_from_directory(self, plugin_directory: str, check_file_marker = True):
""" find all plugins from plugin_directory.
take from there the templates from the files and the functions
Args:
check_file_marker: if set to True, we expect a "#21datalabplugin" string in the first line
"""
if plugin_directory not in sys.path:
sys.path.append(plugin_directory) # for the importlib to find the stuff
plugin_filenames = glob.glob(os.path.join(plugin_directory, '**/*.py'), recursive=True)
for fileName in plugin_filenames:
if fileName.startswith('__'):
continue # avoid __pycache__ things
#we need to check if extra plugins have the "#21datalabplugin
if check_file_marker:
absolutePath = os.path.join(myGlobalDir,fileName)
f = open(absolutePath,"r")
firstLine = f.readline()
f.close()
if firstLine != "#21datalabplugin\n":
continue
filename_relative = os.path.relpath(fileName, plugin_directory)
moduleName = os.path.splitext(filename_relative)[0].replace(os.path.sep, '.')
self.logger.info(f"import plugin lib {moduleName}")
module = importlib.import_module(moduleName)
            module = importlib.reload(module) # if we change an already imported module, python would use the cached version, so reload here to always get the latest
#now analyze all objects in the module
for objName in dir(module):
if objName.startswith('__'):
continue # these are python generated info objects, we don't want them
element = getattr(module,objName)
if type(element) is dict:
#this is a template information
self.templates[moduleName+"."+objName]=copy.deepcopy(element)
elif (inspect.isclass(element)):
newClass = {"module":module,"class":element}
self.objectClasses[moduleName + "." + objName] = newClass
elif callable(element):
#this is a function, get more info
newFunction = {"module":module, "function":element}
self.functions[moduleName+"."+objName]=newFunction
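    # Illustrative sketch of a minimal plugin file (assumed content, kept as comments):
    #   #21datalabplugin                  <- required first line when check_file_marker is True (extra plugin directories)
    #   myTemplate = {"mynode": {"type": "folder", "children": {}}}   # module-level dicts become templates
    #   def myFunction(functionNode):                                  # module-level callables become functions
    #       return True
    # module-level dicts, classes and callables are picked up as templates, objectClasses and functions respectively.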
def import_default_plugins(self):
""" find all plugins (= all .py files in the ./plugin folder
take from there the templates from the files and the functions
don't check them for #21datalabplugin marker
this function is execution on startup of the model
"""
self.import_plugins_from_directory(os.path.join(myGlobalDir, 'plugins'),check_file_marker=False)
def get_id(self,ids):
""" convert a descriptor or a list into only ids (which can be used as entry to the model dictionary
Args:
ids (string, list(string)): a single or list of strings containing either and id ("101") or browsepath ("root.myfolder.myvar")
Returns:
a list(id) or id as string
"""
with self.lock:
if type(ids) == type(list()):
newList = []
for id in ids:
newList.append(self.__get_id(id))
return newList
elif type(ids) == type(dict()):
newDict = {}
for oldId in ids:
id = self.__get_id(oldId)
newDict[id]=ids[oldId] #also copy the value
return newDict
else:
#assume its scalar
return self.__get_id(ids)
def get_browse_path(self,desc):
"""
Args:
desc(string): a node id or browsepatch
Returns:
(string) a browsepath
"""
with self.lock:
id = self.get_id(desc)
if not id in self.model:
return None
path = self.model[id]["name"]
while 1:
id = self.model[id]["parent"]
if id =="0":
break
else:
path = self.model[id]["name"]+"."+path
return path
def push_nodes(self,nodeDicts):
"""
        push a ready nodedict into the model
this is a dangerous function as it does not adjust references, parent/child relations whatsoever
you must take care of that yourself
"""
for nodeDict in nodeDicts:
self.logger.warning(f"pushing node {nodeDict['id'], nodeDict['name']}")
self.model[nodeDict["id"]]=copy.deepcopy(nodeDict)
self.__notify_observers([],None) # just trigger the treeupdate for now
#xxx todo notify!
def create_node(self,parent="root",type="folder",value=None,name="newNode",properties={}):
"""
create a node inside the model by giving several infos
Args:
parent: a descriptor (browsepath or id) of the parent
type: the type of the node
value: (optional) give a value for the node
name(string): a name of the node, must be unique under the parent
properties (dict): a dictionary containing further key-values to be placed into the node as properties
Returns:
(string) nodeid,
            None if there was a problem during creation
"""
#check if parent exists
with self.lock:
parentId = self.get_id(parent)
if not parentId:
return None
#check if same name existst already
newpath = self.get_browse_path(parent)+"."+name
if self.get_id(newpath):
            #we found it, it exists already, so we can't create it
return None
# we can create this node
if self.idCreationHash == True:
newId = str((random.randrange(2**64))) # a 64 bit random value
else:
self.globalIdCounter += 1
newId = str(self.globalIdCounter)
newNode = copy.deepcopy(self.nodeTemplate)
newNode.update({"id":newId,"name":name,"type":type,"parent":parentId})
if properties !={}:
newNode.update(properties)
if value != None:
newNode["value"]=value
self.model[parentId]["children"].append(newId)
self.model[newId] = newNode
if newNode["type"] == "timeseries":
self.time_series_create(newId)
if newNode["type"] == "eventseries":
self.event_series_create(newId)
if newNode["type"] == "object":
if "class" not in newNode:
newNode["class"]=None
if "autoReload" not in newNode:
newNode["autoReload"] = False # set this to true means: on a "instantiate object, we reload the module
self.__notify_observers(parentId,"children")
return newNode["id"]
def create_node_from_path(self,path,properties={"type":"variable"}):
"""
        create a node from a given path, all intermediate nodes of the path that do not yet exist are created as folder type
        Args:
            path(string): the path of the node to be created
properties(dict): the properties of the node
example:
create_node_from_path("root.myfolder.something.thisvar")
this will create myfolder as folder, something as folder, thisvar as variable and will also
set all hierarchies correctly
Returns:
(string) the nodeid created or
None if problem during creation
"""
currentNode = "root" #root
with self.lock:
for node in path.split('.')[1:-1]:
if not self.__get_id(currentNode+'.'+node):
#this one does not exist, so make it
self.create_node(currentNode,name=node)
currentNode += '.'+node
return self.create_node(parent=currentNode,name=path.split('.')[-1],properties=properties)
def create_nodes_from_template(self,parent="root",template=[]):
"""
deprecated!! this is the old style of templates as lists, now it's a dict
Create a node from a template; a template is a list of node-dicts,
Args:
parent(string): descriptor of the parent node under which the nodes of the template should be created
template: a list of node dicts of the nodes to be creates, children are allowed as dict
Returns:
            (boolean) True for created, False for error
Example:
create_nodes_from_template(parent="root.myfolder",[{"name":"myvariable1","type":"variable"},
{"name":"myfolder","type":"folder","children":[
{"name":"mysubvar","type":"variable"}]])
"""
with self.lock:
parentId = self.get_id(parent)
if not parentId:
return False
newNodeIds = [] #these must be corrected later
for node in template:
#we take all info from the nodes and insert it into the tree
nodeName = node["name"]
newNodeId = self.create_node(parentId,name=nodeName,properties=node)
newNodeIds.append(newNodeId)
#do we have "children per template syntax"?, then remove that property from the nodes and make more nodes
if "children" in self.model[newNodeId]:
savedChildren = copy.deepcopy(self.model[newNodeId]["children"])
self.model[newNodeId]["children"]=[] # empty out
for child in savedChildren:
newChildId = self.create_node(newNodeId,name=child["name"],properties=child)
newNodeIds.append(newChildId)
#now correct missing stuff
for nodeId in newNodeIds:
if self.model[nodeId]["type"]== "referencer":
# convert the path of references into an id: get the parent path, add the tail, convert to id
forwardReferences =self.model[nodeId]["forwardRefs"] #make a copy, we'll delete this
self.model[nodeId]["forwardRefs"]=[]
parentPath = self.get_browse_path(self.model[nodeId]["parent"])
for forwardRef in forwardReferences:
forwardPath = parentPath+forwardRef
self.add_forward_refs(nodeId,[forwardPath])
return True
def __create_nodes_from_path_with_children(self,parentPath,nodes):
"""
recursive helper function for create_template_from_path
        we build all nodes under the parentPath on this level and then the children
we return a list of all created node ids
"""
createdNodes = []
for node in nodes:
newModelNode = {}
for k, v in node.items():
if k not in ["children", "parent", "id", "browsePath"]: # avoid stupid things
newModelNode[k] = v
newId = self.create_node_from_path(parentPath+'.'+newModelNode["name"],newModelNode)
if newId:
createdNodes.append(newId)
if "children" in node:
createdNodes.extend(self.__create_nodes_from_path_with_children(parentPath+'.'+newModelNode["name"],node["children"]))
return createdNodes
def create_template_from_path(self,path,template):
"""
Create a template from a path given, the template contains one or more nodes
the path must not yet exist!
Args:
path(string): the path under which the template will be placed. the template always contains
a root node, this will be renamed according to the path
Returns:
            (boolean) True for created, False for error
"""
with self.lock:
#first create the template root node
#we rename the template according to the path requested
template["name"]=path.split('.')[-1]
parentPath = '.'.join(path.split('.')[:-1])
newNodeIds = self.__create_nodes_from_path_with_children(parentPath,[template])
self.logger.debug(f"create_template_from_path, new nodeids: {newNodeIds}")
#now adjust the references of new nodes and of the ones that were there
for newNodeId in newNodeIds:
if "references" in self.model[newNodeId]:
#we must create forward references
for ref in self.model[newNodeId]["references"]:
# now there are two options:
# the given path is of the form templatename.levelone.leveltwo inside the template
# we replace the "templatename" with the path name the template was given
# or the path is absolute id or browsepath, then we don't modify
splitted = ref.split('.')
if len(splitted) == 1 or splitted[0]=="root":
targetPath = ref
else:
targetPath = parentPath+'.'+template['name']+'.'+'.'.join(ref.split('.')[1:])
self.add_forward_refs(newNodeId,[targetPath])
del self.model[newNodeId]["references"] # we remove the reference information from the template
def get_templates(self):
"""
give all templates loaded
Returns: a dict with entries containing the full templates
"""
with self.lock:
return copy.deepcopy(self.templates)
def add_forward_refs(self,referencerDesc,targets,allowDuplicates = True):
"""
        adding forward references from a referencer to other nodes, the forward references are appended to the list
of forward references of the referencer node
references to oneself are not allowed
Args:
            referencerDesc (string): descriptor of the referencer node from which we want to add forward references
            targets (list(descriptors)): list of node descriptors to which we want to add forward refs
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(referencerDesc)
if not fromId:
self.logger.error("can't set forward ref on "+str(referencerDesc))
return False
if type(targets) is not list:
targets = [targets]
if targets==[]:
return True
if not self.model[fromId]["type"]=="referencer":
self.logger.error("can't set forward ref on "+str(referencerDesc)+ "is not type referencer, is type"+self.model[fromId]["type"])
return False
for target in targets:
toId = self.get_id(target)
if not toId:
continue
if toId == fromId:
continue
if not allowDuplicates:
if toId in self.model[fromId]["forwardRefs"]:
continue # ignore this forwards ref, we have it already
self.model[toId]["backRefs"].append(fromId)
self.model[fromId]["forwardRefs"].append(toId)
self.__notify_observers(fromId,"forwardRefs")
return True
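    # Illustrative usage sketch (assumed paths, kept as comments): references always go from a referencer node
    # to its targets; the matching back references on the targets are maintained automatically.
    #   model.add_forward_refs("root.logic.inputs", ["root.machine.temperature", "root.machine.pressure"])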
def lock_model(self):
self.lock.acquire()
def release_model(self):
self.lock.release()
def get_model(self):
"""
        Returns: the full deepcopy of the internal model object (a dict of node dictionaries keyed by node id)
"""
with self.lock:
#also add the browsepath to all nodes
for nodeid in self.model:
self.model[nodeid]["browsePath"]=self.get_browse_path(nodeid)
return copy.deepcopy(self.model)
def get_model_for_web(self,getHash=False):
"""
        Returns: the full deepcopy of the internal model object (a dict of node dictionaries keyed by node id)
        but leaving out the column values (this can be a lot of data)
        and the file values (files are binary or strings with big size, typically serialized ML-models)
        for files and columns, we either return a string like "len 12344" or a sha1 hash value of the data
"""
model = {}
p=utils.Profiling("get_model_for_web")
with self.lock:
for nodeId, nodeDict in self.model.items():
if nodeDict["type"] in ["column","file","timeseries","eventseries"]:
# with columns we filter out the values
node = {}
for nk, nv in nodeDict.items():
if nk == "value":
try:
if not getHash:
node[nk] = "len " + str(len(nv))
else:
start = datetime.datetime.now()
hash = hashlib.sha1(nv.tobytes())
node[nk] = hash.hexdigest()
self.logger.debug(f"hashed {nodeDict['name']} in {(datetime.datetime.now()-start).total_seconds()} hash:{node[nk]}")
except:
node[nk] = "None"
else:
node[nk] = copy.deepcopy(nv) # values can be list, dict and deeper objects
model[nodeId] = node
elif nodeDict["type"]=="object":
node={k:v for k,v in nodeDict.items() if k!="object"}
model[nodeId]=node
else:
#this node is not a colum, can still hold huge data
model[nodeId] = copy.deepcopy(nodeDict) # values can be list, dict and deeper objects nodeDict
model[nodeId]["browsePath"] = self.get_browse_path(nodeId) #also add the browsepath
self.logger.debug(f"{p}")
return model
def remove_forward_refs(self,sourceDesc,targetDescriptors = [], deleteDuplicates=False):
"""
remove forward references from a referencer, this also removes the backreference from the target
Args:
sourceDesc: the descriptor of the referencer node
targets: a list of descriptors, if missing we delete all
            deleteDuplicates: if set true, we delete all references to a target if we have more than one reference
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(sourceDesc)
if not fromId:
return False
if not self.model[fromId]["type"] == "referencer":
return False # only for referencers
if targetDescriptors == []:
targets = self.model[fromId]["forwardRefs"].copy()
else:
targets = self.get_id(targetDescriptors)
if targets == []:
return True# nothing to do
for toId in targets:
if not toId:
continue # we skip Nones coming from the get_id
if deleteDuplicates:
# maybe multiple entries
while toId in self.model[fromId]["forwardRefs"]: # maybe multiple entries
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
else:
# we delete only one entry
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
self.__notify_observers(fromId,"forwardRefs")
return True
def remove_forward_ref(self,sourceDesc,targetDesc):
"""
remove a forward reference from a referencer, this also removes the backreference from the target
Args:
sourceDesc: the descriptor of the referencer node
Returns:
True/False for success
"""
with self.lock:
fromId = self.get_id(sourceDesc)
toId = self.get_id(targetDesc)
if not fromId or not toId:
return False
if not self.model[fromId]["type"]=="referencer":
return False # only for referencers
try:
self.model[fromId]["forwardRefs"].remove(toId)
self.model[toId]["backRefs"].remove(fromId)
self.__notify_observers(fromId, "forwardRefs")
return True
except:
return False
def remove_back_ref(self,sourceDesc,targetDesc):
"""
        remove a backwards reference from any node to a referencer, this also removes the forward reference from the target
actually, this function is just a helper. Normally, we only talk about "forward references";
each forward reference also creates a backwards reference in the model, but this is just for internal look up speed
the reference here is targetDesc -> (forwardRef) -> sourceDesc
Args:
sourceDesc: the descriptor of the node that holds a backwards reference
targetDesc: the descriptor of the node that holds the forward reference
Returns:
True/False for success
"""
with self.lock:
return self.remove_forward_ref(targetDesc,sourceDesc)
def add_property(self,nodeDesc,property,value):
"""
        add an arbitrary property entry for a node; a node is a key-value store, a property is a key with a value
Args:
nodeDesc: the descriptor of the node
property: the key to be created on the node
value: the value to be stored for this property
Returns:
True for create
False for node not found or if the property already exists
"""
with self.lock:
id = self.get_id(nodeDesc)
if not id:
return False
if property in self.model[id]:
return False # have this property already
self.model[id][property]=value
self.__notify_observers(id, property)
return True
def set_properties(self,properties={},nodeDesc=None):
"""
        changes an arbitrary set of properties given by the dict or adds them if not existing; some properties are not allowed here:
        children, parent, forward and backward refs; allowed are all others including type, name, value
        Args:
            nodeDesc: the descriptor of the node, optional, can also be given as browsePath or id in the properties dict
            properties: the new or changed properties
Returns:
True for done
False for node not found or if the property already exists
"""
with self.lock:
if nodeDesc:
id = self.get_id(nodeDesc)
elif "id" in properties:
id = properties["id"]
elif "browsePath" in properties:
id = self.get_id(properties["browsePath"])
else:
self.logger.error("set properties is missing id ")
return False
if not id:
return False
notificationProperties = []
for k,v in properties.items():
if k in ["id","browsePath","children","parent","forwardRefs","backRefs"]:
continue # we ignore these entries
self.model[id][k]=v # overwrite or set new
notificationProperties.append(k)
self.__notify_observers(id,notificationProperties)
return True
def find_all_children_recursive(self,nodeIds):
""" find all children recursively, give a list of """
with self.lock:
children = []
for id in nodeIds:
if self.model[id]["children"]:
children.extend(self.find_all_children_recursive(self.model[id]["children"]))
children.append(id)
return children
#delete node and all subnodes
def delete_node(self,desc):
"""
delete a node and all its recursive children;
flow:
1) make a list of all nodes to be deleted
            2) rip off all references to/from the nodes to be deleted
            3) delete all nodes
            4) notify observers about the children change on the parents of the deleted nodes
desc(string): the descriptor of the node
Returns:
True for success
False for node not found
"""
with self.lock:
id = self.get_id(desc)
if not id:
return False
nodesToDelete = self.find_all_children_recursive([id])
self.logger.debug(f"delete nodes {nodesToDelete}")
childNotify = []
#first rip off all references
for id in nodesToDelete:
forwards = self.model[id]["forwardRefs"].copy()
backwards = self.model[id]["backRefs"].copy()
for forward in forwards:
self.remove_forward_ref(id,forward) # this will also trigger observers
for backward in backwards:
self.remove_back_ref(id,backward) # this will also trigger observers
            #now delete the actual nodes
for id in nodesToDelete:
parentId = self.model[id]["parent"]
if parentId in self.model:
self.model[parentId]["children"].remove(id)
childNotify.append(parentId)
if self.model[id]["type"]=="timeseries":
self.time_series_delete(id)
del self.model[id]
#now notify only those who still exist
goodNotify=[]
for id in childNotify:
if id in self.model:
goodNotify.append(id)
if goodNotify:
self.__notify_observers(goodNotify, "children") # make ONE call for the observers
return True
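    # Usage sketch (hypothetical): deleting a branch removes the node, its whole subtree,
    # all references to/from those nodes and the time series data of "timeseries" nodes:
    #   model.delete_node("root.obsoleteFolder")   # -> True, observers get ONE "children" notification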
    # if desc.type is a var or function, we just set the value
    # if it is a "timeseries", we set a column in a table, padded if needed
def set_value(self,desc,value):
"""
set the value property of a node, if the node does not have a value property yet, it is created here
Args:
desc(string): node descriptor
value (any): any value to be stored
"""
with self.lock:
id = self.get_id(desc)
if not id: return None
#convert if table:
if self.model[id]["type"] == "column":
value = numpy.asarray(value,dtype=numpy.float64)
self.model[id]["value"] = value
self.__notify_observers(id,"value")
return True
def get_value(self,desc):
"""
read out the "value" property of a node
Args:
desc(string): the node that holds the value
Returns:
the value
None if the node has no "value" property
"""
with self.lock:
id = self.get_id(desc)
if not id: return None
if self.model[id]["type"] == "timeseries":
values = self.time_series_get_table(id)
if values:
return self.time_series_get_table(id)[id]["values"]
else:
return None
if "value" in self.model[id]:
return copy.deepcopy(self.model[id]["value"])
else:
return None
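    # Usage sketch (hypothetical): plain nodes return a deep copy of their "value" property,
    # "timeseries" nodes return their stored values, unknown nodes return None:
    #   speed = model.get_value("root.machine1.speed")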
def __copy_node(self,id,resolveChildren=False):
"""
get a copy of a node, we don't create a node in the model here!
copy node with all properties, if the node is a "column", we don't copy the value
if the resolveChildren is set to true, we also copy the direct children
the copied node can't be used to create a node, as it is the copy of an existing node!
Args:
id (string): the node id to be copied
resolveChildren (bool): False to not copy the children (the new node has no children)
True to copy-create also the children
Return:
(dict) the node
"""
newNode = {}
for key in self.model[id]:
if key == "value" and self.model[id]["type"]in ["column","file","timeseries"]:
newNode["value"]=None
elif key == "children" and resolveChildren:
#we also copy the children
newNode["children"]=[]
for childId in self.model[id]["children"]:
childNode = self.__copy_node(childId)
newNode["children"].append(childNode)
else:
newNode[key]=copy.deepcopy(self.model[id][key])
return newNode
def __get_targets(self,id):
"""
#this is a recusive helper function for the get_leaves function
"""
targets=[]
if self.model[id]["type"] == "referencer":
for targetId in self.model[id]["forwardRefs"]:
targets.extend(self.__get_targets(targetId))
elif self.model[id]["type"] == "folder":
for targetId in self.model[id]["children"]:
targets.extend(self.__get_targets(targetId))
else:
addNode = self.__copy_node(id,resolveChildren=True)
addNode["browsePath"]=self.get_browse_path(id)
targets = [addNode]
return targets
def get_leaves_ids(self,desc):
"""
get the list of ids of the leaves, see get_leaves()
Returns:
a list of ids of the leaves
"""
        leaves = self.get_leaves(desc) # a list of node dicts
        leafIds = [leaf["id"] for leaf in leaves]
        return leafIds
def get_leaves(self,desc,allowDuplicates=False):
"""
this function returns a list of dicts containing the leaves where this referencer points to
this functions works only for nodes of type "referencer", as we are following the forward references
leaves are defined as following:
1) all nodes that are listed under the forward references and which are not of type referencer or folder
2) if nodes pointed to are referencer, the targets are again analyzed
3) if a node pointed to is a folder, all children of the folder are taken which are not referencer or folder themselves
folders and referencers inside the folder are not taken into account
doing so, hierarchies of referencers are unlimited, hierarchies of folders are only of depth 1
Returns:
all node dicts which are considered leaves as a list of node dicts
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
targets=self.__get_targets(id)
if targets and targets[0]["id"] == id:
#this can happen if the node is not a folder, ref and had no children
targets.pop(0)
#before we return, we remove duplicates if wanted
if targets and allowDuplicates == False:
reducedTargets = []
ids = []
for t in targets:
if t["id"] in ids:
continue
reducedTargets.append(t)
ids.append(t["id"])
return reducedTargets
else:
return targets
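    # Usage sketch (hypothetical): with a referencer "root.widget.columns" pointing at a folder
    # that contains two column nodes, the folder is resolved one level deep:
    #   model.get_leaves("root.widget.columns")      # -> [ {nodedict of col1}, {nodedict of col2} ]
    #   model.get_leaves_ids("root.widget.columns")  # -> list of the corresponding node ids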
def __get_referencer_parents(self,ids):
backRefs = []
#we look back from this node
for id in ids:
if self.model[id]["type"] == "referencer":
#we take this one in
backRefs.append(id)
#plus we look further up
thisBackRefs = self.model[id]["backRefs"]
if thisBackRefs:
backRefs.extend(self.__get_referencer_parents(thisBackRefs))
return backRefs
def get_referencers_old(self,desc):
"""
find the referencers pointing to a node via the "leaves algorithm"
initially, we take the parent and the backref referencers
Args:
deep: we support the reverse leave-algorithms including any depth of children level after the last referencer,
e.g. a leaves-path of referencer -> referencer -> nodes -> child ->child is a valid match
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
ids = [self.model[id]["parent"],id]
if "0" in ids:
ids.remove("0")
referencers = self.__get_referencer_parents(ids)
return referencers
def get_referencers(self,desc,deepLevel = 1):
"""
find the referencers pointing to a node via the "leaves algorithm"
initially, we take the parent and the backref referencers
Args:
deepLevel: we support the reverse leave-algorithms including any depth of children level after the last referencer,
e.g. a leaves-path of referencer -> referencer -> nodes -> child ->child is a valid match
we give the number of parent levels to include in the search at the leaves
default is 1, so the node itself and its parent
"""
with self.lock:
id = self.__get_id(desc)
if not id:return None
if not deepLevel:
ids = [self.model[id]["parent"],id]
else:
ids = self._get_parents(id,deepLevel)
if "0" in ids:
ids.remove("0")
referencers = self.__get_referencer_parents(ids)
return referencers
def _get_parents(self,id,deepLevel = -1):
ids = []
while id != "1" and deepLevel >= 0:
ids.append(id)
deepLevel -=1
id = self.model[id]["parent"]
return ids
#get a table with values like in the table stored, start and end times are optional
# if start, end not given, then we get the full table with no postprocessing at all
def get_timeseries_table_old(self,variables,startTime=None,endTime=None,noBins=None,agg="sample",includeTimeStamps=None,includeBackGround=None):
with self.lock:
variables = self.get_id(variables)
return self.timeSeriesTables.get_value_table(variables, startTime=startTime, endTime=endTime, noBins=noBins,
agg=agg,
includeTimeStamps=includeTimeStamps) # ,startTime,endTime)
'''
if startTime == None and endTime ==None:
#request the full table
variables = self.get_id(variables) # convert all to ids
return self.timeSeriesTables.get_value_table(variables,startTime=startTime,endTime=endTime,noBins=noBins,agg=agg,includeTimeStamps=includeTimeStamps)#,startTime,endTime)
else:
# this is a more details request, we will try to deliver the data in bins and with
# aggretation postprocessing
variables = self.get_id(variables) # convert all to ids, not browsepath
return self.timeSeriesTables.get_value_table(variables,startTime,endTime,noBins,agg,includeTimeStamps=includeTimeStamps)
'''
#used in the Node class, give a column variable or the table itself, return the nodeid of the time variable of that table
def find_table_time_node(self,desc):
with self.lock:
table = self.__find_table(self.get_id(desc))
if not table:
return None
pathToTimeIndex = self.get_browse_path(table)+".timeField"
timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id'] # this referencer must point to only one node
return timeColumnId
def find_table_node(self,desc):
"""
get the node id of a table giving a column node of the table as input
Args
desc[string]: a node descriptor of a column node belonging to the table
Returns:
the node id of the table node
"""
with self.lock:
return self.__find_table(desc)
def get_child(self,desc,childName):
"""
get a child based on the name given
Args:
desc: node descriptor of the node under which we look for children
name: the child name to look for
Returns:
a nodeid if we find the child with "name" under the desc or none if not found
:return:
"""
with self.lock:
nodeInfo = self.get_node_info(desc)
if nodeInfo:
for childId in nodeInfo['children']:
childInfo = self.get_node_info(childId)
if childInfo["name"] == childName:
return childId
return None
def get_children_dict(self,desc):
"""
create a dictionary with key= childName and value = nodedict
Args:
desc: the nodedescriptor
Returns:
a dict
"""
with self.lock:
childrenDic={}
id = self.get_id(desc)
if not id:
return None
for childId in self.model[id]["children"]:
child = self.get_node_info(childId)
childrenDic[child["name"]]=child
return childrenDic
def get_table_len(self,desc):
"""
get the current length of a table
Args:
desc: the node descriptor of type table
Returns:
the current length of the columns of the table, none if error
"""
with self.lock:
tableId = self.get_id(desc)
if not tableId: return None
if not self.model[tableId]["type"]=="table": return None
try:
columnid = self.get_child(tableId,"columns")
if not columnid: return None
columnIds = self.get_leaves_ids(columnid)
if columnIds:
return len(self.model[columnIds[0]]["value"])
            except Exception:
                return None
def get_timeseries_table(self,variables,startTime=None,endTime=None,noBins=None,agg="sample",includeTimeStamps=None,format="array",includeBackGround=None):
"""
get a time series table from variables. The table is returned as a list[list] object
all variables requested must be of type "column" and must belong to the same table:
all columns requested here must have a direct backreference to the same node of type "columns"
todo: also allow "columns" to point to folders or multiple hierarchies of referencing/folders
Args:
variables (list(nodedescriptors)): nodes to be part the data table requested (ordered!)
startime, endTime: the start and endtime of the table given as seconds since epoch
#we also allow the special case of endTime = 0 and startTime = -interval
# we also allow the special case of startTime given and end time= 0
noBins(int): the number of samples to be returned inside the table between start end endtime,
if None is given, we return all samples (rows) we have in the table and to not aggregate
agg(string): the aggregation function to be used when we downsample the data,
"sample": this means, we just pick out values (we sample) the data set, this is actually not an aggregation
includeTimesStampe (bool): currently ignored
includeBackGround (bool): currently ignored
Returns(dict)
key : value
"__time" : list of timestamps for the returned table in epoch seconds
"variable1": the list of float values of one of the requested variables
"""
with self.lock:
#first check if all requested timeseries are columns from the same table
vars = self.get_id(variables)
table = []
for var in vars:
if self.model[var]["type"] != "column":
self.logger.warn("requested time series but not column type")
return False
table.append(self.__find_table(var))
if len(set(table)) != 1 or set(table)== {None}:
self.logger.warning("not the same table")
return False
#get the time field, and make fancy indexing via numpy arrays
pathToTimeIndex = self.get_browse_path(table[0])+".timeField"
timeColumnId = self.get_leaves(pathToTimeIndex)[0]['id']
if startTime and endTime:
times = numpy.asarray(self.model[timeColumnId]["value"])
indices = numpy.where((times>=startTime) & (times<=endTime))[0]
#xxx todo find the right index
elif startTime and not endTime:
#special cases for [-startTime:] and [startTime:] requests
if startTime < 0:
#this is the special case that we take an interval from the end
endTime = self.model[timeColumnId]["value"][-1]# the last
                        startTime = endTime + startTime # as startTime is negative this is actually a subtraction
else:
#starttime is positive
pass
times = numpy.asarray(self.model[timeColumnId]["value"])
indices = numpy.where(times >= startTime)[0]
else:
indices = numpy.arange(0,len(self.model[timeColumnId]["value"])) ## all indices
#now resample the indices to have the right bins number
if noBins:
                varIndices = numpy.linspace(indices[0], indices[-1], noBins, endpoint=False, dtype=int)
else:
varIndices = indices
if format=="array":
result = []
for var in variables:
                    original = numpy.asarray(self.model[self.get_id(var)]["value"])[varIndices] # fancy indexing
data=original.tolist() # apply the selection with the indices list
result.append(data)
else:
result = {}
for var in variables:
                    original = numpy.asarray(self.model[self.get_id(var)]["value"])[varIndices] # fancy indexing
                    data = original.tolist() # apply the selection with the indices list
                    result[var]=data
                result["__time"]=numpy.asarray(self.model[timeColumnId]["value"])[varIndices].tolist()
return result
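    # Usage sketch (hypothetical): reading 300 resampled rows of two columns of the same table
    # for the last hour (a negative startTime means "interval from the end"):
    #   data = model.get_timeseries_table(["root.table.temp", "root.table.press"],
    #                                     startTime=-3600, endTime=None, noBins=300, format="dict")
    #   # -> {"root.table.temp": [...], "root.table.press": [...], "__time": [...]}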
def add_timeseries(self,blob,fast=False):
"""
add a dictionary of variables to a table, we check if the variables belong to the same table
also, times that come in as datetime object are converted to epoch seconds
Args:
blob (dict): a dictionary containing keys (node descriptors) and values (scalars)
Returns:
True/False for success
"""
with self.lock:
table = []
for key in blob:
id = self.get_id(key)
if not id:
self.logger.warn("add_timeseries count not find the variable:" + str(key))
return False
if self.model[id]["type"] != "column":
self.logger.warn("requested time series but not column type")
return False
table.append(self.__find_table(id))
if len(set(table)) != 1 or set(table) == {None}:
self.logger.warn("not the same table")
return False
#here, the request is parsed as ok, let's put the values
for key in blob:
id = self.get_id(key)
value = blob[key]
if type(self.model[id]["value"]) is not list:
self.model[id]["value"]=[]
#we auto-convert time stamps
if type(value) is datetime.datetime:
value = date2secs(value)
self.model[id]["value"].append(value)#finally put the value
#return the id of the table, give a column variable
def __find_table(self,desc):
"""
return the node id of the table, give a column variable
!! this has no lock, must be called under lock
Args:
desc(string): node descriptor of type column or the table itself
Returns:
the node id of the table to which the desc node belongs
"""
id = self.get_id(desc)
if not id: return False
if self.model[id]["type"] == "table":
return id
for ref in self.model[id]["backRefs"]:
if self.model[ref]["name"] == "columns":
return self.model[ref]["parent"]
return None
def ts_table_add_blob(self,dataBlob):
"""
this function add a data blob to an existing table, it accepts multiple values at once to speed up internals
Args:
dataBlob (dict or list(dict)): containing key:value pair with key=a descriptor of a column of one table
value: a scalar or list or numpy array of values
"""
if type(dataBlob) is list:
self.logger.error("currently no support for list blobs")
return None
with self.lock:
            #first find the table and decide for the type conversion
            tableId = None
            for key in dataBlob:
                if key != '__time':
                    tableId = self.__find_table(key)
                    break
            if not tableId:
                self.logger.error("can't find the table of "+str(list(dataBlob.keys())[0]))
                return False
            tableNode = self.get_node(tableId)
columnsType = numpy.float64 # this is the default
# make sure the time is there and convert it: we accept datetime objects, iso strings or floats seconds
# plus, the key will be the time node id afterwards
timeNode = tableNode.get_child("timeField").get_leaves()[0]
#try to find the time entry in the dataBlob, rename it to the timenode id
timeKeyOptions = ['__time',timeNode.get_browse_path(),timeNode.get_id()]
for timeKeyOption in timeKeyOptions:
if timeKeyOption in dataBlob:
                    dataBlob[timeNode.get_id()] = dataBlob.pop(timeKeyOption) # from now on the time field is keyed by its node id
break
if timeNode.get_id() not in dataBlob:
self.logger.error("time field entry missing")
return False
#now check if all are on the same table and convert the keys to node ids
variables = list(dataBlob.keys())
for var in variables:
if self.__find_table(var) != tableId:
self.logger.error("variables are not on the same table")
return False
id = self.get_id(var)
if id != var:
dataBlob[self.get_id(var)]=dataBlob.pop(var) # make new entry as nodeid
#now check the sizes of the incoming data and convert them to the requested type
inputSizes = set()
for key,value in dataBlob.items():
if key == timeNode.get_id():
#if we handle the time node, we might have to convert
if type(value) is list or type(value) is numpy.ndarray:
newValues = []
#newValues = numpy.asarray([],dtype=numpy.float64)
for val in value:
newValues.append(date2secs(val))
dataBlob[key] = numpy.asarray(newValues,dtype=numpy.float64) # write it back to the data
else:
#it is a scalar
dataBlob[key] = numpy.asarray([date2secs(value)],dtype=numpy.float64)
else:
if numpy.isscalar(dataBlob[key]):
dataBlob[key]=numpy.asarray([dataBlob[key]],dtype=columnsType) # make a list if it is scalar
else:
dataBlob[key]=numpy.asarray(dataBlob[key],dtype=columnsType) # if it is a numpy array already, numpy makes no copy
inputSizes.add(dataBlob[key].shape[0])
            if len(inputSizes)!=1:
                self.logger.error("incoming data has different lengths, can't handle this as the padding is unclear")
                return False
            # when we are here, we have converted all incoming data to numpy arrays, all belong to the same table
# and all have the same length, we are ready to put them inside
#print("through")
#now append them
return self.__ts_table_add_row(dataBlob,tableNodeId=tableId)
def __ts_table_add_row(self,dataBlob,tableNodeId=None,autoPad=True,pad=numpy.NaN):
"""
must be called under lock !!
this function accepts a dataBlob which is ready to be inserted, we don't make any more checks here
it must use variables from one table, it must contain data as numpyarrays
variables of the tables which are missing will be filled with pad if autoPad is true
"""
        if not tableNodeId:
            tableNode = self.get_node(self.__find_table(list(dataBlob.keys())[0]))
else:
tableNode = self.get_node(tableNodeId)
dataLen = dataBlob[list(dataBlob)[0]].shape[0]
columnNodes = tableNode.get_child("columns").get_leaves()
for columnNode in columnNodes:
id = columnNode.get_id()
if id in dataBlob:
#we add that one to the table
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = dataBlob[id]
else:
self.model[id]['value'] = numpy.append(self.model[id]['value'],dataBlob[id])
else:
#we must pad
self.loger.debug("we are padding "+id+" with % ",dataLen)
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]=numpy.full(dataLen,numpy.nan)
else:
self.model[id]['value'] = numpy.append(self.model[id]['value'],numpy.full(dataLen,numpy.nan))
return True
def append_table(self,blob,autocreate=True,autopad=True, timeSorted = False):
"""
this function accepts a dictionary containing paths and values and adds them as a row to a table
if autoPad is True: it is allowed to leave out columns, those will be padded with numpy.inf,
if autocreate is True: it is allowed to add unknown colums, those will be added automatically under the given name
Args:
blob(dict):
keys: node descriptors,
values: value to be appended to the table (scalar or list per variable is allowed
the times should be given in a variable ending with ".time"
if the table exists already and has another node for the time-values, then we take the .time values and put them on the timenode
autocreate(bool): if set to true and the nodes or table in the dict do not exist yet, we autocreate a table
autopad(bool) if set to true, we automatically pad values in an existing table if variables of the table are not part of the blob
doing so, we keep consistent lenght for all columns of a table
"""
#first check if we need to autocreate something, also check if we have multiple tables in play
with self.lock:
autocreates = []
tableId = None
columnsId = None
numberOfRows = None
for key in blob:
id = self.__get_id(key)
if not id:
if not autocreate:
self.logger.warn("appending table with unknown variables")
return None
else:
#we create this thing later
autocreates.append(key)
else:
#the id was found, let's find the right table
for ref in self.model[id]["backRefs"]:
if self.model[ref]["name"] == "columns":
#this is our table
if not tableId:
tableId = self.model[ref]["parent"]
columnsId = ref
numberOfRows = len(self.model[id]["value"])
else:
if tableId != self.model[ref]["parent"]:
self.logger.warn("mixed tables request")
return None
self.logger.debug("append table "+str(self.get_browse_path(tableId)))
if autocreates and autocreate:
#do we even have to create our table?
if not tableId:
#make a table structure based on the names given
tableName = autocreates[1].split('.')[1]+"_autotable"
tableId = self.create_node(parent="root",name=tableName,properties={"type":"table"})
columnsId = self.create_node(parent=tableId,name="columns",properties={"type":"referencer"})
timeId = self.create_node(parent=tableId, name="timeField", properties={"type": "referencer"})
numberOfRows=0
else:
#if we don't create the table, here is our timeId
timeReferencer = self.get_child(tableId, "timeField")
timeId = self.get_leaves_ids(timeReferencer)[0]
#we also then don't create any new time-field
autocreates = [path for path in autocreates if path[-5:]!=".time"]
self.logger.debug(f"table var autocreates: {autocreates}")
for path in autocreates:
id = self.create_node_from_path(path,properties={"type":"column"})
self.model[id]["value"]=numpy.full(numberOfRows,numpy.inf)
self.add_forward_refs(columnsId,[id])
if path.split('.')[-1]=="time":
#we just created the time field, we must also give the table struct the info
self.add_forward_refs(timeId,[id])
tableColumnIds = self.get_leaves_ids(columnsId) # a list of the ids of the columns
timeReferencer = self.get_child(tableId,"timeField")
timeId = self.get_leaves_ids(timeReferencer)[0]
timePath = None
for path in blob:
if path[-5:] == ".time":
timePath = path
if not timePath:
self.logger.error("no time path given")
return False
#now make arrays of all values
for k,v in blob.items():
if type(v) is list or type(v) is numpy.ndarray:
blob[k]=numpy.asarray(v,dtype=numpy.float64)
else:
blob[k] = numpy.asarray([v], dtype=numpy.float64)
valuesLen = len( blob[list(blob.keys())[0]] )
tableLen = len ( self.get_value(timeId))
if not timeSorted:
#just append
for path in blob:
if path.split('.')[-1]=="time":
id = timeId # we take the existing time Node of the table instead of just the variable named "time"
else:
id = self.get_id(path) # here
self.model[id]["value"] = numpy.append(self.model[id]["value"],blob[path]) #todo: this is a very inefficient copy and reallocate
if id in tableColumnIds:
tableColumnIds.remove(id)
#append this value
for id in tableColumnIds:
self.model[id]["value"] = numpy.append(self.model[id]["value"],numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remainings with inf
#now trigger observser
self.__notify_observers(self.get_leaves_ids(columnsId),"value")
else:
#time sorted: find a place to insert the data in the times
currentTimes = numpy.asarray(self.get_value(timeId),dtype=numpy.float64)
startTime = blob[timePath][0]
endTime = blob[timePath][-1]
firstIndexGreaterStart, = numpy.where(currentTimes>startTime) #where returns tuple
if len(firstIndexGreaterStart) == 0:
firstIndexGreaterStart = tableLen
else:
firstIndexGreaterStart=firstIndexGreaterStart[0]
firstIndexGreaterEnd, = numpy.where(currentTimes > endTime)
if len(firstIndexGreaterEnd) == 0:
firstIndexGreaterEnd = tableLen
else:
firstIndexGreaterEnd=firstIndexGreaterEnd[0]
if firstIndexGreaterEnd != firstIndexGreaterStart:
self.logger.error("we can't insert the data in a row-wise time manner, only as block")
return False
startIndex = firstIndexGreaterStart # the position to insert the incoming data
self.logger.debug(f"insert data @{startIndex} of {tableLen}")
for path in blob:
if path.split('.')[-1]=="time":
id = timeId # we take the existing time Node of the table instead of just the variable named "time"
else:
id = self.get_id(path) # here
self.model[id]["value"] = numpy.insert(self.model[id]["value"],startIndex,blob[path]) #todo: this is a very inefficient copy and reallocate
if id in tableColumnIds:
tableColumnIds.remove(id)
#append this value
for id in tableColumnIds:
self.model[id]["value"] = numpy.insert(self.model[id]["value"],startIndex,numpy.full(valuesLen,numpy.inf,dtype=numpy.float64)) # pad the remainings with inf
#
pass
return True
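    # Usage sketch (hypothetical): appending two rows, auto-creating the table on first use; the key
    # ending in ".time" is mandatory and is mapped onto the table's time node:
    #   model.append_table({"root.auto.time": [1577836800.0, 1577836801.0],
    #                       "root.auto.temp": [20.1, 20.3]})
    # columns of the table that are not part of the blob are padded with numpy.inf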
def __show_subtree(self,rootId):
currentBrowsePath = self.get_browse_path(rootId)
indentation = "| "*(len(currentBrowsePath.split('.'))-1)
print (indentation+"-",self.model[rootId]["name"],end="")
noShowProperties=["name","parent","children"]
for property in self.model[rootId]:
try:
if property=="value" and len(self.model[rootId]["value"])>10:
print(",len:"+str(len(self.model[rootId]["value"])),end="")
except:
pass
if not property in noShowProperties:
try:
                    #if this entry has a len and the len is larger than 10, show only a part of it
if len(self.model[rootId][property]) > 10:
print("," + property + "=" + str(self.model[rootId][property][0:10])+"...("+str(len(self.model[rootId][property]))+")", end="")
else:
print("," + property + "=" + str(self.model[rootId][property]), end="")
except:
print("," + property + "=" + str(self.model[rootId][property]), end="")
if self.model[rootId]["type"]=="timeseries":
print(","+self.time_series_get_info(rootId), end="")
print("")
for child in self.model[rootId]["children"]:
self.__show_subtree(child)
def execute_object_function(self,desc,functionName,parameter=None):
with self.lock:
id = self.get_id(desc)
object = self.get_object(id)
if not object:
return False
try:
functionPointer = getattr(object,functionName)
self.executionQueue.put({"functionPointer":functionPointer,"parameter":parameter,"id":id})
return True
except:
self.logger.error(f"function {functionName} not sttr of object {desc} {object}")
return False
def execute_function(self,desc,parameter = None):
"""
create a thread to execute a function there,
if the function has autoReload, we re-import the external
file
Args:
desc: node descriptor of the node (type "function") to be executed
Returns:
True if the execution thread was launched
"""
with self.lock:
id = self.get_id(desc)
if self.model[id]["type"]!= "function":
return False
functionName = self.model[id]["functionPointer"]
if not functionName in self.functions:
self.logger.error(f"can't find function {functionName} in global list")
return False
functionNode = self.get_node(id)
executionType = functionNode.get_child("control").get_child("executionType").get_value()
if executionType in ["async","sync"]:
self.executionQueue.put(id)
self.logger.info(f"function {desc} queued for execution")
return True
elif executionType =="threaded":
self.logger.info(f"function {desc} started in thread")
thread = threading.Thread(target=self.__execution_thread, args=[id])
thread.start()
return True
else:
self.logger.error(f"function {desc} cant be started, unknown execution type {executionType}")
return False
#check if function is interactive, then we reload it right now
if self.model[id]["autoReload"] == True and self.global_auto_reload_enabled():
#if self.functions[functionName]["isInteractive"]:
# must reload the module
module = importlib.reload(self.functions[functionName]["module"])
functionPointer = getattr(module,functionName.split('.',1).pop())
#now update our global list
self.functions[functionName]["module"] = module
self.functions[functionName]["function"] = functionPointer
#here, the lock is open again!
try:
if executionType == "async" or executionType == "threaded":
thread = threading.Thread(target=self.__execution_thread, args=[id])
thread.start()
return True
elif executionType == "sync":
self.__execution_thread(id) # call it sync here
return True
else:
self.logger.error("unsupported execution type"+str(executionType)+" in fuction"+str(id))
raise(Exception)
except:
return False
def start_function_execution_thread(self):
self.functionExecutionRunning = True
self.functionExecutionThread = threading.Thread(target=self._function_execution_thread)
self.functionExecutionThread.start()
def _function_execution_thread(self):
while self.functionExecutionRunning:
try:
nextId = self.executionQueue.get(timeout=1)
self.logger.info(f"now executing function {str_lim(nextId,300)}")
self.__execution_thread(nextId)
except:
pass
def delete(self):
self.functionExecutionRunning = False
def exit(self):
self.delete()
def close(self):
self.delete()
def __dispatch(self,function,timeout,param):
thread = threading.Thread(target=self.__dispatch_thread_function, args=[function,timeout,param])
thread.start()
def __dispatch_thread_function(self,function,timeout,param):
time.sleep(timeout)
function(param)
#exit thread
def reset_progress_bar(self,controlNode):
controlNode.get_child("progress").set_value(0)
def __clone_children(self,source,dest):
""" see def clone() for more info """
sourcePath = self.get_browse_path(source)
destPath = self.get_browse_path(dest)
for childName,childInfo in self.get_children_dict(source).items():
childId = childInfo["id"]
if childInfo["type"] in ["timeseries","file","column"]:
self.logger.debug(f"clone skip node {childInfo['name']}")
continue
newProps = {k:v for k,v in childInfo.items() if k not in ["parent","children","backRefs","forwardRefs","browsePath","id","name"]}
cloneId = self.create_node_from_path(destPath+"."+childInfo["name"],properties=newProps)
grandChildren = self.get_children_dict(childId)
if grandChildren != {}:
self.__clone_children(childId,cloneId)
def __clone_referencer_targets(self,source,dest):
""" see def clone() for more info """
sourcePath = self.get_browse_path(source)
destPath = self.get_browse_path(dest)
childIds = self.get_node_info(sourcePath)["children"]
while childIds:
id = childIds.pop()
info = self.get_node_info(id)
if info["type"]=="referencer":
newreferencer = self.get_browse_path(id).replace(sourcePath, destPath)
#now check: if the referencers points to something inside, we do the same but in the target root, else we take it as it is
for targetId in info["forwardRefs"]:
targetPath = self.get_browse_path(targetId)
newTargetPath = targetPath.replace(sourcePath,destPath)# if not found, we get it unchanged
self.add_forward_refs(newreferencer,[newTargetPath])
childIds.extend(info["children"])
def clone(self,desc):
"""
clone a node and all its subnodes (a whole branch)
we will create all nodes which existed in the source branch, for the referencers we use this stategy:
references pointing to a node under the source branch will be translated to references in the target branch
poining to the corresponding new node in the target branch
references pointing to outside the source branch will also be created in the cloned branch pointing to
the same target
Args:
desc: the source node descriptor
"""
sourcePath = self.get_browse_path(desc)
if not sourcePath:
return False
targetPath = sourcePath+"_"+getRandomId()
sourceInfo = self.get_node_info(desc)
transferRoot = self.create_node_from_path(targetPath,properties={"type":sourceInfo["type"]})
#now iterate over the nodes and children and create the same nodes
self.__clone_children(desc,transferRoot)
self.__clone_referencer_targets(sourcePath,transferRoot)
return True
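    # Usage sketch (hypothetical): cloning "root.widget1" creates "root.widget1_<randomId>";
    # referencers inside the clone that pointed into the source branch now point into the clone,
    # all other referencers keep their original targets:
    #   model.clone("root.widget1")   # -> True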
def execute_synchronous(self,id):
"""
execute a function synchronously here (this can be useful when executing a function within another
"""
return self.__execution_thread(id)
def __execution_thread(self,id):
"""
the thread function to execute functions
it currently uses the global lock so it will lock out any other work on the model during execution
all inputs and outputs are found in the model
we also set the status and result from here, not needed to do that in the function
Args:
id: the node id of the function to be executed or the dict for an object call
"""
try:
if type(id) is str:
if self.model[id]["type"] == "function":
isFunction = True
else:
isFunction = False
with self.lock:
if isFunction:
if self.model[id]["autoReload"] == True and self.global_auto_reload_enabled():
# must reload the module
functionName = self.model[id]["functionPointer"]
module = importlib.reload(self.functions[functionName]["module"])
functionPointer = getattr(module, functionName.split('.', 1).pop())
# now update our global list
self.functions[functionName]["module"] = module
self.functions[functionName]["function"] = functionPointer
#self.logger.info(f"in execution Thread {threading.get_ident()}, executing {id} {functionName}")
#check the function
functionName = self.model[id]["functionPointer"]
functionPointer = self.functions[functionName]['function']
self.logger.info(f"in execution Thread {threading.get_ident()}, executing {id} {functionName}")
else:
functionPointer = id["functionPointer"]
functionName = functionPointer.__name__
parameter = id["parameter"]
id = id["id"] #for deeper down
#now set some controls
try:
node = self.get_node(id)
controlNode = node.get_child("control")
targetId = self.get_id("root.system.progress.targets")
if targetId:
self.disable_observers()
self.remove_forward_refs(targetId)
self.add_forward_refs(targetId,[controlNode.get_child("progress").get_id()])
self.enable_observers()
# we don't signal these things
self.disable_observers()
controlNode.get_child("status").set_value("running")
controlNode.get_child("result")#.set_value("pending")
controlNode.get_child("progress").set_value(0)
#controlNode.get_child("signal").set_value("nosignal")
startTime = datetime.datetime.now()
controlNode.get_child("lastStartTime").set_value(startTime.isoformat())
self.enable_observers()
except:
self.logger.error("error during execution preparation, this can be critical, maybe disabled observers")
self.log_error()
pass
# model lock open: we execute without model lock
if isFunction:
result = functionPointer(node) # this is the actual execution
else:
result = functionPointer(parameter)
#now we are back, set the status to finished
duration = (datetime.datetime.now()-startTime).total_seconds()
with self.lock:
# this is a bit dangerous, maybe the node is not there anymore?, so the
# inner functions calls of node.xx() will return nothing, so we try, catch
try:
self.logger.debug(f"function {functionName} execution completed in {duration} ")
self.disable_observers() # we don't signal these
controlNode.get_child("lastExecutionDuration").set_value(duration)
controlNode.get_child("status").set_value("finished")
controlExecutionCounter = controlNode.get_child("executionCounter")
controlExecutionCounter.set_value(controlExecutionCounter.get_value() + 1)
controlProgress = controlNode.get_child("progress")#.set_value(0)
controlProgress.set_value(0)
self.enable_observers()
self.notify_observers([controlExecutionCounter.get_id(),controlProgress.get_id()],"value")
if not isFunction:
result = True # for execution of member function we don't have a general return code
if result == True:
controlNode.get_child("result").set_value("ok")
self.publish_event("result of " + str(functionName) + ": " + controlNode.get_child("result").get_value())
else:
if controlNode.get_child("result").get_value() == "pending":
#if the functions hasn't set anything else
controlNode.get_child("result").set_value("error")
#also publish this result
self.publish_event("error in " + str(functionName) + ": " + controlNode.get_child("result").get_value())
# except:
# self.logger.error("problem setting results from execution of #"+str(id))
except Exception as ex:
errorString = str(sys.exc_info()[1])
self.logger.error("error inside execution thread, id" +str(id)+" functionname"+str(functionName)+errorString+" "+str(ex)+" "+str(traceback.format_exc()))
pass
except Exception as ex:
errorString = str(sys.exc_info()[1])
self.logger.error("error inside execution thread, id " +str(id)+" functionname"+str(functionName)+errorString+" "+str(ex)+" "+str(traceback.format_exc()))
controlNode.get_child("status").set_value("interrupted")
controlNode.get_child("result").set_value("error:"+errorString)
controlNode.get_child("progress").set_value(0)
self.publish_event("error in "+str(functionName)+": "+errorString)
return
def get_error(self):
s=f"{sys.exc_info()[1]}, {traceback.format_exc()}"
return s
def log_error(self):
self.logger.error(self.get_error())
def show(self):
"""
            show the current model as an ascii tree on the console
"""
with self.lock:
self.__show_subtree("1")
def save_model(self):
return self.save(self.currentModelName,includeData=False)
# save model and data to files
def save(self, fileName, includeData = True):
"""
save the model to disk, save the tables separately
the model file will be saves as ./models/fileName.model.json and the tables will be saved under
./models/filename.tablePath.npy
Args:
fileName to store it under, please don't give extensions
includeData : if set to False, we DONT store the values of node types tables or files to disk
"""
self.logger.debug(f"save model as {fileName} with data {includeData}")
self.publish_event(f"saving model {fileName}...")
with self.lock:
try:
m = self.get_model_for_web() # leave out the tables
model_directory = None
model_filename = None
if os.path.isabs(fileName):
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
else:
file_directory = os.path.dirname(fileName)
if len(file_directory) == 0:
# we are only given a filename, use 21datalab subfolder models as directory
model_directory = os.path.join(os.path.dirname(__file__), "models")
model_filename = fileName
else:
# we are given a relative path + filename
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
if includeData:
self.ts.save(os.path.join(model_directory, model_filename))
f = open(os.path.join(model_directory, model_filename)+ ".model.json", "w")
f.write(json.dumps(m, indent=4))
f.close()
self.currentModelName = fileName
self.publish_event(f"model {fileName} saved.")
return True
except Exception as e:
self.logger.error("problem sving "+str(e))
self.publish_event(f"saving model {fileName} error")
return False
def move(self, nodeList, newParent, newIndex=None):
"""
move a list of nodes under a new Parent on the child position new Index
if the newParent is a referencer, we are creating references instead and keep the nodes where they are
Args:
nodeList [string]: a list of node descriptors of the nodes to move, scalar is also allowed
NewParent [string] a node descriptor for the new parent under which the nodes should appear
new Index int : the position on the children of newParent where the new nodes should appear
Returns:
True
"""
with self.lock:
if not type(nodeList) is list:
nodeList = [nodeList]
nodeIds = self.get_id(nodeList)
parentId = self.get_id(newParent)
if not parentId: return False
#check the special case that the parent is a referencer:
if self.model[parentId]["type"] == "referencer":
self.add_forward_refs(parentId,nodeIds)
self.logger.info("moves nodes as references "+ parentId + str(nodeIds))
return True
#for all others, we start moving nodes
self.logger.debug(f"model.move():{nodeIds}=>{parentId}")
try:
for id in nodeIds:
if id == parentId or id == "1":
self.logger.error("cant move " +id + " to " + parentId)
continue
oldParent = self.model[id]["parent"]
self.model[oldParent]["children"].remove(id) # remove the child from the old parent
self.model[id]["parent"]=parentId
if newIndex:
self.model[parentId]["children"].insert(newIndex,id) # at specific index
else:
self.model[parentId]["children"].append(id) # at the end
self.__notify_observers(oldParent, "children")
self.__notify_observers(parentId, "children")
except Exception as ex:
self.logger.error(f"problem moving {nodeIds} to new parent {parentId} this is critical, the model can be messed up {ex}")
return True
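    # Usage sketch (hypothetical): moving two nodes under a folder, or "moving" them onto a
    # referencer (which only creates references and leaves the nodes in place):
    #   model.move(["root.a.var1", "root.a.var2"], "root.b")            # real move
    #   model.move("root.a.var1", "root.widget.selectedVariables")      # creates a forward reference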
def clean_ts_entries(self):
"""
remove timeseries data that has no node and remove nodes (timeseries that have no timeseries data
"""
self.logger.debug("clean_ts_entries(): check consistency of model and timeseries table..")
deleteNodes = []
for id, node in self.model.items():
if node["type"] == "timeseries":
info = self.ts.get_info(id)
if "not found" in info:
self.logger.info(f" {node['name']}: has no time series date entry in the ts table, remove node")
deleteNodes.append(id)
for id in deleteNodes:
self.delete_node(id)
deleteTs=[]
for id in self.ts.get_items():
if id not in self.model:
self.logger.info(f" timeseries data {id} has no corresponding node in model .. delete the ts-data")
self.ts.delete(id)
def load(self,fileName,includeData = True, update = False):
"""
replace the current model in memory with the model from disk
please give only a name without extensions
the filename must be in ./models
Args:
fileName(string) the name of the file without extension, we also accept a dict here: a list of nodes
includeData bool: if set to false, the values for tables and files will NOT be loaded
update : if set to true, auto correct missing entries in known templates
"""
result = False
self.logger.info(f"load {fileName}, includeData {includeData}")
with self.lock:
self.publish_event(f"loading model {fileName}...")
self.disable_observers()
try:
if type(fileName) is str:
model_directory = None
model_filename = None
if os.path.isabs(fileName):
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
else:
file_directory = os.path.dirname(fileName)
if len(file_directory) == 0:
# we are only given a filename, use 21datalab subfolder models as directory
model_directory = os.path.join(os.path.dirname(__file__), "models")
model_filename = fileName
else:
# we are given a relative path + filename
model_directory = os.path.dirname(fileName)
model_filename = os.path.basename(fileName)
#if os.path.dirname(fileName)
f = open(os.path.join(model_directory, model_filename) + ".model.json","r")
model = json.loads(f.read())
self.model = model
f.close()
self.currentModelName = fileName
elif type(fileName) is dict:
self.model = copy.deepcopy(fileName) # take over the nodes
self.currentModelName = "fromNodes"
#now also load the tables
self.globalIdCounter = 0 #reset the counter and recover it further down
for nodeId in self.model:
if not self.idCreationHash:
#we only recover the counter if necessary
if int(nodeId)>self.globalIdCounter:
self.globalIdCounter = int(nodeId) # here, we recover the global id counter
if includeData:
if "version" in self.model["1"] and self.model["1"]["version"]>=0.1:
#new loader
self.ts.load(os.path.join(model_directory, model_filename))
else:
self.logger.debug("time series compatibility loader")
#we assume data in file and use the standard inmemory table storage
for nodeId in self.model:
if self.get_node_info(nodeId)["type"] == "table":
table = self.get_browse_path(nodeId)
data = numpy.load(os.path.join(model_directory, model_filename) + "." + table + ".npy")
#now find the time data, apply it to all variables
timeId=self.find_table_time_node(table)
ids = self.get_leaves_ids(table+".columns")
for id, column in zip(ids, data):
if id==timeId:
times = column
else:
self.ts.create(id)
self.set_properties({"type":"timeseries"},id)
self.ts.set(id,values=column)
for id in ids:
if id == timeId:
continue
self.ts.set(id,times=times)
self.clean_ts_entries() # make sure the model and ts table is consistent
self.instantiate_all_objects()
self.reset_all_objects()
self.enable_observers()
self.publish_event(f"loading model {fileName} done.")
self.model["1"]["version"]=self.version #update the version
result = True
except Exception as e:
self.logger.error("problem loading"+str(e))
self.publish_event(f"loading model {fileName} error.")
self.enable_observers()
result = False
if update:
self.update() # automatically adjust all widgets and other known templates to the latest style
return result
def create_differential_handle(self, user = None):
"""
make a copy of the current model and keep it as copy, create a handle for it and return that handle
this new handle is at the same time the id of te new "user", all the following requests for differential updata
will be referred to this user id
Returns:
a hash handle for the current model
"""
with self.lock:
#newHandle = str(uuid.uuid4().hex) # make a new unique handle
newHandle = str(self.diffHandleCounter)
self.diffHandleCounter += 1
if not user:
#also create a new user
user = newHandle
self.differentialHandles[newHandle]= {
"user":user,
"model":self.get_model_for_web(),
"time": int(time.time()),
"updateCounter": self.modelUpdateCounter
}# make an entry by copying the whole model
return newHandle
def get_differential_update(self,oldHandle,newHandle=None):
"""
this function takes the copy of the model (hopefully) held under handle and compares it to the current model:
the differences are analyzed and returned, t
to avoid endless storage of old references, we have the deletin stategy: for every "user" we keep a max of
self.differentialHandlesMaxPerUser, if we have more, we delete the oldest
Args:
oldHandle (string): the unique id of the old version of the model
newHandle (string): the unique id of the new version to compare to, if not given, we take the current
and will automatically make a new entry for the current
delOld: if set, we remove the old entry from the memorized models with a one step delay
Returns (dict):
containing information about the changes between and old and new version of the model
key values:
"handle":(string): the handle under which we find the new version of the model
"newNodes": (dict) nodes which are new to the tree in the form Nodeid:{properties}
"deletedNodeIds": (list) list of node ids which have been deleted
"modifiedNodes": (dict) nodes which have changed properties: if so, we give the full updated node back
"""
with self.lock:
diff={"handle":None,"newNodes":{},"deletedNodeIds":[],"modifiedNodes":{}} # the response for web
if oldHandle not in self.differentialHandles:
return None # the old handle does not exist, we can't handle this request
if newHandle is None:
# this is the standard case, we generate the new handle now
user = self.differentialHandles[oldHandle]["user"]
# we make a quick check if the model has changed at all, if not we simply return the old handle
if self.differentialHandles[oldHandle]["updateCounter"] == self.modelUpdateCounter:
self.logger.debug("get_differential_update: shortcut for no changes")
diff["handle"] = oldHandle
return diff
newHandle = self.create_differential_handle(user=user) # this function also makes a copy of the current tree and puts it in the self.differential handles list
newModel = self.differentialHandles[newHandle]["model"]
else:
if newHandle in self.differentialHandles:
newModel = self.differentialHandles[newHandle]
else:
return None # the newhandle did not exist
oldModel = self.differentialHandles[oldHandle]["model"]
# delete strategy: for every "user" we track a maximum of self.differentialHandlesMaxPerUser
users={}
for handle,entry in self.differentialHandles.items():
user = entry["user"]
if user not in users:
users[user]={}
users[ user][ handle ] = entry["time"]
for user,entries in users.items():
if len(entries)> self.differentialHandlesMaxPerUser:
#must clean up history of that user, entries is a dict of handle:time
sortedKeys =[key for key, value in sorted(entries.items(), key=lambda item: item[1])]
removeKeys = sortedKeys[:-self.differentialHandlesMaxPerUser]
self.logger.debug("remove handle"+str(removeKeys)+" of user"+user)
for key in removeKeys:
del self.differentialHandles[key]
#find the changes between the models
for newNodeId in newModel:
if newNodeId not in oldModel:
#this node is not found in the old model, so it is new
diff["newNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
else:
                    #this node is in both models, check if there was a change inside the nodes
#for a deep comparison, serialize them
newNodeSerialized = json.dumps(newModel[newNodeId],sort_keys=True)
oldNodeSerialized = json.dumps(oldModel[newNodeId],sort_keys=True)
if newNodeSerialized != oldNodeSerialized:
#something is different, so return that node
diff["modifiedNodes"][newNodeId]=copy.deepcopy(newModel[newNodeId])
            #now check for deleted ones, these appear in the old but not in the new
diff["deletedNodeIds"]=list(set(oldModel.keys())-set(newModel.keys()))
diff["handle"]=newHandle
return diff
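    # Usage sketch (hypothetical polling loop of a web client):
    #   handle = model.create_differential_handle()      # remember the current state
    #   ...                                               # model changes happen elsewhere
    #   diff = model.get_differential_update(handle)      # -> {"handle": ..., "newNodes": {...},
    #   handle = diff["handle"]                            #     "deletedNodeIds": [...], "modifiedNodes": {...}}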
def publish_event(self, event):
"""
send out an event e.g. for status information
event to send looks like
event = { "id": 1123,
"event": "system.status"
"data:"{"nodeId":xx, "value":..,"function":... ...}
}
Args
event [string or dict]
"""
self.logger.debug(f"publish_event ({event})")
self.modelUpdateCounter += 1
if type(event) is str:
#make sure the formatting is json compatible
event = event.replace("'",'"')# ' => "
event={"event":"system.status","data":{"text":event}}
event["id"]=self.modelUpdateCounter
for observerObject in self.observers:
observerObject.update(event)
def disable_observers(self):
self.lock_model()
#with self.lock:
self.disableObserverCounter += 1
#self.logger.debug(f"disable_observers() {self.disableObserverCounter}")
def enable_observers(self):
self.release_model()
if self.disableObserverCounter >0:
self.disableObserverCounter -=1
else:
self.logger.error("enable_observers without disable observers")
#self.logger.debug(f"enable_observers() {self.disableObserverCounter}")
def notify_observers(self, nodeIds, properties, eventInfo={}):
"""
public wrapper for __notify observser, only expert use!
"""
#self.logger.info(f"notify observses(), {str_lim(nodeIds,50)}, {properties}")
return self.__notify_observers(nodeIds,properties,eventInfo)
def get_referencers(self,descList,deepLevel = 0):
"""
get the references to this node via backtraversing the leaves algorithm
we look for parents through deepLevel levels and from there on we look back for referencers
deepLevel is the the level of extra parent level: 1 means the one more level, two means two extra level
Returns:
a list of referencers ids that point to the given descList nodes
"""
#convert all to nodes to ids
if type(descList) is not list:
descList = [descList]
startList = set([self.__get_id(node) for node in descList])
startList =set([node for node in startList if node]) #remove None and duplicates
referencers = set() #we collect the parents here and avoid duplicates
#in this first iteration we take the referencers pointing directly to the nodes or their parents
workList = startList.copy()
for level in range(deepLevel+1):
#from this level we take the backrefs
for id in workList:
referencers.update(self.model[id]["backRefs"])
#prepare parents for next round
parents=set()
for id in workList:
myParent=self.model[id]["parent"]
if myParent not in ["0","1"]: #root
parents.update([myParent]) #!use list to avoid break into chars
#now take the parents as currentList
workList = parents.copy()
            if not workList:
                break #avoid turning cycles for nothing
#second step:
# now we take all final referencers and all referencers to those referencers with no limit
# (go back the leaves algorithm)
collectedReferencers = referencers.copy() # we take all we have so far
while True:
workList=set()
for id in referencers:
workList.update(self.model[id]["backRefs"])
collectedReferencers.update(workList)
if not workList:
break
else:
#one more round
referencers = workList.copy()
return list(collectedReferencers)
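    # Usage sketch (hypothetical): with a widget referencer "root.widget.variables" pointing to
    # "root.data.speed", asking for the referencers of the data node walks up deepLevel parent
    # levels and then back through all referencer-of-referencer chains:
    #   model.get_referencers("root.data.speed", deepLevel=1)   # -> e.g. [<id of root.widget.variables>, ...]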
def __notify_observers(self, nodeIds, properties, eventInfo={} ):
"""
this function is called internally when nodes or properties have changed. Then, we look if any
observer has to be triggered
we also increase the counter and time on the root.observers.modelObserver
Args:
                nodeIds: the node id or list of node ids where a change occurred
properties: the property or list of properties of the node that has changed
"""
#exception for the progress node
if type(properties) is not list:
properties = [properties]
if type(nodeIds) is not list:
nodeIds = [nodeIds]
if self.disableObserverCounter>0:
#only one exception: progress works always
mustReturn = True
with self.lock:
for nodeId in nodeIds:
if self.model[nodeId]["name"] == "progress":
mustReturn = False
break
if mustReturn:
#self.logger.info(f"__notify_observers disable return {nodeIds} {properties}")
return
with self.lock:
# this is for the tree updates, any change is taken
self.modelUpdateCounter = self.modelUpdateCounter + 1 #this is used by the diff update function and model copies
collectedEvents=[]
enableTree = self.get_node("root.system.enableTreeUpdateEvents")
if enableTree and enableTree.get_value()==False:
pass
else:
# Notify all observers about the tree update, this is a standard
event = {
"id": self.modelUpdateCounter,
"event": "tree.update",
"data": ""}
collectedEvents.append(event) # send later
names =[self.model[id]["name"] for id in nodeIds]
self.logger.debug(f"__notify_observers {len(nodeIds)} ids:{str_lim(names,100)}: {properties}")
triggeredObservers=[] # we use this to suppress multiple triggers of the same observer, the list holds the observerIds to be triggered
#p=utils.Profiling("__notify.iterate_nodes")
            referencers = self.get_referencers(nodeIds,deepLevel=5) #deepLevel 5: nodes can be organized by the user in a hierarchy
nodeId = self.__get_id(nodeIds[0])#take the first for the event string,
#p.lap(f"get refs for {nodeId}")
self.logger.debug(f"__notify on {len(referencers)} referencers: {str_lim([self.get_browse_path(id) for id in referencers],200)}")
for id in referencers:
if self.model[id]["name"] == "targets" and self.model[self.model[id]["parent"]]["type"] == "observer":
# this referencers is an observer,
observerId = self.model[id]["parent"]
observer = self.get_children_dict(observerId)
# check if trigger
if observer["enabled"]["value"] == True:
#self.logger.debug(f"{self.model[nodeId]['name']} is targeted by observer {self.get_browse_path(observerId)}")
if observerId in triggeredObservers:
self.logger.debug(f"we have triggered the observer {self.get_browse_path(observerId)} in this call already, pass")
continue
#self.logger.debug(f"check properties to triggered the observer {self.get_browse_path(observerId)}")
#check if any of the observed properties matches
propertyMatch = False
for property in properties:
if property in observer["properties"]["value"]:
propertyMatch=True
break
if not propertyMatch:
#self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} no property match ")
pass
else:
self.logger.debug(f"observer trigger on {self.get_browse_path(observerId)} for change in {property}")
self.model[observer["triggerCounter"]["id"]]["value"] = self.model[observer["triggerCounter"]["id"]]["value"]+1
self.model[observer["lastTriggerTime"]["id"]]["value"] = datetime.datetime.now().isoformat()
for funcNodeId in self.get_leaves_ids(observer["onTriggerFunction"]["id"]):
self.logger.debug(f"execute ontrigger function {funcNodeId}")
self.execute_function(funcNodeId)
if "triggerSourceId" in observer:
self.model[observer["triggerSourceId"]["id"]]["value"] = nodeId
if observer["hasEvent"]["value"] == True:
#self.logger.debug(f"send event {observer['eventString']['value']}")
#also send the real event
#self.modelUpdateCounter = self.modelUpdateCounter+1
event = {
"id": self.modelUpdateCounter,
"event": observer["eventString"]["value"],
"data": {"nodeId":observerId,"sourceId":nodeId,"sourcePath":self.get_browse_path(nodeId)}}
if self.model[nodeId]["type"] not in ["column","file","timeseries"]:
event["data"]["value"]=self.model[nodeId]["value"]
#some special handling
try:
if event["event"] == "system.progress":
progressNode = self.get_node(self.get_leaves_ids("root.system.progress.targets")[0])
event["data"]["value"] = progressNode.get_value()
event["data"]["function"] = progressNode.get_parent().get_parent().get_browse_path()
else:
eventNode = self.get_node(observerId)
extraInfoNode = eventNode.get_child("eventData")
if extraInfoNode:
extraInfo = extraInfoNode.get_value()
if type(extraInfo) is not dict:
extraInfo={"info":extraInfo}
event["data"].update(extraInfo)
if eventInfo:
event["data"]["_eventInfo"]=eventInfo #put this only if we have info
except Exception as ex:
self.logger.error(f"error getting extra info for event {ex}, {sys.exc_info()[0]}")
#for all other events, take the event data if there is one (as json)
self.logger.debug(f"generate event {event}")
collectedEvents.append(event)
triggeredObservers.append(observerId)# next time, we don't trigger
#p.lap("complete backrefs {nodeId}, {backrefs}")
#self.logger.debug(p)
#self.logger.debug("now send the events")
#event = copy.deepcopy(event)
for event in collectedEvents:
for observerObject in self.observers:
observerObject.update(event)
self.logger.debug(f"done sending {len(collectedEvents)} events")
def create_observer(self):
# Instantiate a new observer
observer = Observer(self)
# attach it to the model
self.attach_observer(observer)
# return the observer
return observer
def attach_observer(self, observer):
# Add a new observer
self.logger.debug(f"Adding new observer: {id(observer)}")
with self.lock:
self.observers.append(observer)
def detach_observer(self, observer):
with self.lock:
try:
self.observers.remove(observer)
self.logger.debug(f"Removing observer: {id(observer)}")
except ValueError:
self.logger.exception("Trying to remove an observer which doesn't exist in the list of observers.")
def set_column_len(self,nodeDescriptor,newLen):
"""
adjust the length of a column; extensions are nan-padded
Args: nodeDescriptor: the node
newLen (int): the new length of the column
Returns:
the new length that was set, or None if there was a problem
"""
with self.lock:
id = self.get_id(nodeDescriptor)
if not id: return None
if self.model[id]["type"] != "column":
self.logger.error("set_column_len: not a column")
return None
#now make the adjustments
if type(self.model[id]['value']) != numpy.ndarray:
self.model[id]['value'] = numpy.full(newLen, numpy.nan)
else:
#is already an array
if len(self.model[id]['value']) == newLen:
#nothing to do
pass
if len(self.model[id]['value']) > newLen:
self.model[id]['value'] = self.model[id]['value'][0:newLen]
elif len(self.model[id]['value']) < newLen:
self.model[id]['value'] = numpy.append(self.model[id]['value'], numpy.full(newLen-len(self.model[id]['value']), numpy.nan))
else:
#same len
pass
return newLen
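# --- hedged usage sketch (editor's addition; "root.variables.f0" is a hypothetical column node):
# non-array values are replaced by a nan-filled array, longer arrays are truncated and shorter
# arrays are nan-padded to the requested length
#
# m.set_column_len("root.variables.f0", 100)   # column now has exactly 100 entries
# m.set_column_len("root.variables.f0", 50)    # truncated to the first 50 entries
# m.set_column_len("root.variables.f0", 80)    # extended again, the new entries are numpy.nan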
def get_upload_folder_files(self, matchFilter=None, blackList = []):
"""
Args:
matchFilter: a string that must be contained in a file name for the file to be delivered
blackList: a list of file names which should not be delivered
Returns: a list of the found file names and a list of the corresponding absolute file paths
"""
full_path = os.path.realpath(__file__) # returns a string representing the canonical path, argument file is a file system path
path, filename = os.path.split(full_path)
folder = path+r'\upload'
absFileNames = []
foundFileNames = []
#now iterate the uploaded files
fileNames = os.listdir(folder)
for idx,fileName in enumerate(fileNames):
if matchFilter:
if matchFilter not in fileName:
continue # this file will be ignored
if fileName in blackList:
continue
foundFileNames.append(fileName)
absFileNames = [folder+"\\"+fileName for fileName in foundFileNames]
return foundFileNames,absFileNames
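# --- hedged example (editor's addition): list the uploaded csv files, skipping one already processed;
# note that the first returned list holds the bare file names, the second the absolute paths
#
# names, absNames = m.get_upload_folder_files(matchFilter=".csv", blackList=["old_data.csv"])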
def update(self):
"""
update all known widgets to the latest template including complex backward compatibility changes
:return:
"""
self.logger.info("update() running...")
self.disable_observers()
try:
# the ts widgets:
# now go through the widgets and update them all according to the template
# first find all nodes of type widget
newNodes = {}
helperModel = Model()
helperModel.disable_observers()
helperModel.create_template_from_path("root.widget", self.get_templates()['templates.timeseriesWidget'])
widgets = []
for id, props in self.model.items():
if props["type"] == "widget":
widgetObject = self.get_node(id)
if widgetObject.get_child("widgetType").get_value() == "timeSeriesWidget":
widgets.append(id)
self.logger.debug(f"update():found widget {widgetObject.get_browse_path()}")
for id in widgets:
path = self.get_browse_path(id)
mirrorBefore = self.get_branch_pretty(path)
self.create_template_from_path(path,self.get_templates()['templates.timeseriesWidget']) # this will create all nodes which are not there yet
# now make specific updates e.g. linking of referencers, update of list to dicts etc.
# if colors is a list: make a dict out of it
colors = self.get_value(f"{id}.hasAnnotation.colors")
tags = self.get_value(f"{id}.hasAnnotation.tags")
if type(colors) is list:
colors = {v:{"color":colors[idx],"pattern":None} for idx,v in enumerate(tags)}
self.logger.debug(f"update(): set value{id}.hasAnnotation.colors := {colors} ")
self.set_value(f"{id}.hasAnnotation.colors",colors)
if not "visibleTags" in mirrorBefore["hasAnnotation"] or (self.get_value(f"{id}.hasAnnotation.visibleTags") != mirrorBefore["hasAnnotation"]["visibleTags"][".properties"]["value"]):
#it is different or new, so we created it now
visibleTags = {tag:True for tag in tags}
#make sure that from the colors, we take them as well
updateVisibleTags = {tag:True for tag in colors}
visibleTags.update(updateVisibleTags)
self.set_value(f"{id}.hasAnnotation.visibleTags",visibleTags)
self.logger.debug(f"update(): set value{id}.visibleTagss := {visibleTags} ")
#make sure the hasAnnotation.annotations referencer points to newannotations as well
self.add_forward_refs(f"{id}.hasAnnotation.annotations",[f"{id}.hasAnnotation.newAnnotations"],allowDuplicates=False)
#now make sure the observers have at least the required properties enabled
widget = self.get_node(id)
helperRoot = helperModel.get_node("root.widget")
template = self.get_templates()['templates.timeseriesWidget']
children = helperRoot.get_children(3)
print(f"2 level children {[node.get_browse_path() for node in children]}")
for child in helperRoot.get_children():
if child.get_properties()["type"] == "observer":
widgetNode = widget.get_child(child.get_name()).get_child("properties")
helperNode = child.get_child("properties")
for prop in helperNode.get_value():
current = widgetNode.get_value()
if prop not in current:
current.append(prop)
widgetNode.set_value(current)
for child in helperRoot.get_children(3):
if child.get_properties()["type"] == "referencer":
self.logger.debug(f"found referencer {child.get_name()}")
# now adjust the references of new nodes and of the ones that were there
targets = child.get_properties()["forwardRefs"]
if targets:
targets = [helperModel.get_browse_path(ref) for ref in targets]
requiredTargets = [widget.get_browse_path()+"."+".".join(ref.split(".")[2:]) for ref in targets]
self.logger.debug(f"required targets {requiredTargets}")
#now check in the model
widgetNodePath = widget.get_browse_path()+ child.get_browse_path()[len(helperRoot.get_browse_path()):]
widgetNode = self.get_node(widgetNodePath)
#now check if we have them
targetPaths = [tNode.get_browse_path() for tNode in widgetNode.get_targets()]
for target in requiredTargets:
if target not in targetPaths:
self.logger.debug(f"adding ref {widgetNode.get_browse_path()} => {target}")
self.add_forward_refs(widgetNode.get_id(),[target])
#now the system progress observer
if not self.get_node("root.system.progress"):
self.create_template_from_path("root.system.progress",self.get_templates()['system.observer'])
self.set_value("root.system.progress.hasEvent",True)
self.set_value("root.system.progress.eventString","system.progress")
self.set_value("root.system.progress.properties",["value"])
self.set_value("root.system.progress.enabled",True)
except Exception as ex:
self.logger.error(f" {ex} , {sys.exc_info()[0]}")
helperModel.delete()
helperModel.delete()
self.enable_observers()
# ########################################
# time series api
def time_series_create(self,desc):
id = self.get_id(desc)
return self.ts.create(id)
def time_series_delete(self,desc):
id = self.get_id(desc)
return self.ts.delete(id)
def time_series_insert(self, desc, values=None, times=None, allowDuplicates = False):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.insert(id,values, times,allowDuplicates=allowDuplicates)
self.__notify_observers(id, "value")
return result
def time_series_append(self, desc, values=None, times=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.append(id,values, times)
self.__notify_observers(id, "value")
return result
def time_series_delete_area(self,desc,start=None,end=None):
id = self.get_id(desc)
if not id in self.model:
return None
with self.lock:
result = self.ts.delete_area(id,start=start,end=end)
self.__notify_observers(id, "value")
return result
def time_series_merge(self, desc, values = None, times = None):
id = self.get_id(desc)
if not id in self.model:
return False
return self.ts.merge(id,values=values,times=times)
def time_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
if self.lock:
result = self.ts.set(id,values=values,times=times)
self.__notify_observers(id, "value")
return result
def time_series_get_table(self,
variables,
tableDescriptor = None,
start=None,
end=None,
noBins=None,
includeIntervalLimits=False,
resampleTimes=None,
format="default",
toList = False,
resampleMethod = None,
copy=True):
"""
get a time series table from variables (nodes of type "timeseries").
Args:
variables [list of node descriptors]: nodes to be part of the requested data table (ordered!)
tableDescriptor : a descriptor for the table where the variables reside
possible addressing of the requested nodes:
1) ids or browsepaths of nodes (no tableDescriptor needed)
2) names of nodes and tableDescriptor of the table (names must be unique in the columns of the table)
start, end [float]:
the start and end time of the table given as seconds since epoch
we also allow the special case of end = 0 and start = -interval
we also allow the special case of start given and end = 0
noBins(int): the number of samples to be returned inside the table between start and end time,
if None is given, we return all samples (rows) we have in the table and do not aggregate
includeIntervalLimits [bool]: if set to true, we will include one more data point each left and right of the requested time
format: [enum] "default", "flat", see return description
resampleMethod [enum]:
how to resample if we need to; options are:
None (if not specified): sample and hold
"linear": linear interpolation
"linearfill": linear interpolation and also interpolate "nan" or "inf" values in the original data
toList: (bool) True: return data as python list, False: return numpy arrays
examples:
- get all data of the variables
data = m.get_time_series_table(["root.mytable.variables.a","root.mytable.variables.b"]) # get all data
- request max 300 values of data (this is what the UI does)
data = m.get_time_series_table(["a","b"],"root.mytable",start=1581483065.323,end=1581483080.323,noBins=300,includeIntervalLimits=True)
- request data and resample to an equidistant 25 sec spacing, also fill possible nan values with interpolation
times = list(range(1581483065,1581483065+100,25))
data = m.get_time_series_table(["a","b"],"root.mytable",resampleTimes = times,resampleMethod = "linearfill")
Returns(dict)
formatting depends on the "format" option
"defaut": return the result as {"var_a":{"values":[],"__time":[]}, "var_b":{"values":[],"__time":[]..}
"flat" return the result as {"var_a":[], "var_a__time":[],"var_b":[],"var_b__time":[]....}
the variable descriptor are the ones given in the request
"__time" : list of timestamps for the returned table in epoch seconds as float64
"values": the list of float values of one of the requested variables
"""
if tableDescriptor:
tableId = self.get_id(tableDescriptor)
tableVars = self.get_leaves(tableId+".columns")
else:
tableId = None
if type(start) is str:
start = date2secs(start)
if type(end) is str:
end = date2secs(end)
with self.lock:
#first check if all requested timeseries exist and have type time series
#vars = [] #self.get_id(variables)
if not type(variables) is list:
variables= [variables]
varIds = {} # NodeId: request descriptor
for var in variables:
varId = self.get_id(var)
if not varId:
#try to find per columns and table desc
found = False
if tableId:
for tableVar in tableVars:
if tableVar["name"] == var:
varId = tableVar["id"]
found = True
break
if not found:
self.logger.error(f"requested variable {var} does not exist")
return False
if self.model[varId]["type"]!="timeseries":
self.logger.error(f"requested variable {var} not timeseries, instead {self.model[varId]['type']}")
return False
varIds[varId]=var #remember it for later
table = self.ts.get_table(list(varIds.keys()), start=start, end=end, copy=copy, resampleTimes=resampleTimes, noBins = noBins, includeIntervalLimits=includeIntervalLimits,resampleMethod=resampleMethod)
#now map the descriptors back to the query: if it was a browsepath, we return a browsepath; if it was an id, we return an id
# make some formatting
def convert(input,toList=toList):
if toList:
return list(input)
else:
return input
result = {}
for k,v in table.items():
if format=="flat":
result[varIds[k]]=convert(v["values"])
result[varIds[k]+"__time"]=convert(v["__time"])
else:
result[varIds[k]] = {"values":convert(v["values"]),"__time":convert(v["__time"])}
#if len(variables) == 1:
# #we only have one variable, so we return without descriptor
# result = result[list(result.keys())[0]]
return result
def time_series_get_info(self,name=None):
return self.ts.get_info(name)
def time_series_get_raw(self,id,start=None,end=None):
table = self.ts.get_table([id], start=start, end=end, copy=False, resampleTimes=None,
noBins=None, includeIntervalLimits=False,
resampleMethod=None)
result = table[id]
return result
def time_series_insert_blobs(self, tableDesc, blobs=[]):
""" blob is a dict or list of dicts of key and values containing one time base like
the descriptors of teh variables can be ids, browsepaths or just names (without dots)
if the descriptors are names, we try to find them in the model, they must exist there uniquely, otherwise
they cant be processed
we also autocreate the table or missing variables
the data will be put in a table:
- we try to find the table based on one of the variables, if not found, we create the table
{
"a": [1.5,1.6,1.7]m
"b": [2,3,4]
"__time" :[100001,100002,100003]
}
"""
if not type(blobs) is list:
blobs=[blobs]
#first, find the table
with self.lock:
tableId = self.get_id(tableDesc)
if not tableId:
#try to find the table from the first node
#table not found, create it
tableId = self.create_node_from_path(tableDesc,properties={"type":"table"})
if tableId:
columnsId = self.create_node(parent=tableId, name="columns", properties={"type": "referencer"})
variablesId = self.create_node(parent=tableId, name="variables", properties={"type": "folder"})
else:
self.logger.error(f"cant create table {tableDesc}")
return False
else:
columnsId = self.get_child(tableId,"columns")
variablesId = self.get_child(tableId, "variables")
#now we know the tableId, columnsId, variablesId
# iterate over all blobs and find the ids of the names in the blobs, if not found, create it
# exchange the descriptors to ids
desc2Id = {} # key: the descriptor from the input blob v: the id in the model
tableVars = self.get_leaves(columnsId)
desc2Id = {dic["name"]:dic["id"] for dic in tableVars} # key: the descriptor from the input blob v: the id in the model, preload with the names
#convert all to ids
newBlobs=[]
idsInBlobs=[]
for blob in blobs:
newBlob={}
for k,v in blob.items():
if k=="__time":
newBlob[k]=v
else:
#does this id already exist?
if k in desc2Id:
id = desc2Id[k]
else:
id = None
#try to find
for var in tableVars:
if var["name"] == k:
id = v["id"]
break
if not id:
#still not found, we need to create it
id = self.create_node(parent=variablesId,name=k,properties={"type": "timeseries"})
if not id:
self.logger.error(f"cant find or create {name}")
continue
else:
self.add_forward_refs(columnsId,[id])
desc2Id[k]=id #remember to speed up next time
newBlob[id] = v
idsInBlobs.append(id)
newBlobs.append(newBlob)
self.logger.debug(f"inserting blobs {len(newBlobs)}")
self.__notify_observers(idsInBlobs, "value")
result = self.ts.insert_blobs(newBlobs)
return result
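# --- hedged usage sketch (editor's addition; table and variable names are made up): the table and
# any missing timeseries variables are auto-created and wired into the "columns" referencer
#
# blob = {
#     "temperature": [20.1, 20.4, 20.9],
#     "pressure":    [1.00, 1.01, 1.02],
#     "__time":      [1581483065.0, 1581483066.0, 1581483067.0],
# }
# m.time_series_insert_blobs("root.mytable", blob)           # a single blob
# m.time_series_insert_blobs("root.mytable", [blob, blob])   # or a list of blobs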
# ########################################
# event series api
def event_series_create(self,desc,map={}):
id = self.get_id(desc)
if "eventMap" in self.model[id]:
self.model[id]["eventMap"].update(map)
else:
self.model[id]["eventMap"]=map.copy()
return self.ts.create(id)
def event_series_get_new_number_entry(self,id):
eventMap = self.model[id]["eventMap"]
numbers = [v for k, v in eventMap.items()]
newNumber = max(numbers)+1
while newNumber in numbers:
newNumber = newNumber+1
return newNumber
def event_series_get_event_number(self, desc, event, autoCreate=True):
id = self.get_id(desc)
if not id:
return None
with self.lock:
eventMap = self.model[id]["eventMap"] # a dict like {"starting":1, "machineStop":2,...}
if type(event) in [str,numpy.str_]:
if event not in [k for k,v in eventMap.items()]:
if not autoCreate:
return None
# we must put a new eventString
if eventMap == {}:
newEventNumber = 1
else:
newEventNumber = self.event_series_get_new_number_entry(id)
self.model[id]["eventMap"][event] = newEventNumber
return newEventNumber
else:
#is a known event string, get the number
return eventMap[event]
else:
#this is a number already, check if it is in the map
eventNumbers = [v for k,v in eventMap.items()]
if event in eventNumbers:
return event
else:
if not autoCreate:
return None
#must create a new entry
try:
#to make sure we have only numbers there
newEventString = "event_"+str(int(event))
self.model[id]["eventMap"][newEventString]=int(event)
except:
self.log_error()
return None
return event
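# --- hedged example (editor's addition; the node path is hypothetical): unknown event strings are
# auto-registered in the node's eventMap, known ones simply return their existing number
#
# m.event_series_create("root.machine.events", map={"start": 1, "stop": 2})
# m.event_series_get_event_number("root.machine.events", "start")   # -> 1
# m.event_series_get_event_number("root.machine.events", "error")   # -> 3 (new entry is created)
# m.event_series_get_event_number("root.machine.events", 2)         # -> 2 (number is already known)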
def event_series_insert(self, desc, values=None, times=None, allowEventDuplicates = False):
"""
Args:
values: list of events, where the event is either an eventString or an event number
if values is a scalar, we assume that for all times the same event will be inserted
allowEventDuplicates: setting this to true allows the same event to appear multiple times at the same time stamp
different events are always allowed at the same time
"""
id = self.get_id(desc)
if not id in self.model:
return None
if not values or not times:
return None
if not(type(values) is list or type(values) is numpy.ndarray):
values = [values]*len(times)
#convert the values to numbers and create new map entry if needed
numbers = numpy.asarray([self.event_series_get_event_number(id,event) for event in values],dtype=numpy.int64)
#convert the times to epoch if not already done
epochs = numpy.asarray([t if type(t) is not str else date2secs(t) for t in times ],dtype=numpy.float64)
if not allowEventDuplicates:
# we must delete the events which exist already at the same time with the same event
data = self.event_series_get(desc)
takeIndices = numpy.full(len(times),True)
for idx,tim in enumerate(times):
duplicates = numpy.where(data["__time"]==tim)[0]
for pos in duplicates:
if numbers[idx] == data["values"][pos]:
takeIndices[idx] = False
numbers = numbers[takeIndices]
epochs = epochs[takeIndices]
with self.lock:
#on the TimeSeries class the allowDuplicates means that the same time can appear multiple times
# such that different or the same events can happen at the same time and thus produce the same
# time stamp in the time series
result = self.ts.insert(id,numbers, epochs, allowDuplicates=True)# we allow 2 events to appear on the same time!
self.__notify_observers(id, "value")
return result
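# --- hedged usage sketch (editor's addition; assumes the node was created via event_series_create):
# a scalar event is applied to all given times, iso strings are converted to epoch seconds
#
# m.event_series_insert("root.machine.events", values="start",
#                       times=["2020-01-01T00:00:00+00:00", "2020-01-01T01:00:00+00:00"])
# m.event_series_insert("root.machine.events", values=["start", "stop"],
#                       times=[1577836800.0, 1577840400.0])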
def event_series_set(self,desc,values=None,times=None):
id = self.get_id(desc)
if not id in self.model:
return None
if self.lock:
# now "refresh" the event map
#self.model[id]["eventMap"]={}
numbers = [self.event_series_get_event_number(id, event) for event in values]
result = self.ts.set(id,values=numbers,times=times)
self.__notify_observers(id, "value")
return result
def event_series_get(self,desc, start=None,end=None,format="default",eventFilter=None):
"""
get events from an event series
Args:
desc: node descriptor
start , end [float]:
the start and end time of the series given as seconds since epoch
we also allow the special case of end = 0 and start = -interval
we also allow the special case of start given and end = 0
format: [enum] "default", "iso", "events"
eventFilter : [string] a list of eventStrings as a positive match filter
Returns(dict)
formatting depends on the "format" option
"default": return the result as {"values":[], "__time":[], "eventMap":{"myevent":1,"anotherevent":2}, "eventStrings":[...]}
"iso": as "default", but the "__time" entries are iso date strings
"events": return {"eventMap":{...}, "events":{"myevent":[t1,t2,...], "anotherevent":[t3,...]}}
"""
id = self.get_id(desc)
if not id:
return None
data = self.ts.get_table([id], start=start, end=end)
if data == {}:
#this variable is not in the store
data = {id:{"values":numpy.asarray([]),"__time":numpy.asarray([])}}
eventMap = self.model[id]["eventMap"].copy()
reverseMap = {v:k for k,v in eventMap.items()}
values = data[id]["values"].astype(numpy.int)
times = data[id]["__time"]
#now filter
if eventFilter:
filter = []
if type(eventFilter) is not list:
eventFilter = [eventFilter]
for evString in eventFilter:
if evString in eventMap:
filter.append(eventMap[evString])
indices = [idx for idx,val in enumerate(values) if val in filter]
values = values[indices]
times = times[indices]
result = {
"values":values,
"__time":times,
"eventMap":eventMap,
"eventStrings":[reverseMap[v] for v in values]
}
if format == "iso":
#convert the timestamps to iso
result["__time"]=[epochToIsoString(t) for t in result["__time"]]
if format == "events":
existingEvents = set(result["values"])
events = {reverseMap[ev]:[] for ev in existingEvents}
for ev,ti in zip(result["values"],result["__time"]):
events[reverseMap[ev]].append(ti)
result["events"]=events
del result["values"]
del result["__time"]
del result["eventStrings"]
return result
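# --- hedged examples of the different return formats (editor's addition; paths and values are illustrative)
#
# m.event_series_get("root.machine.events")                       # {"values":..., "__time":..., "eventMap":..., "eventStrings":...}
# m.event_series_get("root.machine.events", format="iso")         # same, but "__time" holds iso strings
# m.event_series_get("root.machine.events", format="events")      # {"eventMap":..., "events":{"start":[t1,t2], "stop":[t3]}}
# m.event_series_get("root.machine.events", eventFilter="start")  # only the "start" events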
def event_series_insert_blob(self,blob):
"""
insert events in various blob syntax
Args:
blob: a dictionary in various styles; the target node is given inside the blob
a) {
"node": nodedescriptor,
"events": "startMachine",
"__time": ["2018.01.01T00:10:08.445+02:00",1546437120.2,1546437121.2,1546437122.2]  # allows iso or epoch
}
b) {
"node": nodedescriptor,
"events": ["startMachine","stopMachine","startMachine","startMachine"],
"__time": ["2018.01.01T00:10:08.445+02:00",1546437120.2,1546437121.2,1546437122.2]  # allows iso or epoch
}
c) {
"node": nodedescriptor,
"events": [
{"event": "startMachine", "__time": "2018.01.01T00:10:08.445+02:00"},
{"event": "stopMachine", "__time": "2018.01.01T00:10:08.445+02:00"}
]
}
Returns
true/false for success
"""
if type(blob["events"]) is not list:
#style a)
events = blob["events"]
times = blob["__time"]
else:
#events is a list
if type(blob["events"][0]) is dict:
#style c)
events = []
times = []
for d in blob["events"]:
events.append(d["event"])
times.append(d["__time"])
else:
#style b)
events = blob["events"]
times = blob["__time"]
return self.event_series_insert(blob["node"],events,times)
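# --- hedged usage sketch of the blob styles described above (editor's addition; the node path is hypothetical)
#
# m.event_series_insert_blob({
#     "node": "root.machine.events",
#     "events": ["startMachine", "stopMachine"],                      # style b)
#     "__time": ["2018-01-01T00:10:08.445+02:00", 1546437120.2],
# })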
def event_series_delete(self,desc,start=None, end = None, eventsToDelete=[]):
id = self.get_id(desc)
if not id:
return None
if start == None and end == None and eventsToDelete == []:
#delete all
with self.lock:
self.model[id]["eventMap"]={}
result = self.ts.set(id, values=[], times=[])
else:
#delete some events
with self.lock:
data = self.ts.get_table([id])
if not start:
start = 0
if not end:
end = numpy.inf
times = data[id]["__time"]
values = data[id]["values"]
over = times>=start
under = times<=end
deleteMaskTime = over & under
if eventsToDelete == []:
deleteMaskValues = numpy.full(len(deleteMaskTime),True)
else:
deleteMaskValues = numpy.full(len(deleteMaskTime),False)
for ev in eventsToDelete:
evNumber = self.model[id]["eventMap"][ev]
mask = values == evNumber
deleteMaskValues = deleteMaskValues | mask
deleteMask = deleteMaskTime & deleteMaskValues
times = times[~deleteMask]
values = values[~deleteMask]
self.event_series_set(id,values,times)
print(data)
def get_object(self,desc):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return None
if "object" not in self.model[id]:
return None
return self.model[id]["object"]
def instantiate_object(self,desc,writeToModel=True):
id = self.get_id(desc)
if not id:
return False
with self.lock:
if not self.model[id]["type"] == "object":
return False
try:
className = self.model[id]["class"]
if "autoReload" in self.model[id] and self.model[id]["autoReload"]==True and self.global_auto_reload_enabled():
# must reload the module
module = importlib.reload(self.objectClasses[className]["module"])
classDefinition = getattr(module, className.split('.', 1).pop())
# now update our global list
self.objectClasses[className]["module"] = module
self.objectClasses[className]["class"] = classDefinition
classDefinition = self.objectClasses[className]["class"]
object = classDefinition(self.get_node(id)) #instantiate the object
if writeToModel:
self.model[id]["object"]=object
return object
except:
self.log_error()
return None
def instantiate_all_objects(self):
with self.lock:
#make a list first for iteration, we can't iterate over the model,
# as the instantiation of object might produce new nodes while we iterate
objects = [k for k,v in self.model.items() if v["type"] == "object"]
for id in objects:
try:
self.instantiate_object(id)
except:
self.log_error()
def reset_all_objects(self):
with self.lock:
#make a list first for iteration, we can't iterate over the model,
# as the instantiation of object might produce new nodes while we iterate
objects = [k for k,v in self.model.items() if v["type"] == "object"]
for id in objects:
try:
self.get_object(id).reset(None)
except:
self.log_error()
def global_auto_reload_enabled(self):
if self.get_value("root.system.enableAutoReload") == False:
return False
else:
return True # this will also be the case if the node is not there, as the get_value return None then
def create_test(self,testNo=1):
"""
this function creates tests for demonstration purposes
"""
if testNo == 1:
self.create_node("root",name="variables",type="folder")
for var in ["f0","f1","f2","f3","count","time","back"]:
self.create_node("root.variables",name=var,type="column")
self.create_node_from_path('root.folder2.myconst',{"type":"const","value":"21data"})
self.create_node_from_path('root.folder2.myfkt', {"type": "function"})
#for the visu
self.create_node_from_path('root.visualization.pipelines.occupancy.url',{"type":"const","value":"http://localhost:5006/bokeh_web"})
self.create_node_from_path('root.visualization.pipelines.demo2.url',{"type":"const","value":"http://21data.io"})
#create an official table
template = [
{
"name": "description",
"type": "const",
"value": "this is a great table"
},
{
"name": "columns",
"type": "referencer",
},
{
"name": "timeField",
"type": "referencer",
},
{
"name": "numberOfRows",
"type": "variable",
"value": 0
}
]
self.create_node("root", name="mytable", type="table")
self.create_nodes_from_template("root.mytable", template=template)
for var in ["f0","f1","f2","f3","time","back"]:
self.add_forward_refs("root.mytable.columns",["root.variables."+var])
self.add_forward_refs("root.mytable.timeField", ["root.variables.time"])
#add data
startTime=datetime.datetime(2018,1,1,0,0,0,tzinfo=pytz.UTC)
vars={"f0":0.01,"f1":0.02,"f2":0.04,"f3":0.1,"back":0.01}
SIZE = 10*60 # in seconds units
STEP = 0.1
#!!! we are producing size/step time points
""" for i in range(SIZE):
dataDict = {}
for var in vars:
value = numpy.cos(2*numpy.pi*vars[var]*i/SIZE*3)
dataDict["root.variables."+var]=value
mytime = startTime + datetime.timedelta(seconds = i)
dataDict["root.variables.time"] = mytime
#print(mytime)
self.add_timeseries(dataDict)
"""
startEpoch = date2secs(startTime)
times = numpy.arange(startEpoch,startEpoch+SIZE,STEP,dtype=numpy.float64)
print("we have time:",times.shape)
for var in vars:
values = numpy.cos(2*numpy.pi*vars[var]*times)
id=self.get_id("root.variables."+str(var))
if var =="back":
#we make -1,0,1 out of it
values = numpy.round(values)
self.model[id]["value"]=values.tolist()
id = self.get_id("root.variables.time")
self.model[id]["value"]=(times).tolist()
#now correct the background
#now make some widget stuff
self.create_node_from_path('root.visualization.widgets.timeseriesOne',{"type":"widget"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectableVariables',
{"type":"referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.selectedVariables',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.startTime',
{"type": "variable","value":None})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.endTime',
{"type": "variable","value":None})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.bins',
{"type": "const","value":300})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation',
{"type": "const", "value": True})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasSelection',
{"type": "const", "value": False})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.annotations',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations',
{"type": "folder"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.tags',
{"type": "const","value":["one","two"]})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.hasAnnotation.colors',
{"type": "const","value":["yellow","brown","greay","green","red"]})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.table',
{"type": "referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.lineColors',
{"type": "const", "value": ["blue", "yellow", "brown", "grey", "red"]})
self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectedVariables',['root.variables.f0','root.variables.f1','root.variables.f3'])
self.add_forward_refs('root.visualization.widgets.timeseriesOne.selectableVariables',['root.variables'])
self.add_forward_refs('root.visualization.widgets.timeseriesOne.table',['root.mytable'])
self.create_node_from_path('root.visualization.widgets.timeseriesOne.observer',{"type":"referencer"})
self.create_node_from_path('root.visualization.widgets.timeseriesOne.observerUpdate', {"type": "const","value":["line","background","annotations"]})
#now the annotations
anno = [
{
"name": "tags",
"type": "const",
"value": ["one","two"]
},
{
"name": "startTime",
"type": "const",
"value": None
},
{
"name": "endTime",
"type": "const",
"value": None
},
{
"name": "text",
"type": "const",
"value": "this is a great annotation"
}
]
tags=["one","two","one","one","two","two","one","one","one","two","one","one"]
self.create_node_from_path("root.annotations",{"type":"folder"})
startTime = datetime.datetime(2018, 1, 1, 0, 0, 0, tzinfo=pytz.UTC)
for i in range(10):
newAnno = copy.deepcopy(anno)
newAnno[1]["value"] = (startTime + datetime.timedelta(minutes=(i*10))).isoformat()
newAnno[2]["value"] = (startTime + datetime.timedelta(minutes=(i*10+1))).isoformat()
newAnno[0]["value"] = [tags[i],tags[i+1]]
newAnnoPath = "root.annotations.anno"+str(i)
self.create_node_from_path(newAnnoPath,{"type":"annotation"})
self.create_nodes_from_template(newAnnoPath,newAnno)
#also add the annotations to the widget
self.add_forward_refs("root.visualization.widgets.timeseriesOne.hasAnnotation.annotations",["root.annotations","root.visualization.widgets.timeseriesOne.hasAnnotation.newAnnotations"])
#make a real function
self.create_node_from_path("root.functions",{"type":"folder"})
self.create_nodes_from_template("root.functions",[self.templates["testfunction.delayFunctionTemplate"]])
#now make cutom function to trigger something
self.create_nodes_from_template("root.functions",[self.templates["counterfunction.counterFunctionTemplate"]])
#now hook the function output to the observer of the plot
self.add_forward_refs('root.visualization.widgets.timeseriesOne.observer',['root.functions.counterFunction.output'])
#now make custom buttons
buttons = [
{
"name":"button1",
"type":"folder",
"children":[
{"name":"caption","type":"const","value":"start learner"},
{"name":"counter", "type": "variable", "value":0},
{"name": "onClick", "type": "referencer"}
]
}
]
self.create_node_from_path("root.visualization.widgets.timeseriesOne.buttons",{"type":"folder"})
self.create_nodes_from_template("root.visualization.widgets.timeseriesOne.buttons",buttons)
self.add_forward_refs("root.visualization.widgets.timeseriesOne.buttons.button1.onClick",["root.functions.counterFunction"])
#now the backgrounds
self.create_node_from_path("root.visualization.widgets.timeseriesOne.hasBackground",{"type":"const","value":True})
self.create_node_from_path("root.visualization.widgets.timeseriesOne.background",{"type":"referencer"})
self.add_forward_refs("root.visualization.widgets.timeseriesOne.background",["root.variables.back"])
self.create_node_from_path("root.visualization.widgets.timeseriesOne.backgroundMap",{"type":"const","value":{"1":"red","0":"green","-1":"blue","default":"white"}})
self.show()
elif testNo == 2:
#we take the full test number 1 and rearrange some things
self.create_test(1)
self.currentModelName = "occupancydemo"
import data.occupancy_data.occupancy as occ
occData = occ.read_occupancy("./data/occupancy_data/datatest2.txt")
#create an official table
template = [
{
"name": "description",
"type": "const",
"value": "this is the occupancy data table"
},
{
"name": "columns",
"type": "referencer",
},
{
"name": "timeField",
"type": "referencer",
},
{
"name": "variables",
"type": "folder",
}
]
self.create_node("root", name="occupancy", type="table")
self.create_nodes_from_template("root.occupancy", template=template)
for var in occData:
path = "root.occupancy.variables."+var
self.create_node_from_path(path,{"type":"column"})
self.set_value(path,occData[var])
self.add_forward_refs("root.occupancy.columns",[path])
self.add_forward_refs("root.occupancy.timeField",["root.occupancy.variables.date"])
#now create the classification
self.create_node("root.occupancy", name="classification", type="column")
self.set_value("root.occupancy.classification", [0]*len(occData[list(occData.keys())[0]]))
self.add_forward_refs("root.occupancy.columns", ["root.occupancy.classification"])
#create another TS-widget
self.create_node_from_path('root.visualization.widgets.timeseriesOccupancy', {"type": "widget"})
self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy',modeltemplates.timeseriesWidget)
self.create_nodes_from_template('root.visualization.widgets.timeseriesOccupancy.buttons.button1',modeltemplates.button)
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectedVariables',["root.occupancy.variables.Temperature"])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.selectableVariables',["root.occupancy.variables"])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.table',['root.occupancy'])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.background',['root.occupancy.classification'])
self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {"0": "brown", "1": "yellow", "-1": "blue", "default": "white"}) #match annotation colors
#self.set_value('root.visualization.widgets.timeseriesOccupancy.backgroundMap', {"0": "blue", "1": "black", "-1": "blue", "default": "white"}) #match annotation colors
self.set_value('root.visualization.widgets.timeseriesOccupancy.hasAnnotation.tags',["busy","free"])
#now create the logistic regression
self.create_nodes_from_template('root',[self.templates["logisticregression.logisticRegressionTemplate"]])
self.add_forward_refs('root.logisticRegression.input',['root.occupancy.variables.Temperature', 'root.occupancy.variables.Light','root.occupancy.variables.CO2'])
self.add_forward_refs('root.logisticRegression.output', ['root.occupancy.classification'])
self.add_forward_refs('root.logisticRegression.annotations',['root.visualization.widgets.timeseriesOccupancy.hasAnnotation.newAnnotations'])
self.set_value('root.logisticRegression.categoryMap', {"busy": 1, "free": 0})
#also hook the button on it
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.buttons.button1.onClick',['root.logisticRegression'])
self.add_forward_refs('root.visualization.widgets.timeseriesOccupancy.observer',['root.logisticRegression.executionCounter']) # observe the execution of the scorer
self.show()
elif testNo == 3:
# make some nodes
for id in range(10):
self.create_node_from_path("root.add.var"+str(id), {"type": "variable", "value": id+100})
for id in range(100):
self.create_node_from_path("root.remove.var"+str(id), {"type": "variable", "value": id+100})
self.create_node_from_path("root.change_name_one")
self.create_node_from_path("root.change_value")
self.create_node_from_path("root.move.first")
self.create_node_from_path("root.move.second")
self.create_node_from_path("root.refs",properties={"type":"referencer"})
self.add_forward_refs("root.refs",["root.move.first","root.move.second","root.move"])
#now start a thread that changes the tree periodically
def __update_tree():
while True:
time.sleep(3.0)
with self.lock:
self.logger.debug("__update_tree")
self.create_node_from_path("root.add.dyn"+str(uuid.uuid4()))
removeFolder = self.get_id("root.remove")
if self.model[removeFolder]["children"]:
self.delete_node(self.model[removeFolder]["children"][0])
id = self.get_id("root.change_name_one")
if id:
self.model[id]["name"]="change_name_two"
else:
id = self.get_id("root.change_name_two")
self.model[id]["name"]="change_name_one"
id = self.get_id("root.move")
self.model[id]["children"].reverse()
id=self.get_id("root.refs")
self.model[id]["forwardRefs"].reverse()
self.set_value("root.change_value",int(uuid.uuid4())%100)
self.testThread = threading.Thread(target=__update_tree)
self.testThread.start()
if __name__ == '__main__':
def test1():
m=Model()
m.create_node("root",name="folder1")
m.create_node("root.folder1",name="folder2")
m.create_node("2",name="second")
m.create_node("root",name="myreferencer",type="referencer")
m.create_node("root.folder1",name="myvar",type="variable")
m.set_value("root.folder1.myvar",44.5)
m.add_forward_refs("root.myreferencer",["root.folder1"])
m.add_property("root.folder1.folder2","uasource","192.168.5.6")
m.show()
m.get_model()
m.delete_node("root.myreferencer")
return m
def test_template():
m=Model()
template = {
"myfunction": {
"type": "function",
"value": "someValue",
"opcua":"opc.tcp://129.160.1.1:4880::n2=2;s=mystrin"
},
"myreferencer": {
"type": "referencer",
"forwardRefs": ['.myfolder.var1', '.myfolder.var2', '.myfolder.var3']
},
"myfolder": {
"type": "folder",
"children": {
"var1": {"type": "const", "value": "1"},
"var2": {"type": "variable"},
"var3": {"type": "timeseries"},
}
},
}
m.create_nodes_from_template(template=template)
m.show()
def save_test():
print("save and load test")
m=Model()
m.create_test()
m.save("savetest")
n=Model()
n.load("savetest")
if len(n.get_model())!= len(m.get_model()):
print("unequal size")
return False
#now compare
mModel = m.get_model()
nModel = n.get_model()
for nodeId in mModel:
#print("check",nodeId)
try:
if nModel[nodeId]!=mModel[nodeId]:
print("unequal before after ",nodeId,m[nodeId],n[nodeId])
return False
except:
print("cant find",nodeId)
return False
print("savetest passed")
return True
def plugintest():
m=Model()
m.create_node("root", name="folder1")
m.create_nodes_from_template("root.folder1",m.templates["testfunction.delayFunctionTemplate"])
m.show()
m.execute_function("root.folder1.delayFunction")
statusNode = m.get_node("root.folder1.delayFunction.status")
progressNode = m.get_node("root.folder1.delayFunction.progress")
while(statusNode.get_value()!="finished"):
print("progress is",progressNode.get_value())
time.sleep(0.3)
print("execution re===================")
m.show()
def getnodetest():
m=Model()
m.create_node("root", name="folder1")
m.create_node("root.folder1", name="folder2")
m.create_node("root.folder1", name="myvar", type="variable")
myvar = m.get_node("root.folder1.myvar")
myvar.set_value(33)
print("value",myvar.get_value())
def testfunctions_test():
m = Model()
m.create_test(1)
m.show()
table= m.get_timeseries_table(["root.variables.f0","root.variables.f1","root.variables.time"],noBins=25)
print("shape",table.shape)
for row in table.T:
for elem in row:
print(str("%3.7f"%elem)," ",end="")
print("")
def time_conver_test():
d1=datetime.datetime(2018,1,1,0,0,0,tzinfo = pytz.UTC)
print(d1)
s1 = date2secs(d1)
print(s1)
d2 = secs2date(s1)
print(d2)
d3 ="2018-01-01T00:10:08.445+02:00"
print(d3)
d4=dateutil.parser.parse(d3)
print(d4)
s4=date2secs(d4)
print(s4)
d5=secs2date(s4)
print(d5)
def table_test():
m=Model()
print("this test creates a table and writes some data in")
template = [
{
"name": "type",
"type": "const",
"value": "timeSeriesTable"
},
{
"name":"description",
"type": "const",
"value": "this is a great table"
},
{
"name":"data",
"type":"folder",
"children":[
{"name":"var1","type": "column","value":[]},
{"name":"var2","type": "column","value":[]},
{"name":"var3","type": "column","value":[]},
{"name":"time","type": "column","value":[]}
]
},
{
"name":"columns",
"type": "referencer",
"forwardRefs": ['.data.var1', '.data.var2', '.data.var3',".data.time"]
},
{
"name":"timeField",
"type": "referencer",
"forwardRefs":['.data.time']
},
{
"name": "numberOfRows",
"type": "variable",
"value":0
}
]
m.create_node("root", name="mytable",type="table")
m.create_nodes_from_template("root.mytable",template=template)
m.show()
#now write some data with autocreates
mytime = datetime.datetime.now(pytz.timezone("CET"))
myepoch=date2secs(mytime)
blob = {"root.mytable.data.var1":1,"root.mytable.data.var2":2,"root.mytable.data.time":myepoch,"root.mytable.data.newvar":99}
m.append_table(blob)
m.show()
#now add more data but leave out var
blob = {"root.mytable.data.var1": 10, "root.mytable.data.var2": 20, "root.mytable.data.time": myepoch}
m.append_table(blob)
blob = {"root.mytable.data.var1": 10, "root.mytable.data.var2": 20, "root.mytable.data.var4": 4, "root.mytable.data.time": myepoch}
m.append_table(blob)
m.show()
def test_table_autocreate():
mytime = datetime.datetime.now(pytz.timezone("CET"))
myepoch=date2secs(mytime)
blob = {"root.data.var1":1,"root.data.var2":2,"root.folder.time":myepoch,"root.data.newvar":99}
m=Model()
m.append_table(blob)
m.show()
def test_create_from_path():
m=Model()
m.create_node_from_path("root.myfolder.myfolder2.var",{"type":"variable","value":33})
m.show()
def test_get_children():
m=Model()
m.create_test()
nodes = m.get_node_with_children('root.folder2')
#lastnode = '10'
#print(m.get_path(lastnode))
print(json.dumps(nodes,indent=4))
def test_create():
m=Model()
m.create_test(1)
m.show()
def test_get_forwards():#
#in this test, we check the forwards get results over folders, referencers etc.
m=Model()
m.create_node_from_path("root.folder.var1",{"type":"variable"})
m.create_node_from_path("root.folder.var2", {"type": "variable"})
m.create_node_from_path("root.folder.var3", {"type": "variable"})
m.create_node_from_path("root.ref1", {"type": "referencer"})
m.create_node_from_path("root.ref2", {"type": "referencer"})
m.add_forward_refs("root.ref1",["root.folder"])
m.add_forward_refs("root.ref2", ["root.ref1"])
m.show()
res=m.get_leaves("root.ref1")
print(res)
for k in res:
print(k["name"])
res = m.get_leaves("root.ref2")
for k in res:
print(k["name"])
def pickle_save():
import pickle
m=Model()
m.create_test(2)
# write python dict to a file
output = open('pickle_save.pkl', 'wb')
pickle.dump(m.get_model(), output)
output.close()
n=Model()
# read python dict back from the file
pkl_file = open('pickle_save.pkl', 'rb')
restore = pickle.load(pkl_file)
pkl_file.close()
print("compare after pickle restre",restore==m.get_model())
if __name__ == '__main__':
#############
#test1()
#ts_test1()
#test_template()
save_test()
pickle_save()
#plugintest()
#getnodetest()
#table_query_test()
#testfunctions_test()
#time_conver_test()
#test_create_from_path()
#table_test()
#test_table_autocreate()
#test_get_children()
#test_get_forwards()
#test_create()
#read in the command line options:
# demo1: create the test for the demo1, and store it in file (option2)
#
if len(sys.argv) > 1:
if sys.argv[1] == "demo1":
fileName = sys.argv[2]
print("creating demo and save as ",fileName)
m = Model()
m.create_test()
m.show()
fileName = sys.argv[2]
m.save(fileName)
|
import random
def busqueda_binaria(lista, comienzo, final, objetivo):
print(f"buscando {objetivo} entre {lista[comienzo]} y {lista[final - 1]}")
if comienzo > final:
return False
medio = (comienzo + final) // 2
if lista[medio] == objetivo:
return True
elif lista[medio] < objetivo:
return busqueda_binaria(lista, medio + 1, final, objetivo)
else:
return busqueda_binaria(lista, comienzo, medio - 1, objetivo)
if __name__ == "__main__":
tamano_de_lista = int(input("De que tamano es la lista? "))
objetivo = int(input("Que numero quieres encontrar? "))
lista = sorted([random.randint(0, 100) for i in range(tamano_de_lista)])
encontrado = busqueda_binaria(lista, 0, len(lista), objetivo)
print(lista)
print(f'The element {objetivo} {'is' if encontrado else 'is not'} in the list')
| import random
def busqueda_binaria(lista, comienzo, final, objetivo):
print(f"buscando {objetivo} entre {lista[comienzo]} y {lista[final - 1]}")
if comienzo > final:
return False
medio = (comienzo + final) // 2
if lista[medio] == objetivo:
return True
elif lista[medio] < objetivo:
return busqueda_binaria(lista, medio + 1, final, objetivo)
else:
return busqueda_binaria(lista, comienzo, medio - 1, objetivo)
if __name__ == "__main__":
tamano_de_lista = int(input("De que tamano es la lista? "))
objetivo = int(input("Que numero quieres encontrar? "))
lista = sorted([random.randint(0, 100) for i in range(tamano_de_lista)])
encontrado = busqueda_binaria(lista, 0, len(lista), objetivo)
print(lista)
print(f'The element {objetivo} {"is" if encontrado else "is not"} in the list')
|
import psutil
import os
import re
from enum import Enum
from .logical import Logic
from .detect import DetectBase
class DiskDetectMode(Enum):
PERCENT = {"name": "percent", "logic": Logic.GT}
USAGE = {"name": "usage", "logic": Logic.GT}
FREE = {"name": "free", "logic": Logic.LT}
class DiskTypeError(TypeError):
pass
class DiskValueError(ValueError):
pass
class DiskDetect(DetectBase):
def __init__(
self,
path,
threshold,
mode=DiskDetectMode.PERCENT.value["name"],
):
if not isinstance(path, str):
raise DiskTypeError("path must be string type.")
self._path = path
if not isinstance(threshold, int) and not isinstance(threshold, float):
raise DiskTypeError("threshold must be float or int type.")
self._threshold = threshold
if not isinstance(mode, str):
raise DiskTypeError("mode must be string type.")
if len([x for x in DiskDetectMode
if re.match(
rf'{x.value['name']}',
f'{mode}',
flags=re.IGNORECASE
) is not None]) == 0:
raise DiskValueError(
f"""{mode} is invalid value.-\
valid value {[x.value['name'] for x in DiskDetectMode]}"""
)
self._mode = [
x for x in DiskDetectMode
if re.match(
rf'{x.value['name']}',
f'{mode}',
flags=re.IGNORECASE
) is not None][0]
@property
def path(self):
return self._path
@property
def resource(self):
return "disk"
@property
def threshold(self):
return self._threshold
@property
def mode(self):
return self._mode
@property
def check(self):
"""
Check over Disk threshold.
over threshold: return False
within threshold: return True
"""
res = eval(f'self.{self._mode.value['name']}(self.path)')
if res is None:
raise DiskValueError("disk value must be not None.")
if eval(f"{res} {self._mode.value["logic"].value} {self._threshold}"):
return False
else:
return True
@staticmethod
def _usage(path):
if not isinstance(path, str):
raise TypeError("path must be string type.")
if not os.path.exists(path):
raise FileNotFoundError(f"{path} not exists")
return psutil.disk_usage(path)
@staticmethod
def percent(path):
return DiskDetect._usage(path).percent
@staticmethod
def free(path):
return DiskDetect._usage(path).free
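# --- hedged usage example (editor's addition, not part of the module): check whether the root
# filesystem on a unix-like system stays below 90 % usage; the check property returns False once
# the threshold is exceeded
#
# detector = DiskDetect("/", 90, mode="percent")
# if not detector.check:
#     print("disk usage above threshold")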
| import psutil
import os
import re
from enum import Enum
from .logical import Logic
from .detect import DetectBase
class DiskDetectMode(Enum):
PERCENT = {"name": "percent", "logic": Logic.GT}
USAGE = {"name": "usage", "logic": Logic.GT}
FREE = {"name": "free", "logic": Logic.LT}
class DiskTypeError(TypeError):
pass
class DiskValueError(ValueError):
pass
class DiskDetect(DetectBase):
def __init__(
self,
path,
threshold,
mode=DiskDetectMode.PERCENT.value["name"],
):
if not isinstance(path, str):
raise DiskTypeError("path must be string type.")
self._path = path
if not isinstance(threshold, int) and not isinstance(threshold, float):
raise DiskTypeError("threshold must be float or int type.")
self._threshold = threshold
if not isinstance(mode, str):
raise DiskTypeError("mode must be string type.")
if len([x for x in DiskDetectMode
if re.match(
rf'{x.value["name"]}',
f'{mode}',
flags=re.IGNORECASE
) is not None]) == 0:
raise DiskValueError(
f"""{mode} is invalid value.-\
valid value {[x.value['name'] for x in DiskDetectMode]}"""
)
self._mode = [
x for x in DiskDetectMode
if re.match(
rf'{x.value["name"]}',
f'{mode}',
flags=re.IGNORECASE
) is not None][0]
@property
def path(self):
return self._path
@property
def resource(self):
return "disk"
@property
def threshold(self):
return self._threshold
@property
def mode(self):
return self._mode
@property
def check(self):
"""
Check over Disk threshold.
over threshold: return False
within threshold: return True
"""
res = eval(f'self.{self._mode.value["name"]}(self.path)')
if res is None:
raise DiskValueError("disk value must be not None.")
if eval(f"{res} {self._mode.value['logic'].value} {self._threshold}"):
return False
else:
return True
@staticmethod
def _usage(path):
if not isinstance(path, str):
raise TypeError("path must be string type.")
if not os.path.exists(path):
raise FileNotFoundError(f"{path} not exists")
return psutil.disk_usage(path)
@staticmethod
def percent(path):
return DiskDetect._usage(path).percent
@staticmethod
def free(path):
return DiskDetect._usage(path).free
|
import discord
from discord.ext import commands
import requests
import asyncio
from os import environ
class HyLink(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="verify")
async def verify_(self, ctx):
await ctx.send("✉️ Instructions sent in direct messages!")
embed = discord.Embed(
description=f'**Instructions:**\n1) Use your Minecraft client to connect to Hypixel.\n2) Once connected, and while in the lobby, right click "My Profile" in your hotbar. It is option #2.\n3) Click "Social Media" - this button is to the left of the Redstone block (the "Status" button).\n4) Click "Discord" - it is the second last option.\n5) Paste your Discord username into chat and hit enter. For reference your username is: `{ctx.author}`.\n6) You\'re done! Wait around 30 seconds and then click the :white_check_mark: reaction to continue.',
color=discord.Colour.purple(),
)
embed.set_image(
url="https://thumbs.gfycat.com/DentalTemptingLeonberger-size_restricted.gif"
)
embed.set_author(
name="Link your Hypixel Profile", icon_url=self.bot.user.avatar_url
)
message = await ctx.author.send(embed=embed)
await message.add_reaction("✅")
def check(reaction, user):
return str(reaction.emoji) in ["✅"] and user != self.bot.user
try:
reaction, user = await self.bot.wait_for(
"reaction_add", check=check, timeout=450
)
except asyncio.TimeoutError:
await ctx.author.send("**Timeout:** You didn't react in time.")
else:
if str(reaction.emoji) == "✅":
embed = discord.Embed(color=discord.Colour.purple())
embed.set_author(
name="Please enter your Minecraft username.",
icon_url=self.bot.user.avatar_url,
)
await ctx.author.send(embed=embed)
ign = await self.bot.wait_for("message")
r = requests.get(
f"https://api.hypixel.net/player?key={environ.get("API_KEY")}&name={ign.content}"
)
if r.json()["player"]["socialMedia"]["links"]["DISCORD"] == ctx.author:
await ctx.author.send("✅ You've been verified!")
await ctx.author.edit(nick=ign.content)
await ctx.author.add_roles("Verified")
else:
await ctx.author.send(
":x: Couldn't verify you, this could be due to you not having your discord linked, or your IGN is incorrect."
)
bot = commands.Bot(command_prefix="!", intents=intents=discord.Intents(guilds=True, messages=True, reactions=True))
bot.add_cog(HyLink(bot))
bot.run(environ.get("TOKEN"))
| import discord
from discord.ext import commands
import requests
import asyncio
from os import environ
class HyLink(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="verify")
async def verify_(self, ctx):
await ctx.send("✉️ Instructions sent in direct messages!")
embed = discord.Embed(
description=f'**Instructions:**\n1) Use your Minecraft client to connect to Hypixel.\n2) Once connected, and while in the lobby, right click "My Profile" in your hotbar. It is option #2.\n3) Click "Social Media" - this button is to the left of the Redstone block (the "Status" button).\n4) Click "Discord" - it is the second last option.\n5) Paste your Discord username into chat and hit enter. For reference your username is: `{ctx.author}`.\n6) You\'re done! Wait around 30 seconds and then click the :white_check_mark: reaction to continue.',
color=discord.Colour.purple(),
)
embed.set_image(
url="https://thumbs.gfycat.com/DentalTemptingLeonberger-size_restricted.gif"
)
embed.set_author(
name="Link your Hypixel Profile", icon_url=self.bot.user.avatar_url
)
message = await ctx.author.send(embed=embed)
await message.add_reaction("✅")
def check(reaction, user):
return str(reaction.emoji) in ["✅"] and user != self.bot.user
try:
reaction, user = await self.bot.wait_for(
"reaction_add", check=check, timeout=450
)
except asyncio.TimeoutError:
await ctx.author.send("**Timeout:** You didn't react in time.")
else:
if str(reaction.emoji) == "✅":
embed = discord.Embed(color=discord.Colour.purple())
embed.set_author(
name="Please enter your Minecraft username.",
icon_url=self.bot.user.avatar_url,
)
await ctx.author.send(embed=embed)
ign = await self.bot.wait_for("message")
r = requests.get(
f"https://api.hypixel.net/player?key={environ.get('API_KEY')}&name={ign.content}"
)
if r.json()["player"]["socialMedia"]["links"]["DISCORD"] == ctx.author:
await ctx.author.send("✅ You've been verified!")
await ctx.author.edit(nick=ign.content)
await ctx.author.add_roles("Verified")
else:
await ctx.author.send(
":x: Couldn't verify you, this could be due to you not having your discord linked, or your IGN is incorrect."
)
bot = commands.Bot(command_prefix="!", intents=intents=discord.Intents(guilds=True, messages=True, reactions=True))
bot.add_cog(HyLink(bot))
bot.run(environ.get("TOKEN"))
|
"""
Start from a directory of images and solve for all of the particle positions, orientations, and forces.
"""
import numpy as np
import os
import cv2
import time
import pickle
import inspect
import ast
import tqdm
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from IPython.display import clear_output
import pepe
from pepe.preprocess import checkImageType, lightCorrectionDiff, circularMask
from pepe.analysis import initialForceSolve, forceOptimize, gSquared, g2ForceCalibration, singleParticleForceBalance, forceOptimizeArgDTypes
from pepe.tracking import houghCircle, convCircle, angularConvolution, circleTrackArgDTypes
from pepe.simulate import genSyntheticResponse
from pepe.utils import preserveOrderArgsort, rectangularizeForceArrays, explicitKwargs, parseList
from pepe.visualize import genColors, visCircles, visForces, visContacts, visRotation
from pepe.topology import findPeaksMulti
# All of the dtypes of the args for the below method
# The following args are not included, because they are not
# important: progressBarOffset, progressBarTitle
forceSolveArgDTypes = {"imageDirectory": str,
"imageExtension": str,
"imageEndIndex": int,
"imageStartIndex": int,
"carryOverAlpha": bool,
"carryOverForce": bool,
"showProgressBar": bool,
"lightCorrectionImage": str,
"lightCorrectionVerticalMask": str,
"lightCorrectionHorizontalMask": str,
"g2CalibrationImage": str,
"g2CalibrationCutoffFactor": float,
"maskImage": str,
"cropXMin": int,
"cropXMax": int,
"circleDetectionMethod": str,
"guessRadius": float,
"fSigma": float,
"pxPerMeter": float,
"brightfield": bool,
"contactPadding": int,
"g2MaskPadding": int,
"contactMaskRadius": int,
"peBlurKernel": int,
"requireForceBalance": bool,
"circleTrackingChannel": int,
"circleTrackingKwargs": dict,
"photoelasticChannel": int,
"optimizationKwargs": dict,
"maxBetaDisplacement": float,
"forceNoiseWidth": float,
"alphaNoiseWidth": float,
"saveMovie": bool,
"pickleArrays": bool,
"outputRootFolder": str,
"outputExtension": str,
"genFitReport": bool,
"performOptimization": bool,
"inputSettingsFile": str,
"debug": bool}
# Decorator that allows us to identify which keyword arguments were explicitly
# passed to the function, and which were left as default values. See beginning
# of method code for more information/motivation.
@explicitKwargs()
def forceSolve(imageDirectory, guessRadius=0.0, fSigma=0.0, pxPerMeter=0.0, brightfield=True, contactPadding=15, g2MaskPadding=2, contactMaskRadius=30, lightCorrectionImage=None, lightCorrectionHorizontalMask=None, lightCorrectionVerticalMask=None, g2CalibrationImage=None, g2CalibrationCutoffFactor=.9, maskImage=None, cropXMin=None, cropXMax=None, peBlurKernel=3, imageExtension='bmp', requireForceBalance=False, imageStartIndex=None, imageEndIndex=None, carryOverAlpha=True, carryOverForce=True, circleDetectionMethod='convolution', circleTrackingKwargs={}, circleTrackingChannel=0, maxBetaDisplacement=.5, photoelasticChannel=1, forceNoiseWidth=.03, alphaNoiseWidth=.01, optimizationKwargs={}, performOptimization=True, debug=False, showProgressBar=True, progressBarOffset=0, progressBarTitle=None, saveMovie=False, outputRootFolder='./', inputSettingsFile=None, pickleArrays=True, genFitReport=True, outputExtension=''):
"""
Complete pipeline to solve for forces and particle positions for all image files
in a directory. Results will be returned and potentially written to various files.
See `Returns` section for more information.
Expects all particles to be the same (or very similar) size. This assumption is made
by the calculation of the gradient squared calibration value, which is computed just
once using the guess of the radii. This should not be a problem if the radii are only
slightly varied (~10 pixels or something) but any more than that and errors will begin
to accumulate.
This method has **a lot** of arguments; it is intended to be used once reasonable
values for all of these have already been found. While the `debug` option for this function
is very helpful, it is recommended to utilize the various notebooks/examples to find good
choices for parameters first.
The output readme file can also serve as a cache of the parameter values/settings, which
can be passed back to future calls of this method using the `inputSettingsFile` argument.
Parameters
----------
imageDirectory : str
An absolute or relative path to the directory containing the images that are
to be analyzed. The names of the images need not follow any particular naming
scheme, but they should be such that sorting the list alphabetically will give
the images in their proper order.
guessRadius : float
The radius of the particles to be detected, in pixels. As of now, this value will
be taken as the particle radius, but future versions may be able to vary this to
find the optimal value.
Currently no support for particles of different sizes.
fSigma : float
Stress optic coefficient, relating to material thickness, wavelength of light and
other material properties (denoted as C in most literature; sometimes also called the
"stress optic coefficient").
pxPerMeter : float
The number of pixels per meter in the images. Depends on the camera, lens, and
zoom settings used to capture the images.
Note that this is **not** meters per pixel (i.e. not the inverse), as is used
in some of the other force solving implementations.
brightfield : bool
Whether the images are captured using a brightfield polariscope (`True`) or
a darkfield polariscope (`False`).
contactPadding : int
Maximum distance (in pixels) between a particle's edge and the wall or the edges of two
particles that will still be considered a potential force-bearing contact.
g2MaskPadding : int or float
Number of pixels to ignore at the edge of each particle when calculating the average G^2.
If float value < 1 is passed, gradient mask radius will be taken as that percent of the full
particle radius. A value of 0 means no padding is included.
contactMaskRadius : float
The radius of the circular mask that will be constructed around each contact to estimate
the magnitude of the force using the gradient squared in that region.
lightCorrectionImage : str or np.ndarray[H,W[,C]]
The path to an image (or an array representing an image) that contains no particles, and
can be used to correct for any light gradients present in the actual data.
lightCorrectionHorizontalMask : str or np.ndarray[H,W[,C]]
A mask array, containing values of `0` or `1`, with the latter representing areas over which
the horizontal light correction should be calculated. Can also be a path to an image.
lightCorrectionVerticalMask : str or np.ndarray[H,W[,C]]
A mask array, containing values of `0` or `1`, with the latter representing areas over which
the vertical light correction should be calculated. Can also be a path to an image.
g2CalibrationImage : str or np.ndarray[H,W,C]
An image with a single particle (or at least one particle) that has no force acting on it.
Used to determine the base level of gradient squared (due to noise) for a free particle. Can
also be a path to an image.
g2CalibrationCutoffFactor : float
The factor that is multiplied by the mean gradient squared value of the particles in the
calibration image. Any particle that has an average gradient squared value below the
calibration value multiplied by this factor will be assumed to have no forces acting on it.
maskImage : str or np.ndarray[H,W,C]
A mask array, containing values of `0` or `1`, with the latter representing the regions of
importance for the image. Used in detecting particles, generating initial guesses, and
calculating error during non-linear optimization. Can also be a path to an image.
cropXMin : int or None
Left bound to crop down the image in the x direction.
cropXMax : int or None
Right bound to crop down the image in the x direction.
peBlurKernel : int
The kernel size that will be used for blurring the photoelastic channel of each image, to
reduce noise. Should be an odd integer.
imageExtension : str
The extension of the image files that will be read in from `imageDirectory`. Should not include
the '.' before the extension.
requireForceBalance : bool
Whether to impose particle-wise force balance at each step (`True`) or to take the results of
the optimization process as they are (`False`).
Currently WIP, and does not do anything.
forceBalanceWeighting : float
If a non-zero positive value, adds a contribution to the optimization cost
pertaining to how well the ensemble of forces satisfy force balance.
imageStartIndex : int or None
The index of which image to start at when analyzing the files in `imageDirectory`. Value
of `None` will start at the first (alphabetically sorted) image.
imageEndIndex : int or None
The index of which image to end at when analyzing the files in `imageDirectory`. Value
of `None` will end at the last (alphabetically sorted) image.
circleDetectionMethod : ['convolution' or 'hough']
Whether to use the convolution or hough circle detection method to identify particles.
See `pepe.tracking.convCircle()` and `pepe.tracking.houghCircle()` for more information.
circleTrackingKwargs : **kwargs
Keyword arguments to be passed to the selected circle tracking function.
See `pepe.tracking.convCircle()` and `pepe.tracking.houghCircle()` for more information.
circleTrackingChannel : int
The channel of the image that will be used to track the particles. `0` for red, `1` for
green, and `2` for blue.
maxBetaDisplacement : float
The maximum distance (angle) that a force can move between frames and still be identified
as the same force. If a force moves more than this value, it will still be recorded as a force,
but will be considered a new and independent force from any of the ones in the previous frame.
photoelasticChannel : int
The channel of the image that will be used to gauge the photoelastic response. `0` for red, `1` for
green, and `2` for blue.
forceNoiseWidth : float or None
The width of the gaussian distribution (centered at 0) that noise is sampled from to add to the
force guesses (potentially from the previous frame). This is done to avoid getting stuck in a local
minimum for too long (adds some Monte-Carlo-esque behavior to the solving).
alphaNoiseWidth : float or None
The width of the gaussian distribution (centered at 0) that noise is sampled from to add to the
alpha guesses (potentially from the previous frame). This is done to avoid getting stuck in a local
minimum for too long (adds some Monte-Carlo-esque behavior to the solving).
optimizationKwargs : **kwargs
Keyword arguments to be passed to the optimization process.
For more information, see `pepe.analysis.forceOptimize()`.
performOptimization : bool
Whether or not to perform optimization on the particles.
Mostly included as a debug option, but any real data analysis should
utilize the optimization, as the initial guessing is often not nearly
accurate enough to get any real results.
debug : bool
Whether to print progress updates for each frame to the screen (`True`) or not (`False`).
showProgressBar : bool
Whether to show a progress bar throughout the analysis (`True`) or not (`False`). Uses
`tqdm` library.
progressBarOffset : int
The number of lines to offset the progress bar by. Generally an internal variable
used when multiple threads are active.
progressBarTitle : str
The text to be written to the left of the progress bar. Generally an internal variable
controlled by some solving script.
saveMovie : bool
Whether to save a compiled gif of the reconstructed forces at each frame at the end (`True`)
or not (`False`).
outputRootFolder : str
The location where the output folder (potentially containing the movie, pickle files, readme, etc.)
will be created. Output folder itself will be named after the `imageDirectory`, with '_Synthetic'
appended to the end.
pickleArrays : bool
Whether to save the forces, betas, alphas, centers, and radii as pickle files (`True`) or not (`False`).
Files will be located in the output folder (see `outputRootFolder`).
inputSettingsFile : str
Path to a readme file containing parameters for the solving process, likely generated from
a previous iteration of the program. Explicitly passed arguments will override those that
are included in the settings file.
Currently WIP and does not do anything.
genFitReport : bool
Whether or not to generate a fit report of the results, including errors per frame,
examinations of all particles/forces, and settings, compiled in a latex pdf.
Will generate both the compiled file 'FitReport.pdf' and the source directory
'FitReport_src/'.
Returns
-------
rectForceArr : list[P](np.ndarray[F,T])
A list of arrays representing the force magnitude for each force on each particle.
rectAlphaArr : list[P](np.ndarray[F,T])
A list of arrays representing the alpha angle for each force on each particle.
rectBetaArr : list[P](np.ndarray[F,T])
A list of arrays representing the beta angle for each force on each particle.
rectCenterArr : np.ndarray[P,T,2]
Particle centers for each timestep. Elements take on a value of `[np.nan, np.nan]`
if the particle does not exist for a given timestep.
rectRadiusArr : np.ndarray[P,T]
Particle radii for each timestep. Elements take on a value of `np.nan` if the particle
does not exist for a given timestep.
rectAngleArr : np.ndarray[P,T]
Relative rotation angle of each particle at each timestep, as tracked via angular
convolution against a reference image. Elements take on a value of `np.nan` if the
particle is not fully in frame at a given timestep.
Depending on kwarg values, several files may be created in the output
folder, which will be located in `outputRootFolder` and named according
to: '<`imageDirectory`>_Synthetic/'.
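Examples
--------
A minimal sketch of a typical call; the directory and calibration values below
are illustrative placeholders only, and should be replaced with values appropriate
for your own experimental setup:

>>> from pepe.auto import forceSolve
>>> forces, alphas, betas, centers, radii, angles = forceSolve(
...     './my_experiment_images/', guessRadius=160, fSigma=140.,
...     pxPerMeter=10000., brightfield=False, maskImage='./mask.bmp')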
"""
overallStartTime = time.perf_counter()
# For the sake of saving the options to a readme file (and potentially
# reading them back out), it is easiest to keep all of the settings in a
# dictionary
# We have 3 layers of precedence for reading in settings:
# 1. Explicitly passed kwarg
# 2. Read in from settings file
# 3. Default value of a kwarg
# So we assign the elements of our settings dict in opposite order
# 3. All of the default values
# The following variables are not present:
# progressBarOffset, progressBarTitle
# This is because we don't care about saving them
settings = {"imageDirectory": os.path.abspath(imageDirectory) + '/', # Convert to absolute path
"imageExtension": imageExtension,
"imageEndIndex": imageEndIndex,
"imageStartIndex": imageStartIndex,
"carryOverAlpha": carryOverAlpha,
"carryOverForce": carryOverForce,
"lightCorrectionImage": lightCorrectionImage,
"lightCorrectionVerticalMask": lightCorrectionVerticalMask,
"lightCorrectionHorizontalMask": lightCorrectionHorizontalMask,
"g2CalibrationImage": g2CalibrationImage,
"g2CalibrationCutoffFactor": g2CalibrationCutoffFactor,
"maskImage": maskImage,
"cropXMin": cropXMin,
"cropXMax": cropXMax,
"circleDetectionMethod": circleDetectionMethod,
"guessRadius": guessRadius,
"fSigma": fSigma,
"pxPerMeter": pxPerMeter,
"brightfield": brightfield,
"contactPadding": contactPadding,
"g2MaskPadding": g2MaskPadding,
"contactMaskRadius": contactMaskRadius,
"peBlurKernel": peBlurKernel,
"requireForceBalance": requireForceBalance,
"circleTrackingChannel": circleTrackingChannel,
"photoelasticChannel": photoelasticChannel,
"maxBetaDisplacement": maxBetaDisplacement,
"forceNoiseWidth": forceNoiseWidth,
"alphaNoiseWidth": alphaNoiseWidth,
"showProgressBar": showProgressBar,
"saveMovie": saveMovie,
"pickleArrays": pickleArrays,
"outputRootFolder": outputRootFolder,
"outputExtension": outputExtension,
"genFitReport": genFitReport,
"performOptimization": performOptimization,
"debug": debug}
# For the next step, we will need to know all of the data types of each
# argument (to properly cast). Because certain arguments have None as a default
# value, we can't automatically generate this information.
# See above this method for the list of these, since they are also used
# in the TrialObject file
# We need to do the same thing for the kwargs for both
# circle tracking and optimization
# These have both been moved to either the tracking/DTypes.py
# or the analysis/ForceSolve.py files, respectively
# Now add all the dictionaries together
argDTypes = forceSolveArgDTypes.copy()
argDTypes.update(circleTrackArgDTypes)
argDTypes.update(forceOptimizeArgDTypes)
# 2. Anything read in from a settings file
# Note that it works to our advantage that we already have values for most entries,
# since the settings file doesn't include type information, so we need the old
# values to cast properly. 1. is actually contained in here as well, because
# we can just check to see if that variable was explicitly passed before overwriting it.
# 1. The kwargs that are explicitly passed
# This one is a little tricky, because there isn't a super great way by default
# to differentiate whether a kwarg is explicitly passed or is its default value
# (without just keeping a list of default values). I also don't want to
# replace the entire function signature with (*args, **kwargs) because then the
# documentation would not be as good (I think). So the solution here is to define
# a decorator that has the (*args, **kwargs) signature, and to create an attribute
# of this method that is a list of the kwargs that are explicitly passed to the
# decorator. See `pepe.utils.explicitKwargs()` for more info.
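# As a rough illustration only (not the actual pepe implementation), such a
# decorator could look something like:
#
#   def explicitKwargs():
#       def decorator(func):
#           def wrapper(*args, **kwargs):
#               wrapper.explicit_kwargs = list(kwargs.keys())
#               return func(*args, **kwargs)
#           return wrapper
#       return decorator
#
# which is why `forceSolve.explicit_kwargs` can be consulted below when deciding
# whether a value read from the settings file should be kept or overridden.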
if inputSettingsFile is not None:
if os.path.exists(inputSettingsFile):
fileObj = open(inputSettingsFile, 'r')
for line in fileObj:
# Check each line and see if it looks like a dictionary value
split = line.split(':')
# Read settings into the master settings file
if len(split) == 2 and split[0].strip() in argDTypes.keys() and not split[0].strip() in forceSolve.explicit_kwargs:
# Cast to the type of the value already in the dict
if split[1].strip() == 'None':
settings[split[0].strip()] = None
else:
if '[' in split[1]:
settings[split[0].strip()] = parseList(split[1].strip(), dtype=argDTypes[split[0].strip()])
else:
# Bools need a special condition
if argDTypes[split[0].strip()] is bool:
val = split[1].strip() == 'True'
else:
val = argDTypes[split[0].strip()](split[1].strip())
settings[split[0].strip()] = val
else:
print(f'Warning: provided settings file does not exist! Attempting to run regardless...')
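# For reference, the settings file is expected to contain the same 'key: value'
# lines that the readme generator at the end of this method writes out, e.g.
# (values here are purely illustrative):
#
#   guessRadius: 160.0
#   brightfield: False
#   imageExtension: bmp
#
# Each value is cast back to its proper type using the argDTypes lookup built above.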
# While the following variables all have a default value of 0, they cannot actually
# be left as this value. The reason they have a default value is so that if these
# values are indicated by a settings file, we don't want to have to enter them again.
# So here, we make sure we have values for them all, either explicitly passed or read in.
requiredVars = ["guessRadius", "fSigma", "pxPerMeter"]
for r in requiredVars:
assert settings[r] != 0, f'Error: {r} value not supplied explicitly or implicitly!'
# Now carry over the kwargs that are sent to the optimization procedure into that
# dictionary. We can find the names of arguments by using the `inspect` library
possibleOptimKwargs = list(inspect.signature(forceOptimize).parameters.keys())
for pkw in possibleOptimKwargs:
if pkw in settings.keys():
optimizationKwargs[pkw] = settings[pkw]
# We want to do the same thing for the circle tracking function, but we don't
# yet know which circle tracking function we are using yet, so we'll carry
# that over a bit later.
# Find all images in the directory
imageFiles = os.listdir(settings["imageDirectory"])
# This goes before the sorting/extension filtering so we can get more specific
# error messages (and we have another one of these below)
if len(imageFiles) < 1:
print(f'Error: directory {imageDirectory} contains no files!')
return None
imageFiles = np.sort([img for img in imageFiles if img[-len(settings["imageExtension"]):] == settings["imageExtension"]])
# We have to do the end index first, so it doesn't mess up the start one
if settings["imageEndIndex"] is not None:
imageFiles = imageFiles[:min(settings["imageEndIndex"], len(imageFiles))]
if settings["imageStartIndex"] is not None:
imageFiles = imageFiles[max(settings["imageStartIndex"], 0):]
# Make sure we still have some proper images
if len(imageFiles) < 1:
print(f"Error: directory '{settings['imageDirectory']}' contains no files with extension '{settings['imageExtension']}'!")
return None
xB = [settings["cropXMin"], settings["cropXMax"]]
imageSize = checkImageType(settings["imageDirectory"] + imageFiles[0])[:,xB[0]:xB[1],0].shape
# This calculates the light correction across the images
if settings["lightCorrectionImage"] is not None:
# Convert to absolute paths if they are paths
if type(settings["lightCorrectionImage"]) is str:
settings["lightCorrectionImage"] = os.path.abspath(settings["lightCorrectionImage"])
if type(settings["lightCorrectionVerticalMask"]) is str:
settings["lightCorrectionVerticalMask"] = os.path.abspath(settings["lightCorrectionVerticalMask"])
if type(settings["lightCorrectionHorizontalMask"]) is str:
settings["lightCorrectionHorizontalMask"] = os.path.abspath(settings["lightCorrectionHorizontalMask"])
cImageProper = checkImageType(settings["lightCorrectionImage"])[:,xB[0]:xB[1]]
vMask = checkImageType(settings["lightCorrectionVerticalMask"])[:,xB[0]:xB[1]]
hMask = checkImageType(settings["lightCorrectionHorizontalMask"])[:,xB[0]:xB[1]]
if vMask.ndim == 3:
vMask = vMask[:,:,0]
if hMask.ndim == 3:
hMask = hMask[:,:,0]
lightCorrection = lightCorrectionDiff(cImageProper, vMask, hMask)
trackCorrection = lightCorrection[:,:,settings["circleTrackingChannel"]]
peCorrection = lightCorrection[:,:,settings["photoelasticChannel"]]
else:
# It probably isn't great hygiene to have this variable flip between a single
# value and an array, but you can always add a scalar to a numpy array, so
# this is the easiest way (since we haven't loaded any images yet)
trackCorrection = 0
peCorrection = 0
# Load up the mask image, which will be used to remove parts of the images
# that we don't care about, and also potentially indicate which particles
# are close to the boundary.
if settings["maskImage"] is not None:
maskArr = checkImageType(settings["maskImage"])[:,xB[0]:xB[1]]
ignoreBoundary = False
else:
# Same deal as above: scalar multiplication functions exactly how we want
# in the case that we don't have a mask, so it's just easier to do this.
maskArr = 1
ignoreBoundary = True
# Which method we will be using to detect circles
if settings["circleDetectionMethod"] == 'convolution':
circFunc = convCircle
elif settings["circleDetectionMethod"] == 'hough':
circFunc = houghCircle
else:
print(f"Error: circle detection option '{settings['circleDetectionMethod']}' not recognized!")
return None
# Now that we have a circle tracking function, we can carry over any possible kwargs
possibleCircleKwargs = list(inspect.signature(circFunc).parameters.keys())
for pkw in possibleCircleKwargs:
if pkw in settings.keys():
circleTrackingKwargs[pkw] = settings[pkw]
# Calculate the lowest g2 value that we care about, so we can throw everything
# that is below that away when solving (optional)
checkMinG2 = False
if settings["g2CalibrationImage"] is not None:
g2CalImage = checkImageType(settings["g2CalibrationImage"])[:,xB[0]:xB[1]]
g2CalPEImage = cv2.blur((g2CalImage[:,:,settings["photoelasticChannel"]] + peCorrection).astype(np.float64) / 255, (settings["peBlurKernel"],settings["peBlurKernel"]))
# Locate particles
centers, radii = circFunc((g2CalImage[:,:,settings["circleTrackingChannel"]] + trackCorrection) * maskArr[:,:,0], settings["guessRadius"], **circleTrackingKwargs)
# There should only be 1 particle in the calibration image
if len(centers) < 1:
print(f'Warning: Gradient-squared calibration image does not contain any particles! Ignoring...')
else:
particleMask = circularMask(g2CalPEImage.shape, centers[0], radii[0])[:,:,0]
gSqr = gSquared(g2CalPEImage)
minParticleG2 = np.sum(gSqr * particleMask) / np.sum(particleMask) * settings["g2CalibrationCutoffFactor"]
checkMinG2 = True
# The arrays that we will be building for each timestep. It is better to just
# use an untyped list since the arrays are all triangular and whatnot.
centersArr = []
radiiArr = []
forceArr = []
betaArr = []
alphaArr = []
imageArr = []
errorArr = []
# For keeping track of time (though these will only be displayed if debug=True)
trackingTimes = np.zeros(len(imageFiles))
initialGuessTimes = np.zeros(len(imageFiles))
optimizationTimes = np.zeros(len(imageFiles))
miscTimes = np.zeros(len(imageFiles))
totalFailedParticles = 0
errorMsgs = []
if settings["showProgressBar"]:
bar = tqdm.tqdm(total=len(imageFiles)+1, position=progressBarOffset, desc=progressBarTitle)
# Calculate the gradient-squared-to-force calibration value
g2Cal = g2ForceCalibration(settings["fSigma"], settings["guessRadius"], settings["pxPerMeter"])
# The big loop that iterates over every image
for i in range(len(imageFiles)):
image = checkImageType(settings["imageDirectory"] + imageFiles[i])[:,xB[0]:xB[1]]
# Convert to floats on the domain [0,1], so we can compare to the output of
# genSyntheticResponse()
peImage = cv2.blur((image[:,:,settings["photoelasticChannel"]] + peCorrection).astype(np.float64) / 255, (settings["peBlurKernel"],settings["peBlurKernel"]))
# -------------
# Track circles
# -------------
start = time.perf_counter()
centers, radii = circFunc((image[:,:,settings["circleTrackingChannel"]] + trackCorrection) * maskArr[:,:,0], settings["guessRadius"], **circleTrackingKwargs)
# We do some indexing using the centers/radii, so it is helpful
# to have them as an integer type
centers = centers.astype(np.int64)
radii = radii.astype(np.int64)
# We want to keep the order of particles constant, so we make sure
# that they are (to whatever extent possible) in the same order
# as the previous frame. This involves finding the closest neighbor
# from the previous frame.
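# Purely illustrative example of the intended behavior: if the previous frame's
# centers were ordered [A, B, C] and the new detections come back as [B, C, A],
# preserveOrderArgsort should return an index order of [2, 0, 1], so that indexing
# the new centers/radii with it restores the original [A, B, C] ordering.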
if len(centersArr) > 0:
centerOrder = preserveOrderArgsort(centersArr[-1], centers, padMissingValues=False)
centers = centers[centerOrder]
radii = radii[centerOrder]
trackingTimes[i] = time.perf_counter() - start
# ----------------------
# Generate initial guess
# ----------------------
# We run the initial guess regardless of whether we are going to overwrite
# with values from the previous frame. This is because the beta values
# are calculated via the contact network, which should not be carried over
# (since the particles are moving).
forceGuessArr, alphaGuessArr, betaGuessArr = initialForceSolve(peImage,
centers, radii, settings["fSigma"], settings["pxPerMeter"],
settings["contactPadding"], settings["g2MaskPadding"],
contactMaskRadius=settings["contactMaskRadius"],
boundaryMask=maskArr, ignoreBoundary=ignoreBoundary, g2Cal=g2Cal)
if len(centersArr) > 0:
# If we have added/lost particles, we want to carry over the previous values where
# possible, and otherwise take the results of initialForceSolve
# Note that this is the complement to the center order calculated previously:
# this orders the old centers according to the new ones.
# We make the assumption that a particle cannot travel more than its radius in a single frame
oldCenterOrder = preserveOrderArgsort(centers, centersArr[-1], padMissingValues=True, maxDistance=settings["guessRadius"])
# Now find each new particle's old counterpart (if it exists), and then
# line up the forces using the value of beta, such that we can (optionally)
# carry over force magnitudes and alpha values.
for j in range(len(betaGuessArr)):
if oldCenterOrder[j] is None:
continue
# maxBetaDisplacement should be an angle value (in radians) that a force would
# never move in a single frame, but is large enough to not lose a force if it
# moves because of noise/small fluctuations.
forceOrder = preserveOrderArgsort(betaGuessArr[j], betaArr[-1][oldCenterOrder[j]], padMissingValues=True, maxDistance=settings["maxBetaDisplacement"])
#print(f'frame {i}, particle {j}: {forceOrder}')
for k in range(len(forceGuessArr[j])):
if forceOrder[k] is not None:
if settings["carryOverForce"]:
forceGuessArr[j][k] = forceArr[-1][oldCenterOrder[j]][forceOrder[k]]
if settings["carryOverAlpha"]:
alphaGuessArr[j][k] = alphaArr[-1][oldCenterOrder[j]][forceOrder[k]]
# In this case, we want to add a small randomly generated contribution
# so that the algorithm doesn't get stuck in some incorrect loop and so that it
# explores a little more of the parameter space to find a nice minimum at each step
if settings["forceNoiseWidth"] is not None:
forceGuessArr = [np.abs(np.array(f) + np.random.normal(0, settings["forceNoiseWidth"], size=len(f))) for f in forceGuessArr]
if settings["alphaNoiseWidth"] is not None:
alphaGuessArr = [np.abs(np.array(a) + np.random.normal(0, settings["alphaNoiseWidth"], size=len(a))) for a in alphaGuessArr]
initialGuessTimes[i] = time.perf_counter() - trackingTimes[i] - start
# -------------------------------
# Optimize each particle's forces
# -------------------------------
optimizedForceArr = []
optimizedBetaArr = []
optimizedAlphaArr = []
failed = [False for i in range(len(centers))]
# Drop forces on any particles whose g2 is lower than the min value
skipParticles = [False for i in range(len(centers))]
if checkMinG2:
gSqr = gSquared(peImage)
for j in range(len(centers)):
cMask = circularMask(peImage.shape, centers[j], radii[j])[:,:,0]
avgG2 = np.sum(gSqr * cMask) / np.sum(cMask)
skipParticles[j] = avgG2 < minParticleG2
# Mostly just a debug option, so we can test particle tracking
if not settings["performOptimization"]:
optimizedForceArr = forceGuessArr
optimizedAlphaArr = alphaGuessArr
optimizedBetaArr = betaGuessArr
else:
# This is what should run the majority of the time
for j in range(len(centers)):
if not skipParticles[j]:
try:
# We don't need to pass fSigma, pxPerMeter, or brightfield to the method
# because they will get added to optimizationKwargs automatically.
optForceArr, optBetaArr, optAlphaArr, res = forceOptimize(forceGuessArr[j], betaGuessArr[j], alphaGuessArr[j], radii[j], centers[j], peImage,
#settings["fSigma"], settings["pxPerMeter"], settings["brightfield"],
**optimizationKwargs)
optimizedForceArr.append(optForceArr)
optimizedBetaArr.append(optBetaArr)
optimizedAlphaArr.append(optAlphaArr)
except Exception as ex:
print(ex)
errorMsgs.append(f'File {imageFiles[i]}: ' + str(ex) + '\n')
failed[j] = True
totalFailedParticles += 1
# Append empty lists (ie say there are no forces)
#optimizedForceArr.append(forceGuessArr[j])
#optimizedBetaArr.append(betaGuessArr[j])
#optimizedAlphaArr.append(alphaGuessArr[j])
optimizedForceArr.append([])
optimizedBetaArr.append([])
optimizedAlphaArr.append([])
else:
optimizedForceArr.append([])
optimizedBetaArr.append([])
optimizedAlphaArr.append([])
# If necessary, impose force balance on all particles
if requireForceBalance:
for j in range(len(centers)):
optimizedForceArr[j], optimizedAlphaArr[j] = singleParticleForceBalance(optimizedForceArr[j], optimizedAlphaArr[j], optimizedBetaArr[j])
optimizationTimes[i] = time.perf_counter() - initialGuessTimes[i] - trackingTimes[i] - start
# Save all of our values
forceArr.append(optimizedForceArr)
betaArr.append(optimizedBetaArr)
alphaArr.append(optimizedAlphaArr)
centersArr.append(centers)
radiiArr.append(radii)
if settings["debug"] or settings["saveMovie"] or settings["genFitReport"]:
estimatedPhotoelasticChannel = np.zeros_like(peImage, dtype=np.float64)
for j in range(len(centers)):
estimatedPhotoelasticChannel += genSyntheticResponse(np.array(forceGuessArr[j]),
np.array(alphaGuessArr[j]),
np.array(betaGuessArr[j]),
settings["fSigma"], radii[j],
settings["pxPerMeter"], settings["brightfield"], imageSize=peImage.shape,
center=centers[j])
optimizedPhotoelasticChannel = np.zeros(peImage.shape)
for j in range(len(centers)):
optimizedPhotoelasticChannel += genSyntheticResponse(np.array(optimizedForceArr[j]),
np.array(optimizedAlphaArr[j]),
np.array(optimizedBetaArr[j]),
settings["fSigma"], radii[j],
settings["pxPerMeter"], settings["brightfield"], imageSize=peImage.shape,
center=centers[j])
# Error metric: root of the summed squared difference between the reconstruction and the real image
errorArr.append(np.sqrt(np.sum((optimizedPhotoelasticChannel - peImage)**2)))
imgArr = np.zeros((*optimizedPhotoelasticChannel.shape, 3))
img = Image.fromarray(optimizedPhotoelasticChannel*255)
img = img.convert('RGB')
drawObj = ImageDraw.Draw(img)
for j in range(len(centers)):
leftUpPoint = (centers[j][1]-radii[j], centers[j][0]-radii[j])
rightDownPoint = (centers[j][1]+radii[j], centers[j][0]+radii[j])
twoPointList = [leftUpPoint, rightDownPoint]
color = '#FF0000' if failed[j] else '#00AAAA'
drawObj.ellipse(twoPointList, outline=color, fill=None, width=3)
if settings["debug"]:
clear_output(wait=True)
fig, ax = plt.subplots(1, 3, figsize=(12,4))
ax[0].imshow(maskArr * image)
ax[0].set_title('Tracked Particles')
for j in range(len(centers)):
c = plt.Circle(centers[j][::-1], radii[j], label='Detected particles', color='teal', fill=False, linewidth=1)
ax[0].add_artist(c)
# Now add contacts
for k in range(len(betaGuessArr[j])):
contactPoint = centers[j] + radii[j] * np.array([np.cos(betaGuessArr[j][k]), np.sin(betaGuessArr[j][k])])
cc = plt.Circle(contactPoint[::-1], 12, color='red', fill=False, linewidth=1)
ax[1].add_artist(cc)
# Now plot past center positions
#for k in range(len(centersArr)):
# if len(centersArr[k]) >= j:
# cc = plt.Circle(centersArr[k][j][::-1], 5, color=centerColors[j], fill=True)
# ax[0].add_artist(cc)
ax[1].imshow(estimatedPhotoelasticChannel)
ax[1].set_title('Initial Guess for Optimizer\n(known forces)')
ax[2].imshow(img)
ax[2].set_title('Optimized Forces\n(known forces)')
fig.suptitle(imageFiles[i])
fig.tight_layout()
plt.show()
if settings["saveMovie"]:
imageArr.append(img)
miscTimes[i] = time.perf_counter() - optimizationTimes[i] - initialGuessTimes[i] - trackingTimes[i] - start
if settings["debug"]:
print(f'Took {time.perf_counter() - start:.5}s to solve frame:')
print(f"{5*' '}Tracking: {trackingTimes[i]:.3}s")
print(f"{5*' '}Initial guess: {initialGuessTimes[i]:.3}s")
print(f"{5*' '}Optimization: {optimizationTimes[i]:.3}s")
print(f"{5*' '}Misc. processes: {miscTimes[i]:.3}s")
if settings["showProgressBar"]:
bar.update()
# Restructure the arrays to make them more friendly, and to track forces/particles across timesteps
rectForceArr, rectAlphaArr, rectBetaArr, rectCenterArr, rectRadiusArr = rectangularizeForceArrays(forceArr, alphaArr, betaArr, centersArr, radiiArr)
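# After this step (shapes as documented in the docstring above): rectForceArr[p] is an
# (F_p, T) array of force magnitudes for particle p, rectCenterArr is a (P, T, 2) array
# of centers, and rectRadiusArr is a (P, T) array of radii, with np.nan padding wherever
# a particle or force is not present at a given timestep.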
# --------------
# Track rotation
# --------------
# We choose to do this after the actual solving because it helps
# to have the rectangular force arrays.
padding = settings["guessRadius"] + 5
# First, we generate our reference images, which are the first
# time a particle is completely in frame.
refImages = [None] * len(rectCenterArr)
for i in range(len(refImages)):
for j in range(len(imageFiles)):
if not True in np.isnan(rectCenterArr[i][j]):
# Continue to the next frame if this one is partially offscreen
if True in ((rectCenterArr[i][j] - padding) < 0) or True in ((rectCenterArr[i][j] - np.array(imageSize) + padding) > 0):
continue
# Otherwise, this is a good frame, so we save it
refImageFull = checkImageType(settings["imageDirectory"] + imageFiles[j])[:,xB[0]:xB[1],settings["circleTrackingChannel"]]
refImageFull *= circularMask(refImageFull.shape, rectCenterArr[i][j], rectRadiusArr[i][j])[:,:,0]
refImages[i] = refImageFull[int(rectCenterArr[i][j][0] - padding):int(rectCenterArr[i][j][0] + padding), int(rectCenterArr[i][j][1] - padding):int(rectCenterArr[i][j][1] + padding)]
# And move onto the next particle
break
# Same shape as the radius array: 1 value for each timestep, for each particle
rectAngleArr = np.zeros(rectRadiusArr.shape)
# Set all values to be np.nan initially
rectAngleArr[:,:] = np.nan
# Now we compare that reference particle to each subsequent frame
# (probably not best practice that I've switched the indices
# with respect to the previous statements, but :/)
for i in range(len(imageFiles)):
currentImageFull = checkImageType(settings["imageDirectory"] + imageFiles[i])[:,xB[0]:xB[1],settings["circleTrackingChannel"]]
for j in range(len(refImages)):
# Make sure we have a reference image, and the particle is in full view
if True in np.isnan(rectCenterArr[j][i]):
continue
if True in ((rectCenterArr[j][i] - padding) < 0) or True in ((rectCenterArr[j][i] - np.array(imageSize) + padding) > 0):
continue
# Crop out around the particle and mask it
currImage = (circularMask(currentImageFull.shape, rectCenterArr[j][i], rectRadiusArr[j][i])[:,:,0] * currentImageFull)[int(rectCenterArr[j][i][0] - padding):int(rectCenterArr[j][i][0] + padding), int(rectCenterArr[j][i][1] - padding):int(rectCenterArr[j][i][1] + padding)]
# Which is the kernel and which is the reference image doesn't really matter
# (as long as we are consistent)
# We can choose our bounds based on the previous value of the rotation
if i >= 1 and not np.isnan(rectAngleArr[j,i-1]):
rotationBounds = (rectAngleArr[j,i-1] - .1, rectAngleArr[j,i-1] + .1)
else:
# If either i=0 or the previous rotation value is nan, we should start around 0
# anyway (since we define 0 arbitrarily)
rotationBounds = (-.2, .2)
# .003 was chosen based on the data presented in the wiki
# https://github.com/Jfeatherstone/pepe/wiki/Angular-Convolution
thetaArr, convArr = angularConvolution(refImages[j], currImage, dTheta=.003, angleBounds=rotationBounds)
rectAngleArr[j,i] = thetaArr[findPeaksMulti(convArr)[0][0][0]]
# Reuse the name of the folder the images come from as a part of
# the output folder name
# [-2] element for something of form 'path/to/final/folder/' will be 'folder'
# If we are missing the final /, you have to take just the [-1] element
if settings["imageDirectory"][-1] == '/':
outputFolderPath = outputRootFolder + settings["imageDirectory"].split('/')[-2] + f"_Synthetic{settings['outputExtension']}/"
else:
outputFolderPath = outputRootFolder + settings["imageDirectory"].split('/')[-1] + f"_Synthetic{settings['outputExtension']}/"
if not os.path.exists(outputFolderPath):
os.mkdir(outputFolderPath)
if settings["saveMovie"]:
imageArr[0].save(outputFolderPath + 'Synthetic.gif', save_all=True, append_images=imageArr[1:], duration=30, optimize=False, loop=0)
# Write a readme file that contains all of the parameters that the solving used
lines = ['#####################\n',
'# README FILE #\n',
'#####################\n']
lines += [f'Generated: {time.ctime()}\n\n']
lines += ['Note: this file was autogenerated by the `pepe.auto.forceSolve()` function\n',
' and it is not recommended to be manually edited. To reuse the settings\n',
' and parameters that were used here, the path of this file\n',
f' (\'{outputFolderPath}readme.txt\') \n',
' can be passed via the \'settingsFile\' keyword argument of `pepe.auto.forceSolve()`.\n',
' In this case, explicitly passed arguments will override the values in the settings file.\n']
lines += ['\n## Runtime Information\n',
f'Version: pepe {pepe.__version__}\n',
f'Total runtime: {time.perf_counter() - overallStartTime:.6}s\n',
f'Mean tracking time: {np.mean(trackingTimes):.4}s\n',
f'Mean guess generation time: {np.mean(initialGuessTimes):.4}s\n',
f'Mean optimization time: {np.mean(optimizationTimes):.4}s\n',
f'Mean misc. time: {np.mean(miscTimes):.4}s\n',
f'Number of failed particles: {totalFailedParticles}\n']
settings.update(circleTrackingKwargs)
settings.update(optimizationKwargs)
lines += ['\n## Settings\n']
for k,v in settings.items():
lines += [f'{k}: {v}\n']
lines += ['\n## Errors\n']
if len(errorMsgs) > 0:
lines += errorMsgs
else:
lines += ['None :)']
with open(outputFolderPath + 'readme.txt', 'w') as readmeFile:
readmeFile.writelines(lines)
# Save the arrays to pickle files (optional)
if settings["pickleArrays"]:
with open(outputFolderPath + 'forces.pickle', 'wb') as f:
pickle.dump(rectForceArr, f)
with open(outputFolderPath + 'alphas.pickle', 'wb') as f:
pickle.dump(rectAlphaArr, f)
with open(outputFolderPath + 'betas.pickle', 'wb') as f:
pickle.dump(rectBetaArr, f)
with open(outputFolderPath + 'centers.pickle', 'wb') as f:
pickle.dump(rectCenterArr, f)
with open(outputFolderPath + 'radii.pickle', 'wb') as f:
pickle.dump(rectRadiusArr, f)
with open(outputFolderPath + 'angles.pickle', 'wb') as f:
pickle.dump(rectAngleArr, f)
# Save the raw arrays too, since I think I have a bug in my rectangularization process
# if settings["pickleArrays"]:
# with open(outputFolderPath + 'forces_raw.pickle', 'wb') as f:
# pickle.dump(forceArr, f)
#
# with open(outputFolderPath + 'alphas_raw.pickle', 'wb') as f:
# pickle.dump(alphaArr, f)
#
# with open(outputFolderPath + 'betas_raw.pickle', 'wb') as f:
# pickle.dump(betaArr, f)
#
# with open(outputFolderPath + 'centers_raw.pickle', 'wb') as f:
# pickle.dump(centersArr, f)
#
# with open(outputFolderPath + 'radii_raw.pickle', 'wb') as f:
# pickle.dump(radiiArr, f)
# Generate a fit report (optional)
# This includes information about the error for each frame, all of the forces/alphas/betas/
# centers/radii for each particle at each timestep, and all settings in a nicely compiled
# (via latex) pdf.
if settings["genFitReport"]:
# Make the source directory
if not os.path.exists(outputFolderPath + 'FitReport_src'):
os.mkdir(outputFolderPath + 'FitReport_src')
# First, generate a plot of the error
fig, ax = plt.subplots()
ax.plot(errorArr)
ax.set_xlabel('Frame')
ax.set_ylabel('Mean-squared error')
ax.set_title('Difference Between Optimized Result and Real Image')
fig.savefig(outputFolderPath + 'FitReport_src/error.pdf')
fig.savefig(outputFolderPath + 'FitReport_src/error.png')
plt.close(fig)
# Draw all of the circles, with their labeled numbers
fig, ax = plt.subplots(1, 2, figsize=(8,3))
# First timestep
visCircles([rectCenterArr[i][0] for i in range(len(rectCenterArr))], [rectRadiusArr[i][0] for i in range(len(rectRadiusArr))],
ax=ax[0], annotations=np.arange(len(rectCenterArr)), setBounds=True)
# Last timestep
visCircles([rectCenterArr[i][-1] for i in range(len(rectCenterArr))], [rectRadiusArr[i][-1] for i in range(len(rectRadiusArr))],
ax=ax[1], annotations=np.arange(len(rectCenterArr)), setBounds=True)
for i in range(2):
ax[i].set_xlabel('X [px]')
ax[i].set_ylabel('Y [px]')
ax[i].invert_yaxis()
ax[0].set_title('First Frame')
ax[1].set_title('Last Frame')
fig.savefig(outputFolderPath + 'FitReport_src/particle_identities.pdf')
fig.savefig(outputFolderPath + 'FitReport_src/particle_identities.png')
plt.close(fig)
# Next, draw the forces/betas/alphas/centers for each particle
# through time
for i in range(len(rectForceArr)):
fig, ax = visForces(rectForceArr[i], rectAlphaArr[i], rectBetaArr[i], rectCenterArr[i], rectAngleArr[i])
fig.suptitle(f'Particle {i}')
fig.savefig(outputFolderPath + f'FitReport_src/particle_{i}_forces.pdf')
fig.savefig(outputFolderPath + f'FitReport_src/particle_{i}_forces.png')
plt.close(fig)
# Create a gif of the particle orientation through time, overlaid
# on the original images
visRotation([settings["imageDirectory"] + f for f in imageFiles],
rectCenterArr, rectRadiusArr, rectAngleArr, outputFolderPath + 'FitReport_src/', (0, cropXMin))
# Create gifs of the contacts
forceColors = genColors(len(rectBetaArr))
# The list comprehension is to make sure that we index a particle that actually has forces acting
# on it.
tSteps = len(imageFiles)#len([b for b in rectBetaArr if len(b) > 0][0])
contactPointImages = [None for i in range(tSteps)]
contactAngleImages = [None for i in range(tSteps)]
for i in range(tSteps):
# Have to do this, because the settings variable could be None
startI = settings["imageStartIndex"] if settings["imageStartIndex"] is not None else 0
# First, just the contact points
fig, ax = plt.subplots()
visCircles([rectCenterArr[p][i] for p in range(len(rectCenterArr))], [rectRadiusArr[p][i] for p in range(len(rectRadiusArr))], ax=ax)
for particleIndex in range(len(rectBetaArr)):
visContacts(rectCenterArr[particleIndex][i], rectRadiusArr[particleIndex][i],
rectBetaArr[particleIndex][:,i], ax=ax, forceColors=forceColors[particleIndex])
ax.set_xlim([0, 1280])
ax.set_ylim([0, 1024])
ax.set_aspect('equal')
ax.set_title(f'Frame {i + startI}')
ax.invert_yaxis()
canvas = plt.get_current_fig_manager().canvas
canvas.draw()
contactPointImages[i] = Image.frombytes('RGB', canvas.get_width_height(),
canvas.tostring_rgb())
plt.close(fig)
# Now the one with angles
fig, ax = plt.subplots()
visCircles([rectCenterArr[p][i] for p in range(len(rectCenterArr))], [rectRadiusArr[p][i] for p in range(len(rectRadiusArr))], ax=ax)
for particleIndex in range(len(rectBetaArr)):
visContacts(rectCenterArr[particleIndex][i], rectRadiusArr[particleIndex][i],
rectBetaArr[particleIndex][:,i], ax=ax, forceColors=forceColors[particleIndex], alphaArr=rectAlphaArr[particleIndex][:,i])
ax.set_xlim([0, 1280])
ax.set_ylim([0, 1024])
ax.set_aspect('equal')
ax.set_title(f'Frame {i + startI}')
ax.invert_yaxis()
canvas = plt.get_current_fig_manager().canvas
canvas.draw()
contactAngleImages[i] = Image.frombytes('RGB', canvas.get_width_height(),
canvas.tostring_rgb())
plt.close(fig)
contactPointImages[0].save(outputFolderPath + 'FitReport_src/contact_points.gif', save_all=True,
append_images=contactPointImages[1:], duration=20, optimize=True, loop=0)
contactAngleImages[0].save(outputFolderPath + 'FitReport_src/contact_angles.gif', save_all=True,
append_images=contactAngleImages[1:], duration=20, optimize=True, loop=0)
if settings["showProgressBar"]:
bar.update()
bar.close()
return rectForceArr, rectAlphaArr, rectBetaArr, rectCenterArr, rectRadiusArr, rectAngleArr
| """
Start from a directory of images and solve for all of the particle positions, orientations, and forces.
"""
import numpy as np
import os
import cv2
import time
import pickle
import inspect
import ast
import tqdm
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from IPython.display import clear_output
import pepe
from pepe.preprocess import checkImageType, lightCorrectionDiff, circularMask
from pepe.analysis import initialForceSolve, forceOptimize, gSquared, g2ForceCalibration, singleParticleForceBalance, forceOptimizeArgDTypes
from pepe.tracking import houghCircle, convCircle, angularConvolution, circleTrackArgDTypes
from pepe.simulate import genSyntheticResponse
from pepe.utils import preserveOrderArgsort, rectangularizeForceArrays, explicitKwargs, parseList
from pepe.visualize import genColors, visCircles, visForces, visContacts, visRotation
from pepe.topology import findPeaksMulti
# All of the dtypes of the args for the below method
# The following args are not included, because they are not
# important: progressBarOffset, progressBarTitle
forceSolveArgDTypes = {"imageDirectory": str,
"imageExtension": str,
"imageEndIndex": int,
"imageStartIndex": int,
"carryOverAlpha": bool,
"carryOverForce": bool,
"showProgressBar": bool,
"lightCorrectionImage": str,
"lightCorrectionVerticalMask": str,
"lightCorrectionHorizontalMask": str,
"g2CalibrationImage": str,
"g2CalibrationCutoffFactor": float,
"maskImage": str,
"cropXMin": int,
"cropXMax": int,
"circleDetectionMethod": str,
"guessRadius": float,
"fSigma": float,
"pxPerMeter": float,
"brightfield": bool,
"contactPadding": int,
"g2MaskPadding": int,
"contactMaskRadius": int,
"peBlurKernel": int,
"requireForceBalance": bool,
"circleTrackingChannel": int,
"circleTrackingKwargs": dict,
"photoelasticChannel": int,
"optimizationKwargs": dict,
"maxBetaDisplacement": float,
"forceNoiseWidth": float,
"alphaNoiseWidth": float,
"saveMovie": bool,
"pickleArrays": bool,
"outputRootFolder": str,
"outputExtension": str,
"genFitReport": bool,
"performOptimization": bool,
"inputSettingsFile": str,
"debug": bool}
# Decorator that allows us to identify which keyword arguments were explicitly
# passed to the function, and which were left as default values. See beginning
# of method code for more information/motivation.
@explicitKwargs()
def forceSolve(imageDirectory, guessRadius=0.0, fSigma=0.0, pxPerMeter=0.0, brightfield=True, contactPadding=15, g2MaskPadding=2, contactMaskRadius=30, lightCorrectionImage=None, lightCorrectionHorizontalMask=None, lightCorrectionVerticalMask=None, g2CalibrationImage=None, g2CalibrationCutoffFactor=.9, maskImage=None, cropXMin=None, cropXMax=None, peBlurKernel=3, imageExtension='bmp', requireForceBalance=False, imageStartIndex=None, imageEndIndex=None, carryOverAlpha=True, carryOverForce=True, circleDetectionMethod='convolution', circleTrackingKwargs={}, circleTrackingChannel=0, maxBetaDisplacement=.5, photoelasticChannel=1, forceNoiseWidth=.03, alphaNoiseWidth=.01, optimizationKwargs={}, performOptimization=True, debug=False, showProgressBar=True, progressBarOffset=0, progressBarTitle=None, saveMovie=False, outputRootFolder='./', inputSettingsFile=None, pickleArrays=True, genFitReport=True, outputExtension=''):
"""
Complete pipeline to solve for forces and particle positions for all image files
in a directory. Results will be returned and potentially written to various files.
See `Returns` section for more information.
Expects all particles to be the same (or very similar) size. This assumption is made
by the calculation of the gradient squared calibration value, which is computed just
once using the guess of the radii. This should not be a problem if the radii are only
slightly varied (~10 pixels or something) but any more than that and errors will begin
to accumulate.
This method has **a lot** of arguments; it is intended to be used once reasonable
values for all of these have already been found. While the `debug` option for this function
is very helpful, it is recommended to utilize the various notebooks/examples to find good
choices for parameters first.
The output readme file can also serve as a cache of the parameter values/settings, which
can be passed back to future calls of this method using the `inputSettingsFile` argument.
Parameters
----------
imageDirectory : str
An absolute or relative path to the directory containing the images that are
to be analyzed. The names of the images need not follow any particular naming
scheme, but they should be such that sorting the list alphabetically will give
the images in their proper order.
guessRadius : float
The radius of the particles to be detected, in pixels. As of now, this value will
be taken as the particle radius, but future versions may be able to vary this to
find the optimal value.
Currently no support for particles of different sizes.
fSigma : float
Stress optic coefficient, relating to material thickness, wavelength of light and
other material properties (denoted as C in most literature; sometimes also called the
"stress optic coefficient").
pxPerMeter : float
The number of pixels per meter in the images. Depends on the camera, lens, and
zoom settings used to capture the images.
Note that this is **not** meters per pixel (i.e. not the inverse), as is used
in some of the other force solving implementations.
brightfield : bool
Whether the images are captured using a brightfield polariscope (`True`) or
a darkfield polariscope (`False`).
contactPadding : int
Maximum distance (in pixels) between a particle's edge and the wall or the edges of two
particles that will still be considered a potential force-bearing contact.
g2MaskPadding : int or float
Number of pixels to ignore at the edge of each particle when calculating the average G^2.
If float value < 1 is passed, gradient mask radius will be taken as that percent of the full
particle radius. A value of 0 means no padding is included.
contactMaskRadius : float
The radius of the circular mask that will be constructed around each contact to estimate
the magnitude of the force using the gradient squared in that region.
lightCorrectionImage : str or np.ndarray[H,W[,C]]
The path to an image (or an array representing an image) that contains no particles, and
can be used to correct for any light gradients present in the actual data.
lightCorrectionHorizontalMask : str or np.ndarray[H,W[,C]]
A mask array, containing values of `0` or `1`, with the latter representing areas over which
the horizontal light correction should be calculated. Can also be a path to an image.
lightCorrectionVerticalMask : str or np.ndarray[H,W[,C]]
A mask array, containing values of `0` or `1`, with the latter representing areas over which
the vertical light correction should be calculated. Can also be a path to an image.
g2CalibrationImage : str or np.ndarray[H,W,C]
An image with a single particle (or at least one particle) that has no force acting on it.
Used to determine the base level of gradient squared (due to noise) for a free particle. Can
also be a path to an image.
g2CalibrationCutoffFactor : float
The factor that is multiplied by the mean gradient squared value of the particles in the
calibration image. Any particle that has an average gradient squared value below the
calibration value multiplied by this factor will be assumed to have no forces acting on it.
maskImage : str or np.ndarray[H,W,C]
A mask array, containing values of `0` or `1`, with the latter representing the regions of
importance for the image. Used in detecting particles, generating initial guesses, and
calculating error during non-linear optimization. Can also be a path to an image.
cropXMin : int or None
Left bound to crop down the image in the x direction.
cropXMax : int or None
Right bound to crop down the image in the x direction.
peBlurKernel : int
The kernel size that will be used for blurring the photoelastic channel of each image, to
reduce noise. Should be an odd integer.
imageExtension : str
The extension of the image files that will be read in from `imageDirectory`. Should not include
the '.' before the extension.
requireForceBalance : bool
Whether to impose particle-wise force balance at each step (`True`) or to take the results of
the optimization process as they are (`False`).
Currently WIP, and does not do anything.
forceBalanceWeighting : float
If a non-zero positive value, adds a contribution to the optimization cost
pertaining to how well the ensemble of forces satisfy force balance.
imageStartIndex : int or None
The index of which image to start at when analyzing the files in `imageDirectory`. Value
of `None` will start at the first (alphabetically sorted) image.
imageEndIndex : int or None
The index of which image to end at when analyzing the files in `imageDirectory`. Value
of `None` will end at the last (alphabetically sorted) image.
circleDetectionMethod : ['convolution' or 'hough']
Whether to use the convolution or hough circle detection method to identify particles.
See `pepe.tracking.convCircle()` and `pepe.tracking.houghCircle()` for more information.
circleTrackingKwargs : **kwargs
Keyword arguments to be passed to the selected circle tracking function.
See `pepe.tracking.convCircle()` and `pepe.tracking.houghCircle()` for more information.
circleTrackingChannel : int
The channel of the image that will be used to track the particles. `0` for red, `1` for
green, and `2` for blue.
maxBetaDisplacement : float
The maximum distance (angle) that a force can move between frames and still be identified
as the same force. If a force moves more than this value, it will still be recorded as a force,
but will be considered a new and independent force from any of the ones in the previous frame.
photoelasticChannel : int
The channel of the image that will be used to gauge the photoelastic response. `0` for red, `1` for
green, and `2` for blue.
forceNoiseWidth : float or None
The width of the gaussian distribution (centered at 0) that noise is sampled from to add to the
force guesses (potentially from the previous frame). This is done to avoid getting stuck in a local
minimum for too long (adds some Monte-Carlo-esque behavior to the solving).
alphaNoiseWidth : float or None
The width of the gaussian distribution (centered at 0) that noise is sampled from to add to the
alpha guesses (potentially from the previous frame). This is done to avoid getting stuck in a local
minimum for too long (adds some Monte-Carlo-esque behavior to the solving).
optimizationKwargs : **kwargs
Keyword arguments to be passed to the optimization process.
For more information, see `pepe.analysis.forceOptimize()`.
performOptimization : bool
Whether or not to perform optimization on the particles.
Mostly included as a debug option, but any real data analysis should
utilize the optimization, as the initial guessing is often not nearly
accurate enough to get any real results.
debug : bool
Whether to print progress updates for each frame to the screen (`True`) or not (`False`).
showProgressBar : bool
Whether to show a progress bar throughout the analysis (`True`) or not (`False`). Uses
`tqdm` library.
progressBarOffset : int
The number of lines to offset the progress bar by. Generally an internal variable
used when multiple threads are active.
progressBarTitle : str
The text to be written to the left of the progress bar. Generally an internal variable
controlled by some solving script.
saveMovie : bool
Whether to save a compiled gif of the reconstructed forces at each frame at the end (`True`)
or not (`False`).
outputRootFolder : str
The location where the output folder (potentially containing the movie, pickle files, readme, etc.)
will be created. Output folder itself will be named after the `imageDirectory`, with '_Synthetic'
appended to the end.
pickleArrays : bool
Whether to save the forces, betas, alphas, centers, and radii as pickle files (`True`) or not (`False`).
Files will be located in the output folder (see `outputRootFolder`).
inputSettingsFile : str
Path to a readme file containing parameters for the solving process, likely generated from
a previous iteration of the program. Explicitly passed arguments will override those that
are included in the settings file.
Currently WIP and does not do anything.
genFitReport : bool
Whether or not to generate a fit report of the results, including errors per frame,
examinations of all particles/forces, and settings, compiled in a latex pdf.
Will generate both the compiled file 'FitReport.pdf' and the source directory
'FitReport_src/'.
Returns
-------
rectForceArr : list[P](np.ndarray[F,T])
A list of arrays representing the force magnitude for each force on each particle.
rectAlphaArr : list[P](np.ndarray[F,T])
A list of arrays representing the alpha angle for each force on each particle.
rectBetaArr : list[P](np.ndarray[F,T])
A list of arrays representing the beta angle for each force on each particle.
rectCenterArr : np.ndarray[P,T,2]
Particle centers for each timestep. Elements take on a value of `[np.nan, np.nan]`
if the particle does not exist for a given timestep.
rectRadiusArr : np.ndarray[P,T]
Particle radii for each timestep. Elements take on a value of `np.nan` if the particle
does not exist for a given timestep.
Depending on kwarg values, several files may be created in the output
folder, which will be located in `outputRootFolder` and named according
to: '<`imageDirectory`>_Synthetic/'.
"""
overallStartTime = time.perf_counter()
# For the sake of saving the options to a readme file (and potentially
# reading them back out), it is easiest to keep all of the settings in a
# dictionary
# We have 3 layers of precedence for reading in settings:
# 1. Explicitly passed kwarg
# 2. Read in from settings file
# 3. Default value of a kwarg
# So we assign the elements of our settings dict in opposite order
# 3. All of the default values
# The following variables are not present:
# progressBarOffset, progressBarTitle
# This is because we don't care about saving them
settings = {"imageDirectory": os.path.abspath(imageDirectory) + '/', # Convert to absolute path
"imageExtension": imageExtension,
"imageEndIndex": imageEndIndex,
"imageStartIndex": imageStartIndex,
"carryOverAlpha": carryOverAlpha,
"carryOverForce": carryOverForce,
"lightCorrectionImage": lightCorrectionImage,
"lightCorrectionVerticalMask": lightCorrectionVerticalMask,
"lightCorrectionHorizontalMask": lightCorrectionHorizontalMask,
"g2CalibrationImage": g2CalibrationImage,
"g2CalibrationCutoffFactor": g2CalibrationCutoffFactor,
"maskImage": maskImage,
"cropXMin": cropXMin,
"cropXMax": cropXMax,
"circleDetectionMethod": circleDetectionMethod,
"guessRadius": guessRadius,
"fSigma": fSigma,
"pxPerMeter": pxPerMeter,
"brightfield": brightfield,
"contactPadding": contactPadding,
"g2MaskPadding": g2MaskPadding,
"contactMaskRadius": contactMaskRadius,
"peBlurKernel": peBlurKernel,
"requireForceBalance": requireForceBalance,
"circleTrackingChannel": circleTrackingChannel,
"photoelasticChannel": photoelasticChannel,
"maxBetaDisplacement": maxBetaDisplacement,
"forceNoiseWidth": forceNoiseWidth,
"alphaNoiseWidth": alphaNoiseWidth,
"showProgressBar": showProgressBar,
"saveMovie": saveMovie,
"pickleArrays": pickleArrays,
"outputRootFolder": outputRootFolder,
"outputExtension": outputExtension,
"genFitReport": genFitReport,
"performOptimization": performOptimization,
"debug": debug}
# For the next step, we will need to know all of the data types of each
# argument (to properly cast). Because certain arguments have None as a default
# value, we can't automatically generate this information.
# See above this method for the list of these, since they are also used
# in the TrialObject file
# We need to do the same thing for the kwargs for both
# circle tracking and optimization
# These have both been moved to either the tracking/DTypes.py
# or the analysis/ForceSolve.py files, respectively
# Now add all the dictionaries together
argDTypes = forceSolveArgDTypes.copy()
argDTypes.update(circleTrackArgDTypes)
argDTypes.update(forceOptimizeArgDTypes)
# 2. Anything read in from a settings file
# Note that it works to our advantage that we already have values for most entries,
# since the settings file doesn't include type information, so we need the old
# values to cast properly. 1. is actually contained in here as well, because
# we can just check to see if that variable was explicitly passed before overwriting it.
# 1. The kwargs that are explicitly passed
# This one is a little tricky, because there isn't a super great way by default
# to differentiate whether a kwarg is explicitly passed or is its default value
# (without just keeping a list of default values). I also don't want to
# replace the entire function signature with (*args, **kwargs) because then the
# documentation would not be as good (I think). So the solution here is to define
# a decorator that has the (*args, **kwargs) signature, and to create an attribute
# of this method that is a list of the kwargs that are explicitly passed to the
# decorator. See `pepe.utils.explicitKwargs()` for more info.
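# A minimal sketch of the idea behind such a decorator (illustrative only; the real
# implementation lives in pepe.utils.explicitKwargs() and this sketch ignores
# positional arguments):
#
# import functools
#
# def explicitKwargs():
#     def decorator(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             # Record which keyword arguments the caller actually supplied
#             wrapper.explicit_kwargs = list(kwargs.keys())
#             return func(*args, **kwargs)
#         wrapper.explicit_kwargs = []
#         return wrapper
#     return decorator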
if inputSettingsFile is not None:
if os.path.exists(inputSettingsFile):
fileObj = open(inputSettingsFile, 'r')
for line in fileObj:
# Check each line and see if it looks like a dictionary value
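# e.g. a readme line of the form "guessRadius: 160" splits into
# ["guessRadius", " 160"] and the value is then cast below using
# argDTypes["guessRadius"] (the numeric value here is purely illustrative)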
split = line.split(':')
# Read settings into the master settings file
if len(split) == 2 and split[0].strip() in argDTypes.keys() and not split[0].strip() in forceSolve.explicit_kwargs:
# Cast to the type of the value already in the dict
if split[1].strip() == 'None':
settings[split[0].strip()] = None
else:
if '[' in split[1]:
settings[split[0].strip()] = parseList(split[1].strip(), dtype=argDTypes[split[0].strip()])
else:
# Bools need a special condition
if argDTypes[split[0].strip()] is bool:
val = split[1].strip() == 'True'
else:
val = argDTypes[split[0].strip()](split[1].strip())
settings[split[0].strip()] = val
else:
print(f'Warning: provided settings file does not exist! Attempting to run regardless...')
# While the following variables all have a default value of 0, they cannot actually
# be left as this value. The reason they have a default value is so that if these
# values are indicated by a settings file, we don't want to have to enter them again.
# So here, we make sure we have values for them all, either explicitly passed or read in.
requiredVars = ["guessRadius", "fSigma", "pxPerMeter"]
for r in requiredVars:
assert settings[r] != 0, f'Error: {r} value not supplied explicitly or implicitly!'
# Now carry over the kwargs that are sent to the optimization procedure into that
# dictionary. We can find the names of arguments by using the `inspect` library
possibleOptimKwargs = list(inspect.signature(forceOptimize).parameters.keys())
for pkw in possibleOptimKwargs:
if pkw in settings.keys():
optimizationKwargs[pkw] = settings[pkw]
# We want to do the same thing for the circle tracking function, but we don't
# yet know which circle tracking function we are using yet, so we'll carry
# that over a bit later.
# Find all images in the directory
imageFiles = os.listdir(settings["imageDirectory"])
# This goes before the sorting/extension filtering so we can get more specific
# error messages (and we have another one of these below)
if len(imageFiles) < 1:
print(f'Error: directory {imageDirectory} contains no files!')
return None
imageFiles = np.sort([img for img in imageFiles if img[-len(settings["imageExtension"]):] == settings["imageExtension"]])
# We have to do the end index first, so it doesn't mess up the start one
if settings["imageEndIndex"] is not None:
imageFiles = imageFiles[:min(settings["imageEndIndex"], len(imageFiles))]
if settings["imageStartIndex"] is not None:
imageFiles = imageFiles[max(settings["imageStartIndex"], 0):]
# Make sure we still have some proper images
if len(imageFiles) < 1:
print(f'Error: directory \'{settings["imageDirectory"]}\' contains no files with extension \'{settings["imageExtension"]}\'!')
return None
xB = [settings["cropXMin"], settings["cropXMax"]]
imageSize = checkImageType(settings["imageDirectory"] + imageFiles[0])[:,xB[0]:xB[1],0].shape
# This will calculate the light correction across the images
if settings["lightCorrectionImage"] is not None:
# Convert to absolute paths if they are paths
if type(settings["lightCorrectionImage"]) is str:
settings["lightCorrectionImage"] = os.path.abspath(settings["lightCorrectionImage"])
if type(settings["lightCorrectionVerticalMask"]) is str:
settings["lightCorrectionVerticalMask"] = os.path.abspath(settings["lightCorrectionVerticalMask"])
if type(settings["lightCorrectionHorizontalMask"]) is str:
settings["lightCorrectionHorizontalMask"] = os.path.abspath(settings["lightCorrectionHorizontalMask"])
cImageProper = checkImageType(settings["lightCorrectionImage"])[:,xB[0]:xB[1]]
vMask = checkImageType(settings["lightCorrectionVerticalMask"])[:,xB[0]:xB[1]]
hMask = checkImageType(settings["lightCorrectionHorizontalMask"])[:,xB[0]:xB[1]]
if vMask.ndim == 3:
vMask = vMask[:,:,0]
if hMask.ndim == 3:
hMask = hMask[:,:,0]
lightCorrection = lightCorrectionDiff(cImageProper, vMask, hMask)
trackCorrection = lightCorrection[:,:,settings["circleTrackingChannel"]]
peCorrection = lightCorrection[:,:,settings["photoelasticChannel"]]
else:
# It probably isn't great hygiene to have this variable flip between a single
# value and an array, but you can always add a scalar to a numpy array, so
# this is the easiest way (since we haven't loaded any images yet)
trackCorrection = 0
peCorrection = 0
# Load up the mask image, which will be used to remove parts of the images
# that we don't care about, and also potentially indicate which particles
# are close to the boundary.
if settings["maskImage"] is not None:
maskArr = checkImageType(settings["maskImage"])[:,xB[0]:xB[1]]
ignoreBoundary = False
else:
# Same deal as above: scalar multiplication functions exactly how we want
# in the case that we don't have a mask, so it's just easier to do this.
maskArr = 1
ignoreBoundary = True
# Which method we will be using to detect circles
if settings["circleDetectionMethod"] == 'convolution':
circFunc = convCircle
elif settings["circleDetectionMethod"] == 'hough':
circFunc = houghCircle
else:
print(f'Error: circle detection option \'{settings["circleDetectionMethod"]}\' not recognized!')
return None
# Now that we have a circle tracking function, we can carry over any possible kwargs
possibleCircleKwargs = list(inspect.signature(circFunc).parameters.keys())
for pkw in possibleCircleKwargs:
if pkw in settings.keys():
circleTrackingKwargs[pkw] = settings[pkw]
# Calculate the lowest g2 value that we care about, so we can throw everything
# that is below that away when solving (optional)
checkMinG2 = False
if settings["g2CalibrationImage"] is not None:
g2CalImage = checkImageType(settings["g2CalibrationImage"])[:,xB[0]:xB[1]]
g2CalPEImage = cv2.blur((g2CalImage[:,:,settings["photoelasticChannel"]] + peCorrection).astype(np.float64) / 255, (settings["peBlurKernel"],settings["peBlurKernel"]))
# Locate particles
centers, radii = circFunc((g2CalImage[:,:,settings["circleTrackingChannel"]] + trackCorrection) * maskArr[:,:,0], settings["guessRadius"], **circleTrackingKwargs)
# There should only be 1 particle in the calibration image
if len(centers) == 0:
print(f'Warning: Gradient-squared calibration image does not contain any particles! Ignoring...')
else:
particleMask = circularMask(g2CalPEImage.shape, centers[0], radii[0])[:,:,0]
gSqr = gSquared(g2CalPEImage)
minParticleG2 = np.sum(gSqr * particleMask) / np.sum(particleMask) * settings["g2CalibrationCutoffFactor"]
checkMinG2 = True
# The arrays that we will be building for each timestep. It is better to just
# use an untyped list since the arrays are all triangular and whatnot.
centersArr = []
radiiArr = []
forceArr = []
betaArr = []
alphaArr = []
imageArr = []
errorArr = []
# For keeping track of time (though these will only be displayed if debug=True)
trackingTimes = np.zeros(len(imageFiles))
initialGuessTimes = np.zeros(len(imageFiles))
optimizationTimes = np.zeros(len(imageFiles))
miscTimes = np.zeros(len(imageFiles))
totalFailedParticles = 0
errorMsgs = []
if settings["showProgressBar"]:
bar = tqdm.tqdm(total=len(imageFiles)+1, position=progressBarOffset, desc=progressBarTitle)
# Calculate the gradient-squared-to-force calibration value
g2Cal = g2ForceCalibration(settings["fSigma"], settings["guessRadius"], settings["pxPerMeter"])
# The big loop that iterates over every image
for i in range(len(imageFiles)):
image = checkImageType(settings["imageDirectory"] + imageFiles[i])[:,xB[0]:xB[1]]
# Convert to floats on the domain [0,1], so we can compare to the output of
# genSyntheticResponse()
peImage = cv2.blur((image[:,:,settings["photoelasticChannel"]] + peCorrection).astype(np.float64) / 255, (settings["peBlurKernel"],settings["peBlurKernel"]))
# -------------
# Track circles
# -------------
start = time.perf_counter()
centers, radii = circFunc((image[:,:,settings["circleTrackingChannel"]] + trackCorrection) * maskArr[:,:,0], settings["guessRadius"], **circleTrackingKwargs)
# We do some indexing using the centers/radii, so it is helpful
# to have them as an integer type
centers = centers.astype(np.int64)
radii = radii.astype(np.int64)
# We want to keep the order of particles constant, so we make sure
# that they are (to whatever extent possible) in the same order
# as the previous frame. This involves finding the closest neighbor
# from the previous frame.
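# Conceptually, preserveOrderArgsort() performs a nearest-neighbour matching along
# the lines of the sketch below (illustrative only; the real implementation also
# handles padMissingValues and maxDistance):
#
# def nearestNeighborOrder(previousCenters, currentCenters):
#     order = []
#     for prev in previousCenters:
#         distances = np.linalg.norm(np.array(currentCenters) - np.array(prev), axis=-1)
#         order.append(int(np.argmin(distances)))
#     return order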
if len(centersArr) > 0:
centerOrder = preserveOrderArgsort(centersArr[-1], centers, padMissingValues=False)
centers = centers[centerOrder]
radii = radii[centerOrder]
trackingTimes[i] = time.perf_counter() - start
# ----------------------
# Generate initial guess
# ----------------------
# We run the initial guess regardless of whether we are going to overwrite
# with values from the previous frame. This is because the beta values
# are calculated via the contact network, which should not be carried over
# (since the particles are moving).
forceGuessArr, alphaGuessArr, betaGuessArr = initialForceSolve(peImage,
centers, radii, settings["fSigma"], settings["pxPerMeter"],
settings["contactPadding"], settings["g2MaskPadding"],
contactMaskRadius=settings["contactMaskRadius"],
boundaryMask=maskArr, ignoreBoundary=ignoreBoundary, g2Cal=g2Cal)
if len(centersArr) > 0:
# If we have added/lost particles, we want to carry over the previous values where
# possible, and otherwise take the results of initialForceSolve
# Note that this is the complement to the center order calculated previously:
# this orders the old centers according to the new ones.
# We make the assumption that a particle cannot travel more than its radius in a single frame
oldCenterOrder = preserveOrderArgsort(centers, centersArr[-1], padMissingValues=True, maxDistance=settings["guessRadius"])
# Now find each new particle's old counterpart (if it exists), and then
# line up the forces using the value of beta, such that we can (optionally)
# carry over force magnitudes and alpha values.
for j in range(len(betaGuessArr)):
if oldCenterOrder[j] is None:
continue
# maxBetaDisplacement should be an angle value (in radians) that a force would
# never move in a single frame, but is large enough to not lose a force if it
# moves because of noise/small fluctuations.
forceOrder = preserveOrderArgsort(betaGuessArr[j], betaArr[-1][oldCenterOrder[j]], padMissingValues=True, maxDistance=settings["maxBetaDisplacement"])
#print(f'frame {i}, particle {j}: {forceOrder}')
for k in range(len(forceGuessArr[j])):
if forceOrder[k] is not None:
if settings["carryOverForce"]:
forceGuessArr[j][k] = forceArr[-1][oldCenterOrder[j]][forceOrder[k]]
if settings["carryOverAlpha"]:
alphaGuessArr[j][k] = alphaArr[-1][oldCenterOrder[j]][forceOrder[k]]
# In this case, we want to add a small randomly generated contribution
# so that the algorithm doesn't get stuck in some incorrect loop and so that it
# explores a little more of the parameter space to find a nice minimum at each step
if settings["forceNoiseWidth"] is not None:
forceGuessArr = [np.abs(np.array(f) + np.random.normal(0, settings["forceNoiseWidth"], size=len(f))) for f in forceGuessArr]
if settings["alphaNoiseWidth"] is not None:
alphaGuessArr = [np.abs(np.array(a) + np.random.normal(0, settings["alphaNoiseWidth"], size=len(a))) for a in alphaGuessArr]
initialGuessTimes[i] = time.perf_counter() - trackingTimes[i] - start
# -------------------------------
# Optimize each particle's forces
# -------------------------------
optimizedForceArr = []
optimizedBetaArr = []
optimizedAlphaArr = []
failed = [False for i in range(len(centers))]
# Drop forces on any particles whose g2 is lower than the min value
skipParticles = [False for i in range(len(centers))]
if checkMinG2:
gSqr = gSquared(peImage)
for j in range(len(centers)):
cMask = circularMask(peImage.shape, centers[j], radii[j])[:,:,0]
avgG2 = np.sum(gSqr * cMask) / np.sum(cMask)
skipParticles[j] = avgG2 < minParticleG2
# Mostly just a debug option, so we can test particle tracking
if not settings["performOptimization"]:
optimizedForceArr = forceGuessArr
optimizedAlphaArr = alphaGuessArr
optimizedBetaArr = betaGuessArr
else:
# This is what should run the majority of the time
for j in range(len(centers)):
if not skipParticles[j]:
try:
# We don't need to pass fSigma, pxPerMeter, or brightfield to the method
# because they will get added to optimizationKwargs automatically.
optForceArr, optBetaArr, optAlphaArr, res = forceOptimize(forceGuessArr[j], betaGuessArr[j], alphaGuessArr[j], radii[j], centers[j], peImage,
#settings["fSigma"], settings["pxPerMeter"], settings["brightfield"],
**optimizationKwargs)
optimizedForceArr.append(optForceArr)
optimizedBetaArr.append(optBetaArr)
optimizedAlphaArr.append(optAlphaArr)
except Exception as ex:
print(ex)
errorMsgs.append(f'File {imageFiles[i]}: ' + str(ex) + '\n')
failed[j] = True
totalFailedParticles += 1
# Append empty lists (ie say there are no forces)
#optimizedForceArr.append(forceGuessArr[j])
#optimizedBetaArr.append(betaGuessArr[j])
#optimizedAlphaArr.append(alphaGuessArr[j])
optimizedForceArr.append([])
optimizedBetaArr.append([])
optimizedAlphaArr.append([])
else:
optimizedForceArr.append([])
optimizedBetaArr.append([])
optimizedAlphaArr.append([])
# If necessary, impose force balance on all particles
if settings["requireForceBalance"]:
for j in range(len(centers)):
optimizedForceArr[j], optimizedAlphaArr[j] = singleParticleForceBalance(optimizedForceArr[j], optimizedAlphaArr[j], optimizedBetaArr[j])
optimizationTimes[i] = time.perf_counter() - initialGuessTimes[i] - trackingTimes[i] - start
# Save all of our values
forceArr.append(optimizedForceArr)
betaArr.append(optimizedBetaArr)
alphaArr.append(optimizedAlphaArr)
centersArr.append(centers)
radiiArr.append(radii)
if settings["debug"] or settings["saveMovie"] or settings["genFitReport"]:
estimatedPhotoelasticChannel = np.zeros_like(peImage, dtype=np.float64)
for j in range(len(centers)):
estimatedPhotoelasticChannel += genSyntheticResponse(np.array(forceGuessArr[j]),
np.array(alphaGuessArr[j]),
np.array(betaGuessArr[j]),
settings["fSigma"], radii[j],
settings["pxPerMeter"], settings["brightfield"], imageSize=peImage.shape,
center=centers[j])
optimizedPhotoelasticChannel = np.zeros(peImage.shape)
for j in range(len(centers)):
optimizedPhotoelasticChannel += genSyntheticResponse(np.array(optimizedForceArr[j]),
np.array(optimizedAlphaArr[j]),
np.array(optimizedBetaArr[j]),
settings["fSigma"], radii[j],
settings["pxPerMeter"], settings["brightfield"], imageSize=peImage.shape,
center=centers[j])
# L2 norm (root of the summed squared error) between the optimized result and the real image
errorArr.append(np.sqrt(np.sum((optimizedPhotoelasticChannel - peImage)**2)))
imgArr = np.zeros((*optimizedPhotoelasticChannel.shape, 3))
img = Image.fromarray(optimizedPhotoelasticChannel*255)
img = img.convert('RGB')
drawObj = ImageDraw.Draw(img)
for j in range(len(centers)):
leftUpPoint = (centers[j][1]-radii[j], centers[j][0]-radii[j])
rightDownPoint = (centers[j][1]+radii[j], centers[j][0]+radii[j])
twoPointList = [leftUpPoint, rightDownPoint]
color = '#FF0000' if failed[j] else '#00AAAA'
drawObj.ellipse(twoPointList, outline=color, fill=None, width=3)
if settings["debug"]:
clear_output(wait=True)
fig, ax = plt.subplots(1, 3, figsize=(12,4))
ax[0].imshow(maskArr * image)
ax[0].set_title('Tracked Particles')
for j in range(len(centers)):
c = plt.Circle(centers[j][::-1], radii[j], label='Detected particles', color='teal', fill=False, linewidth=1)
ax[0].add_artist(c)
# Now add contacts
for k in range(len(betaGuessArr[j])):
contactPoint = centers[j] + radii[j] * np.array([np.cos(betaGuessArr[j][k]), np.sin(betaGuessArr[j][k])])
cc = plt.Circle(contactPoint[::-1], 12, color='red', fill=False, linewidth=1)
ax[1].add_artist(cc)
# Now plot past center positions
#for k in range(len(centersArr)):
# if len(centersArr[k]) >= j:
# cc = plt.Circle(centersArr[k][j][::-1], 5, color=centerColors[j], fill=True)
# ax[0].add_artist(cc)
ax[1].imshow(estimatedPhotoelasticChannel)
ax[1].set_title('Initial Guess for Optimizer\n(known forces)')
ax[2].imshow(img)
ax[2].set_title('Optimized Forces\n(known forces)')
fig.suptitle(imageFiles[i])
fig.tight_layout()
plt.show()
if settings["saveMovie"]:
imageArr.append(img)
miscTimes[i] = time.perf_counter() - optimizationTimes[i] - initialGuessTimes[i] - trackingTimes[i] - start
if settings["debug"]:
print(f'Took {time.perf_counter() - start:.5}s to solve frame:')
print(f'{5*" "}Tracking: {trackingTimes[i]:.3}s')
print(f'{5*" "}Initial guess: {initialGuessTimes[i]:.3}s')
print(f'{5*" "}Optimization: {optimizationTimes[i]:.3}s')
print(f'{5*" "}Misc. processes: {miscTimes[i]:.3}s')
if settings["showProgressBar"]:
bar.update()
# Restructure the arrays to make them more friendly, and to track forces/particles across timesteps
rectForceArr, rectAlphaArr, rectBetaArr, rectCenterArr, rectRadiusArr = rectangularizeForceArrays(forceArr, alphaArr, betaArr, centersArr, radiiArr)
# --------------
# Track rotation
# --------------
# We choose to do this after the actual solving because it helps
# to have the rectangular force arrays.
padding = settings["guessRadius"] + 5
# First, we generate our reference images, which are the first
# time a particle is completely in frame.
refImages = [None] * len(rectCenterArr)
for i in range(len(refImages)):
for j in range(len(imageFiles)):
if not True in np.isnan(rectCenterArr[i][j]):
# Continue to the next frame if this one is partially offscreen
if True in ((rectCenterArr[i][j] - padding) < 0) or True in ((rectCenterArr[i][j] - np.array(imageSize) + padding) > 0):
continue
# Otherwise, this is a good frame, so we save it
refImageFull = checkImageType(settings["imageDirectory"] + imageFiles[j])[:,xB[0]:xB[1],settings["circleTrackingChannel"]]
refImageFull *= circularMask(refImageFull.shape, rectCenterArr[i][j], rectRadiusArr[i][j])[:,:,0]
refImages[i] = refImageFull[int(rectCenterArr[i][j][0] - padding):int(rectCenterArr[i][j][0] + padding), int(rectCenterArr[i][j][1] - padding):int(rectCenterArr[i][j][1] + padding)]
# And move onto the next particle
break
# Same shape as the radius array: 1 value for each timestep, for each particle
rectAngleArr = np.zeros(rectRadiusArr.shape)
# Set all values to be np.nan initially
rectAngleArr[:,:] = np.nan
# Now we compare that reference particle to each subsequent frame
# (probably not best practice that I've switched the indices
# with respect to the previous statements, but :/)
for i in range(len(imageFiles)):
currentImageFull = checkImageType(settings["imageDirectory"] + imageFiles[i])[:,xB[0]:xB[1],settings["circleTrackingChannel"]]
for j in range(len(refImages)):
# Make sure we have a reference image, and the particle is in full view
if True in np.isnan(rectCenterArr[j][i]):
continue
if True in ((rectCenterArr[j][i] - padding) < 0) or True in ((rectCenterArr[j][i] - np.array(imageSize) + padding) > 0):
continue
# Crop out around the particle and mask it
currImage = (circularMask(currentImageFull.shape, rectCenterArr[j][i], rectRadiusArr[j][i])[:,:,0] * currentImageFull)[int(rectCenterArr[j][i][0] - padding):int(rectCenterArr[j][i][0] + padding), int(rectCenterArr[j][i][1] - padding):int(rectCenterArr[j][i][1] + padding)]
# Which is the kernel and which is the reference image doesn't really matter
# (as long as we are consistent)
# We can choose our bounds based on the previous value of the rotation
if i >= 1 and not np.isnan(rectAngleArr[j,i-1]):
rotationBounds = (rectAngleArr[j,i-1] - .1, rectAngleArr[j,i-1] + .1)
else:
# If either i=0 or the previous rotation value is nan, we should start around 0
# anyway (since we define 0 arbitrarily)
rotationBounds = (-.2, .2)
# .003 was chosen based on the data presented in the wiki
# https://github.com/Jfeatherstone/pepe/wiki/Angular-Convolution
thetaArr, convArr = angularConvolution(refImages[j], currImage, dTheta=.003, angleBounds=rotationBounds)
rectAngleArr[j,i] = thetaArr[findPeaksMulti(convArr)[0][0][0]]
# Reuse the name of the folder the images come from as a part of
# the output folder name
# [-2] element for something of form 'path/to/final/folder/' will be 'folder'
# If we are missing the final /, you have to take just the [-1] element
if settings["imageDirectory"][-1] == '/':
outputFolderPath = outputRootFolder + settings["imageDirectory"].split('/')[-2] + f'_Synthetic{settings["outputExtension"]}/'
else:
outputFolderPath = outputRootFolder + settings["imageDirectory"].split('/')[-1] + f'_Synthetic{settings["outputExtension"]}/'
if not os.path.exists(outputFolderPath):
os.mkdir(outputFolderPath)
if settings["saveMovie"]:
imageArr[0].save(outputFolderPath + 'Synthetic.gif', save_all=True, append_images=imageArr[1:], duration=30, optimize=False, loop=0)
# Write a readme file that contains all of the parameters that the solving used
lines = ['#####################\n',
'# README FILE #\n',
'#####################\n']
lines += [f'Generated: {time.ctime()}\n\n']
lines += ['Note: this file was autogenerated by the `pepe.auto.forceSolve()` function\n',
' and it is not recommended to be manually edited. To reuse the settings\n',
' and parameters that were used here, the path of this file\n',
f' (\'{outputFolderPath}readme.txt\') \n',
' can be passed via the \'settingsFile\' keyword argument of `pepe.auto.forceSolve()`.\n',
' In this case, explicitly passed arguments will override the values in the settings file.\n']
lines += ['\n## Runtime Information\n',
f'Version: pepe {pepe.__version__}\n',
f'Total runtime: {time.perf_counter() - overallStartTime:.6}s\n',
f'Mean tracking time: {np.mean(trackingTimes):.4}s\n',
f'Mean guess generation time: {np.mean(initialGuessTimes):.4}s\n',
f'Mean optimization time: {np.mean(optimizationTimes):.4}s\n',
f'Mean misc. time: {np.mean(miscTimes):.4}s\n',
f'Number of failed particles: {totalFailedParticles}\n']
settings.update(circleTrackingKwargs)
settings.update(optimizationKwargs)
lines += ['\n## Settings\n']
for k,v in settings.items():
lines += [f'{k}: {v}\n']
lines += ['\n## Errors\n']
if len(errorMsgs) > 0:
lines += errorMsgs
else:
lines += ['None :)']
with open(outputFolderPath + 'readme.txt', 'w') as readmeFile:
readmeFile.writelines(lines)
# Save the arrays to pickle files (optional)
if settings["pickleArrays"]:
with open(outputFolderPath + 'forces.pickle', 'wb') as f:
pickle.dump(rectForceArr, f)
with open(outputFolderPath + 'alphas.pickle', 'wb') as f:
pickle.dump(rectAlphaArr, f)
with open(outputFolderPath + 'betas.pickle', 'wb') as f:
pickle.dump(rectBetaArr, f)
with open(outputFolderPath + 'centers.pickle', 'wb') as f:
pickle.dump(rectCenterArr, f)
with open(outputFolderPath + 'radii.pickle', 'wb') as f:
pickle.dump(rectRadiusArr, f)
with open(outputFolderPath + 'angles.pickle', 'wb') as f:
pickle.dump(rectAngleArr, f)
# Save the raw arrays too, since I think I have a bug in my rectangularization process
# if settings["pickleArrays"]:
# with open(outputFolderPath + 'forces_raw.pickle', 'wb') as f:
# pickle.dump(forceArr, f)
#
# with open(outputFolderPath + 'alphas_raw.pickle', 'wb') as f:
# pickle.dump(alphaArr, f)
#
# with open(outputFolderPath + 'betas_raw.pickle', 'wb') as f:
# pickle.dump(betaArr, f)
#
# with open(outputFolderPath + 'centers_raw.pickle', 'wb') as f:
# pickle.dump(centersArr, f)
#
# with open(outputFolderPath + 'radii_raw.pickle', 'wb') as f:
# pickle.dump(radiiArr, f)
# Generate a fit report (optional)
# This includes information about the error for each frame, all of the forces/alphas/betas/
# centers/radii for each particle at each timestep, and all settings in a nicely compiled
# (via LaTeX) PDF.
if settings["genFitReport"]:
# Make the source directory
if not os.path.exists(outputFolderPath + 'FitReport_src'):
os.mkdir(outputFolderPath + 'FitReport_src')
# First, generate a plot of the error
fig, ax = plt.subplots()
ax.plot(errorArr)
ax.set_xlabel('Frame')
ax.set_ylabel('Mean-squared error')
ax.set_title('Difference Between Optimized Result and Real Image')
fig.savefig(outputFolderPath + 'FitReport_src/error.pdf')
fig.savefig(outputFolderPath + 'FitReport_src/error.png')
plt.close(fig)
# Draw all of the circles, with their labeled numbers
fig, ax = plt.subplots(1, 2, figsize=(8,3))
# First timestep
visCircles([rectCenterArr[i][0] for i in range(len(rectCenterArr))], [rectRadiusArr[i][0] for i in range(len(rectRadiusArr))],
ax=ax[0], annotations=np.arange(len(rectCenterArr)), setBounds=True)
# Last timestep
visCircles([rectCenterArr[i][-1] for i in range(len(rectCenterArr))], [rectRadiusArr[i][-1] for i in range(len(rectRadiusArr))],
ax=ax[1], annotations=np.arange(len(rectCenterArr)), setBounds=True)
for i in range(2):
ax[i].set_xlabel('X [px]')
ax[i].set_ylabel('Y [px]')
ax[i].invert_yaxis()
ax[0].set_title('First Frame')
ax[1].set_title('Last Frame')
fig.savefig(outputFolderPath + 'FitReport_src/particle_identities.pdf')
fig.savefig(outputFolderPath + 'FitReport_src/particle_identities.png')
plt.close(fig)
# Next, draw the forces/betas/alphas/centers for each particle
# through time
for i in range(len(rectForceArr)):
fig, ax = visForces(rectForceArr[i], rectAlphaArr[i], rectBetaArr[i], rectCenterArr[i], rectAngleArr[i])
fig.suptitle(f'Particle {i}')
fig.savefig(outputFolderPath + f'FitReport_src/particle_{i}_forces.pdf')
fig.savefig(outputFolderPath + f'FitReport_src/particle_{i}_forces.png')
plt.close(fig)
# Create a gif of the particle orientation through time, overlaid
# on the original images
visRotation([settings["imageDirectory"] + f for f in imageFiles],
rectCenterArr, rectRadiusArr, rectAngleArr, outputFolderPath + 'FitReport_src/', (0, cropXMin))
# Create gifs of the contacts
forceColors = genColors(len(rectBetaArr))
# The list comprehension is to make sure that we index a particle that actually has forces acting
# on it.
tSteps = len(imageFiles)#len([b for b in rectBetaArr if len(b) > 0][0])
contactPointImages = [None for i in range(tSteps)]
contactAngleImages = [None for i in range(tSteps)]
for i in range(tSteps):
# Have to do this, because the settings variable could be None
startI = settings["imageStartIndex"] if settings["imageStartIndex"] is not None else 0
# First, just the contact points
fig, ax = plt.subplots()
visCircles([rectCenterArr[p][i] for p in range(len(rectCenterArr))], [rectRadiusArr[p][i] for p in range(len(rectRadiusArr))], ax=ax)
for particleIndex in range(len(rectBetaArr)):
visContacts(rectCenterArr[particleIndex][i], rectRadiusArr[particleIndex][i],
rectBetaArr[particleIndex][:,i], ax=ax, forceColors=forceColors[particleIndex])
ax.set_xlim([0, 1280])
ax.set_ylim([0, 1024])
ax.set_aspect('equal')
ax.set_title(f'Frame {i + startI}')
ax.invert_yaxis()
canvas = plt.get_current_fig_manager().canvas
canvas.draw()
contactPointImages[i] = Image.frombytes('RGB', canvas.get_width_height(),
canvas.tostring_rgb())
plt.close(fig)
# Now the one with angles
fig, ax = plt.subplots()
visCircles([rectCenterArr[p][i] for p in range(len(rectCenterArr))], [rectRadiusArr[p][i] for p in range(len(rectRadiusArr))], ax=ax)
for particleIndex in range(len(rectBetaArr)):
visContacts(rectCenterArr[particleIndex][i], rectRadiusArr[particleIndex][i],
rectBetaArr[particleIndex][:,i], ax=ax, forceColors=forceColors[particleIndex], alphaArr=rectAlphaArr[particleIndex][:,i])
ax.set_xlim([0, 1280])
ax.set_ylim([0, 1024])
ax.set_aspect('equal')
ax.set_title(f'Frame {i + startI}')
ax.invert_yaxis()
canvas = plt.get_current_fig_manager().canvas
canvas.draw()
contactAngleImages[i] = Image.frombytes('RGB', canvas.get_width_height(),
canvas.tostring_rgb())
plt.close(fig)
contactPointImages[0].save(outputFolderPath + 'FitReport_src/contact_points.gif', save_all=True,
append_images=contactPointImages[1:], duration=20, optimize=True, loop=0)
contactAngleImages[0].save(outputFolderPath + 'FitReport_src/contact_angles.gif', save_all=True,
append_images=contactAngleImages[1:], duration=20, optimize=True, loop=0)
if settings["showProgressBar"]:
bar.update()
bar.close()
return rectForceArr, rectAlphaArr, rectBetaArr, rectCenterArr, rectRadiusArr, rectAngleArr
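# Example invocation (illustrative sketch only; the directory and all numeric values
# below are hypothetical and must be replaced with values for your own setup):
#
# forces, alphas, betas, centers, radii, angles = forceSolve(
#     imageDirectory='./frames/',
#     guessRadius=160,          # [px]
#     fSigma=140,               # photoelastic stress coefficient
#     pxPerMeter=10000,
#     saveMovie=True,
#     pickleArrays=True)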
|
import re
from .base import MetricsLayerBase
from .field import Field
from .set import Set
class View(MetricsLayerBase):
def __init__(self, definition: dict = {}, project=None) -> None:
if "sql_table_name" in definition:
definition["sql_table_name"] = self.resolve_sql_table_name(
definition["sql_table_name"], project.looker_env
)
if "sets" not in definition:
definition["sets"] = []
self.project = project
self.validate(definition)
super().__init__(definition)
def validate(self, definition: dict):
required_keys = ["name", "fields"]
for k in required_keys:
if k not in definition:
raise ValueError(f"View missing required key {k}")
def printable_attributes(self):
to_print = ["name", "type", "label", "group_label", "sql_table_name", "number_of_fields"]
attributes = self.to_dict()
attributes["number_of_fields"] = f'{len(attributes.get('fields', []))}'
return {key: attributes.get(key) for key in to_print if attributes.get(key) is not None}
@property
def primary_key(self):
return next((f for f in self.fields() if f.primary_key == "yes"), None)
def collect_errors(self):
fields = self.fields(show_hidden=True)
field_errors = []
for field in fields:
field_errors.extend(field.collect_errors())
if self.primary_key is None:
primary_key_error = (
f"Warning: The view {self.name} does not have a primary key, "
"specify one using the tag primary_key: yes"
)
field_errors += [primary_key_error]
return field_errors
def referenced_fields(self):
fields = self.fields(show_hidden=True)
result = []
for field in fields:
all_fields = [field] + field.get_referenced_sql_query(strings_only=False)
result.extend(all_fields)
return result
def fields(self, show_hidden: bool = True, expand_dimension_groups: bool = False) -> list:
all_fields = self._valid_fields(expand_dimension_groups=expand_dimension_groups)
if show_hidden:
return all_fields
return [field for field in all_fields if field.hidden == "no" or not field.hidden]
def _valid_fields(self, expand_dimension_groups: bool):
if expand_dimension_groups:
fields = []
for f in self._definition.get("fields", []):
field = Field(f, view=self)
if field.field_type == "dimension_group" and field.timeframes:
for timeframe in field.timeframes:
if timeframe == "raw":
continue
field.dimension_group = timeframe
fields.append(field)
elif field.field_type == "dimension_group" and field.intervals:
for interval in field.intervals:
field.dimension_group = f"{interval}s"
fields.append(field)
else:
fields.append(field)
else:
fields = [Field(f, view=self) for f in self._definition.get("fields", [])]
return fields
def _field_name_to_remove(self, field_expr: str):
# Skip the initial - sign
field_clean_expr = field_expr[1:]
if "." in field_clean_expr:
view_name, field_name = field_clean_expr.split(".")
if view_name == self.name:
return field_name
return None
return field_clean_expr
def resolve_sql_table_name(self, sql_table_name: str, looker_env: str):
start_cond, end_cond = "-- if", "--"
if start_cond in sql_table_name:
# Find the condition that is chosen in the looker env
conditions = re.findall(f"{start_cond}([^{end_cond}]*){end_cond}", sql_table_name)
try:
condition = next((cond for cond in conditions if cond.strip() == looker_env))
except StopIteration:
raise ValueError(
f"""Your sql_table_name: '{sql_table_name}' contains a conditional and
we could not match that to the conditional value you passed: {looker_env}"""
)
full_phrase = start_cond + condition + end_cond
# Use regex to extract the value associated with the condition
searchable_sql_table_name = sql_table_name.replace("\n", "")
everything_between = f"{full_phrase}([^{end_cond}]*){end_cond}"
everything_after = f"(?<={full_phrase}).*"
result = re.search(everything_between, searchable_sql_table_name)
if result:
return result.group().replace(end_cond, "").strip()
result = re.search(everything_after, searchable_sql_table_name)
return result.group().strip()
return sql_table_name
def list_sets(self):
return [Set({**s, "view_name": self.name}, project=self.project) for s in self.sets]
def get_set(self, set_name: str):
return next((s for s in self.list_sets() if s.name == set_name), None)
| import re
from .base import MetricsLayerBase
from .field import Field
from .set import Set
class View(MetricsLayerBase):
def __init__(self, definition: dict = {}, project=None) -> None:
if "sql_table_name" in definition:
definition["sql_table_name"] = self.resolve_sql_table_name(
definition["sql_table_name"], project.looker_env
)
if "sets" not in definition:
definition["sets"] = []
self.project = project
self.validate(definition)
super().__init__(definition)
def validate(self, definition: dict):
required_keys = ["name", "fields"]
for k in required_keys:
if k not in definition:
raise ValueError(f"View missing required key {k}")
def printable_attributes(self):
to_print = ["name", "type", "label", "group_label", "sql_table_name", "number_of_fields"]
attributes = self.to_dict()
attributes["number_of_fields"] = f'{len(attributes.get("fields", []))}'
return {key: attributes.get(key) for key in to_print if attributes.get(key) is not None}
@property
def primary_key(self):
return next((f for f in self.fields() if f.primary_key == "yes"), None)
def collect_errors(self):
fields = self.fields(show_hidden=True)
field_errors = []
for field in fields:
field_errors.extend(field.collect_errors())
if self.primary_key is None:
primary_key_error = (
f"Warning: The view {self.name} does not have a primary key, "
"specify one using the tag primary_key: yes"
)
field_errors += [primary_key_error]
return field_errors
def referenced_fields(self):
fields = self.fields(show_hidden=True)
result = []
for field in fields:
all_fields = [field] + field.get_referenced_sql_query(strings_only=False)
result.extend(all_fields)
return result
def fields(self, show_hidden: bool = True, expand_dimension_groups: bool = False) -> list:
all_fields = self._valid_fields(expand_dimension_groups=expand_dimension_groups)
if show_hidden:
return all_fields
return [field for field in all_fields if field.hidden == "no" or not field.hidden]
def _valid_fields(self, expand_dimension_groups: bool):
if expand_dimension_groups:
fields = []
for f in self._definition.get("fields", []):
field = Field(f, view=self)
if field.field_type == "dimension_group" and field.timeframes:
for timeframe in field.timeframes:
if timeframe == "raw":
continue
field.dimension_group = timeframe
fields.append(field)
elif field.field_type == "dimension_group" and field.intervals:
for interval in field.intervals:
field.dimension_group = f"{interval}s"
fields.append(field)
else:
fields.append(field)
else:
fields = [Field(f, view=self) for f in self._definition.get("fields", [])]
return fields
def _field_name_to_remove(self, field_expr: str):
# Skip the initial - sign
field_clean_expr = field_expr[1:]
if "." in field_clean_expr:
view_name, field_name = field_clean_expr.split(".")
if view_name == self.name:
return field_name
return None
return field_clean_expr
def resolve_sql_table_name(self, sql_table_name: str, looker_env: str):
start_cond, end_cond = "-- if", "--"
if start_cond in sql_table_name:
# Find the condition that is chosen in the looker env
conditions = re.findall(f"{start_cond}([^{end_cond}]*){end_cond}", sql_table_name)
try:
condition = next((cond for cond in conditions if cond.strip() == looker_env))
except StopIteration:
raise ValueError(
f"""Your sql_table_name: '{sql_table_name}' contains a conditional and
we could not match that to the conditional value you passed: {looker_env}"""
)
full_phrase = start_cond + condition + end_cond
# Use regex to extract the value associated with the condition
searchable_sql_table_name = sql_table_name.replace("\n", "")
everything_between = f"{full_phrase}([^{end_cond}]*){end_cond}"
everything_after = f"(?<={full_phrase}).*"
result = re.search(everything_between, searchable_sql_table_name)
if result:
return result.group().replace(end_cond, "").strip()
result = re.search(everything_after, searchable_sql_table_name)
return result.group().strip()
return sql_table_name
def list_sets(self):
return [Set({**s, "view_name": self.name}, project=self.project) for s in self.sets]
def get_set(self, set_name: str):
return next((s for s in self.list_sets() if s.name == set_name), None)
|
import numpy as np
import logging
from typing import Any, Optional, Text, List, Type, Dict, Tuple
import rasa.core.utils
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.components import Component, UnsupportedLanguageError
from rasa.nlu.featurizers.featurizer import DenseFeaturizer
from rasa.nlu.model import Metadata
import rasa.shared.utils.io
from rasa.shared.nlu.training_data.features import Features
from rasa.nlu.tokenizers.tokenizer import Tokenizer, Token
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.constants import (
DENSE_FEATURIZABLE_ATTRIBUTES,
SEQUENCE_FEATURES,
SENTENCE_FEATURES,
FEATURIZER_CLASS_ALIAS,
NO_LENGTH_RESTRICTION,
NUMBER_OF_SUB_TOKENS,
TOKENS_NAMES,
LANGUAGE_MODEL_DOCS,
)
from rasa.shared.nlu.constants import (
TEXT,
FEATURE_TYPE_SENTENCE,
FEATURE_TYPE_SEQUENCE,
ACTION_TEXT,
)
from rasa.utils import train_utils
MAX_SEQUENCE_LENGTHS = {
"bert": 512,
"gpt": 512,
"gpt2": 512,
"xlnet": NO_LENGTH_RESTRICTION,
"distilbert": 512,
"roberta": 512,
}
logger = logging.getLogger(__name__)
class LanguageModelFeaturizer(DenseFeaturizer):
"""Featurizer using transformer-based language models.
The transformers(https://github.com/huggingface/transformers) library
is used to load pre-trained language models like BERT, GPT-2, etc.
The component also tokenizes and featurizes dense featurizable attributes of
each message.
"""
defaults = {
# name of the language model to load.
"model_name": "bert",
# Pre-Trained weights to be loaded(string)
"model_weights": None,
# an optional path to a specific directory to download
# and cache the pre-trained model weights.
"cache_dir": None,
}
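# e.g. (illustrative only) a component_config such as
# {"model_name": "bert", "model_weights": "bert-base-uncased"}
# would load BERT with the given pre-trained HuggingFace weights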
@classmethod
def required_components(cls) -> List[Type[Component]]:
"""Packages needed to be installed."""
return [Tokenizer]
def __init__(
self,
component_config: Optional[Dict[Text, Any]] = None,
skip_model_load: bool = False,
hf_transformers_loaded: bool = False,
) -> None:
"""Initializes LanguageModelFeaturizer with the specified model.
Args:
component_config: Configuration for the component.
skip_model_load: Skip loading the model for pytests.
hf_transformers_loaded: Skip loading of model and metadata, use
HFTransformers output instead.
"""
super(LanguageModelFeaturizer, self).__init__(component_config)
if hf_transformers_loaded:
return
self._load_model_metadata()
self._load_model_instance(skip_model_load)
@classmethod
def create(
cls, component_config: Dict[Text, Any], config: RasaNLUModelConfig
) -> "DenseFeaturizer":
language = config.language
if not cls.can_handle_language(language):
# check failed
raise UnsupportedLanguageError(cls.name, language)
# TODO: remove this when HFTransformersNLP is removed for good
if isinstance(config, Metadata):
hf_transformers_loaded = "HFTransformersNLP" in [
c["name"] for c in config.metadata["pipeline"]
]
else:
hf_transformers_loaded = "HFTransformersNLP" in config.component_names
return cls(component_config, hf_transformers_loaded=hf_transformers_loaded)
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Text,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any,
) -> "Component":
"""Load this component from file.
After a component has been trained, it will be persisted by
calling `persist`. When the pipeline gets loaded again,
this component needs to be able to restore itself.
Components can rely on any context attributes that are
created by :meth:`components.Component.create`
calls to components previous to this one.
This method differs from the parent method only in that it calls create
rather than the constructor if the component is not found. This is to
trigger the check for HFTransformersNLP and the method can be removed
when HFTransformersNLP is removed.
Args:
meta: Any configuration parameter related to the model.
model_dir: The directory to load the component from.
model_metadata: The model's :class:`rasa.nlu.model.Metadata`.
cached_component: The cached component.
Returns:
the loaded component
"""
# TODO: remove this when HFTransformersNLP is removed for good
if cached_component:
return cached_component
return cls.create(meta, model_metadata)
def _load_model_metadata(self) -> None:
"""Load the metadata for the specified model and sets these properties.
This includes the model name, model weights, cache directory and the
maximum sequence length the model can handle.
"""
from rasa.nlu.utils.hugging_face.registry import (
model_class_dict,
model_weights_defaults,
)
self.model_name = self.component_config["model_name"]
if self.model_name not in model_class_dict:
raise KeyError(
f"'{self.model_name}' not a valid model name. Choose from "
f"{str(list(model_class_dict.keys()))} or create"
f"a new class inheriting from this class to support your model."
)
self.model_weights = self.component_config["model_weights"]
self.cache_dir = self.component_config["cache_dir"]
if not self.model_weights:
logger.info(
f"Model weights not specified. Will choose default model "
f"weights: {model_weights_defaults[self.model_name]}"
)
self.model_weights = model_weights_defaults[self.model_name]
self.max_model_sequence_length = MAX_SEQUENCE_LENGTHS[self.model_name]
def _load_model_instance(self, skip_model_load: bool) -> None:
"""Try loading the model instance.
Args:
skip_model_load: Skip loading the model instances to save time. This
should be True only for pytests
"""
if skip_model_load:
# This should be True only during pytests
return
from rasa.nlu.utils.hugging_face.registry import (
model_class_dict,
model_tokenizer_dict,
)
logger.debug(f"Loading Tokenizer and Model for {self.model_name}")
self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained(
self.model_weights, cache_dir=self.cache_dir
)
self.model = model_class_dict[self.model_name].from_pretrained(
self.model_weights, cache_dir=self.cache_dir
)
# Use a universal pad token since not all transformer architectures have a
# consistent pad token. Instead of pad_token_id we use unk_token_id because
# pad_token_id is not set for all architectures. We can't add a new token as
# well since vocabulary resizing is not yet supported for TF classes.
# Also, this does not hurt the model predictions since we use an attention mask
# while feeding input.
self.pad_token_id = self.tokenizer.unk_token_id
@classmethod
def cache_key(
cls, component_meta: Dict[Text, Any], model_metadata: Metadata
) -> Optional[Text]:
"""Cache the component for future use.
Args:
component_meta: configuration for the component.
model_metadata: configuration for the whole pipeline.
Returns: key of the cache for future retrievals.
"""
weights = component_meta.get("model_weights") or {}
return (
f"{cls.name}-{component_meta.get("model_name")}-"
f"{rasa.shared.utils.io.deep_container_fingerprint(weights)}"
)
@classmethod
def required_packages(cls) -> List[Text]:
"""Packages needed to be installed."""
return ["transformers"]
def _lm_tokenize(self, text: Text) -> Tuple[List[int], List[Text]]:
"""Pass the text through the tokenizer of the language model.
Args:
text: Text to be tokenized.
Returns: List of token ids and token strings.
"""
split_token_ids = self.tokenizer.encode(text, add_special_tokens=False)
split_token_strings = self.tokenizer.convert_ids_to_tokens(split_token_ids)
return split_token_ids, split_token_strings
def _add_lm_specific_special_tokens(
self, token_ids: List[List[int]]
) -> List[List[int]]:
"""Add language model specific special tokens which were used during
their training.
Args:
token_ids: List of token ids for each example in the batch.
Returns: Augmented list of token ids for each example in the batch.
"""
from rasa.nlu.utils.hugging_face.registry import (
model_special_tokens_pre_processors,
)
augmented_tokens = [
model_special_tokens_pre_processors[self.model_name](example_token_ids)
for example_token_ids in token_ids
]
return augmented_tokens
def _lm_specific_token_cleanup(
self, split_token_ids: List[int], token_strings: List[Text]
) -> Tuple[List[int], List[Text]]:
"""Clean up special chars added by tokenizers of language models.
Many language models add a special char in front/back of (some) words. We clean
up those chars as they are not
needed once the features are already computed.
Args:
split_token_ids: List of token ids received as output from the language
model specific tokenizer.
token_strings: List of token strings received as output from the language
model specific tokenizer.
Returns: Cleaned up token ids and token strings.
"""
from rasa.nlu.utils.hugging_face.registry import model_tokens_cleaners
return model_tokens_cleaners[self.model_name](split_token_ids, token_strings)
def _post_process_sequence_embeddings(
self, sequence_embeddings: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute sentence and sequence level representations for relevant tokens.
Args:
sequence_embeddings: Sequence level dense features received as output from
language model.
Returns: Sentence and sequence level representations.
"""
from rasa.nlu.utils.hugging_face.registry import (
model_embeddings_post_processors,
)
sentence_embeddings = []
post_processed_sequence_embeddings = []
for example_embedding in sequence_embeddings:
(
example_sentence_embedding,
example_post_processed_embedding,
) = model_embeddings_post_processors[self.model_name](example_embedding)
sentence_embeddings.append(example_sentence_embedding)
post_processed_sequence_embeddings.append(example_post_processed_embedding)
return (
np.array(sentence_embeddings),
np.array(post_processed_sequence_embeddings),
)
def _tokenize_example(
self, message: Message, attribute: Text
) -> Tuple[List[Token], List[int]]:
"""Tokenize a single message example.
Many language models add a special char in front of (some) words and split
words into sub-words. To ensure the entity start and end values match the
token values, use the tokens produced by the Tokenizer component. If
individual tokens are split up into multiple tokens, we add this information
to the respective token.
Args:
message: Single message object to be processed.
attribute: Property of message to be processed, one of ``TEXT`` or
``RESPONSE``.
Returns: List of token strings and token ids for the corresponding
attribute of the message.
"""
tokens_in = message.get(TOKENS_NAMES[attribute])
tokens_out = []
token_ids_out = []
for token in tokens_in:
# use lm specific tokenizer to further tokenize the text
split_token_ids, split_token_strings = self._lm_tokenize(token.text)
if not split_token_ids:
# Handle the case where `token.text` only contains whitespace or other
# special characters, which leaves `split_token_ids` and
# `split_token_strings` empty and would otherwise cause
# `self._lm_specific_token_cleanup()` to raise an exception
continue
(split_token_ids, split_token_strings) = self._lm_specific_token_cleanup(
split_token_ids, split_token_strings
)
token_ids_out += split_token_ids
token.set(NUMBER_OF_SUB_TOKENS, len(split_token_strings))
tokens_out.append(token)
return tokens_out, token_ids_out
def _get_token_ids_for_batch(
self, batch_examples: List[Message], attribute: Text
) -> Tuple[List[List[Token]], List[List[int]]]:
"""Compute token ids and token strings for each example in batch.
A token id is the id of that token in the vocabulary of the language model.
Args:
batch_examples: Batch of message objects for which tokens need to be
computed.
attribute: Property of message to be processed, one of ``TEXT`` or
``RESPONSE``.
Returns: List of token strings and token ids for each example in the batch.
"""
batch_token_ids = []
batch_tokens = []
for example in batch_examples:
example_tokens, example_token_ids = self._tokenize_example(
example, attribute
)
batch_tokens.append(example_tokens)
batch_token_ids.append(example_token_ids)
return batch_tokens, batch_token_ids
@staticmethod
def _compute_attention_mask(
actual_sequence_lengths: List[int], max_input_sequence_length: int
) -> np.ndarray:
"""Compute a mask for padding tokens.
This mask will be used by the language model so that it does not attend to
padding tokens.
Args:
actual_sequence_lengths: List of length of each example without any
padding.
max_input_sequence_length: Maximum length of a sequence that will be
present in the input batch. This is
after taking into consideration the maximum input sequence the model
can handle. Hence it can never be
greater than self.max_model_sequence_length in case the model
applies length restriction.
Returns: Computed attention mask, 0 for padding and 1 for non-padding
tokens.
"""
attention_mask = []
for actual_sequence_length in actual_sequence_lengths:
# add 1s for present tokens, fill up the remaining space up to max
# sequence length with 0s (non-existing tokens)
padded_sequence = [1] * min(
actual_sequence_length, max_input_sequence_length
) + [0] * (
max_input_sequence_length
- min(actual_sequence_length, max_input_sequence_length)
)
attention_mask.append(padded_sequence)
attention_mask = np.array(attention_mask).astype(np.float32)
return attention_mask
def _extract_sequence_lengths(
self, batch_token_ids: List[List[int]]
) -> Tuple[List[int], int]:
"""Extracts the sequence length for each example and maximum sequence length.
Args:
batch_token_ids: List of token ids for each example in the batch.
Returns:
Tuple consisting of: the actual sequence lengths for each example,
and the maximum input sequence length (taking into account the
maximum sequence length that the model can handle).
"""
# Compute max length across examples
max_input_sequence_length = 0
actual_sequence_lengths = []
for example_token_ids in batch_token_ids:
sequence_length = len(example_token_ids)
actual_sequence_lengths.append(sequence_length)
max_input_sequence_length = max(
max_input_sequence_length, len(example_token_ids)
)
# Take into account the maximum sequence length the model can handle
max_input_sequence_length = (
max_input_sequence_length
if self.max_model_sequence_length == NO_LENGTH_RESTRICTION
else min(max_input_sequence_length, self.max_model_sequence_length)
)
return actual_sequence_lengths, max_input_sequence_length
def _add_padding_to_batch(
self, batch_token_ids: List[List[int]], max_sequence_length_model: int
) -> List[List[int]]:
"""Add padding so that all examples in the batch are of the same length.
Args:
batch_token_ids: Batch of examples where each example is a non-padded list
of token ids.
max_sequence_length_model: Maximum length of any input sequence in the batch
to be fed to the model.
Returns:
Padded batch with all examples of the same length.
"""
padded_token_ids = []
# Add padding according to max_sequence_length
# Some models don't contain pad token, we use unknown token as padding token.
# This doesn't affect the computation since we compute an attention mask
# anyways.
for example_token_ids in batch_token_ids:
# Truncate any longer sequences so that they can be fed to the model
if len(example_token_ids) > max_sequence_length_model:
example_token_ids = example_token_ids[:max_sequence_length_model]
padded_token_ids.append(
example_token_ids
+ [self.pad_token_id]
* (max_sequence_length_model - len(example_token_ids))
)
return padded_token_ids
@staticmethod
def _extract_nonpadded_embeddings(
embeddings: np.ndarray, actual_sequence_lengths: List[int]
) -> np.ndarray:
"""Extract embeddings for actual tokens.
Use pre-computed non-padded lengths of each example to extract embeddings
for non-padding tokens.
Args:
embeddings: sequence level representations for each example of the batch.
actual_sequence_lengths: non-padded lengths of each example of the batch.
Returns:
Sequence level embeddings for only non-padding tokens of the batch.
"""
nonpadded_sequence_embeddings = []
for index, embedding in enumerate(embeddings):
unmasked_embedding = embedding[: actual_sequence_lengths[index]]
nonpadded_sequence_embeddings.append(unmasked_embedding)
return np.array(nonpadded_sequence_embeddings)
def _compute_batch_sequence_features(
self, batch_attention_mask: np.ndarray, padded_token_ids: List[List[int]]
) -> np.ndarray:
"""Feed the padded batch to the language model.
Args:
batch_attention_mask: Mask of 0s and 1s which indicate whether the token
is a padding token or not.
padded_token_ids: Batch of token ids for each example. The batch is padded
and hence can be fed at once.
Returns:
Sequence level representations from the language model.
"""
model_outputs = self.model(
np.array(padded_token_ids), attention_mask=np.array(batch_attention_mask)
)
# sequence hidden states is always the first output from all models
sequence_hidden_states = model_outputs[0]
sequence_hidden_states = sequence_hidden_states.numpy()
return sequence_hidden_states
def _validate_sequence_lengths(
self,
actual_sequence_lengths: List[int],
batch_examples: List[Message],
attribute: Text,
inference_mode: bool = False,
) -> None:
"""Validate if sequence lengths of all inputs are less the max sequence
length the model can handle.
This method should throw an error during training, and log a debug
message during inference, if any of the input examples have a length
greater than the maximum sequence length allowed.
Args:
actual_sequence_lengths: original sequence length of all inputs
batch_examples: all message instances in the batch
attribute: attribute of message object to be processed
inference_mode: Whether this is during training or during inferencing
"""
if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:
# There is no restriction on sequence length from the model
return
for sequence_length, example in zip(actual_sequence_lengths, batch_examples):
if sequence_length > self.max_model_sequence_length:
if not inference_mode:
raise RuntimeError(
f"The sequence length of '{example.get(attribute)[:20]}...' "
f"is too long({sequence_length} tokens) for the "
f"model chosen {self.model_name} which has a maximum "
f"sequence length of {self.max_model_sequence_length} tokens. "
f"Either shorten the message or use a model which has no "
f"restriction on input sequence length like XLNet."
)
logger.debug(
f"The sequence length of '{example.get(attribute)[:20]}...' "
f"is too long({sequence_length} tokens) for the "
f"model chosen {self.model_name} which has a maximum "
f"sequence length of {self.max_model_sequence_length} tokens. "
f"Downstream model predictions may be affected because of this."
)
def _add_extra_padding(
self, sequence_embeddings: np.ndarray, actual_sequence_lengths: List[int]
) -> np.ndarray:
"""Add extra zero padding to match the original sequence length.
This is only done if the input was truncated during the batch
preparation of input for the model.
Args:
sequence_embeddings: Embeddings returned from the model
actual_sequence_lengths: original sequence length of all inputs
Returns:
Modified sequence embeddings with padding if necessary
"""
if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:
# No extra padding needed because there wouldn't have been any
# truncation in the first place
return sequence_embeddings
reshaped_sequence_embeddings = []
for index, embedding in enumerate(sequence_embeddings):
embedding_size = embedding.shape[-1]
if actual_sequence_lengths[index] > self.max_model_sequence_length:
embedding = np.concatenate(
[
embedding,
np.zeros(
(
actual_sequence_lengths[index]
- self.max_model_sequence_length,
embedding_size,
),
dtype=np.float32,
),
]
)
reshaped_sequence_embeddings.append(embedding)
return np.array(reshaped_sequence_embeddings)
def _get_model_features_for_batch(
self,
batch_token_ids: List[List[int]],
batch_tokens: List[List[Token]],
batch_examples: List[Message],
attribute: Text,
inference_mode: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute dense features of each example in the batch.
We first add the special tokens corresponding to each language model. Next, we
add appropriate padding and compute a mask for that padding so that it doesn't
affect the feature computation. The padded batch is next fed to the language
model and token level embeddings are computed. Using the pre-computed mask,
embeddings for non-padding tokens are extracted and subsequently sentence
level embeddings are computed.
Args:
batch_token_ids: List of token ids of each example in the batch.
batch_tokens: List of token objects for each example in the batch.
batch_examples: List of examples in the batch.
attribute: attribute of the Message object to be processed.
inference_mode: Whether the call is during training or during inference.
Returns:
Sentence and token level dense representations.
"""
# Let's first add tokenizer specific special tokens to all examples
batch_token_ids_augmented = self._add_lm_specific_special_tokens(
batch_token_ids
)
# Compute sequence lengths for all examples
(
actual_sequence_lengths,
max_input_sequence_length,
) = self._extract_sequence_lengths(batch_token_ids_augmented)
# Validate that all sequences can be processed based on their sequence
# lengths and the maximum sequence length the model can handle
self._validate_sequence_lengths(
actual_sequence_lengths, batch_examples, attribute, inference_mode
)
# Add padding so that whole batch can be fed to the model
padded_token_ids = self._add_padding_to_batch(
batch_token_ids_augmented, max_input_sequence_length
)
# Compute attention mask based on actual_sequence_length
batch_attention_mask = self._compute_attention_mask(
actual_sequence_lengths, max_input_sequence_length
)
# Get token level features from the model
sequence_hidden_states = self._compute_batch_sequence_features(
batch_attention_mask, padded_token_ids
)
# Extract features for only non-padding tokens
sequence_nonpadded_embeddings = self._extract_nonpadded_embeddings(
sequence_hidden_states, actual_sequence_lengths
)
# Extract sentence level and post-processed features
(
sentence_embeddings,
sequence_embeddings,
) = self._post_process_sequence_embeddings(sequence_nonpadded_embeddings)
# Pad zeros for examples which were truncated in inference mode.
# This is intentionally done after sentence embeddings have been
# extracted so that they are not affected
sequence_embeddings = self._add_extra_padding(
sequence_embeddings, actual_sequence_lengths
)
# shape of matrix for all sequence embeddings
batch_dim = len(sequence_embeddings)
seq_dim = max(e.shape[0] for e in sequence_embeddings)
feature_dim = sequence_embeddings[0].shape[1]
shape = (batch_dim, seq_dim, feature_dim)
# align features with tokens so that we have just one vector per token
# (don't include sub-tokens)
sequence_embeddings = train_utils.align_token_features(
batch_tokens, sequence_embeddings, shape
)
# sequence_embeddings is a padded numpy array
# remove the padding, keep just the non-zero vectors
sequence_final_embeddings = []
for embeddings, tokens in zip(sequence_embeddings, batch_tokens):
sequence_final_embeddings.append(embeddings[: len(tokens)])
sequence_final_embeddings = np.array(sequence_final_embeddings)
return sentence_embeddings, sequence_final_embeddings
def _get_docs_for_batch(
self,
batch_examples: List[Message],
attribute: Text,
inference_mode: bool = False,
) -> List[Dict[Text, Any]]:
"""Compute language model docs for all examples in the batch.
Args:
batch_examples: Batch of message objects for which language model docs
need to be computed.
attribute: Property of message to be processed, one of ``TEXT`` or
``RESPONSE``.
inference_mode: Whether the call is during inference or during training.
Returns:
List of language model docs for each message in batch.
"""
hf_transformers_doc = batch_examples[0].get(LANGUAGE_MODEL_DOCS[attribute])
if hf_transformers_doc:
# This should only be the case if the deprecated
# HFTransformersNLP component is used in the pipeline
# TODO: remove this when HFTransformersNLP is removed for good
logger.debug(
f"'{LANGUAGE_MODEL_DOCS[attribute]}' set: this "
f"indicates you're using the deprecated component "
f"HFTransformersNLP, please remove it from your "
f"pipeline."
)
return [ex.get(LANGUAGE_MODEL_DOCS[attribute]) for ex in batch_examples]
batch_tokens, batch_token_ids = self._get_token_ids_for_batch(
batch_examples, attribute
)
(
batch_sentence_features,
batch_sequence_features,
) = self._get_model_features_for_batch(
batch_token_ids, batch_tokens, batch_examples, attribute, inference_mode
)
# A doc consists of
# {'sequence_features': ..., 'sentence_features': ...}
batch_docs = []
for index in range(len(batch_examples)):
doc = {
SEQUENCE_FEATURES: batch_sequence_features[index],
SENTENCE_FEATURES: np.reshape(batch_sentence_features[index], (1, -1)),
}
batch_docs.append(doc)
return batch_docs
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
"""Compute tokens and dense features for each message in training data.
Args:
training_data: NLU training data to be tokenized and featurized
config: NLU pipeline config consisting of all components.
"""
batch_size = 64
for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:
non_empty_examples = list(
filter(lambda x: x.get(attribute), training_data.training_examples)
)
batch_start_index = 0
while batch_start_index < len(non_empty_examples):
batch_end_index = min(
batch_start_index + batch_size, len(non_empty_examples)
)
# Collect batch examples
batch_messages = non_empty_examples[batch_start_index:batch_end_index]
# Construct a doc with relevant features
# extracted(tokens, dense_features)
batch_docs = self._get_docs_for_batch(batch_messages, attribute)
for index, ex in enumerate(batch_messages):
self._set_lm_features(batch_docs[index], ex, attribute)
batch_start_index += batch_size
def process(self, message: Message, **kwargs: Any) -> None:
"""Process an incoming message by computing its tokens and dense features.
Args:
message: Incoming message object
"""
# The `process` method of all featurizers operates only on the TEXT and
# ACTION_TEXT attributes, because all other attributes are labels which are
# featurized during training and whose features are stored by the model itself.
for attribute in {TEXT, ACTION_TEXT}:
if message.get(attribute):
self._set_lm_features(
self._get_docs_for_batch(
[message], attribute=attribute, inference_mode=True
)[0],
message,
attribute,
)
def _set_lm_features(
self, doc: Dict[Text, Any], message: Message, attribute: Text = TEXT
) -> None:
"""Adds the precomputed word vectors to the messages features."""
sequence_features = doc[SEQUENCE_FEATURES]
sentence_features = doc[SENTENCE_FEATURES]
final_sequence_features = Features(
sequence_features,
FEATURE_TYPE_SEQUENCE,
attribute,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sequence_features)
final_sentence_features = Features(
sentence_features,
FEATURE_TYPE_SENTENCE,
attribute,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sentence_features)
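# --- Editor-added illustrative sketch (not part of the component above) -----
# A minimal, standalone demonstration of the right-padding and attention-mask
# scheme used by the featurizer: every example is padded to the longest
# sequence in the batch and the mask marks real tokens with 1 and padding
# with 0. The token ids and the pad id 0 are made-up placeholders, not the
# component's actual vocabulary or pad_token_id.
def _demo_padding_and_attention_mask():
    import numpy as np

    batch_token_ids = [[101, 7592, 102], [101, 7592, 2088, 999, 102]]
    max_len = max(len(ids) for ids in batch_token_ids)

    padded = [ids + [0] * (max_len - len(ids)) for ids in batch_token_ids]
    mask = np.array(
        [[1] * len(ids) + [0] * (max_len - len(ids)) for ids in batch_token_ids],
        dtype=np.float32,
    )
    # padded -> [[101, 7592, 102, 0, 0], [101, 7592, 2088, 999, 102]]
    # mask   -> [[1., 1., 1., 0., 0.], [1., 1., 1., 1., 1.]]
    return padded, mask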
| import numpy as np
import logging
from typing import Any, Optional, Text, List, Type, Dict, Tuple
import rasa.core.utils
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.components import Component, UnsupportedLanguageError
from rasa.nlu.featurizers.featurizer import DenseFeaturizer
from rasa.nlu.model import Metadata
import rasa.shared.utils.io
from rasa.shared.nlu.training_data.features import Features
from rasa.nlu.tokenizers.tokenizer import Tokenizer, Token
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.constants import (
DENSE_FEATURIZABLE_ATTRIBUTES,
SEQUENCE_FEATURES,
SENTENCE_FEATURES,
FEATURIZER_CLASS_ALIAS,
NO_LENGTH_RESTRICTION,
NUMBER_OF_SUB_TOKENS,
TOKENS_NAMES,
LANGUAGE_MODEL_DOCS,
)
from rasa.shared.nlu.constants import (
TEXT,
FEATURE_TYPE_SENTENCE,
FEATURE_TYPE_SEQUENCE,
ACTION_TEXT,
)
from rasa.utils import train_utils
MAX_SEQUENCE_LENGTHS = {
"bert": 512,
"gpt": 512,
"gpt2": 512,
"xlnet": NO_LENGTH_RESTRICTION,
"distilbert": 512,
"roberta": 512,
}
logger = logging.getLogger(__name__)
class LanguageModelFeaturizer(DenseFeaturizer):
"""Featurizer using transformer-based language models.
The transformers(https://github.com/huggingface/transformers) library
is used to load pre-trained language models like BERT, GPT-2, etc.
The component also tokenizes and featurizes dense featurizable attributes of
each message.
"""
defaults = {
# name of the language model to load.
"model_name": "bert",
# Pre-Trained weights to be loaded(string)
"model_weights": None,
# an optional path to a specific directory to download
# and cache the pre-trained model weights.
"cache_dir": None,
}
@classmethod
def required_components(cls) -> List[Type[Component]]:
"""Packages needed to be installed."""
return [Tokenizer]
def __init__(
self,
component_config: Optional[Dict[Text, Any]] = None,
skip_model_load: bool = False,
hf_transformers_loaded: bool = False,
) -> None:
"""Initializes LanguageModelFeaturizer with the specified model.
Args:
component_config: Configuration for the component.
skip_model_load: Skip loading the model for pytests.
hf_transformers_loaded: Skip loading of model and metadata, use
HFTransformers output instead.
"""
super(LanguageModelFeaturizer, self).__init__(component_config)
if hf_transformers_loaded:
return
self._load_model_metadata()
self._load_model_instance(skip_model_load)
@classmethod
def create(
cls, component_config: Dict[Text, Any], config: RasaNLUModelConfig
) -> "DenseFeaturizer":
language = config.language
if not cls.can_handle_language(language):
# check failed
raise UnsupportedLanguageError(cls.name, language)
# TODO: remove this when HFTransformersNLP is removed for good
if isinstance(config, Metadata):
hf_transformers_loaded = "HFTransformersNLP" in [
c["name"] for c in config.metadata["pipeline"]
]
else:
hf_transformers_loaded = "HFTransformersNLP" in config.component_names
return cls(component_config, hf_transformers_loaded=hf_transformers_loaded)
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Text,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any,
) -> "Component":
"""Load this component from file.
After a component has been trained, it will be persisted by
calling `persist`. When the pipeline gets loaded again,
this component needs to be able to restore itself.
Components can rely on any context attributes that are
created by :meth:`components.Component.create`
calls to components previous to this one.
This method differs from the parent method only in that it calls create
rather than the constructor if the component is not found. This is to
trigger the check for HFTransformersNLP and the method can be removed
when HFTRansformersNLP is removed.
Args:
meta: Any configuration parameter related to the model.
model_dir: The directory to load the component from.
model_metadata: The model's :class:`rasa.nlu.model.Metadata`.
cached_component: The cached component.
Returns:
the loaded component
"""
# TODO: remove this when HFTransformersNLP is removed for good
if cached_component:
return cached_component
return cls.create(meta, model_metadata)
def _load_model_metadata(self) -> None:
"""Load the metadata for the specified model and sets these properties.
This includes the model name, model weights, cache directory and the
maximum sequence length the model can handle.
"""
from rasa.nlu.utils.hugging_face.registry import (
model_class_dict,
model_weights_defaults,
)
self.model_name = self.component_config["model_name"]
if self.model_name not in model_class_dict:
raise KeyError(
f"'{self.model_name}' not a valid model name. Choose from "
f"{str(list(model_class_dict.keys()))} or create"
f"a new class inheriting from this class to support your model."
)
self.model_weights = self.component_config["model_weights"]
self.cache_dir = self.component_config["cache_dir"]
if not self.model_weights:
logger.info(
f"Model weights not specified. Will choose default model "
f"weights: {model_weights_defaults[self.model_name]}"
)
self.model_weights = model_weights_defaults[self.model_name]
self.max_model_sequence_length = MAX_SEQUENCE_LENGTHS[self.model_name]
def _load_model_instance(self, skip_model_load: bool) -> None:
"""Try loading the model instance.
Args:
skip_model_load: Skip loading the model instances to save time. This
should be True only for pytests
"""
if skip_model_load:
# This should be True only during pytests
return
from rasa.nlu.utils.hugging_face.registry import (
model_class_dict,
model_tokenizer_dict,
)
logger.debug(f"Loading Tokenizer and Model for {self.model_name}")
self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained(
self.model_weights, cache_dir=self.cache_dir
)
self.model = model_class_dict[self.model_name].from_pretrained(
self.model_weights, cache_dir=self.cache_dir
)
# Use a universal pad token since all transformer architectures do not have a
# consistent token. Instead of pad_token_id we use unk_token_id because
# pad_token_id is not set for all architectures. We can't add a new token as
# well since vocabulary resizing is not yet supported for TF classes.
# Also, this does not hurt the model predictions since we use an attention mask
# while feeding input.
self.pad_token_id = self.tokenizer.unk_token_id
@classmethod
def cache_key(
cls, component_meta: Dict[Text, Any], model_metadata: Metadata
) -> Optional[Text]:
"""Cache the component for future use.
Args:
component_meta: configuration for the component.
model_metadata: configuration for the whole pipeline.
Returns: key of the cache for future retrievals.
"""
weights = component_meta.get("model_weights") or {}
return (
f"{cls.name}-{component_meta.get('model_name')}-"
f"{rasa.shared.utils.io.deep_container_fingerprint(weights)}"
)
@classmethod
def required_packages(cls) -> List[Text]:
"""Packages needed to be installed."""
return ["transformers"]
def _lm_tokenize(self, text: Text) -> Tuple[List[int], List[Text]]:
"""Pass the text through the tokenizer of the language model.
Args:
text: Text to be tokenized.
Returns: List of token ids and token strings.
"""
split_token_ids = self.tokenizer.encode(text, add_special_tokens=False)
split_token_strings = self.tokenizer.convert_ids_to_tokens(split_token_ids)
return split_token_ids, split_token_strings
def _add_lm_specific_special_tokens(
self, token_ids: List[List[int]]
) -> List[List[int]]:
"""Add language model specific special tokens which were used during
their training.
Args:
token_ids: List of token ids for each example in the batch.
Returns: Augmented list of token ids for each example in the batch.
"""
from rasa.nlu.utils.hugging_face.registry import (
model_special_tokens_pre_processors,
)
augmented_tokens = [
model_special_tokens_pre_processors[self.model_name](example_token_ids)
for example_token_ids in token_ids
]
return augmented_tokens
def _lm_specific_token_cleanup(
self, split_token_ids: List[int], token_strings: List[Text]
) -> Tuple[List[int], List[Text]]:
"""Clean up special chars added by tokenizers of language models.
Many language models add a special char in front/back of (some) words. We clean
up those chars as they are not
needed once the features are already computed.
Args:
split_token_ids: List of token ids received as output from the language
model specific tokenizer.
token_strings: List of token strings received as output from the language
model specific tokenizer.
Returns: Cleaned up token ids and token strings.
"""
from rasa.nlu.utils.hugging_face.registry import model_tokens_cleaners
return model_tokens_cleaners[self.model_name](split_token_ids, token_strings)
def _post_process_sequence_embeddings(
self, sequence_embeddings: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute sentence and sequence level representations for relevant tokens.
Args:
sequence_embeddings: Sequence level dense features received as output from
language model.
Returns: Sentence and sequence level representations.
"""
from rasa.nlu.utils.hugging_face.registry import (
model_embeddings_post_processors,
)
sentence_embeddings = []
post_processed_sequence_embeddings = []
for example_embedding in sequence_embeddings:
(
example_sentence_embedding,
example_post_processed_embedding,
) = model_embeddings_post_processors[self.model_name](example_embedding)
sentence_embeddings.append(example_sentence_embedding)
post_processed_sequence_embeddings.append(example_post_processed_embedding)
return (
np.array(sentence_embeddings),
np.array(post_processed_sequence_embeddings),
)
def _tokenize_example(
self, message: Message, attribute: Text
) -> Tuple[List[Token], List[int]]:
"""Tokenize a single message example.
Many language models add a special char in front of (some) words and split
words into sub-words. To ensure that the entity start and end values match the
token values, use the tokens produced by the Tokenizer component. If
individual tokens are split up into multiple sub-tokens, we add this
information to the respective token.
Args:
message: Single message object to be processed.
attribute: Property of message to be processed, one of ``TEXT`` or
``RESPONSE``.
Returns: List of token strings and token ids for the corresponding
attribute of the message.
"""
tokens_in = message.get(TOKENS_NAMES[attribute])
tokens_out = []
token_ids_out = []
for token in tokens_in:
# use lm specific tokenizer to further tokenize the text
split_token_ids, split_token_strings = self._lm_tokenize(token.text)
if not split_token_ids:
# Handle the case where `token.text` contains only whitespace or other
# special characters, which causes `split_token_ids` and
# `split_token_strings` to be empty and would make
# `self._lm_specific_token_cleanup()` raise an exception
continue
(split_token_ids, split_token_strings) = self._lm_specific_token_cleanup(
split_token_ids, split_token_strings
)
token_ids_out += split_token_ids
token.set(NUMBER_OF_SUB_TOKENS, len(split_token_strings))
tokens_out.append(token)
return tokens_out, token_ids_out
def _get_token_ids_for_batch(
self, batch_examples: List[Message], attribute: Text
) -> Tuple[List[List[Token]], List[List[int]]]:
"""Compute token ids and token strings for each example in batch.
A token id is the id of that token in the vocabulary of the language model.
Args:
batch_examples: Batch of message objects for which tokens need to be
computed.
attribute: Property of message to be processed, one of ``TEXT`` or
``RESPONSE``.
Returns: List of token strings and token ids for each example in the batch.
"""
batch_token_ids = []
batch_tokens = []
for example in batch_examples:
example_tokens, example_token_ids = self._tokenize_example(
example, attribute
)
batch_tokens.append(example_tokens)
batch_token_ids.append(example_token_ids)
return batch_tokens, batch_token_ids
@staticmethod
def _compute_attention_mask(
actual_sequence_lengths: List[int], max_input_sequence_length: int
) -> np.ndarray:
"""Compute a mask for padding tokens.
This mask will be used by the language model so that it does not attend to
padding tokens.
Args:
actual_sequence_lengths: List of length of each example without any
padding.
max_input_sequence_length: Maximum length of a sequence that will be
present in the input batch. This is
after taking into consideration the maximum input sequence the model
can handle. Hence it can never be
greater than self.max_model_sequence_length in case the model
applies length restriction.
Returns: Computed attention mask, 0 for padding and 1 for non-padding
tokens.
"""
attention_mask = []
for actual_sequence_length in actual_sequence_lengths:
# add 1s for present tokens, fill up the remaining space up to max
# sequence length with 0s (non-existing tokens)
padded_sequence = [1] * min(
actual_sequence_length, max_input_sequence_length
) + [0] * (
max_input_sequence_length
- min(actual_sequence_length, max_input_sequence_length)
)
attention_mask.append(padded_sequence)
attention_mask = np.array(attention_mask).astype(np.float32)
return attention_mask
def _extract_sequence_lengths(
self, batch_token_ids: List[List[int]]
) -> Tuple[List[int], int]:
"""Extracts the sequence length for each example and maximum sequence length.
Args:
batch_token_ids: List of token ids for each example in the batch.
Returns:
Tuple consisting of: the actual sequence lengths for each example,
and the maximum input sequence length (taking into account the
maximum sequence length that the model can handle).
"""
# Compute max length across examples
max_input_sequence_length = 0
actual_sequence_lengths = []
for example_token_ids in batch_token_ids:
sequence_length = len(example_token_ids)
actual_sequence_lengths.append(sequence_length)
max_input_sequence_length = max(
max_input_sequence_length, len(example_token_ids)
)
# Take into account the maximum sequence length the model can handle
max_input_sequence_length = (
max_input_sequence_length
if self.max_model_sequence_length == NO_LENGTH_RESTRICTION
else min(max_input_sequence_length, self.max_model_sequence_length)
)
return actual_sequence_lengths, max_input_sequence_length
def _add_padding_to_batch(
self, batch_token_ids: List[List[int]], max_sequence_length_model: int
) -> List[List[int]]:
"""Add padding so that all examples in the batch are of the same length.
Args:
batch_token_ids: Batch of examples where each example is a non-padded list
of token ids.
max_sequence_length_model: Maximum length of any input sequence in the batch
to be fed to the model.
Returns:
Padded batch with all examples of the same length.
"""
padded_token_ids = []
# Add padding according to max_sequence_length
# Some models don't have a pad token, so we use the unknown token as the
# padding token. This doesn't affect the computation since we compute an
# attention mask anyway.
for example_token_ids in batch_token_ids:
# Truncate any longer sequences so that they can be fed to the model
if len(example_token_ids) > max_sequence_length_model:
example_token_ids = example_token_ids[:max_sequence_length_model]
padded_token_ids.append(
example_token_ids
+ [self.pad_token_id]
* (max_sequence_length_model - len(example_token_ids))
)
return padded_token_ids
@staticmethod
def _extract_nonpadded_embeddings(
embeddings: np.ndarray, actual_sequence_lengths: List[int]
) -> np.ndarray:
"""Extract embeddings for actual tokens.
Use pre-computed non-padded lengths of each example to extract embeddings
for non-padding tokens.
Args:
embeddings: sequence level representations for each example of the batch.
actual_sequence_lengths: non-padded lengths of each example of the batch.
Returns:
Sequence level embeddings for only non-padding tokens of the batch.
"""
nonpadded_sequence_embeddings = []
for index, embedding in enumerate(embeddings):
unmasked_embedding = embedding[: actual_sequence_lengths[index]]
nonpadded_sequence_embeddings.append(unmasked_embedding)
return np.array(nonpadded_sequence_embeddings)
def _compute_batch_sequence_features(
self, batch_attention_mask: np.ndarray, padded_token_ids: List[List[int]]
) -> np.ndarray:
"""Feed the padded batch to the language model.
Args:
batch_attention_mask: Mask of 0s and 1s which indicate whether the token
is a padding token or not.
padded_token_ids: Batch of token ids for each example. The batch is padded
and hence can be fed at once.
Returns:
Sequence level representations from the language model.
"""
model_outputs = self.model(
np.array(padded_token_ids), attention_mask=np.array(batch_attention_mask)
)
# sequence hidden states is always the first output from all models
sequence_hidden_states = model_outputs[0]
sequence_hidden_states = sequence_hidden_states.numpy()
return sequence_hidden_states
def _validate_sequence_lengths(
self,
actual_sequence_lengths: List[int],
batch_examples: List[Message],
attribute: Text,
inference_mode: bool = False,
) -> None:
"""Validate if sequence lengths of all inputs are less the max sequence
length the model can handle.
This method should throw an error during training, whereas log a debug
message during inference if any of the input examples have a length
greater than maximum sequence length allowed.
Args:
actual_sequence_lengths: original sequence length of all inputs
batch_examples: all message instances in the batch
attribute: attribute of message object to be processed
inference_mode: Whether this is during training or during inferencing
"""
if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:
# There is no restriction on sequence length from the model
return
for sequence_length, example in zip(actual_sequence_lengths, batch_examples):
if sequence_length > self.max_model_sequence_length:
if not inference_mode:
raise RuntimeError(
f"The sequence length of '{example.get(attribute)[:20]}...' "
f"is too long({sequence_length} tokens) for the "
f"model chosen {self.model_name} which has a maximum "
f"sequence length of {self.max_model_sequence_length} tokens. "
f"Either shorten the message or use a model which has no "
f"restriction on input sequence length like XLNet."
)
logger.debug(
f"The sequence length of '{example.get(attribute)[:20]}...' "
f"is too long({sequence_length} tokens) for the "
f"model chosen {self.model_name} which has a maximum "
f"sequence length of {self.max_model_sequence_length} tokens. "
f"Downstream model predictions may be affected because of this."
)
def _add_extra_padding(
self, sequence_embeddings: np.ndarray, actual_sequence_lengths: List[int]
) -> np.ndarray:
"""Add extra zero padding to match the original sequence length.
This is only done if the input was truncated during the batch
preparation of input for the model.
Args:
sequence_embeddings: Embeddings returned from the model
actual_sequence_lengths: original sequence length of all inputs
Returns:
Modified sequence embeddings with padding if necessary
"""
if self.max_model_sequence_length == NO_LENGTH_RESTRICTION:
# No extra padding needed because there wouldn't have been any
# truncation in the first place
return sequence_embeddings
reshaped_sequence_embeddings = []
for index, embedding in enumerate(sequence_embeddings):
embedding_size = embedding.shape[-1]
if actual_sequence_lengths[index] > self.max_model_sequence_length:
embedding = np.concatenate(
[
embedding,
np.zeros(
(
actual_sequence_lengths[index]
- self.max_model_sequence_length,
embedding_size,
),
dtype=np.float32,
),
]
)
reshaped_sequence_embeddings.append(embedding)
return np.array(reshaped_sequence_embeddings)
def _get_model_features_for_batch(
self,
batch_token_ids: List[List[int]],
batch_tokens: List[List[Token]],
batch_examples: List[Message],
attribute: Text,
inference_mode: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute dense features of each example in the batch.
We first add the special tokens corresponding to each language model. Next, we
add appropriate padding and compute a mask for that padding so that it doesn't
affect the feature computation. The padded batch is next fed to the language
model and token level embeddings are computed. Using the pre-computed mask,
embeddings for non-padding tokens are extracted and subsequently sentence
level embeddings are computed.
Args:
batch_token_ids: List of token ids of each example in the batch.
batch_tokens: List of token objects for each example in the batch.
batch_examples: List of examples in the batch.
attribute: attribute of the Message object to be processed.
inference_mode: Whether the call is during training or during inference.
Returns:
Sentence and token level dense representations.
"""
# Let's first add tokenizer specific special tokens to all examples
batch_token_ids_augmented = self._add_lm_specific_special_tokens(
batch_token_ids
)
# Compute sequence lengths for all examples
(
actual_sequence_lengths,
max_input_sequence_length,
) = self._extract_sequence_lengths(batch_token_ids_augmented)
# Validate that all sequences can be processed based on their sequence
# lengths and the maximum sequence length the model can handle
self._validate_sequence_lengths(
actual_sequence_lengths, batch_examples, attribute, inference_mode
)
# Add padding so that whole batch can be fed to the model
padded_token_ids = self._add_padding_to_batch(
batch_token_ids_augmented, max_input_sequence_length
)
# Compute attention mask based on actual_sequence_length
batch_attention_mask = self._compute_attention_mask(
actual_sequence_lengths, max_input_sequence_length
)
# Get token level features from the model
sequence_hidden_states = self._compute_batch_sequence_features(
batch_attention_mask, padded_token_ids
)
# Extract features for only non-padding tokens
sequence_nonpadded_embeddings = self._extract_nonpadded_embeddings(
sequence_hidden_states, actual_sequence_lengths
)
# Extract sentence level and post-processed features
(
sentence_embeddings,
sequence_embeddings,
) = self._post_process_sequence_embeddings(sequence_nonpadded_embeddings)
# Pad zeros for examples which were truncated in inference mode.
# This is intentionally done after sentence embeddings have been
# extracted so that they are not affected
sequence_embeddings = self._add_extra_padding(
sequence_embeddings, actual_sequence_lengths
)
# shape of matrix for all sequence embeddings
batch_dim = len(sequence_embeddings)
seq_dim = max(e.shape[0] for e in sequence_embeddings)
feature_dim = sequence_embeddings[0].shape[1]
shape = (batch_dim, seq_dim, feature_dim)
# align features with tokens so that we have just one vector per token
# (don't include sub-tokens)
sequence_embeddings = train_utils.align_token_features(
batch_tokens, sequence_embeddings, shape
)
# sequence_embeddings is a padded numpy array
# remove the padding, keep just the non-zero vectors
sequence_final_embeddings = []
for embeddings, tokens in zip(sequence_embeddings, batch_tokens):
sequence_final_embeddings.append(embeddings[: len(tokens)])
sequence_final_embeddings = np.array(sequence_final_embeddings)
return sentence_embeddings, sequence_final_embeddings
def _get_docs_for_batch(
self,
batch_examples: List[Message],
attribute: Text,
inference_mode: bool = False,
) -> List[Dict[Text, Any]]:
"""Compute language model docs for all examples in the batch.
Args:
batch_examples: Batch of message objects for which language model docs
need to be computed.
attribute: Property of message to be processed, one of ``TEXT`` or
``RESPONSE``.
inference_mode: Whether the call is during inference or during training.
Returns:
List of language model docs for each message in batch.
"""
hf_transformers_doc = batch_examples[0].get(LANGUAGE_MODEL_DOCS[attribute])
if hf_transformers_doc:
# This should only be the case if the deprecated
# HFTransformersNLP component is used in the pipeline
# TODO: remove this when HFTransformersNLP is removed for good
logger.debug(
f"'{LANGUAGE_MODEL_DOCS[attribute]}' set: this "
f"indicates you're using the deprecated component "
f"HFTransformersNLP, please remove it from your "
f"pipeline."
)
return [ex.get(LANGUAGE_MODEL_DOCS[attribute]) for ex in batch_examples]
batch_tokens, batch_token_ids = self._get_token_ids_for_batch(
batch_examples, attribute
)
(
batch_sentence_features,
batch_sequence_features,
) = self._get_model_features_for_batch(
batch_token_ids, batch_tokens, batch_examples, attribute, inference_mode
)
# A doc consists of
# {'sequence_features': ..., 'sentence_features': ...}
batch_docs = []
for index in range(len(batch_examples)):
doc = {
SEQUENCE_FEATURES: batch_sequence_features[index],
SENTENCE_FEATURES: np.reshape(batch_sentence_features[index], (1, -1)),
}
batch_docs.append(doc)
return batch_docs
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
"""Compute tokens and dense features for each message in training data.
Args:
training_data: NLU training data to be tokenized and featurized
config: NLU pipeline config consisting of all components.
"""
batch_size = 64
for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:
non_empty_examples = list(
filter(lambda x: x.get(attribute), training_data.training_examples)
)
batch_start_index = 0
while batch_start_index < len(non_empty_examples):
batch_end_index = min(
batch_start_index + batch_size, len(non_empty_examples)
)
# Collect batch examples
batch_messages = non_empty_examples[batch_start_index:batch_end_index]
# Construct a doc with relevant features
# extracted(tokens, dense_features)
batch_docs = self._get_docs_for_batch(batch_messages, attribute)
for index, ex in enumerate(batch_messages):
self._set_lm_features(batch_docs[index], ex, attribute)
batch_start_index += batch_size
def process(self, message: Message, **kwargs: Any) -> None:
"""Process an incoming message by computing its tokens and dense features.
Args:
message: Incoming message object
"""
# The `process` method of all featurizers operates only on the TEXT and
# ACTION_TEXT attributes, because all other attributes are labels which are
# featurized during training and whose features are stored by the model itself.
for attribute in {TEXT, ACTION_TEXT}:
if message.get(attribute):
self._set_lm_features(
self._get_docs_for_batch(
[message], attribute=attribute, inference_mode=True
)[0],
message,
attribute,
)
def _set_lm_features(
self, doc: Dict[Text, Any], message: Message, attribute: Text = TEXT
) -> None:
"""Adds the precomputed word vectors to the messages features."""
sequence_features = doc[SEQUENCE_FEATURES]
sentence_features = doc[SENTENCE_FEATURES]
final_sequence_features = Features(
sequence_features,
FEATURE_TYPE_SEQUENCE,
attribute,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sequence_features)
final_sentence_features = Features(
sentence_features,
FEATURE_TYPE_SENTENCE,
attribute,
self.component_config[FEATURIZER_CLASS_ALIAS],
)
message.add_features(final_sentence_features)
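# --- Editor-added illustrative sketch (not part of the component above) -----
# A rough standalone illustration of the truncate-then-re-pad idea used in
# _add_extra_padding: when an input exceeds the model's limit, embeddings are
# produced for the truncated part only and zero vectors are appended so the
# output again matches the original token count. The limit of 3 and the
# embedding size of 4 are made up for the example.
def _demo_repad_truncated_embeddings():
    import numpy as np

    max_model_len = 3   # hypothetical model limit
    actual_len = 5      # original (pre-truncation) sequence length
    embedding_dim = 4

    truncated = np.ones((max_model_len, embedding_dim), dtype=np.float32)
    padding = np.zeros((actual_len - max_model_len, embedding_dim), dtype=np.float32)
    repadded = np.concatenate([truncated, padding])
    # repadded.shape == (5, 4): 3 real vectors followed by 2 zero vectors
    return repadded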
|
#!/usr/bin/env python3
import argparse
import copy
from datetime import datetime
import json
import modulefinder
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import TEST_WITH_ROCM, shell, set_cwd, FILE_SCHEMA
from torch.testing._internal.framework_utils import calculate_shards
import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict
try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from tools.stats_utils.s3_stat_parser import (get_previous_reports_for_branch, Report, HAVE_BOTO3)
except ImportError:
print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
HAVE_BOTO3 = False
TESTS = [
'test_import_time',
'test_public_bindings',
'test_type_hints',
'test_autograd',
'benchmark_utils/test_benchmark_utils',
'test_binary_ufuncs',
'test_bundled_inputs',
'test_complex',
'test_cpp_api_parity',
'test_cpp_extensions_aot_no_ninja',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_jit',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_cuda',
'test_jit_cuda_fuser',
'test_cuda_primary_ctx',
'test_dataloader',
'test_datapipe',
'distributed/test_data_parallel',
'distributed/test_distributed_fork',
'distributed/test_distributed_spawn',
'distributions/test_constraints',
'distributions/test_distributions',
'test_dispatch',
'test_expecttest',
'test_foreach',
'test_indexing',
'test_jit',
'test_linalg',
'test_logging',
'test_mkldnn',
'test_model_dump',
'test_module_init',
'test_multiprocessing',
'test_multiprocessing_spawn',
'distributed/test_nccl',
'test_native_functions',
'test_numba_integration',
'test_nn',
'test_ops',
'test_optim',
'test_pytree',
'test_mobile_optimizer',
'test_set_default_mobile_cpu_allocator',
'test_xnnpack_integration',
'test_vulkan',
'test_sparse',
'test_quantization',
'test_pruning_op',
'test_spectral_ops',
'test_serialization',
'test_shape_ops',
'test_show_pickle',
'test_sort_and_select',
'test_tensor_creation_ops',
'test_testing',
'test_torch',
'test_type_info',
'test_unary_ufuncs',
'test_utils',
'test_view_ops',
'test_vmap',
'test_namedtuple_return_api',
'test_numpy_interop',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
'test_tensorboard',
'test_namedtensor',
'test_reductions',
'test_type_promotion',
'test_jit_disabled',
'test_function_schema',
'test_op_aliases',
'test_overrides',
'test_jit_fuser_te',
'test_tensorexpr',
'test_tensorexpr_pybind',
'test_openmp',
'test_profiler',
"distributed/test_launcher",
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_futures',
'test_fx',
'test_fx_experimental',
'test_functional_autograd_benchmark',
'test_package',
'test_license',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
'distributed/elastic/timer/api_test',
'distributed/elastic/timer/local_timer_example',
'distributed/elastic/timer/local_timer_test',
'distributed/elastic/events/lib_test',
'distributed/elastic/metrics/api_test',
'distributed/elastic/utils/logging_test',
'distributed/elastic/utils/util_test',
'distributed/elastic/utils/distributed_test',
'distributed/elastic/multiprocessing/api_test',
]
# Tests need to be run with pytest.
USE_PYTEST_LIST = [
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributions/test_constraints',
'distributions/test_transforms',
'distributions/test_utils',
'test_typing',
"distributed/elastic/events/lib_test",
"distributed/elastic/agent/server/test/api_test",
]
WINDOWS_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/test_distributed_fork',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
"distributed/elastic/agent/server/test/api_test",
'distributed/elastic/multiprocessing/api_test',
]
ROCM_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_multiprocessing',
'test_jit_legacy',
'test_type_hints',
'test_openmp',
]
RUN_PARALLEL_BLOCKLIST = [
'test_cpp_extensions_jit',
'test_expecttest',
'test_jit_disabled',
'test_mobile_optimizer',
'test_multiprocessing',
'test_multiprocessing_spawn',
'test_namedtuple_return_api',
'test_overrides',
'test_show_pickle',
'test_tensorexpr',
'test_cuda_primary_ctx',
] + [test for test in TESTS if test.startswith('distributed/')]
WINDOWS_COVERAGE_BLOCKLIST = [
]
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
'distributions/test_distributions',
'test_nn',
'test_autograd',
'test_cpp_extensions_jit',
'test_jit_legacy',
'test_dataloader',
'test_overrides',
'test_linalg',
'test_jit',
'test_jit_profiling',
'test_torch',
'test_binary_ufuncs',
'test_numpy_interop',
'test_reductions',
'test_shape_ops',
'test_sort_and_select',
'test_testing',
'test_view_ops',
'distributed/nn/jit/test_instantiator',
'distributed/test_distributed_fork',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',
'distributed/test_distributed_spawn',
'test_cuda',
'test_cuda_primary_ctx',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_aot_no_ninja',
'test_serialization',
'test_optim',
'test_utils',
'test_multiprocessing',
'test_tensorboard',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_quantization',
'test_pruning_op',
'test_determination',
'test_futures',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = '.pytorch-test-times'
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
_DEP_MODULES_CACHE: Dict[str, set] = {}
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
DISTRIBUTED_TESTS_CONFIG['test'] = {
'WORLD_SIZE': '1'
}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG['mpi'] = {
'WORLD_SIZE': '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'
}
if dist.is_nccl_available():
DISTRIBUTED_TESTS_CONFIG['nccl'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG['gloo'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'
}
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
if n.startswith('SIG') and '_' not in n}
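# Editor-added sketch: the mapping above converts signal numbers to their
# names (see the Stack Overflow link); a lookup like the one below is handy
# when a child process exits with a negative return code. The concrete value
# returned is platform-dependent.
def _demo_signal_name_lookup():
    return SIGNALS_TO_NAMES_DICT.get(signal.SIGSEGV, "UNKNOWN")  # 'SIGSEGV' on Linux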
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
JIT_EXECUTOR_TESTS = [
'test_jit_cuda_fuser',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
]
def print_to_stderr(message):
print(message, file=sys.stderr)
# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1
def get_stripped_CI_job() -> str:
job = os.environ.get("CIRCLE_JOB", "").rstrip('0123456789')
if job.endswith('_slow_test'):
job = job[:len(job) - len('_slow_test')]
elif job.endswith('_test'):
job = job[:len(job) - len('_test')]
elif job.endswith('_build'):
job = job[:len(job) - len('_build')]
return job
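# Editor-added sketch: the suffix stripping above maps a concrete CI job name
# to the shared prefix used for stats lookups. The second job name below is a
# made-up example.
def _demo_get_stripped_CI_job():
    os.environ["CIRCLE_JOB"] = "pytorch_windows_vs2019_py36_cuda10.1_build"
    assert get_stripped_CI_job() == "pytorch_windows_vs2019_py36_cuda10.1"
    os.environ["CIRCLE_JOB"] = "pytorch_linux_xenial_py3_clang5_asan_test2"
    assert get_stripped_CI_job() == "pytorch_linux_xenial_py3_clang5_asan"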
def calculate_job_times(reports: List["Report"]) -> Dict[str, float]:
# an entry will be like ("test_file_name" -> (current_avg, # values))
jobs_to_times: Dict[str, Tuple[float, int]] = dict()
for report in reports:
assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
files: Dict[str, Any] = report['files']
for name, test_file in files.items():
if name not in jobs_to_times:
jobs_to_times[name] = (test_file['total_seconds'], 1)
else:
curr_avg, curr_count = jobs_to_times[name]
new_count = curr_count + 1
new_avg = (curr_avg * curr_count + test_file['total_seconds']) / new_count
jobs_to_times[name] = (new_avg, new_count)
# if there's 'test_cpp_extensions_aot' entry in jobs_to_times, add 'test_cpp_extensions_aot_ninja'
# and 'test_cpp_extensions_aot_no_ninja' duplicate entries to ease future computation since
# test_cpp_extensions_aot_no_ninja and test_cpp_extensions_aot_ninja are Python test jobs that
# both use the test_cpp_extensions_aot.py file.
if 'test_cpp_extensions_aot' in jobs_to_times:
jobs_to_times['test_cpp_extensions_aot_ninja'] = jobs_to_times['test_cpp_extensions_aot']
jobs_to_times['test_cpp_extensions_aot_no_ninja'] = jobs_to_times['test_cpp_extensions_aot']
return {job: time for job, (time, _) in jobs_to_times.items()}
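# Editor-added sketch: calculate_job_times() keeps a running average of
# 'total_seconds' per test file across reports. With two made-up version-2
# reports the result is simply the mean of the two durations.
def _demo_calculate_job_times():
    fake_reports = [
        {"format_version": 2, "files": {"test_torch": {"total_seconds": 100.0}}},
        {"format_version": 2, "files": {"test_torch": {"total_seconds": 300.0}}},
    ]
    assert calculate_job_times(fake_reports) == {"test_torch": 200.0}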
def pull_job_times_from_S3() -> Dict[str, float]:
if HAVE_BOTO3:
ci_job_prefix = get_stripped_CI_job()
s3_reports: List["Report"] = get_previous_reports_for_branch('origin/nightly', ci_job_prefix)
else:
print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')
print('If not installed, please install boto3 for automatic sharding and test categorization.')
s3_reports = []
if len(s3_reports) == 0:
print('Gathered no reports from S3. Proceeding without them.')
return dict()
return calculate_job_times(s3_reports)
def get_past_job_times() -> Dict[str, float]:
if os.path.exists(TEST_TIMES_FILE):
with open(TEST_TIMES_FILE) as file:
test_times_json: JobTimeJSON = json.load(file)
curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
file_commit = test_times_json.get('commit', '')
curr_ci_job = get_stripped_CI_job()
file_ci_job = test_times_json.get('CIRCLE_JOB', 'N/A')
if curr_commit != file_commit:
print(f'Current test times file is from different commit {file_commit}.')
elif curr_ci_job != file_ci_job:
print(f'Current test times file is for different CI job {file_ci_job}.')
else:
print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. Proceeding with those values.')
return test_times_json.get('job_times', {})
# Found file, but commit or CI job in JSON doesn't match
print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')
job_times = pull_job_times_from_S3()
print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')
export_S3_test_times(TEST_TIMES_FILE, job_times)
return job_times
class JobTimeJSON(TypedDict):
commit: str
job_times: Dict[str, float]
def get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:
return {
'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip(),
'CIRCLE_JOB': get_stripped_CI_job(),
'job_times': job_times,
}
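# Editor-added sketch: the dictionary built above is what gets dumped to
# TEST_TIMES_FILE. The commit hash and timings below are made-up placeholders
# (note that the extra 'CIRCLE_JOB' key is not declared on JobTimeJSON).
def _demo_job_time_json_shape():
    example: JobTimeJSON = {
        "commit": "0123456789abcdef",
        "job_times": {"test_torch": 2612.4, "test_nn": 1317.9},
    }
    return json.dumps(example, indent=2)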
def get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:
jobs_to_times = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. Proceeding with default sharding plan.')
return tests[which_shard - 1 :: num_shards]
shards = calculate_shards(num_shards, tests, jobs_to_times)
_, tests_from_shard = shards[which_shard - 1]
return tests_from_shard
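# Editor-added sketch: without any S3 stats, get_shard() falls back to a plain
# round-robin split, i.e. shard k of n takes every n-th test starting at
# index k - 1. The test names below are placeholders.
def _demo_round_robin_shard_fallback():
    tests = ["t1", "t2", "t3", "t4", "t5"]
    num_shards = 2
    shard_1 = tests[1 - 1 :: num_shards]  # ["t1", "t3", "t5"]
    shard_2 = tests[2 - 1 :: num_shards]  # ["t2", "t4"]
    return shard_1, shard_2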
def get_slow_tests_based_on_S3() -> List[str]:
jobs_to_times: Dict[str, float] = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. No new slow tests calculated.')
return []
slow_tests: List[str] = []
for test in TESTS:
if test in jobs_to_times and test not in TARGET_DET_LIST:
if jobs_to_times[test] > SLOW_TEST_THRESHOLD:
slow_tests.append(test)
return slow_tests
def get_executable_command(options, allow_pytest, disable_coverage=False):
if options.coverage and not disable_coverage:
executable = ['coverage', 'run', '--parallel-mode', '--source=torch']
else:
executable = [sys.executable]
if options.pytest:
if allow_pytest:
executable += ['-m', 'pytest']
else:
print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')
return executable
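# Editor-added sketch: get_executable_command() only assembles the interpreter
# prefix; the test file and its arguments are appended later in run_test().
# The Namespace below is a stand-in for the parsed CLI options.
def _demo_get_executable_command():
    fake_options = argparse.Namespace(coverage=False, pytest=True)
    # -> [sys.executable, '-m', 'pytest']
    return get_executable_command(fake_options, allow_pytest=True)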
def run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):
unittest_args = options.additional_unittest_args.copy()
if options.verbose:
unittest_args.append(f'-{"v" * options.verbose}')  # in case of pytest
if test_module in RUN_PARALLEL_BLOCKLIST:
unittest_args = [arg for arg in unittest_args if not arg.startswith('--run-parallel')]
if extra_unittest_args:
assert isinstance(extra_unittest_args, list)
unittest_args.extend(extra_unittest_args)
# If using pytest, replace -f with equivalent -x
if options.pytest:
unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
# Can't call `python -m unittest test_*` here because it doesn't run code
# in `if __name__ == '__main__': `. So call `python test_*.py` instead.
argv = [test_module + '.py'] + unittest_args
# Multiprocessing related tests cannot run with coverage.
# Tracking issue: https://github.com/pytorch/pytorch/issues/50661
disable_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST
# Extra arguments are not supported with pytest
executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
disable_coverage=disable_coverage)
command = (launcher_cmd or []) + executable + argv
print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
return shell(command, test_directory)
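# Editor-added sketch: when pytest is selected, run_test() swaps unittest's
# '-f' (failfast) for pytest's equivalent '-x' before building the final
# command of the form: <executable prefix> + ['<module>.py'] + <args>.
def _demo_failfast_flag_translation():
    unittest_args = ["-v", "-f"]
    pytest_args = [arg if arg != "-f" else "-x" for arg in unittest_args]
    assert pytest_args == ["-v", "-x"]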
def test_cuda_primary_ctx(test_module, test_directory, options):
return run_test(test_module, test_directory, options, extra_unittest_args=['--subprocess'])
def _test_cpp_extensions_aot(test_module, test_directory, options, use_ninja):
if use_ninja:
try:
cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_ERROR)
return 1
# Wipe the build folder, if it exists already
cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')
if os.path.exists(cpp_extensions_test_build_dir):
shutil.rmtree(cpp_extensions_test_build_dir)
# Build the test cpp extensions modules
shell_env = os.environ.copy()
shell_env['USE_NINJA'] = str(1 if use_ninja else 0)
cmd = [sys.executable, 'setup.py', 'install', '--root', './install']
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
if return_code != 0:
return return_code
if sys.platform != 'win32':
return_code = shell(cmd,
cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),
env=shell_env)
if return_code != 0:
return return_code
# "install" the test modules and run tests
python_path = os.environ.get('PYTHONPATH', '')
try:
cpp_extensions = os.path.join(test_directory, 'cpp_extensions')
install_directory = ''
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):
for directory in directories:
if '-packages' in directory:
install_directory = os.path.join(root, directory)
assert install_directory, 'install_directory must not be empty'
os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])
return run_test(test_module, test_directory, options)
finally:
os.environ['PYTHONPATH'] = python_path
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot', test_directory,
options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot',
test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
# MPI tests are broken with Python-3.9
mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0 and sys.version_info < (3, 9)
if options.verbose and not mpi_available:
print_to_stderr(
'MPI not available -- MPI backend tests will be skipped')
config = DISTRIBUTED_TESTS_CONFIG
for backend, env_vars in config.items():
if sys.platform == 'win32' and backend != 'gloo':
continue
if backend == 'mpi' and not mpi_available:
continue
for with_init_file in {True, False}:
if sys.platform == 'win32' and not with_init_file:
continue
tmp_dir = tempfile.mkdtemp()
if options.verbose:
init_str = "with {} init_method"
with_init = init_str.format("file" if with_init_file else "env")
print_to_stderr(
'Running distributed tests for the {} backend {}'.format(
backend, with_init))
os.environ['TEMP_DIR'] = tmp_dir
os.environ['BACKEND'] = backend
os.environ['INIT_METHOD'] = 'env://'
os.environ.update(env_vars)
if with_init_file:
if test_module in ["test_distributed_fork", "test_distributed_spawn"]:
init_method = f'{FILE_SCHEMA}{tmp_dir}/'
else:
init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
os.environ['INIT_METHOD'] = init_method
try:
os.mkdir(os.path.join(tmp_dir, 'barrier'))
os.mkdir(os.path.join(tmp_dir, 'test_dir'))
if backend == 'mpi':
# test mpiexec for --noprefix option
with open(os.devnull, 'w') as devnull:
allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(
'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
noprefix_opt = '--noprefix' if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
return_code = run_test(test_module, test_directory, options,
launcher_cmd=mpiexec)
else:
return_code = run_test(test_module, test_directory, options)
if return_code != 0:
return return_code
finally:
shutil.rmtree(tmp_dir)
return 0
CUSTOM_HANDLERS = {
'test_cuda_primary_ctx': test_cuda_primary_ctx,
'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,
'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,
'distributed/test_distributed_fork': test_distributed,
'distributed/test_distributed_spawn': test_distributed,
}
def parse_test_module(test):
return test.split('.')[0]
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
def parse_args():
parser = argparse.ArgumentParser(
description='Run the PyTorch unit test suite',
epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='print verbose information and test-by-test results')
parser.add_argument(
'--jit',
action='store_true',
help='run all jit tests')
parser.add_argument(
'-pt', '--pytest', action='store_true',
help='If true, use `pytest` to execute the tests. E.g., this runs '
'TestTorch with pytest in verbose and coverage mode: '
'python run_test.py -vci torch -pt')
parser.add_argument(
'-c', '--coverage', action='store_true', help='enable coverage',
default=PYTORCH_COLLECT_COVERAGE)
parser.add_argument(
'-i',
'--include',
nargs='+',
choices=TestChoices(TESTS),
default=TESTS,
metavar='TESTS',
help='select a set of tests to include (defaults to ALL tests).'
' tests can be specified with module name, module.TestClass'
' or module.TestClass.test_method')
parser.add_argument(
'-x',
'--exclude',
nargs='+',
choices=TESTS,
metavar='TESTS',
default=[],
help='select a set of tests to exclude')
parser.add_argument(
'-f',
'--first',
choices=TESTS,
metavar='TESTS',
help='select the test to start from (excludes previous tests)')
parser.add_argument(
'-l',
'--last',
choices=TESTS,
metavar='TESTS',
help='select the last test to run (excludes following tests)')
parser.add_argument(
'--bring-to-front',
nargs='+',
choices=TestChoices(TESTS),
default=[],
metavar='TESTS',
help='select a set of tests to run first. This can be used in situations'
' where you want to run all tests, but care more about some set, '
'e.g. after making a change to a specific component')
parser.add_argument(
'--ignore-win-blocklist',
action='store_true',
help='always run blocklisted windows tests')
parser.add_argument(
'--determine-from',
help='File of affected source filenames to determine which tests to run.')
parser.add_argument(
'--continue-through-error',
action='store_true',
help='Runs the full test suite despite one of the tests failing')
parser.add_argument(
'additional_unittest_args',
nargs='*',
help='additional arguments passed through to unittest, e.g., '
'python run_test.py -i sparse -- TestSparse.test_factory_size_check')
parser.add_argument(
'--export-past-test-times',
nargs='?',
type=str,
const=TEST_TIMES_FILE,
help='dumps test times from previous S3 stats into a file, format JSON',
)
parser.add_argument(
'--shard',
nargs=2,
type=int,
help='runs a shard of the tests (taking into account other selections), e.g., '
'--shard 2 3 will break up the selected tests into 3 shards and run the tests '
'in the 2nd shard (the first number should not exceed the second)',
)
parser.add_argument(
'--exclude-jit-executor',
action='store_true',
help='exclude tests that are run for a specific jit config'
)
return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
"""Find the index of the first or last occurrence of a given test/test module in the list of selected tests.
This function is used to determine the indices when slicing the list of selected tests when
``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.
:attr:`selected_tests` can be a list that contains multiple consecutive occurrences of tests
as part of the same test module, e.g.:
```
selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
```
If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.
If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.
Args:
test (str): Name of test to lookup
selected_tests (list): List of tests
find_last_index (bool, optional): should we lookup the index of first or last
occurrence (first is default)
Returns:
index of the first or last occurrence of the given test
"""
idx = 0
found_idx = -1
for t in selected_tests:
if t.startswith(test):
found_idx = idx
if not find_last_index:
break
idx += 1
return found_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
for exclude_test in exclude_list:
tests_copy = selected_tests[:]
for test in tests_copy:
if test.startswith(exclude_test):
if exclude_message is not None:
print_to_stderr('Excluding {} {}'.format(test, exclude_message))
selected_tests.remove(test)
return selected_tests
def get_selected_tests(options):
selected_tests = options.include
if options.bring_to_front:
to_front = set(options.bring_to_front)
selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,
selected_tests))
if options.first:
first_index = find_test_index(options.first, selected_tests)
selected_tests = selected_tests[first_index:]
if options.last:
last_index = find_test_index(options.last, selected_tests, find_last_index=True)
selected_tests = selected_tests[:last_index + 1]
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
assert which_shard <= num_shards, "Selected shard must be less than or equal to the total number of shards"
assert num_shards <= len(selected_tests), f"Number of shards must not exceed the number of selected tests ({len(selected_tests)})"
selected_tests = get_shard(which_shard, num_shards, selected_tests)
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == 'win32' and not options.ignore_win_blocklist:
target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')
if target_arch != 'x64':
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_jit')
WINDOWS_BLOCKLIST.append('jit')
WINDOWS_BLOCKLIST.append('jit_fuser')
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')
return selected_tests
def test_impact_of_file(filename):
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in ['.jenkins', '.circleci']:
return 'CI'
if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:
return 'NONE'
elif parts[0] == 'torch':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TORCH'
elif parts[0] == 'caffe2':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'CAFFE2'
elif parts[0] == 'test':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TEST'
return 'UNKNOWN'
def log_test_reason(file_type, filename, test, options):
if options.verbose:
print_to_stderr(
'Determination found {} file {} -- running {}'.format(
file_type,
filename,
test,
)
)
def get_dep_modules(test):
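# Compute (and cache) the set of module names the given test file imports, using modulefinder.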
# Cache results in case of repetition
if test in _DEP_MODULES_CACHE:
return _DEP_MODULES_CACHE[test]
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_location = os.path.join(repo_root, 'test', test + '.py')
finder = modulefinder.ModuleFinder(
# Ideally exclude all third party modules, to speed up calculation.
excludes=[
'scipy',
'numpy',
'numba',
'multiprocessing',
'sklearn',
'setuptools',
'hypothesis',
'llvmlite',
'joblib',
'email',
'importlib',
'unittest',
'urllib',
'json',
'collections',
# Modules below are excluded because they are hitting https://bugs.python.org/issue40350
# Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
'mpl_toolkits',
'google',
'onnx',
# Triggers RecursionError
'mypy'
],
)
# HACK: some platforms default to ascii, so we can't just run_script :(
with open(test_location, 'r', encoding='utf-8') as fp:
finder.load_module('__main__', fp, test_location, ('', 'r', 1))
dep_modules = set(finder.modules.keys())
_DEP_MODULES_CACHE[test] = dep_modules
return dep_modules
def determine_target(target_det_list, test, touched_files, options):
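# Decide whether the given test needs to run for the touched files; only tests in target_det_list are subject to determination.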
test = parse_test_module(test)
# Some tests are faster to execute than to determine.
if test not in target_det_list:
if options.verbose:
print_to_stderr(f'Running {test} without determination')
return True
# HACK: "no_ninja" is not a real module
if test.endswith('_no_ninja'):
test = test[:(-1 * len('_no_ninja'))]
if test.endswith('_ninja'):
test = test[:(-1 * len('_ninja'))]
dep_modules = get_dep_modules(test)
for touched_file in touched_files:
file_type = test_impact_of_file(touched_file)
if file_type == 'NONE':
continue
elif file_type == 'CI':
# Force all tests to run if any change is made to the CI
# configurations.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type == 'UNKNOWN':
# Assume uncategorized source files can affect every test.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type in ['TORCH', 'CAFFE2', 'TEST']:
parts = os.path.splitext(touched_file)[0].split(os.sep)
touched_module = ".".join(parts)
# test/ path does not have a "test." namespace
if touched_module.startswith('test.'):
touched_module = touched_module.split('test.')[1]
if (
touched_module in dep_modules
or touched_module == test.replace('/', '.')
):
log_test_reason(file_type, touched_file, test, options)
return True
# If nothing has determined that the test should run, don't run it.
if options.verbose:
print_to_stderr(f'Determination is skipping {test}')
return False
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
test_module = parse_test_module(test)
# Printing the date here can help diagnose which tests are slow
print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(test_module, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
return_code, bool), 'Return code should be an integer'
if return_code == 0:
return None
message = f'{test} failed!'
if return_code < 0:
# subprocess.Popen returns the child process' exit signal as
# return code -N, where N is the signal number.
signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
message += f' Received signal: {signal_name}'
return message
def export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:
if os.path.exists(test_times_filename):
print(f'Overwriting existing file: {test_times_filename}')
with open(test_times_filename, 'w+') as file:
job_times_json = get_job_times_json(test_times)
json.dump(job_times_json, file, indent=' ', separators=(',', ': '))
file.write('\n')
def query_changed_test_files() -> List[str]:
cmd = ["git", "diff", "--name-only", "origin/master", "HEAD"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
lines = proc.stdout.decode().strip().split("\n")
lines = [line.strip() for line in lines]
return lines
def reorder_tests(tests: List[str]) -> List[str]:
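# Move tests whose files were changed on this branch (per git diff against origin/master) to the front of the run order.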
try:
changed_files = query_changed_test_files()
except Exception:
# If unable to get changed files from git, quit without doing any sorting
return tests
prefix = f"test{os.path.sep}"
changed_tests = [f for f in changed_files if f.startswith(prefix) and f.endswith(".py")]
changed_tests = [f[len(prefix):] for f in changed_tests]
changed_tests = [f[:-len(".py")] for f in changed_tests]
bring_to_front = []
the_rest = []
for test in tests:
if test in changed_tests:
bring_to_front.append(test)
else:
the_rest.append(test)
sorted_tests = bring_to_front + the_rest
if len(sorted_tests) != len(tests):
# Something went wrong, bail out without doing any sorting
return tests
return sorted_tests
def main():
options = parse_args()
test_times_filename = options.export_past_test_times
if test_times_filename:
print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')
export_S3_test_times(test_times_filename, pull_job_times_from_S3())
return
test_directory = os.path.dirname(os.path.abspath(__file__))
selected_tests = get_selected_tests(options)
if options.verbose:
print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
if options.coverage and not PYTORCH_COLLECT_COVERAGE:
shell(['coverage', 'erase'])
if options.jit:
selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)
if options.determine_from is not None and os.path.exists(options.determine_from):
slow_tests = get_slow_tests_based_on_S3()
print('Added the following tests to target_det tests as calculated based on S3:')
print(slow_tests)
with open(options.determine_from, 'r') as fh:
touched_files = [
os.path.normpath(name.strip()) for name in fh.read().split('\n')
if len(name.strip()) > 0
]
# HACK: Ensure the 'test' paths can be traversed by Modulefinder
sys.path.append('test')
selected_tests = [
test for test in selected_tests
if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)
]
sys.path.remove('test')
selected_tests = reorder_tests(selected_tests)
has_failed = False
failure_messages = []
try:
for test in selected_tests:
options_clone = copy.deepcopy(options)
if test in USE_PYTEST_LIST:
options_clone.pytest = True
err_message = run_test_module(test, test_directory, options_clone)
if err_message is None:
continue
has_failed = True
failure_messages.append(err_message)
if not options_clone.continue_through_error:
raise RuntimeError(err_message)
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
test_dir = os.path.dirname(os.path.abspath(__file__))
with set_cwd(test_dir):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
cov.combine(strict=False)
cov.save()
if not PYTORCH_COLLECT_COVERAGE:
cov.html_report()
if options.continue_through_error and has_failed:
for err in failure_messages:
print_to_stderr(err)
sys.exit(1)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import argparse
import copy
from datetime import datetime
import json
import modulefinder
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import TEST_WITH_ROCM, shell, set_cwd, FILE_SCHEMA
from torch.testing._internal.framework_utils import calculate_shards
import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict
try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from tools.stats_utils.s3_stat_parser import (get_previous_reports_for_branch, Report, HAVE_BOTO3)
except ImportError:
print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
HAVE_BOTO3 = False
TESTS = [
'test_import_time',
'test_public_bindings',
'test_type_hints',
'test_autograd',
'benchmark_utils/test_benchmark_utils',
'test_binary_ufuncs',
'test_bundled_inputs',
'test_complex',
'test_cpp_api_parity',
'test_cpp_extensions_aot_no_ninja',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_jit',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_cuda',
'test_jit_cuda_fuser',
'test_cuda_primary_ctx',
'test_dataloader',
'test_datapipe',
'distributed/test_data_parallel',
'distributed/test_distributed_fork',
'distributed/test_distributed_spawn',
'distributions/test_constraints',
'distributions/test_distributions',
'test_dispatch',
'test_expecttest',
'test_foreach',
'test_indexing',
'test_jit',
'test_linalg',
'test_logging',
'test_mkldnn',
'test_model_dump',
'test_module_init',
'test_multiprocessing',
'test_multiprocessing_spawn',
'distributed/test_nccl',
'test_native_functions',
'test_numba_integration',
'test_nn',
'test_ops',
'test_optim',
'test_pytree',
'test_mobile_optimizer',
'test_set_default_mobile_cpu_allocator',
'test_xnnpack_integration',
'test_vulkan',
'test_sparse',
'test_quantization',
'test_pruning_op',
'test_spectral_ops',
'test_serialization',
'test_shape_ops',
'test_show_pickle',
'test_sort_and_select',
'test_tensor_creation_ops',
'test_testing',
'test_torch',
'test_type_info',
'test_unary_ufuncs',
'test_utils',
'test_view_ops',
'test_vmap',
'test_namedtuple_return_api',
'test_numpy_interop',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
'test_tensorboard',
'test_namedtensor',
'test_reductions',
'test_type_promotion',
'test_jit_disabled',
'test_function_schema',
'test_op_aliases',
'test_overrides',
'test_jit_fuser_te',
'test_tensorexpr',
'test_tensorexpr_pybind',
'test_openmp',
'test_profiler',
"distributed/test_launcher",
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_futures',
'test_fx',
'test_fx_experimental',
'test_functional_autograd_benchmark',
'test_package',
'test_license',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
'distributed/elastic/timer/api_test',
'distributed/elastic/timer/local_timer_example',
'distributed/elastic/timer/local_timer_test',
'distributed/elastic/events/lib_test',
'distributed/elastic/metrics/api_test',
'distributed/elastic/utils/logging_test',
'distributed/elastic/utils/util_test',
'distributed/elastic/utils/distributed_test',
'distributed/elastic/multiprocessing/api_test',
]
# Tests that need to be run with pytest.
USE_PYTEST_LIST = [
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributions/test_constraints',
'distributions/test_transforms',
'distributions/test_utils',
'test_typing',
"distributed/elastic/events/lib_test",
"distributed/elastic/agent/server/test/api_test",
]
WINDOWS_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/test_distributed_fork',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
"distributed/elastic/agent/server/test/api_test",
'distributed/elastic/multiprocessing/api_test',
]
ROCM_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_multiprocessing',
'test_jit_legacy',
'test_type_hints',
'test_openmp',
]
RUN_PARALLEL_BLOCKLIST = [
'test_cpp_extensions_jit',
'test_expecttest',
'test_jit_disabled',
'test_mobile_optimizer',
'test_multiprocessing',
'test_multiprocessing_spawn',
'test_namedtuple_return_api',
'test_overrides',
'test_show_pickle',
'test_tensorexpr',
'test_cuda_primary_ctx',
] + [test for test in TESTS if test.startswith('distributed/')]
WINDOWS_COVERAGE_BLOCKLIST = [
]
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
'distributions/test_distributions',
'test_nn',
'test_autograd',
'test_cpp_extensions_jit',
'test_jit_legacy',
'test_dataloader',
'test_overrides',
'test_linalg',
'test_jit',
'test_jit_profiling',
'test_torch',
'test_binary_ufuncs',
'test_numpy_interop',
'test_reductions',
'test_shape_ops',
'test_sort_and_select',
'test_testing',
'test_view_ops',
'distributed/nn/jit/test_instantiator',
'distributed/test_distributed_fork',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',
'distributed/test_distributed_spawn',
'test_cuda',
'test_cuda_primary_ctx',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_aot_no_ninja',
'test_serialization',
'test_optim',
'test_utils',
'test_multiprocessing',
'test_tensorboard',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_quantization',
'test_pruning_op',
'test_determination',
'test_futures',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = '.pytorch-test-times'
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
_DEP_MODULES_CACHE: Dict[str, set] = {}
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
DISTRIBUTED_TESTS_CONFIG['test'] = {
'WORLD_SIZE': '1'
}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG['mpi'] = {
'WORLD_SIZE': '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'
}
if dist.is_nccl_available():
DISTRIBUTED_TESTS_CONFIG['nccl'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG['gloo'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'
}
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
if n.startswith('SIG') and '_' not in n}
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
JIT_EXECUTOR_TESTS = [
'test_jit_cuda_fuser',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
]
def print_to_stderr(message):
print(message, file=sys.stderr)
# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1
def get_stripped_CI_job() -> str:
job = os.environ.get("CIRCLE_JOB", "").rstrip('0123456789')
if job.endswith('_slow_test'):
job = job[:len(job) - len('_slow_test')]
elif job.endswith('_test'):
job = job[:len(job) - len('_test')]
elif job.endswith('_build'):
job = job[:len(job) - len('_build')]
return job
def calculate_job_times(reports: List["Report"]) -> Dict[str, float]:
# an entry will be like ("test_file_name" -> (current_avg, # values))
jobs_to_times: Dict[str, Tuple[float, int]] = dict()
for report in reports:
assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
files: Dict[str, Any] = report['files']
for name, test_file in files.items():
if name not in jobs_to_times:
jobs_to_times[name] = (test_file['total_seconds'], 1)
else:
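# incrementally update the running average: new_avg = (avg * n + x) / (n + 1)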
curr_avg, curr_count = jobs_to_times[name]
new_count = curr_count + 1
new_avg = (curr_avg * curr_count + test_file['total_seconds']) / new_count
jobs_to_times[name] = (new_avg, new_count)
# if there's 'test_cpp_extensions_aot' entry in jobs_to_times, add 'test_cpp_extensions_aot_ninja'
# and 'test_cpp_extensions_aot_no_ninja' duplicate entries to ease future computation since
# test_cpp_extensions_aot_no_ninja and test_cpp_extensions_aot_ninja are Python test jobs that
# both use the test_cpp_extensions_aot.py file.
if 'test_cpp_extensions_aot' in jobs_to_times:
jobs_to_times['test_cpp_extensions_aot_ninja'] = jobs_to_times['test_cpp_extensions_aot']
jobs_to_times['test_cpp_extensions_aot_no_ninja'] = jobs_to_times['test_cpp_extensions_aot']
return {job: time for job, (time, _) in jobs_to_times.items()}
def pull_job_times_from_S3() -> Dict[str, float]:
if HAVE_BOTO3:
ci_job_prefix = get_stripped_CI_job()
s3_reports: List["Report"] = get_previous_reports_for_branch('origin/nightly', ci_job_prefix)
else:
print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')
print('If not installed, please install boto3 for automatic sharding and test categorization.')
s3_reports = []
if len(s3_reports) == 0:
print('Gathered no reports from S3. Proceeding without them.')
return dict()
return calculate_job_times(s3_reports)
def get_past_job_times() -> Dict[str, float]:
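# Reuse the stats cached in TEST_TIMES_FILE when they match the current commit and CI job; otherwise pull fresh stats from S3 and overwrite the file.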
if os.path.exists(TEST_TIMES_FILE):
with open(TEST_TIMES_FILE) as file:
test_times_json: JobTimeJSON = json.load(file)
curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
file_commit = test_times_json.get('commit', '')
curr_ci_job = get_stripped_CI_job()
file_ci_job = test_times_json.get('CIRCLE_JOB', 'N/A')
if curr_commit != file_commit:
print(f'Current test times file is from different commit {file_commit}.')
elif curr_ci_job != file_ci_job:
print(f'Current test times file is for different CI job {file_ci_job}.')
else:
print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. Proceeding with those values.')
return test_times_json.get('job_times', {})
# Found file, but commit or CI job in JSON doesn't match
print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')
job_times = pull_job_times_from_S3()
print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')
export_S3_test_times(TEST_TIMES_FILE, job_times)
return job_times
class JobTimeJSON(TypedDict):
commit: str
job_times: Dict[str, float]
def get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:
return {
'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip(),
'CIRCLE_JOB': get_stripped_CI_job(),
'job_times': job_times,
}
def get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:
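# which_shard is 1-indexed; without timing stats from S3 we fall back to simple strided slicing of the test list.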
jobs_to_times = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. Proceeding with default sharding plan.')
return tests[which_shard - 1 :: num_shards]
shards = calculate_shards(num_shards, tests, jobs_to_times)
_, tests_from_shard = shards[which_shard - 1]
return tests_from_shard
def get_slow_tests_based_on_S3() -> List[str]:
jobs_to_times: Dict[str, float] = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. No new slow tests calculated.')
return []
slow_tests: List[str] = []
for test in TESTS:
if test in jobs_to_times and test not in TARGET_DET_LIST:
if jobs_to_times[test] > SLOW_TEST_THRESHOLD:
slow_tests.append(test)
return slow_tests
def get_executable_command(options, allow_pytest, disable_coverage=False):
if options.coverage and not disable_coverage:
executable = ['coverage', 'run', '--parallel-mode', '--source=torch']
else:
executable = [sys.executable]
if options.pytest:
if allow_pytest:
executable += ['-m', 'pytest']
else:
print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')
return executable
def run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):
unittest_args = options.additional_unittest_args.copy()
if options.verbose:
unittest_args.append(f'-{"v"*options.verbose}') # in case of pytest
if test_module in RUN_PARALLEL_BLOCKLIST:
unittest_args = [arg for arg in unittest_args if not arg.startswith('--run-parallel')]
if extra_unittest_args:
assert isinstance(extra_unittest_args, list)
unittest_args.extend(extra_unittest_args)
# If using pytest, replace -f with equivalent -x
if options.pytest:
unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
# Can't call `python -m unittest test_*` here because it doesn't run code
# in `if __name__ == '__main__': `. So call `python test_*.py` instead.
argv = [test_module + '.py'] + unittest_args
# Multiprocessing related tests cannot run with coverage.
# Tracking issue: https://github.com/pytorch/pytorch/issues/50661
disable_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST
# Extra arguments are not supported with pytest
executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
disable_coverage=disable_coverage)
command = (launcher_cmd or []) + executable + argv
print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
return run_test(test_module, test_directory, options, extra_unittest_args=['--subprocess'])
def _test_cpp_extensions_aot(test_module, test_directory, options, use_ninja):
if use_ninja:
try:
cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_ERROR)
return 1
# Wipe the build folder, if it exists already
cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')
if os.path.exists(cpp_extensions_test_build_dir):
shutil.rmtree(cpp_extensions_test_build_dir)
# Build the test cpp extensions modules
shell_env = os.environ.copy()
shell_env['USE_NINJA'] = str(1 if use_ninja else 0)
cmd = [sys.executable, 'setup.py', 'install', '--root', './install']
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
if return_code != 0:
return return_code
if sys.platform != 'win32':
return_code = shell(cmd,
cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),
env=shell_env)
if return_code != 0:
return return_code
# "install" the test modules and run tests
python_path = os.environ.get('PYTHONPATH', '')
try:
cpp_extensions = os.path.join(test_directory, 'cpp_extensions')
install_directory = ''
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):
for directory in directories:
if '-packages' in directory:
install_directory = os.path.join(root, directory)
assert install_directory, 'install_directory must not be empty'
os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])
return run_test(test_module, test_directory, options)
finally:
os.environ['PYTHONPATH'] = python_path
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot', test_directory,
options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot',
test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
# MPI tests are broken with Python-3.9
mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0 and sys.version_info < (3, 9)
if options.verbose and not mpi_available:
print_to_stderr(
'MPI not available -- MPI backend tests will be skipped')
config = DISTRIBUTED_TESTS_CONFIG
for backend, env_vars in config.items():
if sys.platform == 'win32' and backend != 'gloo':
continue
if backend == 'mpi' and not mpi_available:
continue
for with_init_file in {True, False}:
if sys.platform == 'win32' and not with_init_file:
continue
tmp_dir = tempfile.mkdtemp()
if options.verbose:
init_str = "with {} init_method"
with_init = init_str.format("file" if with_init_file else "env")
print_to_stderr(
'Running distributed tests for the {} backend {}'.format(
backend, with_init))
os.environ['TEMP_DIR'] = tmp_dir
os.environ['BACKEND'] = backend
os.environ['INIT_METHOD'] = 'env://'
os.environ.update(env_vars)
if with_init_file:
if test_module in ["test_distributed_fork", "test_distributed_spawn"]:
init_method = f'{FILE_SCHEMA}{tmp_dir}/'
else:
init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
os.environ['INIT_METHOD'] = init_method
try:
os.mkdir(os.path.join(tmp_dir, 'barrier'))
os.mkdir(os.path.join(tmp_dir, 'test_dir'))
if backend == 'mpi':
# test mpiexec for --noprefix option
with open(os.devnull, 'w') as devnull:
allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(
'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
noprefix_opt = '--noprefix' if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
return_code = run_test(test_module, test_directory, options,
launcher_cmd=mpiexec)
else:
return_code = run_test(test_module, test_directory, options)
if return_code != 0:
return return_code
finally:
shutil.rmtree(tmp_dir)
return 0
CUSTOM_HANDLERS = {
'test_cuda_primary_ctx': test_cuda_primary_ctx,
'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,
'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,
'distributed/test_distributed_fork': test_distributed,
'distributed/test_distributed_spawn': test_distributed,
}
def parse_test_module(test):
return test.split('.')[0]
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
def parse_args():
parser = argparse.ArgumentParser(
description='Run the PyTorch unit test suite',
epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='print verbose information and test-by-test results')
parser.add_argument(
'--jit',
action='store_true',
help='run all jit tests')
parser.add_argument(
'-pt', '--pytest', action='store_true',
help='If true, use `pytest` to execute the tests. E.g., this runs '
'TestTorch with pytest in verbose and coverage mode: '
'python run_test.py -vci torch -pt')
parser.add_argument(
'-c', '--coverage', action='store_true', help='enable coverage',
default=PYTORCH_COLLECT_COVERAGE)
parser.add_argument(
'-i',
'--include',
nargs='+',
choices=TestChoices(TESTS),
default=TESTS,
metavar='TESTS',
help='select a set of tests to include (defaults to ALL tests).'
' tests can be specified with module name, module.TestClass'
' or module.TestClass.test_method')
parser.add_argument(
'-x',
'--exclude',
nargs='+',
choices=TESTS,
metavar='TESTS',
default=[],
help='select a set of tests to exclude')
parser.add_argument(
'-f',
'--first',
choices=TESTS,
metavar='TESTS',
help='select the test to start from (excludes previous tests)')
parser.add_argument(
'-l',
'--last',
choices=TESTS,
metavar='TESTS',
help='select the last test to run (excludes following tests)')
parser.add_argument(
'--bring-to-front',
nargs='+',
choices=TestChoices(TESTS),
default=[],
metavar='TESTS',
help='select a set of tests to run first. This can be used in situations'
' where you want to run all tests, but care more about some set, '
'e.g. after making a change to a specific component')
parser.add_argument(
'--ignore-win-blocklist',
action='store_true',
help='always run blocklisted windows tests')
parser.add_argument(
'--determine-from',
help='File of affected source filenames to determine which tests to run.')
parser.add_argument(
'--continue-through-error',
action='store_true',
help='Runs the full test suite despite one of the tests failing')
parser.add_argument(
'additional_unittest_args',
nargs='*',
help='additional arguments passed through to unittest, e.g., '
'python run_test.py -i sparse -- TestSparse.test_factory_size_check')
parser.add_argument(
'--export-past-test-times',
nargs='?',
type=str,
const=TEST_TIMES_FILE,
help='dumps test times from previous S3 stats into a file, format JSON',
)
parser.add_argument(
'--shard',
nargs=2,
type=int,
help='runs a shard of the tests (taking into account other selections), e.g., '
'--shard 2 3 will break up the selected tests into 3 shards and run the tests '
'in the 2nd shard (the first number should not exceed the second)',
)
parser.add_argument(
'--exclude-jit-executor',
action='store_true',
help='exclude tests that are run for a specific jit config'
)
return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
"""Find the index of the first or last occurrence of a given test/test module in the list of selected tests.
This function is used to determine the indices when slicing the list of selected tests when
``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.
:attr:`selected_tests` can be a list that contains multiple consecutive occurrences of tests
as part of the same test module, e.g.:
```
selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
```
If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.
If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.
Args:
test (str): Name of test to lookup
selected_tests (list): List of tests
find_last_index (bool, optional): should we lookup the index of first or last
occurrence (first is default)
Returns:
index of the first or last occurrence of the given test
"""
idx = 0
found_idx = -1
for t in selected_tests:
if t.startswith(test):
found_idx = idx
if not find_last_index:
break
idx += 1
return found_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
for exclude_test in exclude_list:
tests_copy = selected_tests[:]
for test in tests_copy:
if test.startswith(exclude_test):
if exclude_message is not None:
print_to_stderr('Excluding {} {}'.format(test, exclude_message))
selected_tests.remove(test)
return selected_tests
def get_selected_tests(options):
selected_tests = options.include
if options.bring_to_front:
to_front = set(options.bring_to_front)
selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,
selected_tests))
if options.first:
first_index = find_test_index(options.first, selected_tests)
selected_tests = selected_tests[first_index:]
if options.last:
last_index = find_test_index(options.last, selected_tests, find_last_index=True)
selected_tests = selected_tests[:last_index + 1]
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
assert which_shard <= num_shards, "Selected shard must be less than or equal to the total number of shards"
assert num_shards <= len(selected_tests), f"Number of shards must not exceed the number of selected tests ({len(selected_tests)})"
selected_tests = get_shard(which_shard, num_shards, selected_tests)
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == 'win32' and not options.ignore_win_blocklist:
target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')
if target_arch != 'x64':
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_jit')
WINDOWS_BLOCKLIST.append('jit')
WINDOWS_BLOCKLIST.append('jit_fuser')
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')
return selected_tests
def test_impact_of_file(filename):
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in ['.jenkins', '.circleci']:
return 'CI'
if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:
return 'NONE'
elif parts[0] == 'torch':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TORCH'
elif parts[0] == 'caffe2':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'CAFFE2'
elif parts[0] == 'test':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TEST'
return 'UNKNOWN'
def log_test_reason(file_type, filename, test, options):
if options.verbose:
print_to_stderr(
'Determination found {} file {} -- running {}'.format(
file_type,
filename,
test,
)
)
def get_dep_modules(test):
# Cache results in case of repetition
if test in _DEP_MODULES_CACHE:
return _DEP_MODULES_CACHE[test]
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_location = os.path.join(repo_root, 'test', test + '.py')
finder = modulefinder.ModuleFinder(
# Ideally exclude all third party modules, to speed up calculation.
excludes=[
'scipy',
'numpy',
'numba',
'multiprocessing',
'sklearn',
'setuptools',
'hypothesis',
'llvmlite',
'joblib',
'email',
'importlib',
'unittest',
'urllib',
'json',
'collections',
# Modules below are excluded because they are hitting https://bugs.python.org/issue40350
# Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
'mpl_toolkits',
'google',
'onnx',
# Triggers RecursionError
'mypy'
],
)
# HACK: some platforms default to ascii, so we can't just run_script :(
with open(test_location, 'r', encoding='utf-8') as fp:
finder.load_module('__main__', fp, test_location, ('', 'r', 1))
dep_modules = set(finder.modules.keys())
_DEP_MODULES_CACHE[test] = dep_modules
return dep_modules
def determine_target(target_det_list, test, touched_files, options):
test = parse_test_module(test)
# Some tests are faster to execute than to determine.
if test not in target_det_list:
if options.verbose:
print_to_stderr(f'Running {test} without determination')
return True
# HACK: "no_ninja" is not a real module
if test.endswith('_no_ninja'):
test = test[:(-1 * len('_no_ninja'))]
if test.endswith('_ninja'):
test = test[:(-1 * len('_ninja'))]
dep_modules = get_dep_modules(test)
for touched_file in touched_files:
file_type = test_impact_of_file(touched_file)
if file_type == 'NONE':
continue
elif file_type == 'CI':
# Force all tests to run if any change is made to the CI
# configurations.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type == 'UNKNOWN':
# Assume uncategorized source files can affect every test.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type in ['TORCH', 'CAFFE2', 'TEST']:
parts = os.path.splitext(touched_file)[0].split(os.sep)
touched_module = ".".join(parts)
# test/ path does not have a "test." namespace
if touched_module.startswith('test.'):
touched_module = touched_module.split('test.')[1]
if (
touched_module in dep_modules
or touched_module == test.replace('/', '.')
):
log_test_reason(file_type, touched_file, test, options)
return True
# If nothing has determined that the test should run, don't run it.
if options.verbose:
print_to_stderr(f'Determination is skipping {test}')
return False
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
test_module = parse_test_module(test)
# Printing the date here can help diagnose which tests are slow
print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(test_module, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
return_code, bool), 'Return code should be an integer'
if return_code == 0:
return None
message = f'{test} failed!'
if return_code < 0:
# subprocess.Popen returns the child process' exit signal as
# return code -N, where N is the signal number.
signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
message += f' Received signal: {signal_name}'
return message
def export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:
if os.path.exists(test_times_filename):
print(f'Overwriting existing file: {test_times_filename}')
with open(test_times_filename, 'w+') as file:
job_times_json = get_job_times_json(test_times)
json.dump(job_times_json, file, indent=' ', separators=(',', ': '))
file.write('\n')
def query_changed_test_files() -> List[str]:
cmd = ["git", "diff", "--name-only", "origin/master", "HEAD"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
lines = proc.stdout.decode().strip().split("\n")
lines = [line.strip() for line in lines]
return lines
def reorder_tests(tests: List[str]) -> List[str]:
try:
changed_files = query_changed_test_files()
except Exception:
# If unable to get changed files from git, quit without doing any sorting
return tests
prefix = f"test{os.path.sep}"
changed_tests = [f for f in changed_files if f.startswith(prefix) and f.endswith(".py")]
changed_tests = [f[len(prefix):] for f in changed_tests]
changed_tests = [f[:-len(".py")] for f in changed_tests]
bring_to_front = []
the_rest = []
for test in tests:
if test in changed_tests:
bring_to_front.append(test)
else:
the_rest.append(test)
sorted_tests = bring_to_front + the_rest
if len(sorted_tests) != len(tests):
# Something went wrong, bail out without doing any sorting
return tests
return sorted_tests
def main():
options = parse_args()
test_times_filename = options.export_past_test_times
if test_times_filename:
print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')
export_S3_test_times(test_times_filename, pull_job_times_from_S3())
return
test_directory = os.path.dirname(os.path.abspath(__file__))
selected_tests = get_selected_tests(options)
if options.verbose:
print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
if options.coverage and not PYTORCH_COLLECT_COVERAGE:
shell(['coverage', 'erase'])
if options.jit:
selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)
if options.determine_from is not None and os.path.exists(options.determine_from):
slow_tests = get_slow_tests_based_on_S3()
print('Added the following tests to target_det tests as calculated based on S3:')
print(slow_tests)
with open(options.determine_from, 'r') as fh:
touched_files = [
os.path.normpath(name.strip()) for name in fh.read().split('\n')
if len(name.strip()) > 0
]
# HACK: Ensure the 'test' paths can be traversed by Modulefinder
sys.path.append('test')
selected_tests = [
test for test in selected_tests
if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)
]
sys.path.remove('test')
selected_tests = reorder_tests(selected_tests)
has_failed = False
failure_messages = []
try:
for test in selected_tests:
options_clone = copy.deepcopy(options)
if test in USE_PYTEST_LIST:
options_clone.pytest = True
err_message = run_test_module(test, test_directory, options_clone)
if err_message is None:
continue
has_failed = True
failure_messages.append(err_message)
if not options_clone.continue_through_error:
raise RuntimeError(err_message)
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
test_dir = os.path.dirname(os.path.abspath(__file__))
with set_cwd(test_dir):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
cov.combine(strict=False)
cov.save()
if not PYTORCH_COLLECT_COVERAGE:
cov.html_report()
if options.continue_through_error and has_failed:
for err in failure_messages:
print_to_stderr(err)
sys.exit(1)
if __name__ == '__main__':
main()
|
import json
from nonebot import logger
from omega_miya.utils.Omega_plugin_utils import HttpFetcher
from omega_miya.utils.Omega_Base import Result
from .request_utils import BiliRequestUtils
from .data_classes import BiliInfo, BiliResult
class BiliDynamic(object):
__DYNAMIC_DETAIL_API_URL = 'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/get_dynamic_detail'
__DYNAMIC_ROOT_URL = 'https://t.bilibili.com/'
__HEADERS = BiliRequestUtils.HEADERS.copy()
__HEADERS.update({'origin': 'https://t.bilibili.com',
'referer': 'https://t.bilibili.com/'})
def __init__(self, dynamic_id: int):
self.dynamic_id = dynamic_id
@property
def dy_id(self):
return str(self.dynamic_id)
async def get_info(self) -> Result.DictResult:
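# Query the Bilibili dynamic detail API and return the raw 'card' payload as a dict wrapped in a DictResult.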
cookies = None
# Check cookies
cookies_res = BiliRequestUtils.get_cookies()
if cookies_res.success():
cookies = cookies_res.result
paras = {'dynamic_id': self.dy_id}
fetcher = HttpFetcher(
timeout=10, flag='bilibili_dynamic', headers=self.__HEADERS, cookies=cookies)
result = await fetcher.get_json(url=self.__DYNAMIC_DETAIL_API_URL, params=paras)
if result.error:
return result
if result.result.get('code') != 0:
return Result.DictResult(error=True, info=result.result.get('message'), result={})
try:
data_dict = dict(result.result['data']['card'])
return Result.DictResult(error=False, info='Success', result=data_dict)
except Exception as e:
return Result.DictResult(error=True, info=repr(e), result={})
@classmethod
def data_parser(cls, dynamic_data: dict) -> BiliResult.DynamicInfoResult:
"""
Parse the dynamic data returned by get_info or get_dynamic_history
:param dynamic_data: data as returned by BiliDynamic.get_info or BiliUser.get_dynamic_history, see the Bilibili API
:return: DynamicInfo
"""
# Parse the description section
try:
dynamic_desc = dynamic_data['desc']
dynamic_id = dynamic_desc['dynamic_id']
type_ = dynamic_desc['type']
url = f"{cls.__DYNAMIC_ROOT_URL}{dynamic_id}"
# Handle some special cases
if type_ == 1:
# type=1, this is a forwarded (reposted) dynamic
orig_dy_id = dynamic_desc['origin']['dynamic_id']
orig_type = dynamic_desc['origin']['type']
# Backup fields
# orig_dy_id = dynamic_desc['orig_dy_id']
# orig_type = dynamic_desc['orig_type']
else:
orig_dy_id = 0
orig_type = 0
if type_ == 512:
# Special dynamic type for bangumi (anime), no user info
user_id = 0
user_name = '哔哩哔哩番剧'
else:
user_id = dynamic_desc['user_profile']['info']['uid']
user_name = dynamic_desc['user_profile']['info']['uname']
except Exception as e:
logger.error(f'BiliDynamic: Parse dynamic desc failed, error info: {repr(e)}')
return BiliResult.DynamicInfoResult(
error=True, info=f'Parse dynamic desc failed, error: {repr(e)}', result=None)
# Parse the content section
try:
dynamic_card_data = dynamic_data['card']
dynamic_card = json.loads(dynamic_card_data)
"""
Dynamic type values map as follows:
1 forward (repost)
2 post (with pictures)
4 post (without pictures)
8 video upload
16 short video (contains playurl)
32 bangumi update
64 article (column)
256 audio
512 bangumi update (with detailed info)
1024 unknown (never encountered)
2048 Bilibili activity related (live calendar, sketches/plans or the like, probably)
"""
pictures = []
# type=1, a forwarded (reposted) dynamic
if type_ == 1:
origin_user = dynamic_card.get('origin_user')
if origin_user and origin_user['info'].get('uname'):
origin_user_name = origin_user['info'].get('uname')
desc = f'转发了{origin_user_name}的动态'
else:
desc = '转发了一条动态'
content = dynamic_card['item']['content']
title = None
description = None
# type=2, an original dynamic (with pictures)
elif type_ == 2:
desc = '发布了新动态'
content = dynamic_card['item']['description']
pictures.extend([pic_info['img_src'] for pic_info in dynamic_card['item']['pictures']])
title = None
description = None
# type=4, an original dynamic (without pictures)
elif type_ == 4:
desc = '发布了新动态'
content = dynamic_card['item']['content']
title = None
description = None
# type=8, a video release
elif type_ == 8:
desc = '发布了新的视频'
content = dynamic_card['dynamic']
pictures.append(dynamic_card['pic'])
title = dynamic_card['title']
description = dynamic_card['desc']
# type=16, a short video (seems no longer in use?)
elif type_ == 16:
desc = '发布了新的小视频动态'
content = dynamic_card['item']['description']
title = None
description = None
# type=32, a bangumi update
elif type_ == 32:
desc = '发布了新的番剧'
content = dynamic_card['dynamic']
pictures.append(dynamic_card['pic'])
title = dynamic_card['title']
description = None
# type=64, an article dynamic
elif type_ == 64:
desc = '发布了新的文章'
content = dynamic_card['summary']
pictures.extend(dynamic_card['origin_image_urls'])
title = dynamic_card['title']
description = None
# type=256, audio
elif type_ == 256:
desc = '发布了新的音乐'
content = dynamic_card['intro']
pictures.append(dynamic_card['cover'])
title = dynamic_card['title']
description = None
# type=512, bangumi update (detailed info)
elif type_ == 512:
desc = '发布了新的番剧'
content = dynamic_card['index_title']
pictures.append(dynamic_card['cover'])
title = dynamic_card['apiSeasonInfo']['title']
description = None
# type=2048, Bilibili activity related
elif type_ == 2048:
desc = '发布了一条活动相关动态'
content = dynamic_card['vest']['content']
title = dynamic_card['sketch']['title']
description = dynamic_card['sketch']['desc_text']
# type=4200, live room dynamic (presumably)
elif type_ == 4200:
desc = '发布了一条直播间动态'
content = f"{dynamic_card["uname"]}的直播间 - {dynamic_card["title"]}"
pictures.append(dynamic_card['cover'])
title = dynamic_card['title']
description = None
# Other unknown types
else:
desc = 'Unknown'
content = 'Unknown'
title = None
description = None
data = BiliInfo.DynamicInfo.DynamicCard(
content=content,
pictures=pictures,
title=title,
description=description
)
except Exception as e:
logger.error(f'BiliDynamic: Parse dynamic card failed, dynamic id: {dynamic_id}, error info: {repr(e)}')
return BiliResult.DynamicInfoResult(
error=True, info=f'Parse dynamic card failed, error: {repr(e)}', result=None)
dynamic_info = BiliInfo.DynamicInfo(
dynamic_id=dynamic_id,
user_id=user_id,
user_name=user_name,
type=type_,
desc=desc,
url=url,
orig_dy_id=orig_dy_id,
orig_type=orig_type,
data=data
)
return BiliResult.DynamicInfoResult(error=False, info='Success', result=dynamic_info)
__all__ = [
'BiliDynamic'
]
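# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of driving the class above: fetch one dynamic by id and
# feed the raw payload into data_parser. The dynamic id is a made-up placeholder and
# the snippet assumes it runs inside an existing asyncio event loop / bot context.
async def _example_parse_dynamic(dynamic_id: int = 123456789) -> None:
    dynamic = BiliDynamic(dynamic_id=dynamic_id)
    raw = await dynamic.get_info()
    if raw.error:
        print(f'fetch failed: {raw.info}')
        return
    parsed = BiliDynamic.data_parser(dynamic_data=raw.result)
    # parsed.result is a BiliInfo.DynamicInfo instance on success, None on parse failure
    print(parsed.info)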
| import json
from nonebot import logger
from omega_miya.utils.Omega_plugin_utils import HttpFetcher
from omega_miya.utils.Omega_Base import Result
from .request_utils import BiliRequestUtils
from .data_classes import BiliInfo, BiliResult
class BiliDynamic(object):
__DYNAMIC_DETAIL_API_URL = 'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/get_dynamic_detail'
__DYNAMIC_ROOT_URL = 'https://t.bilibili.com/'
__HEADERS = BiliRequestUtils.HEADERS.copy()
__HEADERS.update({'origin': 'https://t.bilibili.com',
'referer': 'https://t.bilibili.com/'})
def __init__(self, dynamic_id: int):
self.dynamic_id = dynamic_id
@property
def dy_id(self):
return str(self.dynamic_id)
async def get_info(self) -> Result.DictResult:
cookies = None
# Check cookies
cookies_res = BiliRequestUtils.get_cookies()
if cookies_res.success():
cookies = cookies_res.result
paras = {'dynamic_id': self.dy_id}
fetcher = HttpFetcher(
timeout=10, flag='bilibili_dynamic', headers=self.__HEADERS, cookies=cookies)
result = await fetcher.get_json(url=self.__DYNAMIC_DETAIL_API_URL, params=paras)
if result.error:
return result
if result.result.get('code') != 0:
return Result.DictResult(error=True, info=result.result.get('message'), result={})
try:
data_dict = dict(result.result['data']['card'])
return Result.DictResult(error=False, info='Success', result=data_dict)
except Exception as e:
return Result.DictResult(error=True, info=repr(e), result={})
@classmethod
def data_parser(cls, dynamic_data: dict) -> BiliResult.DynamicInfoResult:
"""
Parse dynamic data obtained from get_info or get_dynamic_history
:param dynamic_data: data returned by BiliDynamic.get_info or BiliUser.get_dynamic_history, see the Bilibili api
:return: DynamicInfo
"""
# Parse the description part
try:
dynamic_desc = dynamic_data['desc']
dynamic_id = dynamic_desc['dynamic_id']
type_ = dynamic_desc['type']
url = f"{cls.__DYNAMIC_ROOT_URL}{dynamic_id}"
# Handle some special cases
if type_ == 1:
# type=1, this is a reposted dynamic
orig_dy_id = dynamic_desc['origin']['dynamic_id']
orig_type = dynamic_desc['origin']['type']
# Fallback
# orig_dy_id = dynamic_desc['orig_dy_id']
# orig_type = dynamic_desc['orig_type']
else:
orig_dy_id = 0
orig_type = 0
if type_ == 512:
# Special bangumi dynamic type, no user info
user_id = 0
user_name = '哔哩哔哩番剧'
else:
user_id = dynamic_desc['user_profile']['info']['uid']
user_name = dynamic_desc['user_profile']['info']['uname']
except Exception as e:
logger.error(f'BiliDynamic: Parse dynamic desc failed, error info: {repr(e)}')
return BiliResult.DynamicInfoResult(
error=True, info=f'Parse dynamic desc failed, error: {repr(e)}', result=None)
# Parse the content part
try:
dynamic_card_data = dynamic_data['card']
dynamic_card = json.loads(dynamic_card_data)
"""
Dynamic type mapping:
1     repost
2     post (with pictures)
4     post (text only)
8     video upload
16    short video (contains a playurl address)
32    bangumi update
64    article
256   audio
512   bangumi update (with details)
1024  unknown (never encountered)
2048  Bilibili activity related (live calendar, sketch/plan or the like) (probably)
"""
pictures = []
# type=1, this is a reposted dynamic
if type_ == 1:
origin_user = dynamic_card.get('origin_user')
if origin_user and origin_user['info'].get('uname'):
origin_user_name = origin_user['info'].get('uname')
desc = f'转发了{origin_user_name}的动态'
else:
desc = f'转发了一条动态'
content = dynamic_card['item']['content']
title = None
description = None
# type=2, this is an original dynamic (with pictures)
elif type_ == 2:
desc = '发布了新动态'
content = dynamic_card['item']['description']
pictures.extend([pic_info['img_src'] for pic_info in dynamic_card['item']['pictures']])
title = None
description = None
# type=4, this is an original dynamic (text only)
elif type_ == 4:
desc = '发布了新动态'
content = dynamic_card['item']['content']
title = None
description = None
# type=8, this is a video upload
elif type_ == 8:
desc = '发布了新的视频'
content = dynamic_card['dynamic']
pictures.append(dynamic_card['pic'])
title = dynamic_card['title']
description = dynamic_card['desc']
# type=16, this is a short video (seems to have stopped working now?)
elif type_ == 16:
desc = '发布了新的小视频动态'
content = dynamic_card['item']['description']
title = None
description = None
# type=32, this is a bangumi update
elif type_ == 32:
desc = '发布了新的番剧'
content = dynamic_card['dynamic']
pictures.append(dynamic_card['pic'])
title = dynamic_card['title']
description = None
# type=64, this is an article dynamic
elif type_ == 64:
desc = '发布了新的文章'
content = dynamic_card['summary']
pictures.extend(dynamic_card['origin_image_urls'])
title = dynamic_card['title']
description = None
# type=256, this is audio
elif type_ == 256:
desc = '发布了新的音乐'
content = dynamic_card['intro']
pictures.append(dynamic_card['cover'])
title = dynamic_card['title']
description = None
# type=512, bangumi update (with details)
elif type_ == 512:
desc = '发布了新的番剧'
content = dynamic_card['index_title']
pictures.append(dynamic_card['cover'])
title = dynamic_card['apiSeasonInfo']['title']
description = None
# type=2048, Bilibili activity related
elif type_ == 2048:
desc = '发布了一条活动相关动态'
content = dynamic_card['vest']['content']
title = dynamic_card['sketch']['title']
description = dynamic_card['sketch']['desc_text']
# type=4200, live room dynamic (presumably)
elif type_ == 4200:
desc = '发布了一条直播间动态'
content = f"{dynamic_card['uname']}的直播间 - {dynamic_card['title']}"
pictures.append(dynamic_card['cover'])
title = dynamic_card['title']
description = None
# Other unknown types
else:
desc = 'Unknown'
content = 'Unknown'
title = None
description = None
data = BiliInfo.DynamicInfo.DynamicCard(
content=content,
pictures=pictures,
title=title,
description=description
)
except Exception as e:
logger.error(f'BiliDynamic: Parse dynamic card failed, dynamic id: {dynamic_id}, error info: {repr(e)}')
return BiliResult.DynamicInfoResult(
error=True, info=f'Parse dynamic card failed, error: {repr(e)}', result=None)
dynamic_info = BiliInfo.DynamicInfo(
dynamic_id=dynamic_id,
user_id=user_id,
user_name=user_name,
type=type_,
desc=desc,
url=url,
orig_dy_id=orig_dy_id,
orig_type=orig_type,
data=data
)
return BiliResult.DynamicInfoResult(error=False, info='Success', result=dynamic_info)
__all__ = [
'BiliDynamic'
]
|
from datetime import datetime, timedelta
from typing import Optional, Sequence, Union, overload
from polars import internals as pli
from polars.datatypes import py_type_to_dtype
from polars.utils import _datetime_to_pl_timestamp, _timedelta_to_pl_duration
try:
from polars.polars import concat_df as _concat_df
from polars.polars import concat_lf as _concat_lf
from polars.polars import concat_series as _concat_series
from polars.polars import py_date_range as _py_date_range
from polars.polars import py_diag_concat_df as _diag_concat_df
_DOCUMENTING = False
except ImportError: # pragma: no cover
_DOCUMENTING = True
def get_dummies(df: "pli.DataFrame") -> "pli.DataFrame":
"""
Convert categorical variables into dummy/indicator variables.
Parameters
----------
df
DataFrame to convert.
"""
return df.to_dummies()
@overload
def concat(
items: Sequence["pli.DataFrame"],
rechunk: bool = True,
how: str = "vertical",
) -> "pli.DataFrame":
...
@overload
def concat(
items: Sequence["pli.Series"],
rechunk: bool = True,
how: str = "vertical",
) -> "pli.Series":
...
def concat(
items: Union[
Sequence["pli.DataFrame"], Sequence["pli.Series"], Sequence["pli.LazyFrame"]
],
rechunk: bool = True,
how: str = "vertical",
) -> Union["pli.DataFrame", "pli.Series", "pli.LazyFrame"]:
"""
Aggregate all the Dataframes/Series in a List of DataFrames/Series to a single DataFrame/Series.
Parameters
----------
items
DataFrames/Series/LazyFrames to concatenate.
rechunk
rechunk the final DataFrame/Series.
how
Only used if the items are DataFrames.
On of {"vertical", "diagonal"}.
Vertical: Applies multiple `vstack` operations.
Diagonal: Finds a union between the column schemas and fills missing column values with null.
"""
if not len(items) > 0:
raise ValueError("cannot concat empty list")
out: Union["pli.Series", "pli.DataFrame", "pli.LazyFrame"]
if isinstance(items[0], pli.DataFrame):
if how == "vertical":
out = pli.wrap_df(_concat_df(items))
elif how == "diagonal":
out = pli.wrap_df(_diag_concat_df(items))
else:
raise ValueError(
f"how should be one of {"vertical", "diagonal"}, got {how}"
)
elif isinstance(items[0], pli.LazyFrame):
return pli.wrap_ldf(_concat_lf(items, rechunk))
else:
out = pli.wrap_s(_concat_series(items))
if rechunk:
return out.rechunk()
return out
def repeat(
val: Union[int, float, str, bool], n: int, name: Optional[str] = None
) -> "pli.Series":
"""
Repeat a single value n times and collect into a Series.
Parameters
----------
val
Value to repeat.
n
Number of repeats.
name
Optional name of the Series.
"""
if name is None:
name = ""
dtype = py_type_to_dtype(type(val))
s = pli.Series._repeat(name, val, n, dtype)
return s
def arg_where(mask: "pli.Series") -> "pli.Series":
"""
Get index values where the Boolean mask evaluates to True.
Parameters
----------
mask
Boolean Series.
Returns
-------
UInt32 Series
"""
return mask.arg_true()
def date_range(
low: datetime,
high: datetime,
interval: Union[str, timedelta],
closed: Optional[str] = "both",
name: Optional[str] = None,
) -> "pli.Series":
"""
Create a date range of type `Datetime`.
Parameters
----------
low
Lower bound of the date range
high
Upper bound of the date range
interval
Interval periods
A python timedelta object or a polars duration `str`
e.g.: "3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
closed {None, 'left', 'right', 'both', 'none'}
Make the interval closed to the 'left', 'right', 'none' or 'both' sides.
name
Name of the output Series
Returns
-------
A Series of type `Datetime`
Examples
--------
>>> from datetime import datetime
>>> pl.date_range(datetime(1985, 1, 1), datetime(2015, 7, 1), "1d12h")
shape: (7426,)
Series: '' [datetime]
[
1985-01-01 00:00:00
1985-01-02 12:00:00
1985-01-04 00:00:00
1985-01-05 12:00:00
1985-01-07 00:00:00
1985-01-08 12:00:00
1985-01-10 00:00:00
1985-01-11 12:00:00
1985-01-13 00:00:00
1985-01-14 12:00:00
1985-01-16 00:00:00
1985-01-17 12:00:00
...
2015-06-14 00:00:00
2015-06-15 12:00:00
2015-06-17 00:00:00
2015-06-18 12:00:00
2015-06-20 00:00:00
2015-06-21 12:00:00
2015-06-23 00:00:00
2015-06-24 12:00:00
2015-06-26 00:00:00
2015-06-27 12:00:00
2015-06-29 00:00:00
2015-06-30 12:00:00
]
"""
if isinstance(interval, timedelta):
interval = _timedelta_to_pl_duration(interval)
start = _datetime_to_pl_timestamp(low)
stop = _datetime_to_pl_timestamp(high)
if name is None:
name = ""
return pli.wrap_s(_py_date_range(start, stop, interval, closed, name))
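# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A small, hedged example of the helpers above: vertically concatenate two DataFrames
# and build a daily date range. The column name and values are made up for illustration,
# and polars is assumed to be importable as `pl`.
def _example_concat_and_date_range():
    from datetime import datetime
    import polars as pl

    df1 = pl.DataFrame({"a": [1, 2]})
    df2 = pl.DataFrame({"a": [3, 4]})
    stacked = pl.concat([df1, df2], how="vertical")  # 4 rows, rechunked into one chunk

    days = pl.date_range(datetime(2021, 1, 1), datetime(2021, 1, 4), "1d")  # 4 daily timestamps
    return stacked, days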
| from datetime import datetime, timedelta
from typing import Optional, Sequence, Union, overload
from polars import internals as pli
from polars.datatypes import py_type_to_dtype
from polars.utils import _datetime_to_pl_timestamp, _timedelta_to_pl_duration
try:
from polars.polars import concat_df as _concat_df
from polars.polars import concat_lf as _concat_lf
from polars.polars import concat_series as _concat_series
from polars.polars import py_date_range as _py_date_range
from polars.polars import py_diag_concat_df as _diag_concat_df
_DOCUMENTING = False
except ImportError: # pragma: no cover
_DOCUMENTING = True
def get_dummies(df: "pli.DataFrame") -> "pli.DataFrame":
"""
Convert categorical variables into dummy/indicator variables.
Parameters
----------
df
DataFrame to convert.
"""
return df.to_dummies()
@overload
def concat(
items: Sequence["pli.DataFrame"],
rechunk: bool = True,
how: str = "vertical",
) -> "pli.DataFrame":
...
@overload
def concat(
items: Sequence["pli.Series"],
rechunk: bool = True,
how: str = "vertical",
) -> "pli.Series":
...
def concat(
items: Union[
Sequence["pli.DataFrame"], Sequence["pli.Series"], Sequence["pli.LazyFrame"]
],
rechunk: bool = True,
how: str = "vertical",
) -> Union["pli.DataFrame", "pli.Series", "pli.LazyFrame"]:
"""
Aggregate all the Dataframes/Series in a List of DataFrames/Series to a single DataFrame/Series.
Parameters
----------
items
DataFrames/Series/LazyFrames to concatenate.
rechunk
rechunk the final DataFrame/Series.
how
Only used if the items are DataFrames.
On of {"vertical", "diagonal"}.
Vertical: Applies multiple `vstack` operations.
Diagonal: Finds a union between the column schemas and fills missing column values with null.
"""
if not len(items) > 0:
raise ValueError("cannot concat empty list")
out: Union["pli.Series", "pli.DataFrame", "pli.LazyFrame"]
if isinstance(items[0], pli.DataFrame):
if how == "vertical":
out = pli.wrap_df(_concat_df(items))
elif how == "diagonal":
out = pli.wrap_df(_diag_concat_df(items))
else:
raise ValueError(
f"how should be one of {'vertical', 'diagonal'}, got {how}"
)
elif isinstance(items[0], pli.LazyFrame):
return pli.wrap_ldf(_concat_lf(items, rechunk))
else:
out = pli.wrap_s(_concat_series(items))
if rechunk:
return out.rechunk()
return out
def repeat(
val: Union[int, float, str, bool], n: int, name: Optional[str] = None
) -> "pli.Series":
"""
Repeat a single value n times and collect into a Series.
Parameters
----------
val
Value to repeat.
n
Number of repeats.
name
Optional name of the Series.
"""
if name is None:
name = ""
dtype = py_type_to_dtype(type(val))
s = pli.Series._repeat(name, val, n, dtype)
return s
def arg_where(mask: "pli.Series") -> "pli.Series":
"""
Get index values where the Boolean mask evaluates to True.
Parameters
----------
mask
Boolean Series.
Returns
-------
UInt32 Series
"""
return mask.arg_true()
def date_range(
low: datetime,
high: datetime,
interval: Union[str, timedelta],
closed: Optional[str] = "both",
name: Optional[str] = None,
) -> "pli.Series":
"""
Create a date range of type `Datetime`.
Parameters
----------
low
Lower bound of the date range
high
Upper bound of the date range
interval
Interval periods
A python timedelta object or a polars duration `str`
e.g.: "3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
closed {None, 'left', 'right', 'both', 'none'}
Make the interval closed to the 'left', 'right', 'none' or 'both' sides.
name
Name of the output Series
Returns
-------
A Series of type `Datetime`
Examples
--------
>>> from datetime import datetime
>>> pl.date_range(datetime(1985, 1, 1), datetime(2015, 7, 1), "1d12h")
shape: (7426,)
Series: '' [datetime]
[
1985-01-01 00:00:00
1985-01-02 12:00:00
1985-01-04 00:00:00
1985-01-05 12:00:00
1985-01-07 00:00:00
1985-01-08 12:00:00
1985-01-10 00:00:00
1985-01-11 12:00:00
1985-01-13 00:00:00
1985-01-14 12:00:00
1985-01-16 00:00:00
1985-01-17 12:00:00
...
2015-06-14 00:00:00
2015-06-15 12:00:00
2015-06-17 00:00:00
2015-06-18 12:00:00
2015-06-20 00:00:00
2015-06-21 12:00:00
2015-06-23 00:00:00
2015-06-24 12:00:00
2015-06-26 00:00:00
2015-06-27 12:00:00
2015-06-29 00:00:00
2015-06-30 12:00:00
]
"""
if isinstance(interval, timedelta):
interval = _timedelta_to_pl_duration(interval)
start = _datetime_to_pl_timestamp(low)
stop = _datetime_to_pl_timestamp(high)
if name is None:
name = ""
return pli.wrap_s(_py_date_range(start, stop, interval, closed, name))
|
import sys
import os
import re
from glob import glob
from shutil import rmtree
from subprocess import run
from tools.pdocs import console, pdoc3serve, pdoc3, shipDocs
ORG = "annotation"
REPO = "text-fabric"
PKG = "tf"
PACKAGE = "text-fabric"
SCRIPT = "/Library/Frameworks/Python.framework/Versions/Current/bin/{PACKAGE}"
DIST = "dist"
VERSION_CONFIG = dict(
setup=dict(
file="setup.py",
re=re.compile(r"""version\s*=\s*['"]([^'"]*)['"]"""),
mask="version='{}'",
),
parameters=dict(
file="tf/parameters.py",
re=re.compile(r"""\bVERSION\s*=\s*['"]([^'"]*)['"]"""),
mask="VERSION = '{}'",
),
)
AN_BASE = os.path.expanduser(f"~/github/{ORG}")
TUT_BASE = f"{AN_BASE}/tutorials"
TF_BASE = f"{AN_BASE}/{REPO}"
TEST_BASE = f"{TF_BASE}/test"
APP_BASE = f"{TF_BASE}/apps"
currentVersion = None
newVersion = None
appPat = r"^.*/app-([^/]*)$"
appRe = re.compile(appPat)
apps = set()
for appDir in glob(f"{AN_BASE}/app-*"):
match = appRe.fullmatch(appDir)
if match:
apps.add(match.group(1))
apps = sorted(apps)
appStr = ", ".join(apps)
HELP = f"""
python3 build.py command
command:
-h
--help
docs : serve docs locally
pdocs : build docs
sdocs : ship docs
clean : clean local develop build
l : local develop build
lp : local production build
i : local non-develop build
g : push to github, code and docs
v : show current version
r1 : version becomes r1+1.0.0
r2 : version becomes r1.r2+1.0
r3 : version becomes r1.r2.r3+1
ship : build for shipping
apps : commit and push all tf apps
tut : commit and push the tutorials repo
a : open {PACKAGE} browser on specific dataset
({appStr})
t : run test suite (relations, qperf)
data : build data files for github release
For g and ship you need to pass a commit message.
For data you need to pass an app argument:
{appStr}
"""
def readArgs():
args = sys.argv[1:]
if not len(args) or args[0] in {"-h", "--help", "help"}:
console(HELP)
return (False, None, [])
arg = args[0]
if arg not in {
"a",
"t",
"docs",
"pdocs",
"sdocs",
"clean",
"l",
"lp",
"i",
"g",
"ship",
"data",
"apps",
"tut",
"v",
"r1",
"r2",
"r3",
}:
console(HELP)
return (False, None, [])
if arg in {"g", "apps", "tut", "ship"}:
if len(args) < 2:
console("Provide a commit message")
return (False, None, [])
return (arg, args[1], args[2:])
if arg in {"a", "t", "data"}:
if len(args) < 2:
if arg in {"a", "data"}:
console(f"Provide a data source [{appStr}]")
elif arg in {"t"}:
console("Provide a test suite [relations, qperf]")
return (False, None, [])
return (arg, args[1], args[2:])
return (arg, None, [])
def incVersion(version, task):
comps = [int(c) for c in version.split(".")]
(major, minor, update) = comps
if task == "r1":
major += 1
minor = 0
update = 0
elif task == "r2":
minor += 1
update = 0
elif task == "r3":
update += 1
return ".".join(str(c) for c in (major, minor, update))
def replaceVersion(task, mask):
def subVersion(match):
global currentVersion
global newVersion
currentVersion = match.group(1)
newVersion = incVersion(currentVersion, task)
return mask.format(newVersion)
return subVersion
def showVersion():
global currentVersion
versions = set()
for (key, c) in VERSION_CONFIG.items():
with open(c["file"]) as fh:
text = fh.read()
match = c["re"].search(text)
version = match.group(1)
console(f'{version} (according to {c["file"]})')
versions.add(version)
currentVersion = None
if len(versions) == 1:
currentVersion = list(versions)[0]
def adjustVersion(task):
for (key, c) in VERSION_CONFIG.items():
console(f'Adjusting version in {c["file"]}')
with open(c["file"]) as fh:
text = fh.read()
text = c["re"].sub(replaceVersion(task, c["mask"]), text)
with open(c["file"], "w") as fh:
fh.write(text)
if currentVersion == newVersion:
console(f"Rebuilding version {newVersion}")
else:
console(f"Replacing version {currentVersion} by {newVersion}")
def makeDist(pypi=True):
distFile = "{}-{}".format(PACKAGE, currentVersion)
distFileCompressed = f"{distFile}.tar.gz"
distPath = f"{DIST}/{distFileCompressed}"
distPath = f"{DIST}/*"
if os.path.exists(DIST):
rmtree(DIST)
os.makedirs(DIST, exist_ok=True)
# run(["python3", "setup.py", "sdist", "bdist_wheel"])
run(["python3", "setup.py", "bdist_wheel"])
if pypi:
run(["twine", "upload", "-u", "dirkroorda", distPath])
# run("./purge.sh", shell=True)
def commit(task, msg):
run(["git", "add", "--all", "."])
run(["git", "commit", "-m", msg])
run(["git", "push", "origin", "master"])
if task in {"ship"}:
tagVersion = f"v{currentVersion}"
commitMessage = f"Release {currentVersion}: {msg}"
run(["git", "tag", "-a", tagVersion, "-m", commitMessage])
run(["git", "push", "origin", "--tags"])
def commitApps(msg):
for app in apps:
os.chdir(f"{AN_BASE}/app-{app}")
console(f"In {os.getcwd()}")
run(["git", "add", "--all", "."])
run(["git", "commit", "-m", msg])
run(["git", "push", "origin", "master"])
os.chdir(f"{TF_BASE}")
def commitTut(msg):
os.chdir(f"{TUT_BASE}")
console(f"In {os.getcwd()}")
run(["git", "add", "--all", "."])
run(["git", "commit", "-m", msg])
run(["git", "push", "origin", "master"])
os.chdir(f"{TF_BASE}")
def tfbrowse(dataset, remaining):
rargs = " ".join(remaining)
cmdLine = f"{PACKAGE} {dataset} {rargs}"
try:
run(cmdLine, shell=True)
except KeyboardInterrupt:
pass
def tftest(suite, remaining):
suiteDir = f"{TEST_BASE}/generic"
suiteFile = f"{suite}.py"
good = True
try:
os.chdir(suiteDir)
except Exception:
good = False
console(f'Cannot find TF test directory "{suiteDir}"')
if not good:
return
if not os.path.exists(suiteFile):
console(f'Cannot find TF test suite "{suite}"')
return
rargs = " ".join(remaining)
cmdLine = f"python3 {suiteFile} -v {rargs}"
try:
run(cmdLine, shell=True)
except KeyboardInterrupt:
pass
def clean():
run(["python3", "setup.py", "develop", "-u"])
if os.path.exists(SCRIPT):
os.unlink(SCRIPT)
run(["pip3", "uninstall", "-y", PACKAGE])
def main():
(task, msg, remaining) = readArgs()
if not task:
return
elif task == "a":
tfbrowse(msg, remaining)
elif task == "t":
tftest(msg, remaining)
elif task == "docs":
pdoc3serve(PKG)
elif task == "pdocs":
pdoc3(PKG)
elif task == "sdocs":
shipDocs(ORG, REPO, PKG)
elif task == "clean":
clean()
elif task == "l":
clean()
run(["python3", "setup.py", "develop"])
elif task == "lp":
clean()
run(["python3", "setup.py", "sdist"])
distFiles = glob(f"dist/{PACKAGE}-*.tar.gz")
run(["pip3", "install", distFiles[0]])
elif task == "i":
clean()
makeDist(pypi=False)
run(
[
"pip3",
"install",
"--upgrade",
"--no-index",
"--find-links",
f'file://{TF_BASE}/dist',
PACKAGE,
]
)
elif task == "g":
shipDocs(ORG, REPO, PKG)
commit(task, msg)
elif task == "apps":
commitApps(msg)
elif task == "tut":
commitTut(msg)
elif task == "v":
showVersion()
elif task in {"r", "r1", "r2", "r3"}:
adjustVersion(task)
elif task == "ship":
showVersion()
if not currentVersion:
console("No current version")
return
answer = input("right version ? [yn]")
if answer != "y":
return
shipDocs(ORG, REPO, PKG)
makeDist()
commit(task, msg)
main()
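# --- Illustrative sketch (editor's addition, not part of the original script) ---
# How the version-bump tasks above compose; incVersion is pure, so it can be checked directly.
# The shell commands in the comment are hypothetical invocations of this script.
def _example_version_bumps():
    assert incVersion("8.3.2", "r3") == "8.3.3"  # patch bump
    assert incVersion("8.3.2", "r2") == "8.4.0"  # minor bump resets patch
    assert incVersion("8.3.2", "r1") == "9.0.0"  # major bump resets minor and patch
    # Typical release session (commit message is a placeholder):
    #   python3 build.py r3
    #   python3 build.py ship "release with the new fixes"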
| import sys
import os
import re
from glob import glob
from shutil import rmtree
from subprocess import run
from tools.pdocs import console, pdoc3serve, pdoc3, shipDocs
ORG = "annotation"
REPO = "text-fabric"
PKG = "tf"
PACKAGE = "text-fabric"
SCRIPT = "/Library/Frameworks/Python.framework/Versions/Current/bin/{PACKAGE}"
DIST = "dist"
VERSION_CONFIG = dict(
setup=dict(
file="setup.py",
re=re.compile(r"""version\s*=\s*['"]([^'"]*)['"]"""),
mask="version='{}'",
),
parameters=dict(
file="tf/parameters.py",
re=re.compile(r"""\bVERSION\s*=\s*['"]([^'"]*)['"]"""),
mask="VERSION = '{}'",
),
)
AN_BASE = os.path.expanduser(f"~/github/{ORG}")
TUT_BASE = f"{AN_BASE}/tutorials"
TF_BASE = f"{AN_BASE}/{REPO}"
TEST_BASE = f"{TF_BASE}/test"
APP_BASE = f"{TF_BASE}/apps"
currentVersion = None
newVersion = None
appPat = r"^.*/app-([^/]*)$"
appRe = re.compile(appPat)
apps = set()
for appDir in glob(f"{AN_BASE}/app-*"):
match = appRe.fullmatch(appDir)
if match:
apps.add(match.group(1))
apps = sorted(apps)
appStr = ", ".join(apps)
HELP = f"""
python3 build.py command
command:
-h
--help
docs : serve docs locally
pdocs : build docs
sdocs : ship docs
clean : clean local develop build
l : local develop build
lp : local production build
i : local non-develop build
g : push to github, code and docs
v : show current version
r1 : version becomes r1+1.0.0
r2 : version becomes r1.r2+1.0
r3 : version becomes r1.r2.r3+1
ship : build for shipping
apps : commit and push all tf apps
tut : commit and push the tutorials repo
a : open {PACKAGE} browser on specific dataset
({appStr})
t : run test suite (relations, qperf)
data : build data files for github release
For g and ship you need to pass a commit message.
For data you need to pass an app argument:
{appStr}
"""
def readArgs():
args = sys.argv[1:]
if not len(args) or args[0] in {"-h", "--help", "help"}:
console(HELP)
return (False, None, [])
arg = args[0]
if arg not in {
"a",
"t",
"docs",
"pdocs",
"sdocs",
"clean",
"l",
"lp",
"i",
"g",
"ship",
"data",
"apps",
"tut",
"v",
"r1",
"r2",
"r3",
}:
console(HELP)
return (False, None, [])
if arg in {"g", "apps", "tut", "ship"}:
if len(args) < 2:
console("Provide a commit message")
return (False, None, [])
return (arg, args[1], args[2:])
if arg in {"a", "t", "data"}:
if len(args) < 2:
if arg in {"a", "data"}:
console(f"Provide a data source [{appStr}]")
elif arg in {"t"}:
console("Provide a test suite [relations, qperf]")
return (False, None, [])
return (arg, args[1], args[2:])
return (arg, None, [])
def incVersion(version, task):
comps = [int(c) for c in version.split(".")]
(major, minor, update) = comps
if task == "r1":
major += 1
minor = 0
update = 0
elif task == "r2":
minor += 1
update = 0
elif task == "r3":
update += 1
return ".".join(str(c) for c in (major, minor, update))
def replaceVersion(task, mask):
def subVersion(match):
global currentVersion
global newVersion
currentVersion = match.group(1)
newVersion = incVersion(currentVersion, task)
return mask.format(newVersion)
return subVersion
def showVersion():
global currentVersion
versions = set()
for (key, c) in VERSION_CONFIG.items():
with open(c["file"]) as fh:
text = fh.read()
match = c["re"].search(text)
version = match.group(1)
console(f'{version} (according to {c["file"]})')
versions.add(version)
currentVersion = None
if len(versions) == 1:
currentVersion = list(versions)[0]
def adjustVersion(task):
for (key, c) in VERSION_CONFIG.items():
console(f'Adjusting version in {c["file"]}')
with open(c["file"]) as fh:
text = fh.read()
text = c["re"].sub(replaceVersion(task, c["mask"]), text)
with open(c["file"], "w") as fh:
fh.write(text)
if currentVersion == newVersion:
console(f"Rebuilding version {newVersion}")
else:
console(f"Replacing version {currentVersion} by {newVersion}")
def makeDist(pypi=True):
distFile = "{}-{}".format(PACKAGE, currentVersion)
distFileCompressed = f"{distFile}.tar.gz"
distPath = f"{DIST}/{distFileCompressed}"
distPath = f"{DIST}/*"
if os.path.exists(DIST):
rmtree(DIST)
os.makedirs(DIST, exist_ok=True)
# run(["python3", "setup.py", "sdist", "bdist_wheel"])
run(["python3", "setup.py", "bdist_wheel"])
if pypi:
run(["twine", "upload", "-u", "dirkroorda", distPath])
# run("./purge.sh", shell=True)
def commit(task, msg):
run(["git", "add", "--all", "."])
run(["git", "commit", "-m", msg])
run(["git", "push", "origin", "master"])
if task in {"ship"}:
tagVersion = f"v{currentVersion}"
commitMessage = f"Release {currentVersion}: {msg}"
run(["git", "tag", "-a", tagVersion, "-m", commitMessage])
run(["git", "push", "origin", "--tags"])
def commitApps(msg):
for app in apps:
os.chdir(f"{AN_BASE}/app-{app}")
console(f"In {os.getcwd()}")
run(["git", "add", "--all", "."])
run(["git", "commit", "-m", msg])
run(["git", "push", "origin", "master"])
os.chdir(f"{TF_BASE}")
def commitTut(msg):
os.chdir(f"{TUT_BASE}")
console(f"In {os.getcwd()}")
run(["git", "add", "--all", "."])
run(["git", "commit", "-m", msg])
run(["git", "push", "origin", "master"])
os.chdir(f"{TF_BASE}")
def tfbrowse(dataset, remaining):
rargs = " ".join(remaining)
cmdLine = f"{PACKAGE} {dataset} {rargs}"
try:
run(cmdLine, shell=True)
except KeyboardInterrupt:
pass
def tftest(suite, remaining):
suiteDir = f"{TEST_BASE}/generic"
suiteFile = f"{suite}.py"
good = True
try:
os.chdir(suiteDir)
except Exception:
good = False
console(f'Cannot find TF test directory "{suiteDir}"')
if not good:
return
if not os.path.exists(suiteFile):
console(f'Cannot find TF test suite "{suite}"')
return
rargs = " ".join(remaining)
cmdLine = f"python3 {suiteFile} -v {rargs}"
try:
run(cmdLine, shell=True)
except KeyboardInterrupt:
pass
def clean():
run(["python3", "setup.py", "develop", "-u"])
if os.path.exists(SCRIPT):
os.unlink(SCRIPT)
run(["pip3", "uninstall", "-y", PACKAGE])
def main():
(task, msg, remaining) = readArgs()
if not task:
return
elif task == "a":
tfbrowse(msg, remaining)
elif task == "t":
tftest(msg, remaining)
elif task == "docs":
pdoc3serve(PKG)
elif task == "pdocs":
pdoc3(PKG)
elif task == "sdocs":
shipDocs(ORG, REPO, PKG)
elif task == "clean":
clean()
elif task == "l":
clean()
run(["python3", "setup.py", "develop"])
elif task == "lp":
clean()
run(["python3", "setup.py", "sdist"])
distFiles = glob(f"dist/{PACKAGE}-*.tar.gz")
run(["pip3", "install", distFiles[0]])
elif task == "i":
clean()
makeDist(pypi=False)
run(
[
"pip3",
"install",
"--upgrade",
"--no-index",
"--find-links",
f'file://{TF_BASE}/dist',
PACKAGE,
]
)
elif task == "g":
shipDocs(ORG, REPO, PKG)
commit(task, msg)
elif task == "apps":
commitApps(msg)
elif task == "tut":
commitTut(msg)
elif task == "v":
showVersion()
elif task in {"r", "r1", "r2", "r3"}:
adjustVersion(task)
elif task == "ship":
showVersion()
if not currentVersion:
console("No current version")
return
answer = input("right version ? [yn]")
if answer != "y":
return
shipDocs(ORG, REPO, PKG)
makeDist()
commit(task, msg)
main()
|
from typing import Optional, Union
import arrow
import discord
from dateutil import parser
from discord.ext import commands
from bot.bot import Bot
from bot.utils.extensions import invoke_help_command
# https://discord.com/developers/docs/reference#message-formatting-timestamp-styles
STYLES = {
"Epoch": ("",),
"Short Time": ("t", "h:mm A",),
"Long Time": ("T", "h:mm:ss A"),
"Short Date": ("d", "MM/DD/YYYY"),
"Long Date": ("D", "MMMM D, YYYY"),
"Short Date/Time": ("f", "MMMM D, YYYY h:mm A"),
"Long Date/Time": ("F", "dddd, MMMM D, YYYY h:mm A"),
"Relative Time": ("R",)
}
DROPDOWN_TIMEOUT = 60
class DateString(commands.Converter):
"""Convert a relative or absolute date/time string to an arrow.Arrow object."""
async def convert(self, ctx: commands.Context, argument: str) -> Union[arrow.Arrow, Optional[tuple]]:
"""
Convert a relative or absolute date/time string to an arrow.Arrow object.
Try to interpret the date string as a relative time. If conversion fails, try to interpret it as an absolute
time. Tokens that are not recognised are returned along with the part of the string that was successfully
converted to an arrow object. If the date string cannot be parsed, BadArgument is raised.
"""
try:
return arrow.utcnow().dehumanize(argument)
except (ValueError, OverflowError):
try:
dt, ignored_tokens = parser.parse(argument, fuzzy_with_tokens=True)
except parser.ParserError:
raise commands.BadArgument(f"`{argument}` Could not be parsed to a relative or absolute date.")
except OverflowError:
raise commands.BadArgument(f"`{argument}` Results in a date outside of the supported range.")
return arrow.get(dt), ignored_tokens
class Epoch(commands.Cog):
"""Convert an entered time and date to a unix timestamp."""
@commands.command(name="epoch")
async def epoch(self, ctx: commands.Context, *, date_time: DateString = None) -> None:
"""
Convert an entered date/time string to the equivalent epoch.
**Relative time**
Must begin with `in...` or end with `...ago`.
Accepted units: "seconds", "minutes", "hours", "days", "weeks", "months", "years".
eg `.epoch in a month 4 days and 2 hours`
**Absolute time**
eg `.epoch 2022/6/15 16:43 -04:00`
Absolute times must be entered in descending orders of magnitude.
If AM or PM is left unspecified, the 24-hour clock is assumed.
Timezones are optional, and will default to UTC. The following timezone formats are accepted:
Z (UTC)
±HH:MM
±HHMM
±HH
Times in the dropdown are shown in UTC
"""
if not date_time:
await invoke_help_command(ctx)
return
if isinstance(date_time, tuple):
# Remove empty strings. Strip extra whitespace from the remaining items
ignored_tokens = list(map(str.strip, filter(str.strip, date_time[1])))
date_time = date_time[0]
if ignored_tokens:
await ctx.send(f"Could not parse the following token(s): `{", ".join(ignored_tokens)}`")
await ctx.send(f"Date and time parsed as: `{date_time.format(arrow.FORMAT_RSS)}`")
epoch = int(date_time.timestamp())
view = TimestampMenuView(ctx, self._format_dates(date_time), epoch)
original = await ctx.send(f"`{epoch}`", view=view)
await view.wait() # wait until expiration before removing the dropdown
await original.edit(view=None)
@staticmethod
def _format_dates(date: arrow.Arrow) -> list[str]:
"""
Return a list of date strings formatted according to the discord timestamp styles.
These are used in the description of each style in the dropdown
"""
date = date.to('utc')
formatted = [str(int(date.timestamp()))]
formatted += [date.format(format[1]) for format in list(STYLES.values())[1:7]]
formatted.append(date.humanize())
return formatted
class TimestampMenuView(discord.ui.View):
"""View for the epoch command which contains a single `discord.ui.Select` dropdown component."""
def __init__(self, ctx: commands.Context, formatted_times: list[str], epoch: int):
super().__init__(timeout=DROPDOWN_TIMEOUT)
self.ctx = ctx
self.epoch = epoch
self.dropdown: discord.ui.Select = self.children[0]
for label, date_time in zip(STYLES.keys(), formatted_times):
self.dropdown.add_option(label=label, description=date_time)
@discord.ui.select(placeholder="Select the format of your timestamp")
async def select_format(self, _: discord.ui.Select, interaction: discord.Interaction) -> discord.Message:
"""Drop down menu which contains a list of formats which discord timestamps can take."""
selected = interaction.data["values"][0]
if selected == "Epoch":
return await interaction.response.edit_message(content=f"`{self.epoch}`")
return await interaction.response.edit_message(content=f"`<t:{self.epoch}:{STYLES[selected][0]}>`")
async def interaction_check(self, interaction: discord.Interaction) -> bool:
"""Check to ensure that the interacting user is the user who invoked the command."""
if interaction.user != self.ctx.author:
embed = discord.Embed(description="Sorry, but this dropdown menu can only be used by the original author.")
await interaction.response.send_message(embed=embed, ephemeral=True)
return False
return True
def setup(bot: Bot) -> None:
"""Load the Epoch cog."""
bot.add_cog(Epoch())
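# --- Illustrative sketch (editor's addition, not part of the original cog) ---
# What the dropdown above ultimately emits: `<t:epoch:style>` tokens that Discord
# renders client-side. The epoch value below is an arbitrary example.
def _example_timestamp_tokens(epoch: int = 1655325780) -> list[str]:
    # One token per non-Epoch style in STYLES, e.g. "<t:1655325780:R>" for Relative Time
    return [f"<t:{epoch}:{spec[0]}>" for label, spec in STYLES.items() if label != "Epoch"]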
| from typing import Optional, Union
import arrow
import discord
from dateutil import parser
from discord.ext import commands
from bot.bot import Bot
from bot.utils.extensions import invoke_help_command
# https://discord.com/developers/docs/reference#message-formatting-timestamp-styles
STYLES = {
"Epoch": ("",),
"Short Time": ("t", "h:mm A",),
"Long Time": ("T", "h:mm:ss A"),
"Short Date": ("d", "MM/DD/YYYY"),
"Long Date": ("D", "MMMM D, YYYY"),
"Short Date/Time": ("f", "MMMM D, YYYY h:mm A"),
"Long Date/Time": ("F", "dddd, MMMM D, YYYY h:mm A"),
"Relative Time": ("R",)
}
DROPDOWN_TIMEOUT = 60
class DateString(commands.Converter):
"""Convert a relative or absolute date/time string to an arrow.Arrow object."""
async def convert(self, ctx: commands.Context, argument: str) -> Union[arrow.Arrow, Optional[tuple]]:
"""
Convert a relative or absolute date/time string to an arrow.Arrow object.
Try to interpret the date string as a relative time. If conversion fails, try to interpret it as an absolute
time. Tokens that are not recognised are returned along with the part of the string that was successfully
converted to an arrow object. If the date string cannot be parsed, BadArgument is raised.
"""
try:
return arrow.utcnow().dehumanize(argument)
except (ValueError, OverflowError):
try:
dt, ignored_tokens = parser.parse(argument, fuzzy_with_tokens=True)
except parser.ParserError:
raise commands.BadArgument(f"`{argument}` Could not be parsed to a relative or absolute date.")
except OverflowError:
raise commands.BadArgument(f"`{argument}` Results in a date outside of the supported range.")
return arrow.get(dt), ignored_tokens
class Epoch(commands.Cog):
"""Convert an entered time and date to a unix timestamp."""
@commands.command(name="epoch")
async def epoch(self, ctx: commands.Context, *, date_time: DateString = None) -> None:
"""
Convert an entered date/time string to the equivalent epoch.
**Relative time**
Must begin with `in...` or end with `...ago`.
Accepted units: "seconds", "minutes", "hours", "days", "weeks", "months", "years".
eg `.epoch in a month 4 days and 2 hours`
**Absolute time**
eg `.epoch 2022/6/15 16:43 -04:00`
Absolute times must be entered in descending orders of magnitude.
If AM or PM is left unspecified, the 24-hour clock is assumed.
Timezones are optional, and will default to UTC. The following timezone formats are accepted:
Z (UTC)
±HH:MM
±HHMM
±HH
Times in the dropdown are shown in UTC
"""
if not date_time:
await invoke_help_command(ctx)
return
if isinstance(date_time, tuple):
# Remove empty strings. Strip extra whitespace from the remaining items
ignored_tokens = list(map(str.strip, filter(str.strip, date_time[1])))
date_time = date_time[0]
if ignored_tokens:
await ctx.send(f"Could not parse the following token(s): `{', '.join(ignored_tokens)}`")
await ctx.send(f"Date and time parsed as: `{date_time.format(arrow.FORMAT_RSS)}`")
epoch = int(date_time.timestamp())
view = TimestampMenuView(ctx, self._format_dates(date_time), epoch)
original = await ctx.send(f"`{epoch}`", view=view)
await view.wait() # wait until expiration before removing the dropdown
await original.edit(view=None)
@staticmethod
def _format_dates(date: arrow.Arrow) -> list[str]:
"""
Return a list of date strings formatted according to the discord timestamp styles.
These are used in the description of each style in the dropdown
"""
date = date.to('utc')
formatted = [str(int(date.timestamp()))]
formatted += [date.format(format[1]) for format in list(STYLES.values())[1:7]]
formatted.append(date.humanize())
return formatted
class TimestampMenuView(discord.ui.View):
"""View for the epoch command which contains a single `discord.ui.Select` dropdown component."""
def __init__(self, ctx: commands.Context, formatted_times: list[str], epoch: int):
super().__init__(timeout=DROPDOWN_TIMEOUT)
self.ctx = ctx
self.epoch = epoch
self.dropdown: discord.ui.Select = self.children[0]
for label, date_time in zip(STYLES.keys(), formatted_times):
self.dropdown.add_option(label=label, description=date_time)
@discord.ui.select(placeholder="Select the format of your timestamp")
async def select_format(self, _: discord.ui.Select, interaction: discord.Interaction) -> discord.Message:
"""Drop down menu which contains a list of formats which discord timestamps can take."""
selected = interaction.data["values"][0]
if selected == "Epoch":
return await interaction.response.edit_message(content=f"`{self.epoch}`")
return await interaction.response.edit_message(content=f"`<t:{self.epoch}:{STYLES[selected][0]}>`")
async def interaction_check(self, interaction: discord.Interaction) -> bool:
"""Check to ensure that the interacting user is the user who invoked the command."""
if interaction.user != self.ctx.author:
embed = discord.Embed(description="Sorry, but this dropdown menu can only be used by the original author.")
await interaction.response.send_message(embed=embed, ephemeral=True)
return False
return True
def setup(bot: Bot) -> None:
"""Load the Epoch cog."""
bot.add_cog(Epoch())
|
"""Module for wrapping Jina Hub API calls."""
import argparse
import glob
import json
import time
import urllib.parse
import urllib.request
import webbrowser
from typing import Dict, Any, List
from .checker import *
from .helper import credentials_file
from .hubapi.local import _list_local, _load_local_hub_manifest
from .hubapi.remote import _list, _register_to_mongodb, _fetch_docker_auth
from .. import __version__ as jina_version, __resources_path__
from ..enums import BuildTestLevel
from ..excepts import (
HubBuilderError,
HubBuilderBuildError,
HubBuilderTestError,
HubLoginRequired,
ImageAlreadyExists,
)
from ..executors import BaseExecutor
from ..flow import Flow
from ..helper import (
colored,
get_readable_size,
get_now_timestamp,
get_full_version,
random_name,
expand_dict,
countdown,
)
from ..importer import ImportExtensions
from ..logging.logger import JinaLogger
from ..logging.profile import TimeContext, ProgressBar
from ..parsers import set_pod_parser
from ..peapods import Pod
_allowed = {
'name',
'description',
'author',
'url',
'documentation',
'version',
'vendor',
'license',
'avatar',
'platform',
'update',
'keywords',
}
_label_prefix = 'ai.jina.hub.'
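# --- Illustrative sketch (editor's addition, not part of the original module) ---
# How manifest keys end up as docker image labels: each key gets the _label_prefix,
# mirroring the label construction inside HubIO.build() further down. Restricting to
# _allowed keys is an extra illustrative filter, not necessarily the original behaviour.
def _example_manifest_labels(manifest: dict) -> dict:
    # e.g. {'name': 'dummy_mwu_encoder'} -> {'ai.jina.hub.name': 'dummy_mwu_encoder'}
    return {_label_prefix + k: str(v) for k, v in manifest.items() if k in _allowed}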
class HubIO:
""":class:`HubIO` provides the way to interact with Jina Hub registry.
You can use it with CLI to package a directory into a Jina Hub image and publish it to the world.
Examples:
- :command:`jina hub build my_pod/` build the image
- :command:`jina hub build my_pod/ --push` build the image and push to the public registry
- :command:`jina hub pull jinahub/pod.dummy_mwu_encoder:0.0.6` to download the image
"""
def __init__(self, args: 'argparse.Namespace'):
"""Create a new HubIO.
:param args: arguments
"""
self.logger = JinaLogger(self.__class__.__name__, **vars(args))
self.args = args
self._load_docker_client()
def _load_docker_client(self):
with ImportExtensions(
required=False,
help_text='missing "docker" dependency, available CLIs limited to "jina hub [list, new]"'
' to enable full CLI, please do pip install "jina[docker]"',
):
import docker
from docker import APIClient, DockerClient
self._client: DockerClient = docker.from_env()
# low-level client
self._raw_client = APIClient(base_url='unix://var/run/docker.sock')
def new(self, no_input: bool = False) -> None:
"""
Create a new executor using cookiecutter template.
:param no_input: Argument to avoid prompting dialogue (just to be used for testing)
"""
with ImportExtensions(required=True):
from cookiecutter.main import cookiecutter
import click # part of cookiecutter
cookiecutter_template = self.args.template
if self.args.type == 'app':
cookiecutter_template = 'https://github.com/jina-ai/cookiecutter-jina.git'
elif self.args.type == 'pod':
cookiecutter_template = (
'https://github.com/jina-ai/cookiecutter-jina-hub.git'
)
try:
cookiecutter(
template=cookiecutter_template,
overwrite_if_exists=self.args.overwrite,
output_dir=self.args.output_dir,
no_input=no_input,
)
except click.exceptions.Abort:
self.logger.info('nothing is created, bye!')
def login(self) -> None:
"""Login using Github Device flow to allow push access to Jina Hub Registry."""
import requests
with open(os.path.join(__resources_path__, 'hubapi.yml')) as fp:
hubapi_yml = JAML.load(fp)
client_id = hubapi_yml['github']['client_id']
scope = hubapi_yml['github']['scope']
device_code_url = hubapi_yml['github']['device_code_url']
access_token_url = hubapi_yml['github']['access_token_url']
grant_type = hubapi_yml['github']['grant_type']
login_max_retry = hubapi_yml['github']['login_max_retry']
headers = {'Accept': 'application/json'}
code_request_body = {'client_id': client_id, 'scope': scope}
try:
self.logger.info(
'Jina Hub login will use Github Device to generate one time token'
)
response = requests.post(
url=device_code_url, headers=headers, data=code_request_body
)
if response.status_code != requests.codes.ok:
self.logger.error(
'cannot reach github server. please make sure you\'re connected to internet'
)
code_response = response.json()
device_code = code_response['device_code']
user_code = code_response['user_code']
verification_uri = code_response['verification_uri']
try:
self.logger.info(
f'You should see a "Device Activation" page open in your browser. '
f'If not, please go to {colored(verification_uri, "cyan", attrs=["underline"])}'
)
self.logger.info(
'Please follow the steps:\n\n'
f'1. Enter the following code to that page: {colored(user_code, "cyan", attrs=["bold"])}\n'
'2. Click "Continue"\n'
'3. Come back to this terminal\n'
)
# allowing sometime for the user to view the message
time.sleep(1)
webbrowser.open(verification_uri, new=2)
except:
pass # intentional pass, browser support isn't cross-platform
access_request_body = {
'client_id': client_id,
'device_code': device_code,
'grant_type': grant_type,
}
for _ in range(login_max_retry):
access_token_response = requests.post(
url=access_token_url, headers=headers, data=access_request_body
).json()
if access_token_response.get('error', None) == 'authorization_pending':
self.logger.warning('still waiting for authorization')
countdown(
10,
reason=colored(
're-fetch access token', 'cyan', attrs=['bold', 'reverse']
),
)
elif 'access_token' in access_token_response:
token = {'access_token': access_token_response['access_token']}
with open(credentials_file(), 'w') as cf:
JAML.dump(token, cf)
self.logger.success(f'✅ Successfully logged in!')
break
else:
self.logger.error(f'❌ Max retries {login_max_retry} reached')
except KeyError as exp:
self.logger.error(f'❌ Can not read the key in response: {exp}')
def list(self) -> Optional[List[Dict[str, Any]]]:
"""List all hub images given a filter specified by CLI.
:return: list of dictionaries of images
"""
if self.args.local_only:
return _list_local(self.logger)
else:
return _list(
logger=self.logger,
image_name=self.args.name,
image_kind=self.args.kind,
image_type=self.args.type,
image_keywords=self.args.keywords,
)
def push(
self,
name: Optional[str] = None,
build_result: Optional[Dict] = None,
) -> None:
"""Push image to Jina Hub.
:param name: name of image
:param build_result: dictionary containing the build summary
:return: None
"""
name = name or self.args.name
try:
# check if image exists
# fail if it does
if (
self.args.no_overwrite
and build_result
and self._image_version_exists(
build_result['manifest_info']['name'],
build_result['manifest_info']['version'],
jina_version,
)
):
raise ImageAlreadyExists(
f'Image with name {name} already exists. Will NOT overwrite.'
)
else:
self.logger.debug(
f'Image with name {name} does not exist. Pushing now...'
)
self._push_docker_hub(name)
if not build_result:
file_path = get_summary_path(name)
if os.path.isfile(file_path):
with open(file_path) as f:
build_result = json.load(f)
else:
self.logger.error(
f'can not find the build summary file.'
f'please use "jina hub build" to build the image first '
f'before pushing.'
)
return
if build_result:
if build_result.get('is_build_success', False):
_register_to_mongodb(logger=self.logger, summary=build_result)
if build_result.get('details', None) and build_result.get(
'build_history', None
):
self._write_slack_message(
build_result,
build_result['details'],
build_result['build_history'],
)
except Exception as e:
self.logger.error(f'Error when trying to push image {name}: {e!r}')
if isinstance(e, (ImageAlreadyExists, HubLoginRequired)):
raise e
def _push_docker_hub(self, name: Optional[str] = None) -> None:
"""Push to Docker Hub.
:param name: name of image
"""
check_registry(self.args.registry, name, self.args.repository)
self._check_docker_image(name)
self._docker_login()
with ProgressBar(task_name=f'pushing {name}', batch_unit='') as t:
for line in self._client.images.push(name, stream=True, decode=True):
t.update(1)
self.logger.debug(line)
self.logger.success(f'🎉 {name} is now published!')
share_link = f'https://api.jina.ai/hub/?jh={urllib.parse.quote_plus(name)}'
try:
webbrowser.open(share_link, new=2)
except:
# pass intentionally, dont want to bother users on opening browser failure
pass
finally:
self.logger.info(
f'Check out the usage {colored(share_link, "cyan", attrs=["underline"])} and share it with others!'
)
def pull(self) -> None:
"""Pull docker image."""
check_registry(self.args.registry, self.args.name, self.args.repository)
try:
self._docker_login()
with TimeContext(f'pulling {self.args.name}', self.logger):
image = self._client.images.pull(self.args.name)
if isinstance(image, list):
image = image[0]
image_tag = image.tags[0] if image.tags else ''
self.logger.success(
f'🎉 pulled {image_tag} ({image.short_id}) uncompressed size: {get_readable_size(image.attrs["Size"])}'
)
except Exception as ex:
self.logger.error(
f'can not pull image {self.args.name} from {self.args.registry} due to {ex!r}'
)
def _check_docker_image(self, name: str) -> None:
# check local image
image = self._client.images.get(name)
for r in _allowed:
if f'{_label_prefix}{r}' not in image.labels.keys():
self.logger.warning(
f'{r} is missing in your docker image labels, you may want to check it'
)
try:
image.labels['ai.jina.hub.jina_version'] = jina_version
label_info = (
f'{self.args.repository}/'
+ '{type}.{kind}.{name}:{version}-{jina_version}'.format(
**{k.replace(_label_prefix, ''): v for k, v in image.labels.items()}
)
)
safe_name = safe_url_name(label_info)
if name != safe_name:
raise ValueError(
f'image {name} does not match with label info in the image. name should be {safe_name}'
)
except KeyError as e:
self.logger.error(f'missing key in the label of the image {repr(e)}')
raise
self.logger.info(f'✅ {name} is a valid Jina Hub image, ready to publish')
def _docker_login(self) -> None:
"""Log-in to Docker."""
from docker.errors import APIError
if not (self.args.username and self.args.password):
self.args.username, self.args.password = _fetch_docker_auth(
logger=self.logger
)
try:
self._client.login(
username=self.args.username,
password=self.args.password,
registry=self.args.registry,
)
self.logger.success(f'✅ Successfully logged in to docker hub')
except APIError:
raise HubLoginRequired(
f'❌ Invalid docker credentials passed. docker login failed'
)
def build(self) -> Dict:
"""
Perform a build of the Docker image.
:return: dictionary with information on image (manifest)
"""
if self.args.dry_run:
result = self.dry_run()
else:
is_build_success, is_push_success = True, False
_logs = []
_except_strs = []
_excepts = []
with TimeContext(
f'building {colored(self.args.path, "green")}', self.logger
) as tc:
try:
_check_result = self._check_completeness()
self._freeze_jina_version()
_dockerfile = os.path.basename(_check_result['Dockerfile'])
_labels = {
_label_prefix + k: str(v) for k, v in self.manifest.items()
}
streamer = self._raw_client.build(
decode=True,
path=self.args.path,
tag=self.tag,
pull=self.args.pull,
dockerfile=_dockerfile,
labels=_labels,
rm=True,
)
for chunk in streamer:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
if is_error_message(line):
self.logger.critical(line)
_except_strs.append(line)
elif 'warning' in line.lower():
self.logger.warning(line)
else:
self.logger.info(line)
_logs.append(line)
except Exception as ex:
# if pytest fails it should end up here as well
self.logger.error(ex)
is_build_success = False
ex = HubBuilderBuildError(ex)
_except_strs.append(repr(ex))
_excepts.append(ex)
if is_build_success:
# compile it again, but this time don't show the log
image, log = self._client.images.build(
path=self.args.path,
tag=self.tag,
pull=self.args.pull,
dockerfile=_dockerfile,
labels=_labels,
rm=True,
)
# success
_details = {
'inspect': self._raw_client.inspect_image(image.tags[0]),
'tag': image.tags[0],
'hash': image.short_id,
'size': get_readable_size(image.attrs['Size']),
}
self.logger.success(
'🎉 built {tag} ({hash}) uncompressed size: {size}'.format_map(
_details
)
)
else:
self.logger.error(f'can not build the image due to {_except_strs}')
_details = {}
if is_build_success:
if self.args.test_uses:
p_names = []
try:
is_build_success = False
p_names, failed_test_levels = HubIO._test_build(
image,
self.args.test_level,
self.config_yaml_path,
self.args.timeout_ready,
self.args.daemon,
self.logger,
)
if any(
test_level in failed_test_levels
for test_level in [
BuildTestLevel.POD_DOCKER,
BuildTestLevel.FLOW,
]
):
is_build_success = False
self.logger.error(
f'build unsuccessful, failed at {str(failed_test_levels)} level'
)
else:
is_build_success = True
self.logger.warning(
f'Build successful. Tests failed at : {str(failed_test_levels)} levels. '
f'This could be due to the fact that the executor has non-installed external dependencies'
)
except Exception as ex:
self.logger.error(
f'something wrong while testing the build: {ex!r}'
)
ex = HubBuilderTestError(ex)
_except_strs.append(repr(ex))
_excepts.append(ex)
finally:
if self.args.daemon:
try:
for p in p_names:
self._raw_client.stop(p)
except:
pass # suppress on purpose
self._raw_client.prune_containers()
info, env_info = get_full_version()
_host_info = {
'jina': info,
'jina_envs': env_info,
'docker': self._raw_client.info(),
'build_args': vars(self.args),
}
_build_history = {
'time': get_now_timestamp(),
'host_info': _host_info
if is_build_success and self.args.host_info
else '',
'duration': tc.readable_duration,
'logs': _logs,
'exception': _except_strs,
}
if self.args.prune_images:
self.logger.info('deleting unused images')
self._raw_client.prune_images()
# since db tracks `version` & `jina_version` on the top level, let's get rid of them in `manifest`
if is_build_success:
_version = self.manifest['version']
self.manifest.pop('version', None)
self.manifest.pop('jina_version', None)
else:
_version = '0.0.1'
result = {
'name': self.executor_name if is_build_success else '',
'version': _version,
'jina_version': jina_version,
'path': self.args.path,
'manifest_info': self.manifest if is_build_success else '',
'details': _details,
'is_build_success': is_build_success,
'build_history': _build_history,
}
# only successful build (NOT dry run) writes the summary to disk
if result['is_build_success']:
self._write_summary_to_file(summary=result)
if self.args.push:
self.push(image.tags[0], result)
if not result['is_build_success'] and self.args.raise_error:
# remove the very verbose build log when throw error
if 'build_history' in result:
result['build_history'].pop('logs', None)
raise HubBuilderError(_excepts)
return result
@staticmethod
def _test_build(
image, # type docker image object
test_level: 'BuildTestLevel',
config_yaml_path: str,
timeout_ready: int,
daemon_arg: bool,
logger: 'JinaLogger',
):
p_names = []
failed_levels = []
logger.info(f'run tests using test level {test_level}')
# test uses at executor level
if test_level >= BuildTestLevel.EXECUTOR:
logger.info(
f'test to initialize an executor from yaml configuration: {config_yaml_path}'
)
try:
with BaseExecutor.load_config(config_yaml_path):
pass
logger.info(f'successfully tested to initialize an executor')
except:
logger.error(f'failed to initialize an executor')
failed_levels.append(BuildTestLevel.EXECUTOR)
# test uses at Pod level (no docker)
if test_level >= BuildTestLevel.POD_NONDOCKER:
logger.info(
f'test to initialize a pod from yaml configuration: {config_yaml_path}'
)
try:
with Pod(
set_pod_parser().parse_args(
[
'--uses',
config_yaml_path,
'--timeout-ready',
str(timeout_ready),
]
)
):
pass
logger.info(
f'successfully tested to initialize a pod from yaml configuration'
)
except:
logger.error(f'failed to initialize a pod')
failed_levels.append(BuildTestLevel.POD_NONDOCKER)
# test uses at Pod level (with docker)
if test_level >= BuildTestLevel.POD_DOCKER:
p_name = random_name()
logger.info(
f'test to initialize a pod via docker image {image.tags[0]} named {p_name}'
)
try:
with Pod(
set_pod_parser().parse_args(
[
'--uses',
f'docker://{image.tags[0]}',
'--name',
p_name,
'--timeout-ready',
str(timeout_ready),
]
+ ['--daemon']
if daemon_arg
else []
)
):
pass
p_names.append(p_name)
logger.info(f'successfully tested to initialize a pod via docker')
except:
logger.error(f'failed to initialize a pod via docker image')
failed_levels.append(BuildTestLevel.POD_DOCKER)
# test uses at Flow level
if test_level >= BuildTestLevel.FLOW:
p_name = random_name()
logger.info(
f'test to build a flow from docker image {image.tags[0]} named {p_name} '
f'with daemon={daemon_arg} and timeout_ready={timeout_ready}'
)
try:
with Flow().add(
name=p_name,
uses=f'docker://{image.tags[0]}',
daemon=daemon_arg,
timeout_ready=timeout_ready,
):
pass
p_names.append(p_name)
logger.info('successfully tested to build a flow from docker image')
except:
logger.error(f'failed to build a flow from docker image')
failed_levels.append(BuildTestLevel.FLOW)
return p_names, failed_levels
def dry_run(self) -> Dict:
"""
Perform a dry-run.
:return: a dict with the manifest info.
"""
try:
s = self._check_completeness()
s['is_build_success'] = True
except Exception as ex:
s = {'is_build_success': False, 'exception': str(ex)}
return s
def _write_summary_to_file(self, summary: Dict) -> None:
file_path = get_summary_path(f'{summary["name"]}:{summary["version"]}')
with open(file_path, 'w+') as f:
json.dump(summary, f)
self.logger.debug(f'stored the summary from build to {file_path}')
def _freeze_jina_version(self) -> None:
import pkg_resources
requirements_path = get_exist_path(self.args.path, 'requirements.txt')
if requirements_path and os.path.exists(requirements_path):
new_requirements = []
update = False
with open(requirements_path, 'r') as fp:
try:
requirements = pkg_resources.parse_requirements(fp)
for req in requirements:
if 'jina' in str(req):
update = True
self.logger.info(f'Freezing jina version to {jina_version}')
new_requirements.append(f'jina=={jina_version}')
else:
new_requirements.append(str(req))
except:
pass
if update:
with open(requirements_path, 'w') as fp:
fp.write('\n'.join(new_requirements))
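# Example of the rewrite performed by _freeze_jina_version (illustrative only; the
# pinned version is whatever `jina_version` is at build time, 1.0.1 is a placeholder):
#
#     requirements.txt before:        requirements.txt after:
#         numpy>=1.19                     numpy>=1.19
#         jina                            jina==1.0.1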
@staticmethod
def _alias_to_local_path(alias: str):
"""
Convert user given alias to the actual local path of the image, if fails return the original
:param alias: the name of the hub image, given by user
:return: the local path of the hub image, if not matched then return the original input
"""
all_local_images = _load_local_hub_manifest()
if alias in all_local_images:
return all_local_images[alias]['source_path']
else:
return alias
@staticmethod
def _alias_to_docker_image_name(alias: str):
"""
Convert user given alias to the actual image tag, if fails return the original
:param alias: the name of the hub image, given by user
:return: the actual image tag, if not matched then return the original input
"""
all_local_images = _load_local_hub_manifest()
if alias in all_local_images:
return all_local_images[alias]['image_tag']
else:
return alias
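# Illustrative shape of the mapping returned by _load_local_hub_manifest(), as consumed
# by the two alias helpers above (the 'source_path' and 'image_tag' keys are the ones
# read here; the concrete values are placeholders):
#
#     {
#         'my_encoder': {
#             'source_path': '/path/to/hub/my_encoder',
#             'image_tag': 'jinahub/pod.encoder.my_encoder:0.0.1-1.0.1',
#         },
#     }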
def _check_completeness(self) -> Dict:
self.args.path = self._alias_to_local_path(self.args.path)
dockerfile_path = get_exist_path(self.args.path, self.args.file)
manifest_path = get_exist_path(self.args.path, 'manifest.yml')
self.config_yaml_path = get_exist_path(self.args.path, 'config.yml')
readme_path = get_exist_path(self.args.path, 'README.md')
requirements_path = get_exist_path(self.args.path, 'requirements.txt')
yaml_glob = set(glob.glob(os.path.join(self.args.path, '*.yml')))
yaml_glob.difference_update({manifest_path, self.config_yaml_path})
if not self.config_yaml_path:
self.config_yaml_path = yaml_glob.pop()
py_glob = glob.glob(os.path.join(self.args.path, '*.py'))
test_glob = glob.glob(os.path.join(self.args.path, 'tests/test_*.py'))
completeness = {
'Dockerfile': dockerfile_path,
'manifest.yml': manifest_path,
'config.yml': self.config_yaml_path,
'README.md': readme_path,
'requirements.txt': requirements_path,
'*.yml': yaml_glob,
'*.py': py_glob,
'tests': test_glob,
}
self.logger.info(
f'completeness check\n'
+ '\n'.join(
f'{colored("✓", "green") if v else colored("✗", "red"):>4} {k:<20} {v}'
for k, v in completeness.items()
)
+ '\n'
)
if not (completeness['Dockerfile'] and completeness['manifest.yml']):
self.logger.critical(
'Dockerfile or manifest.yml is not given, can not build'
)
raise FileNotFoundError(
'Dockerfile or manifest.yml is not given, can not build'
)
self.manifest = self._read_manifest(manifest_path)
self.manifest['jina_version'] = jina_version
self.executor_name = safe_url_name(
f'{self.args.repository}/'
+ f'{self.manifest["type"]}.{self.manifest["kind"]}.{self.manifest["name"]}'
)
self.tag = self.executor_name + f':{self.manifest["version"]}-{jina_version}'
return completeness
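# Example of the names computed above (illustrative values only): with repository
# 'jinahub' and a manifest declaring type=pod, kind=encoder, name=my_encoder,
# version=0.0.1 under jina 1.0.1, this yields roughly
#
#     self.executor_name == 'jinahub/pod.encoder.my_encoder'
#     self.tag           == 'jinahub/pod.encoder.my_encoder:0.0.1-1.0.1'
#
# modulo whatever normalisation safe_url_name applies.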
def _read_manifest(self, path: str, validate: bool = True) -> Dict:
with open(
os.path.join(__resources_path__, 'hub-builder', 'manifest.yml')
) as fp:
tmp = JAML.load(
fp
) # do not expand variables at here, i.e. DO NOT USE expand_dict(yaml.load(fp))
with open(path) as fp:
tmp.update(JAML.load(fp))
if validate:
self._validate_manifest(tmp)
return tmp
def _validate_manifest(self, manifest: Dict) -> None:
required = {'name', 'type', 'version'}
# check the required field in manifest
for r in required:
if r not in manifest:
raise ValueError(f'{r} is missing in the manifest.yaml, it is required')
# check if all fields are there
for r in _allowed:
if r not in manifest:
self.logger.warning(
f'{r} is missing in your manifest.yml, you may want to check it'
)
# check name
check_name(manifest['name'])
# check_image_type
check_image_type(manifest['type'])
# check version number
check_version(manifest['version'])
# check license
check_license(manifest['license'])
# check platform
if not isinstance(manifest['platform'], list):
manifest['platform'] = list(manifest['platform'])
check_platform(manifest['platform'])
# replace all chars in value to safe chars
for k, v in manifest.items():
if v and isinstance(v, str):
manifest[k] = remove_control_characters(v)
# show manifest key-values
for k, v in manifest.items():
self.logger.debug(f'{k}: {v}')
def _write_slack_message(self, *args):
def _expand_fn(v):
if isinstance(v, str):
for d in args:
try:
v = v.format(**d)
except KeyError:
pass
return v
if 'JINAHUB_SLACK_WEBHOOK' in os.environ:
with open(
os.path.join(
__resources_path__, 'hub-builder-success', 'slack-jinahub.json'
)
) as fp:
tmp = expand_dict(json.load(fp), _expand_fn, resolve_cycle_ref=False)
req = urllib.request.Request(os.environ['JINAHUB_SLACK_WEBHOOK'])
req.add_header('Content-Type', 'application/json; charset=utf-8')
jdb = json.dumps(tmp).encode('utf-8') # needs to be bytes
req.add_header('Content-Length', str(len(jdb)))
with urllib.request.urlopen(req, jdb) as f:
res = f.read()
self.logger.info(f'push to Slack: {res}')
# alias of "new" in cli
create = new
init = new
def _image_version_exists(self, name, module_version, req_jina_version):
manifests = _list(self.logger, name)
# check if matching module version and jina version exists
if manifests:
matching = [
m
for m in manifests
if m['version'] == module_version
and 'jina_version' in m.keys()
and m['jina_version'] == req_jina_version
]
return len(matching) > 0
return False
| """Module for wrapping Jina Hub API calls."""
import argparse
import glob
import json
import time
import urllib.parse
import urllib.request
import webbrowser
from typing import Dict, Any, List
from .checker import *
from .helper import credentials_file
from .hubapi.local import _list_local, _load_local_hub_manifest
from .hubapi.remote import _list, _register_to_mongodb, _fetch_docker_auth
from .. import __version__ as jina_version, __resources_path__
from ..enums import BuildTestLevel
from ..excepts import (
HubBuilderError,
HubBuilderBuildError,
HubBuilderTestError,
HubLoginRequired,
ImageAlreadyExists,
)
from ..executors import BaseExecutor
from ..flow import Flow
from ..helper import (
colored,
get_readable_size,
get_now_timestamp,
get_full_version,
random_name,
expand_dict,
countdown,
)
from ..importer import ImportExtensions
from ..logging.logger import JinaLogger
from ..logging.profile import TimeContext, ProgressBar
from ..parsers import set_pod_parser
from ..peapods import Pod
_allowed = {
'name',
'description',
'author',
'url',
'documentation',
'version',
'vendor',
'license',
'avatar',
'platform',
'update',
'keywords',
}
_label_prefix = 'ai.jina.hub.'
class HubIO:
""":class:`HubIO` provides the way to interact with Jina Hub registry.
You can use it with CLI to package a directory into a Jina Hub image and publish it to the world.
Examples:
- :command:`jina hub build my_pod/` build the image
- :command:`jina hub build my_pod/ --push` build the image and push to the public registry
- :command:`jina hub pull jinahub/pod.dummy_mwu_encoder:0.0.6` to download the image
"""
def __init__(self, args: 'argparse.Namespace'):
"""Create a new HubIO.
:param args: arguments
"""
self.logger = JinaLogger(self.__class__.__name__, **vars(args))
self.args = args
self._load_docker_client()
def _load_docker_client(self):
with ImportExtensions(
required=False,
help_text='missing "docker" dependency, available CLIs limited to "jina hub [list, new]"'
'to enable full CLI, please do pip install "jina[docker]"',
):
import docker
from docker import APIClient, DockerClient
self._client: DockerClient = docker.from_env()
# low-level client
self._raw_client = APIClient(base_url='unix://var/run/docker.sock')
def new(self, no_input: bool = False) -> None:
"""
Create a new executor using cookiecutter template.
:param no_input: Argument to avoid prompting dialogue (just to be used for testing)
"""
with ImportExtensions(required=True):
from cookiecutter.main import cookiecutter
import click # part of cookiecutter
cookiecutter_template = self.args.template
if self.args.type == 'app':
cookiecutter_template = 'https://github.com/jina-ai/cookiecutter-jina.git'
elif self.args.type == 'pod':
cookiecutter_template = (
'https://github.com/jina-ai/cookiecutter-jina-hub.git'
)
try:
cookiecutter(
template=cookiecutter_template,
overwrite_if_exists=self.args.overwrite,
output_dir=self.args.output_dir,
no_input=no_input,
)
except click.exceptions.Abort:
self.logger.info('nothing is created, bye!')
def login(self) -> None:
"""Login using Github Device flow to allow push access to Jina Hub Registry."""
import requests
with open(os.path.join(__resources_path__, 'hubapi.yml')) as fp:
hubapi_yml = JAML.load(fp)
client_id = hubapi_yml['github']['client_id']
scope = hubapi_yml['github']['scope']
device_code_url = hubapi_yml['github']['device_code_url']
access_token_url = hubapi_yml['github']['access_token_url']
grant_type = hubapi_yml['github']['grant_type']
login_max_retry = hubapi_yml['github']['login_max_retry']
headers = {'Accept': 'application/json'}
code_request_body = {'client_id': client_id, 'scope': scope}
try:
self.logger.info(
'Jina Hub login will use Github Device to generate one time token'
)
response = requests.post(
url=device_code_url, headers=headers, data=code_request_body
)
if response.status_code != requests.codes.ok:
self.logger.error(
'cannot reach github server. please make sure you\'re connected to internet'
)
code_response = response.json()
device_code = code_response['device_code']
user_code = code_response['user_code']
verification_uri = code_response['verification_uri']
try:
self.logger.info(
f'You should see a "Device Activation" page open in your browser. '
f'If not, please go to {colored(verification_uri, "cyan", attrs=["underline"])}'
)
self.logger.info(
'Please follow the steps:\n\n'
f'1. Enter the following code to that page: {colored(user_code, "cyan", attrs=["bold"])}\n'
'2. Click "Continue"\n'
'3. Come back to this terminal\n'
)
# allowing sometime for the user to view the message
time.sleep(1)
webbrowser.open(verification_uri, new=2)
except:
pass # intentional pass, browser support isn't cross-platform
access_request_body = {
'client_id': client_id,
'device_code': device_code,
'grant_type': grant_type,
}
for _ in range(login_max_retry):
access_token_response = requests.post(
url=access_token_url, headers=headers, data=access_request_body
).json()
if access_token_response.get('error', None) == 'authorization_pending':
self.logger.warning('still waiting for authorization')
countdown(
10,
reason=colored(
're-fetch access token', 'cyan', attrs=['bold', 'reverse']
),
)
elif 'access_token' in access_token_response:
token = {'access_token': access_token_response['access_token']}
with open(credentials_file(), 'w') as cf:
JAML.dump(token, cf)
self.logger.success(f'✅ Successfully logged in!')
break
else:
self.logger.error(f'❌ Max retries {login_max_retry} reached')
except KeyError as exp:
self.logger.error(f'❌ Can not read the key in response: {exp}')
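# Sketch of the two GitHub device-flow responses handled above (field names are the ones
# read by this method; the concrete values are made up for illustration):
#
#     code_response         = {'device_code': '...', 'user_code': 'ABCD-1234',
#                              'verification_uri': 'https://github.com/login/device'}
#     access_token_response = {'error': 'authorization_pending'}   # keep polling
#     access_token_response = {'access_token': 'gho_...'}          # success, stored via JAML.dump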
def list(self) -> Optional[List[Dict[str, Any]]]:
"""List all hub images given a filter specified by CLI.
:return: list of dictionaries of images
"""
if self.args.local_only:
return _list_local(self.logger)
else:
return _list(
logger=self.logger,
image_name=self.args.name,
image_kind=self.args.kind,
image_type=self.args.type,
image_keywords=self.args.keywords,
)
def push(
self,
name: Optional[str] = None,
build_result: Optional[Dict] = None,
) -> None:
"""Push image to Jina Hub.
:param name: name of image
:param build_result: dictionary containing the build summary
:return: None
"""
name = name or self.args.name
try:
# check if image exists
# fail if it does
if (
self.args.no_overwrite
and build_result
and self._image_version_exists(
build_result['manifest_info']['name'],
build_result['manifest_info']['version'],
jina_version,
)
):
raise ImageAlreadyExists(
f'Image with name {name} already exists. Will NOT overwrite.'
)
else:
self.logger.debug(
f'Image with name {name} does not exist. Pushing now...'
)
self._push_docker_hub(name)
if not build_result:
file_path = get_summary_path(name)
if os.path.isfile(file_path):
with open(file_path) as f:
build_result = json.load(f)
else:
self.logger.error(
f'can not find the build summary file.'
f'please use "jina hub build" to build the image first '
f'before pushing.'
)
return
if build_result:
if build_result.get('is_build_success', False):
_register_to_mongodb(logger=self.logger, summary=build_result)
if build_result.get('details', None) and build_result.get(
'build_history', None
):
self._write_slack_message(
build_result,
build_result['details'],
build_result['build_history'],
)
except Exception as e:
self.logger.error(f'Error when trying to push image {name}: {e!r}')
if isinstance(e, (ImageAlreadyExists, HubLoginRequired)):
raise e
def _push_docker_hub(self, name: Optional[str] = None) -> None:
"""Push to Docker Hub.
:param name: name of image
"""
check_registry(self.args.registry, name, self.args.repository)
self._check_docker_image(name)
self._docker_login()
with ProgressBar(task_name=f'pushing {name}', batch_unit='') as t:
for line in self._client.images.push(name, stream=True, decode=True):
t.update(1)
self.logger.debug(line)
self.logger.success(f'🎉 {name} is now published!')
share_link = f'https://api.jina.ai/hub/?jh={urllib.parse.quote_plus(name)}'
try:
webbrowser.open(share_link, new=2)
except:
# pass intentionally, dont want to bother users on opening browser failure
pass
finally:
self.logger.info(
f'Check out the usage {colored(share_link, "cyan", attrs=["underline"])} and share it with others!'
)
def pull(self) -> None:
"""Pull docker image."""
check_registry(self.args.registry, self.args.name, self.args.repository)
try:
self._docker_login()
with TimeContext(f'pulling {self.args.name}', self.logger):
image = self._client.images.pull(self.args.name)
if isinstance(image, list):
image = image[0]
image_tag = image.tags[0] if image.tags else ''
self.logger.success(
f'🎉 pulled {image_tag} ({image.short_id}) uncompressed size: {get_readable_size(image.attrs["Size"])}'
)
except Exception as ex:
self.logger.error(
f'can not pull image {self.args.name} from {self.args.registry} due to {ex!r}'
)
def _check_docker_image(self, name: str) -> None:
# check local image
image = self._client.images.get(name)
for r in _allowed:
if f'{_label_prefix}{r}' not in image.labels.keys():
self.logger.warning(
f'{r} is missing in your docker image labels, you may want to check it'
)
try:
image.labels['ai.jina.hub.jina_version'] = jina_version
label_info = (
f'{self.args.repository}/'
+ '{type}.{kind}.{name}:{version}-{jina_version}'.format(
**{k.replace(_label_prefix, ''): v for k, v in image.labels.items()}
)
)
safe_name = safe_url_name(label_info)
if name != safe_name:
raise ValueError(
f'image {name} does not match with label info in the image. name should be {safe_name}'
)
except KeyError as e:
self.logger.error(f'missing key in the label of the image {repr(e)}')
raise
self.logger.info(f'✅ {name} is a valid Jina Hub image, ready to publish')
def _docker_login(self) -> None:
"""Log-in to Docker."""
from docker.errors import APIError
if not (self.args.username and self.args.password):
self.args.username, self.args.password = _fetch_docker_auth(
logger=self.logger
)
try:
self._client.login(
username=self.args.username,
password=self.args.password,
registry=self.args.registry,
)
self.logger.success(f'✅ Successfully logged in to docker hub')
except APIError:
raise HubLoginRequired(
f'❌ Invalid docker credentials passed. docker login failed'
)
def build(self) -> Dict:
"""
Perform a build of the Docker image.
:return: dictionary with information on image (manifest)
"""
if self.args.dry_run:
result = self.dry_run()
else:
is_build_success, is_push_success = True, False
_logs = []
_except_strs = []
_excepts = []
with TimeContext(
f'building {colored(self.args.path, "green")}', self.logger
) as tc:
try:
_check_result = self._check_completeness()
self._freeze_jina_version()
_dockerfile = os.path.basename(_check_result['Dockerfile'])
_labels = {
_label_prefix + k: str(v) for k, v in self.manifest.items()
}
streamer = self._raw_client.build(
decode=True,
path=self.args.path,
tag=self.tag,
pull=self.args.pull,
dockerfile=_dockerfile,
labels=_labels,
rm=True,
)
for chunk in streamer:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
if is_error_message(line):
self.logger.critical(line)
_except_strs.append(line)
elif 'warning' in line.lower():
self.logger.warning(line)
else:
self.logger.info(line)
_logs.append(line)
except Exception as ex:
# if pytest fails it should end up here as well
self.logger.error(ex)
is_build_success = False
ex = HubBuilderBuildError(ex)
_except_strs.append(repr(ex))
_excepts.append(ex)
if is_build_success:
# compile it again, but this time don't show the log
image, log = self._client.images.build(
path=self.args.path,
tag=self.tag,
pull=self.args.pull,
dockerfile=_dockerfile,
labels=_labels,
rm=True,
)
# success
_details = {
'inspect': self._raw_client.inspect_image(image.tags[0]),
'tag': image.tags[0],
'hash': image.short_id,
'size': get_readable_size(image.attrs['Size']),
}
self.logger.success(
'🎉 built {tag} ({hash}) uncompressed size: {size}'.format_map(
_details
)
)
else:
self.logger.error(f'can not build the image due to {_except_strs}')
_details = {}
if is_build_success:
if self.args.test_uses:
p_names = []
try:
is_build_success = False
p_names, failed_test_levels = HubIO._test_build(
image,
self.args.test_level,
self.config_yaml_path,
self.args.timeout_ready,
self.args.daemon,
self.logger,
)
if any(
test_level in failed_test_levels
for test_level in [
BuildTestLevel.POD_DOCKER,
BuildTestLevel.FLOW,
]
):
is_build_success = False
self.logger.error(
f'build unsuccessful, failed at {str(failed_test_levels)} level'
)
else:
is_build_success = True
self.logger.warning(
f'Build successful. Tests failed at : {str(failed_test_levels)} levels. '
f'This could be due to the fact that the executor has non-installed external dependencies'
)
except Exception as ex:
self.logger.error(
f'something wrong while testing the build: {ex!r}'
)
ex = HubBuilderTestError(ex)
_except_strs.append(repr(ex))
_excepts.append(ex)
finally:
if self.args.daemon:
try:
for p in p_names:
self._raw_client.stop(p)
except:
pass # suppress on purpose
self._raw_client.prune_containers()
info, env_info = get_full_version()
_host_info = {
'jina': info,
'jina_envs': env_info,
'docker': self._raw_client.info(),
'build_args': vars(self.args),
}
_build_history = {
'time': get_now_timestamp(),
'host_info': _host_info
if is_build_success and self.args.host_info
else '',
'duration': tc.readable_duration,
'logs': _logs,
'exception': _except_strs,
}
if self.args.prune_images:
self.logger.info('deleting unused images')
self._raw_client.prune_images()
# since db tracks `version` & `jina_version` on the top level, let's get rid of them in `manifest`
if is_build_success:
_version = self.manifest['version']
self.manifest.pop('version', None)
self.manifest.pop('jina_version', None)
else:
_version = '0.0.1'
result = {
'name': self.executor_name if is_build_success else '',
'version': _version,
'jina_version': jina_version,
'path': self.args.path,
'manifest_info': self.manifest if is_build_success else '',
'details': _details,
'is_build_success': is_build_success,
'build_history': _build_history,
}
# only successful build (NOT dry run) writes the summary to disk
if result['is_build_success']:
self._write_summary_to_file(summary=result)
if self.args.push:
self.push(image.tags[0], result)
if not result['is_build_success'] and self.args.raise_error:
# remove the very verbose build log before raising the error
if 'build_history' in result:
result['build_history'].pop('logs', None)
raise HubBuilderError(_excepts)
return result
@staticmethod
def _test_build(
image, # type docker image object
test_level: 'BuildTestLevel',
config_yaml_path: str,
timeout_ready: int,
daemon_arg: bool,
logger: 'JinaLogger',
):
p_names = []
failed_levels = []
logger.info(f'run tests using test level {test_level}')
# test uses at executor level
if test_level >= BuildTestLevel.EXECUTOR:
logger.info(
f'test to initialize an executor from yaml configuration: {config_yaml_path}'
)
try:
with BaseExecutor.load_config(config_yaml_path):
pass
logger.info(f'successfully tested to initialize an executor')
except:
logger.error(f'failed to initialize an executor')
failed_levels.append(BuildTestLevel.EXECUTOR)
# test uses at Pod level (no docker)
if test_level >= BuildTestLevel.POD_NONDOCKER:
logger.info(
f'test to initialize a pod from yaml configuration: {config_yaml_path}'
)
try:
with Pod(
set_pod_parser().parse_args(
[
'--uses',
config_yaml_path,
'--timeout-ready',
str(timeout_ready),
]
)
):
pass
logger.info(
f'successfully tested to initialize a pod from yaml configuration'
)
except:
logger.error(f'failed to initialize a pod')
failed_levels.append(BuildTestLevel.POD_NONDOCKER)
# test uses at Pod level (with docker)
if test_level >= BuildTestLevel.POD_DOCKER:
p_name = random_name()
logger.info(
f'test to initialize a pod via docker image {image.tags[0]} named {p_name}'
)
try:
with Pod(
set_pod_parser().parse_args(
[
'--uses',
f'docker://{image.tags[0]}',
'--name',
p_name,
'--timeout-ready',
str(timeout_ready),
]
+ (['--daemon'] if daemon_arg else [])
)
):
pass
p_names.append(p_name)
logger.info(f'successfully tested to initialize a pod via docker')
except:
logger.error(f'failed to initialize a pod via docker image')
failed_levels.append(BuildTestLevel.POD_DOCKER)
# test uses at Flow level
if test_level >= BuildTestLevel.FLOW:
p_name = random_name()
logger.info(
f'test to build a flow from docker image {image.tags[0]} named {p_name} '
f'with daemon={daemon_arg} and timeout_ready={timeout_ready}'
)
try:
with Flow().add(
name=p_name,
uses=f'docker://{image.tags[0]}',
daemon=daemon_arg,
timeout_ready=timeout_ready,
):
pass
p_names.append(p_name)
logger.info('successfully tested to build a flow from docker image')
except:
logger.error(f'failed to build a flow from docker image')
failed_levels.append(BuildTestLevel.FLOW)
return p_names, failed_levels
def dry_run(self) -> Dict:
"""
Perform a dry-run.
:return: a dict with the manifest info.
"""
try:
s = self._check_completeness()
s['is_build_success'] = True
except Exception as ex:
s = {'is_build_success': False, 'exception': str(ex)}
return s
def _write_summary_to_file(self, summary: Dict) -> None:
file_path = get_summary_path(f'{summary["name"]}:{summary["version"]}')
with open(file_path, 'w+') as f:
json.dump(summary, f)
self.logger.debug(f'stored the summary from build to {file_path}')
def _freeze_jina_version(self) -> None:
import pkg_resources
requirements_path = get_exist_path(self.args.path, 'requirements.txt')
if requirements_path and os.path.exists(requirements_path):
new_requirements = []
update = False
with open(requirements_path, 'r') as fp:
try:
requirements = pkg_resources.parse_requirements(fp)
for req in requirements:
if 'jina' in str(req):
update = True
self.logger.info(f'Freezing jina version to {jina_version}')
new_requirements.append(f'jina=={jina_version}')
else:
new_requirements.append(str(req))
except:
pass
if update:
with open(requirements_path, 'w') as fp:
fp.write('\n'.join(new_requirements))
@staticmethod
def _alias_to_local_path(alias: str):
"""
Convert user given alias to the actual local path of the image, if fails return the original
:param alias: the name of the hub image, given by user
:return: the local path of the hub image, if not matched then return the original input
"""
all_local_images = _load_local_hub_manifest()
if alias in all_local_images:
return all_local_images[alias]['source_path']
else:
return alias
@staticmethod
def _alias_to_docker_image_name(alias: str):
"""
Convert user given alias to the actual image tag, if fails return the original
:param alias: the name of the hub image, given by user
:return: the actual image tag, if not matched then return the original input
"""
all_local_images = _load_local_hub_manifest()
if alias in all_local_images:
return all_local_images[alias]['image_tag']
else:
return alias
def _check_completeness(self) -> Dict:
self.args.path = self._alias_to_local_path(self.args.path)
dockerfile_path = get_exist_path(self.args.path, self.args.file)
manifest_path = get_exist_path(self.args.path, 'manifest.yml')
self.config_yaml_path = get_exist_path(self.args.path, 'config.yml')
readme_path = get_exist_path(self.args.path, 'README.md')
requirements_path = get_exist_path(self.args.path, 'requirements.txt')
yaml_glob = set(glob.glob(os.path.join(self.args.path, '*.yml')))
yaml_glob.difference_update({manifest_path, self.config_yaml_path})
if not self.config_yaml_path:
self.config_yaml_path = yaml_glob.pop()
py_glob = glob.glob(os.path.join(self.args.path, '*.py'))
test_glob = glob.glob(os.path.join(self.args.path, 'tests/test_*.py'))
completeness = {
'Dockerfile': dockerfile_path,
'manifest.yml': manifest_path,
'config.yml': self.config_yaml_path,
'README.md': readme_path,
'requirements.txt': requirements_path,
'*.yml': yaml_glob,
'*.py': py_glob,
'tests': test_glob,
}
self.logger.info(
f'completeness check\n'
+ '\n'.join(
f'{colored("✓", "green") if v else colored("✗", "red"):>4} {k:<20} {v}'
for k, v in completeness.items()
)
+ '\n'
)
if not (completeness['Dockerfile'] and completeness['manifest.yml']):
self.logger.critical(
'Dockerfile or manifest.yml is not given, can not build'
)
raise FileNotFoundError(
'Dockerfile or manifest.yml is not given, can not build'
)
self.manifest = self._read_manifest(manifest_path)
self.manifest['jina_version'] = jina_version
self.executor_name = safe_url_name(
f'{self.args.repository}/'
+ f'{self.manifest["type"]}.{self.manifest["kind"]}.{self.manifest["name"]}'
)
self.tag = self.executor_name + f':{self.manifest["version"]}-{jina_version}'
return completeness
def _read_manifest(self, path: str, validate: bool = True) -> Dict:
with open(
os.path.join(__resources_path__, 'hub-builder', 'manifest.yml')
) as fp:
tmp = JAML.load(
fp
) # do not expand variables at here, i.e. DO NOT USE expand_dict(yaml.load(fp))
with open(path) as fp:
tmp.update(JAML.load(fp))
if validate:
self._validate_manifest(tmp)
return tmp
def _validate_manifest(self, manifest: Dict) -> None:
required = {'name', 'type', 'version'}
# check the required field in manifest
for r in required:
if r not in manifest:
raise ValueError(f'{r} is missing in the manifest.yaml, it is required')
# check if all fields are there
for r in _allowed:
if r not in manifest:
self.logger.warning(
f'{r} is missing in your manifest.yml, you may want to check it'
)
# check name
check_name(manifest['name'])
# check_image_type
check_image_type(manifest['type'])
# check version number
check_version(manifest['version'])
# check license
check_license(manifest['license'])
# check platform
if not isinstance(manifest['platform'], list):
manifest['platform'] = list(manifest['platform'])
check_platform(manifest['platform'])
# replace all chars in value to safe chars
for k, v in manifest.items():
if v and isinstance(v, str):
manifest[k] = remove_control_characters(v)
# show manifest key-values
for k, v in manifest.items():
self.logger.debug(f'{k}: {v}')
def _write_slack_message(self, *args):
def _expand_fn(v):
if isinstance(v, str):
for d in args:
try:
v = v.format(**d)
except KeyError:
pass
return v
if 'JINAHUB_SLACK_WEBHOOK' in os.environ:
with open(
os.path.join(
__resources_path__, 'hub-builder-success', 'slack-jinahub.json'
)
) as fp:
tmp = expand_dict(json.load(fp), _expand_fn, resolve_cycle_ref=False)
req = urllib.request.Request(os.environ['JINAHUB_SLACK_WEBHOOK'])
req.add_header('Content-Type', 'application/json; charset=utf-8')
jdb = json.dumps(tmp).encode('utf-8') # needs to be bytes
req.add_header('Content-Length', str(len(jdb)))
with urllib.request.urlopen(req, jdb) as f:
res = f.read()
self.logger.info(f'push to Slack: {res}')
# alias of "new" in cli
create = new
init = new
def _image_version_exists(self, name, module_version, req_jina_version):
manifests = _list(self.logger, name)
# check if matching module version and jina version exists
if manifests:
matching = [
m
for m in manifests
if m['version'] == module_version
and 'jina_version' in m.keys()
and m['jina_version'] == req_jina_version
]
return len(matching) > 0
return False
|
import os
import pickle
import sys
from os.path import join
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from plots.plot_utils import set_matplotlib_params
from utils.utils import log, ensure_dir_exists
set_matplotlib_params()
plt.rcParams['figure.figsize'] = (5.5, 2.3) #(2.5, 2.0) 7.5, 4
B = int(1e9)
EXPERIMENTS = {
'doom_battle': dict(is_pbt=False, dir='doom_battle_appo_v56_4p', key='0_aux/avg_reward', x_ticks=[0, B, 2*B, 3*B, 4*B], max_x=4*B, y_ticks=[0, 10, 20, 30, 40, 50], x_label='Env. frames, skip=4', title='Battle', baselines=((33, 'DFP'), (35, 'DFP+CV')), legend='SampleFactory'),
'doom_battle2': dict(is_pbt=False, dir='doom_battle2_appo_v65_fs4', key='0_aux/avg_true_reward', x_ticks=[0, B, 2*B, 3*B], max_x=3*B, y_ticks=[0, 5, 10, 15, 20, 25], x_label='Env. frames, skip=4', title='Battle2', baselines=((17, 'DFP'), ), legend='SampleFactory'),
'doom_deathmatch': dict(is_pbt=True, dir='doom_bots_v63_pbt', key='0_aux/avg_true_reward', x_ticks=[0, B//2, B, 3*B//2, 2*B, 5*B//2], max_x=5*B//2, y_ticks=[0, 20, 40, 60, 80], x_label='Env. frames, skip=2', title='Deathmatch vs bots', baselines=((12.6, 'Avg. scripted bot'), (22, 'Best scripted bot')), legend='Population mean'),
'doom_duel': dict(is_pbt=True, dir='paper_doom_duel_bots_v65_fs2', key='0_aux/avg_true_reward', x_ticks=[0, B//2, B, 3*B//2, 2*B, 5*B//2], max_x=5*B//2, y_ticks=[0, 10, 20, 30, 40], x_label='Env. frames, skip=2', title='Duel vs bots', baselines=((3.66, 'Avg. scripted bot'), (5, 'Best scripted bot')), legend='Population mean'),
}
PLOT_NAMES = dict(
# doom_my_way_home='Find My Way Home',
# doom_deadly_corridor='Deadly Corridor',
# doom_defend_the_center='Defend the Center',
# doom_defend_the_line='Defend the Line',
# doom_health_gathering='Health Gathering',
# doom_health_gathering_supreme='Health Gathering Supreme',
)
def extract(env, experiments):
# scalar_accumulators = [EventAccumulator(str(dpath / dname / subpath)).Reload().scalars
# for dname in os.listdir(dpath) if dname != FOLDER_NAME and dname in hide_file]
scalar_accumulators = [EventAccumulator(experiment_dir).Reload().scalars for experiment_dir in experiments]
# Filter non event files
scalar_accumulators = [scalar_accumulator for scalar_accumulator in scalar_accumulators if scalar_accumulator.Keys()]
# Get and validate all scalar keys
# zhehui sorted(scalar_accumulator.Keys())
all_keys = [tuple(sorted(scalar_accumulator.Keys())) for scalar_accumulator in scalar_accumulators]
# assert len(set(all_keys)) == 1, "All runs need to have the same scalar keys. There are mismatches in {}".format(all_keys)
keys = all_keys[0]
def all_accumulators_have_this_key(key):
for scalar_accumulator in scalar_accumulators:
if key not in scalar_accumulator.Keys():
log.debug('Not all of the accumulators have key %s', key)
return False
return True
keys = [key for key in keys if all_accumulators_have_this_key(key)]
all_scalar_events_per_key = [[scalar_accumulator.Items(key) for scalar_accumulator in scalar_accumulators] for key in keys]
# Get and validate all steps per key
# sorted(all_scalar_events) sorted(scalar_events)
x_per_key = [[tuple(scalar_event.step for scalar_event in sorted(scalar_events)) for scalar_events in sorted(all_scalar_events)]
for all_scalar_events in all_scalar_events_per_key]
# zhehui
# import linear interpolation
# all_steps_per_key = tuple(step_id*1e6 for step_id in range(1e8/1e6))
# modify_all_steps_per_key = tuple(int(step_id*1e6) for step_id in range(1, int(1e8/1e6 + 1)))
plot_step = int(1e7)
max_x = EXPERIMENTS[env]['max_x']
all_steps_per_key = [[tuple(int(step_id) for step_id in range(0, max_x, plot_step)) for scalar_events in sorted(all_scalar_events)]
for all_scalar_events in all_scalar_events_per_key]
for i, all_steps in enumerate(all_steps_per_key):
assert len(set(all_steps)) == 1, "For scalar {} the step numbering or count doesn't match. Step count for all runs: {}".format(
keys[i], [len(steps) for steps in all_steps])
steps_per_key = [all_steps[0] for all_steps in all_steps_per_key]
# Get and average wall times per step per key
# wall_times_per_key = [np.mean([tuple(scalar_event.wall_time for scalar_event in scalar_events) for scalar_events in all_scalar_events], axis=0)
# for all_scalar_events in all_scalar_events_per_key]
# Get values per step per key
values_per_key = [[[scalar_event.value for scalar_event in scalar_events] for scalar_events in all_scalar_events]
for all_scalar_events in all_scalar_events_per_key]
true_reward_key = EXPERIMENTS[env]['key']
key_idx = keys.index(true_reward_key)
values = values_per_key[key_idx]
x = steps_per_key[key_idx]
x_steps = x_per_key[key_idx]
interpolated_y = [[] for _ in values]
for i in range(len(values)):
idx = 0
values[i] = values[i][2:]
x_steps[i] = x_steps[i][2:]
assert len(x_steps[i]) == len(values[i])
for x_idx in x:
while idx < len(x_steps[i]) and x_steps[i][idx] < x_idx:
idx += 1
# log.debug('i: %d, x_idx: %d, idx: %d, len %d', i, x_idx, idx, len(x_steps[i]))
if idx == len(x_steps[i]):
break
if x_idx == 0:
interpolated_value = values[i][idx]
elif idx < len(values[i]) - 1:
interpolated_value = (values[i][idx] + values[i][idx + 1]) / 2
else:
interpolated_value = values[i][idx]
interpolated_y[i].append(interpolated_value)
x = x[:len(interpolated_y[i])]
assert len(interpolated_y[i]) == len(x)
log.debug('Key values: %r', interpolated_y[0][:30])
min_length = len(x)
for i in range(len(values)):
log.debug('Values for seed %d truncated from %d to %d', i, len(interpolated_y[i]), min_length)
interpolated_y[i] = interpolated_y[i][:min_length]
interpolated_keys = dict()
interpolated_keys[true_reward_key] = (x, interpolated_y)
return interpolated_keys
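# Summary of what extract() produces (restating the code above, no new behaviour):
# for the configured scalar key every seed is resampled onto the common grid
# x = 0, 1e7, 2e7, ..., max_x; for each grid point the first logged step >= x is found
# and its value is (roughly) averaged with the next one, the first two logged points are
# skipped, and all seeds are truncated to the shortest series. The returned dict looks like
#
#     {'0_aux/avg_true_reward': (x, [y_seed0, y_seed1, ...])}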
def aggregate(env, experiments, count, ax):
print("Started aggregation {}".format(env))
curr_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = join(curr_dir, 'cache_complex_envs')
cache_env = join(cache_dir, env)
if os.path.isdir(cache_env):
with open(join(cache_env, f'{env}.pickle'), 'rb') as fobj:
interpolated_keys = pickle.load(fobj)
else:
cache_env = ensure_dir_exists(cache_env)
interpolated_keys = extract(env, experiments)
with open(join(cache_env, f'{env}.pickle'), 'wb') as fobj:
pickle.dump(interpolated_keys, fobj)
for key in interpolated_keys.keys():
plot(env, key, interpolated_keys[key], ax, count)
def plot(env, key, interpolated_key, ax, count):
title_text = EXPERIMENTS[env]['title']
ax.set_title(title_text, fontsize=8)
x, y = interpolated_key
y_np = [np.array(yi) for yi in y]
y_np = np.stack(y_np)
if env == 'doom_deadly_corridor':
# fix reward scale
y_np *= 100
y_mean = np.mean(y_np, axis=0)
y_std = np.std(y_np, axis=0)
y_plus_std = np.minimum(y_mean + y_std, y_np.max())
y_minus_std = y_mean - y_std
y_max = np.max(y_np, axis=0)
# Configuration
# fig, ax = plt.subplots()
def mkfunc(x, pos):
if x >= 1e9:
return '%dB' % int(x * 1e-9)
elif x >= 1e6:
return '%dM' % int(x * 1e-6)
elif x >= 1e3:
return '%dK' % int(x * 1e-3)
else:
return '%d' % int(x)
mkformatter = matplotlib.ticker.FuncFormatter(mkfunc)
# ax.xaxis.set_major_formatter(mkformatter)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_linewidth(1.0)
# xlabel_text = env.replace('_', ' ').title()
# plt.xlabel(xlabel_text, fontsize=8)
# zhehui
# if they are bottom plots, add Environment Frames
ax.set_xlabel(EXPERIMENTS[env]['x_label'], fontsize=8)
if count == 0:
ax.set_ylabel('Kills per episode', fontsize=8)
ax.set_xticks(EXPERIMENTS[env]['x_ticks'])
ax.set_yticks(EXPERIMENTS[env]['y_ticks'])
# hide tick of axis
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.tick_params(which='major', length=0)
ax.grid(color='#B3B3B3', linestyle='-', linewidth=0.25, alpha=0.2)
# ax.xaxis.grid(False)
x_delta = 0.05 * x[-1]
ax.set_xlim(xmin=-x_delta, xmax=x[-1] + x_delta)
y_delta = 0.05 * np.max(y_max)
ax.set_ylim(ymin=min(np.min(y_mean) - y_delta, 0.0), ymax=np.max(y_max) + y_delta)
# plt.grid(False)
plt.ticklabel_format(style='sci', axis='x', scilimits=(8, 9))
ax.ticklabel_format(style='plain', axis='y', scilimits=(0, 0))
marker_size = 0
lw = 1.0
lw_max = 0.7
lw_baseline = 0.7
blue = '#1F77B4'
orange = '#FF7F0E'
green = '#2CA02C'
sf_plot, = ax.plot(x, y_mean, color=blue, label=EXPERIMENTS[env]['legend'], linewidth=lw, antialiased=True)
ax.fill_between(x, y_minus_std, y_plus_std, color=blue, alpha=0.25, antialiased=True, linewidth=0.0)
if EXPERIMENTS[env]['is_pbt']:
ax.plot(x, y_max, color='#d62728', label='Population best', linewidth=lw_max, antialiased=True)
if 'baselines' in EXPERIMENTS[env]:
colors = [green, orange]
baselines = EXPERIMENTS[env]['baselines']
for baseline_i, baseline in enumerate(baselines):
baseline_color = colors[baseline_i]
baseline_y, baseline_name = baseline
ax.plot([x[0], x[-1]], [baseline_y, baseline_y], color=baseline_color, label=baseline_name, linewidth=lw_baseline, antialiased=True, linestyle='--')
# ax.legend(prop={'size': 6}, loc='lower right')
# plt.set_tight_layout()
# plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=1, wspace=0)
# plt.margins(0, 0)
# plot_name = f'{env}_{key.replace("/", " ")}'
# plt.savefig(os.path.join(os.getcwd(), f'../final_plots/reward_{plot_name}.pdf'), format='pdf', bbox_inches='tight', pad_inches=0)
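# Note: `mkformatter` is constructed above but the set_major_formatter call is commented
# out, so the scientific tick labels from ticklabel_format are what actually appear.
# If re-enabled, mkfunc would render ticks like this (illustrative calls):
#
#     mkfunc(2e9, None)   -> '2B'
#     mkfunc(5e6, None)   -> '5M'
#     mkfunc(1500, None)  -> '1K'
#     mkfunc(500, None)   -> '500'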
def main():
experiments_dir = '/home/alex/all/projects/sample-factory/train_dir'
all_experiment_dirs_list = [join(experiments_dir, v['dir']) for k, v in EXPERIMENTS.items()]
for experiment_dir in all_experiment_dirs_list:
log.debug('Experiment dir: %s', experiment_dir)
log.debug('Total: %d', len(all_experiment_dirs_list))
for env, details in EXPERIMENTS.items():
env_dir = details['dir']
env_dir = join(experiments_dir, env_dir)
event_files = Path(env_dir).rglob('*.tfevents.*')
event_files = list(event_files)
log.info('Event files: %r', event_files)
env_dirs = set()
for event_file in event_files:
env_dirs.add(os.path.dirname(event_file))
EXPERIMENTS[env]['dirs'] = sorted(list(env_dirs))
log.info('Env dirs for env %s is %r', env, env_dirs)
EXPERIMENT_GROUPS = (('doom_battle', 'doom_battle2'), ('doom_deathmatch', 'doom_duel'))
for group_i, exp_group in enumerate(EXPERIMENT_GROUPS):
fig, (ax1, ax2) = plt.subplots(1, 2)
ax = (ax1, ax2)
count = 0
for env in exp_group:
experiments = EXPERIMENTS[env]['dirs']
aggregate(env, experiments, count, ax[count])
count += 1
if group_i != 0:
handles, labels = ax[-1].get_legend_handles_labels()
lgd = fig.legend(handles, labels, bbox_to_anchor=(0.1, 0.88, 0.8, 0.2), loc='lower left', ncol=4, mode="expand", prop={'size': 6})
lgd.set_in_layout(True)
# zhehui
# plt.show()
# plot_name = f'{env}_{key.replace("/", " ")}'
# plt.tight_layout()
# plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=1, wspace=0)
# plt.subplots_adjust(wspace=0.12, hspace=0.15)
plt.tight_layout(rect=(0, 0, 1.0, 0.9))
plt.margins(0, 0)
plot_name = f'complex_envs_{group_i}'
if group_i == 0:
plt.savefig(os.path.join(os.getcwd(), f'../final_plots/reward_{plot_name}.pdf'), format='pdf', bbox_inches='tight', pad_inches=0, )
else:
plt.savefig(os.path.join(os.getcwd(), f'../final_plots/reward_{plot_name}.pdf'), format='pdf', bbox_extra_artists=(lgd,))
return 0
if __name__ == '__main__':
sys.exit(main())
| import os
import pickle
import sys
from os.path import join
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from plots.plot_utils import set_matplotlib_params
from utils.utils import log, ensure_dir_exists
set_matplotlib_params()
plt.rcParams['figure.figsize'] = (5.5, 2.3) #(2.5, 2.0) 7.5, 4
B = int(1e9)
EXPERIMENTS = {
'doom_battle': dict(is_pbt=False, dir='doom_battle_appo_v56_4p', key='0_aux/avg_reward', x_ticks=[0, B, 2*B, 3*B, 4*B], max_x=4*B, y_ticks=[0, 10, 20, 30, 40, 50], x_label='Env. frames, skip=4', title='Battle', baselines=((33, 'DFP'), (35, 'DFP+CV')), legend='SampleFactory'),
'doom_battle2': dict(is_pbt=False, dir='doom_battle2_appo_v65_fs4', key='0_aux/avg_true_reward', x_ticks=[0, B, 2*B, 3*B], max_x=3*B, y_ticks=[0, 5, 10, 15, 20, 25], x_label='Env. frames, skip=4', title='Battle2', baselines=((17, 'DFP'), ), legend='SampleFactory'),
'doom_deathmatch': dict(is_pbt=True, dir='doom_bots_v63_pbt', key='0_aux/avg_true_reward', x_ticks=[0, B//2, B, 3*B//2, 2*B, 5*B//2], max_x=5*B//2, y_ticks=[0, 20, 40, 60, 80], x_label='Env. frames, skip=2', title='Deathmatch vs bots', baselines=((12.6, 'Avg. scripted bot'), (22, 'Best scripted bot')), legend='Population mean'),
'doom_duel': dict(is_pbt=True, dir='paper_doom_duel_bots_v65_fs2', key='0_aux/avg_true_reward', x_ticks=[0, B//2, B, 3*B//2, 2*B, 5*B//2], max_x=5*B//2, y_ticks=[0, 10, 20, 30, 40], x_label='Env. frames, skip=2', title='Duel vs bots', baselines=((3.66, 'Avg. scripted bot'), (5, 'Best scripted bot')), legend='Population mean'),
}
PLOT_NAMES = dict(
# doom_my_way_home='Find My Way Home',
# doom_deadly_corridor='Deadly Corridor',
# doom_defend_the_center='Defend the Center',
# doom_defend_the_line='Defend the Line',
# doom_health_gathering='Health Gathering',
# doom_health_gathering_supreme='Health Gathering Supreme',
)
def extract(env, experiments):
# scalar_accumulators = [EventAccumulator(str(dpath / dname / subpath)).Reload().scalars
# for dname in os.listdir(dpath) if dname != FOLDER_NAME and dname in hide_file]
scalar_accumulators = [EventAccumulator(experiment_dir).Reload().scalars for experiment_dir in experiments]
# Filter non event files
scalar_accumulators = [scalar_accumulator for scalar_accumulator in scalar_accumulators if scalar_accumulator.Keys()]
# Get and validate all scalar keys
# zhehui sorted(scalar_accumulator.Keys())
all_keys = [tuple(sorted(scalar_accumulator.Keys())) for scalar_accumulator in scalar_accumulators]
# assert len(set(all_keys)) == 1, "All runs need to have the same scalar keys. There are mismatches in {}".format(all_keys)
keys = all_keys[0]
def all_accumulators_have_this_key(key):
for scalar_accumulator in scalar_accumulators:
if key not in scalar_accumulator.Keys():
log.debug('Not all of the accumulators have key %s', key)
return False
return True
keys = [key for key in keys if all_accumulators_have_this_key(key)]
all_scalar_events_per_key = [[scalar_accumulator.Items(key) for scalar_accumulator in scalar_accumulators] for key in keys]
# Get and validate all steps per key
# sorted(all_scalar_events) sorted(scalar_events)
x_per_key = [[tuple(scalar_event.step for scalar_event in sorted(scalar_events)) for scalar_events in sorted(all_scalar_events)]
for all_scalar_events in all_scalar_events_per_key]
# zhehui
# import linear interpolation
# all_steps_per_key = tuple(step_id*1e6 for step_id in range(1e8/1e6))
# modify_all_steps_per_key = tuple(int(step_id*1e6) for step_id in range(1, int(1e8/1e6 + 1)))
plot_step = int(1e7)
max_x = EXPERIMENTS[env]['max_x']
all_steps_per_key = [[tuple(int(step_id) for step_id in range(0, max_x, plot_step)) for scalar_events in sorted(all_scalar_events)]
for all_scalar_events in all_scalar_events_per_key]
for i, all_steps in enumerate(all_steps_per_key):
assert len(set(all_steps)) == 1, "For scalar {} the step numbering or count doesn't match. Step count for all runs: {}".format(
keys[i], [len(steps) for steps in all_steps])
steps_per_key = [all_steps[0] for all_steps in all_steps_per_key]
# Get and average wall times per step per key
# wall_times_per_key = [np.mean([tuple(scalar_event.wall_time for scalar_event in scalar_events) for scalar_events in all_scalar_events], axis=0)
# for all_scalar_events in all_scalar_events_per_key]
# Get values per step per key
values_per_key = [[[scalar_event.value for scalar_event in scalar_events] for scalar_events in all_scalar_events]
for all_scalar_events in all_scalar_events_per_key]
true_reward_key = EXPERIMENTS[env]['key']
key_idx = keys.index(true_reward_key)
values = values_per_key[key_idx]
x = steps_per_key[key_idx]
x_steps = x_per_key[key_idx]
interpolated_y = [[] for _ in values]
for i in range(len(values)):
idx = 0
values[i] = values[i][2:]
x_steps[i] = x_steps[i][2:]
assert len(x_steps[i]) == len(values[i])
for x_idx in x:
while idx < len(x_steps[i]) and x_steps[i][idx] < x_idx:
idx += 1
# log.debug('i: %d, x_idx: %d, idx: %d, len %d', i, x_idx, idx, len(x_steps[i]))
if idx == len(x_steps[i]):
break
if x_idx == 0:
interpolated_value = values[i][idx]
elif idx < len(values[i]) - 1:
interpolated_value = (values[i][idx] + values[i][idx + 1]) / 2
else:
interpolated_value = values[i][idx]
interpolated_y[i].append(interpolated_value)
x = x[:len(interpolated_y[i])]
assert len(interpolated_y[i]) == len(x)
log.debug('Key values: %r', interpolated_y[0][:30])
min_length = len(x)
for i in range(len(values)):
log.debug('Values for seed %d truncated from %d to %d', i, len(interpolated_y[i]), min_length)
interpolated_y[i] = interpolated_y[i][:min_length]
interpolated_keys = dict()
interpolated_keys[true_reward_key] = (x, interpolated_y)
return interpolated_keys
def aggregate(env, experiments, count, ax):
print("Started aggregation {}".format(env))
curr_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = join(curr_dir, 'cache_complex_envs')
cache_env = join(cache_dir, env)
if os.path.isdir(cache_env):
with open(join(cache_env, f'{env}.pickle'), 'rb') as fobj:
interpolated_keys = pickle.load(fobj)
else:
cache_env = ensure_dir_exists(cache_env)
interpolated_keys = extract(env, experiments)
with open(join(cache_env, f'{env}.pickle'), 'wb') as fobj:
pickle.dump(interpolated_keys, fobj)
for key in interpolated_keys.keys():
plot(env, key, interpolated_keys[key], ax, count)
def plot(env, key, interpolated_key, ax, count):
title_text = EXPERIMENTS[env]['title']
ax.set_title(title_text, fontsize=8)
x, y = interpolated_key
y_np = [np.array(yi) for yi in y]
y_np = np.stack(y_np)
if env == 'doom_deadly_corridor':
# fix reward scale
y_np *= 100
y_mean = np.mean(y_np, axis=0)
y_std = np.std(y_np, axis=0)
y_plus_std = np.minimum(y_mean + y_std, y_np.max())
y_minus_std = y_mean - y_std
y_max = np.max(y_np, axis=0)
# Configuration
# fig, ax = plt.subplots()
def mkfunc(x, pos):
if x >= 1e9:
return '%dB' % int(x * 1e-9)
elif x >= 1e6:
return '%dM' % int(x * 1e-6)
elif x >= 1e3:
return '%dK' % int(x * 1e-3)
else:
return '%d' % int(x)
mkformatter = matplotlib.ticker.FuncFormatter(mkfunc)
# ax.xaxis.set_major_formatter(mkformatter)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_linewidth(1.0)
# xlabel_text = env.replace('_', ' ').title()
# plt.xlabel(xlabel_text, fontsize=8)
# zhehui
# if they are bottom plots, add Environment Frames
ax.set_xlabel(EXPERIMENTS[env]['x_label'], fontsize=8)
if count == 0:
ax.set_ylabel('Kills per episode', fontsize=8)
ax.set_xticks(EXPERIMENTS[env]['x_ticks'])
ax.set_yticks(EXPERIMENTS[env]['y_ticks'])
# hide tick of axis
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.tick_params(which='major', length=0)
ax.grid(color='#B3B3B3', linestyle='-', linewidth=0.25, alpha=0.2)
# ax.xaxis.grid(False)
x_delta = 0.05 * x[-1]
ax.set_xlim(xmin=-x_delta, xmax=x[-1] + x_delta)
y_delta = 0.05 * np.max(y_max)
ax.set_ylim(ymin=min(np.min(y_mean) - y_delta, 0.0), ymax=np.max(y_max) + y_delta)
# plt.grid(False)
plt.ticklabel_format(style='sci', axis='x', scilimits=(8, 9))
ax.ticklabel_format(style='plain', axis='y', scilimits=(0, 0))
marker_size = 0
lw = 1.0
lw_max = 0.7
lw_baseline = 0.7
blue = '#1F77B4'
orange = '#FF7F0E'
green = '#2CA02C'
sf_plot, = ax.plot(x, y_mean, color=blue, label=EXPERIMENTS[env]['legend'], linewidth=lw, antialiased=True)
ax.fill_between(x, y_minus_std, y_plus_std, color=blue, alpha=0.25, antialiased=True, linewidth=0.0)
if EXPERIMENTS[env]['is_pbt']:
ax.plot(x, y_max, color='#d62728', label='Population best', linewidth=lw_max, antialiased=True)
if 'baselines' in EXPERIMENTS[env]:
colors = [green, orange]
baselines = EXPERIMENTS[env]['baselines']
for baseline_i, baseline in enumerate(baselines):
baseline_color = colors[baseline_i]
baseline_y, baseline_name = baseline
ax.plot([x[0], x[-1]], [baseline_y, baseline_y], color=baseline_color, label=baseline_name, linewidth=lw_baseline, antialiased=True, linestyle='--')
# ax.legend(prop={'size': 6}, loc='lower right')
# plt.set_tight_layout()
# plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=1, wspace=0)
# plt.margins(0, 0)
# plot_name = f'{env}_{key.replace("/", " ")}'
# plt.savefig(os.path.join(os.getcwd(), f'../final_plots/reward_{plot_name}.pdf'), format='pdf', bbox_inches='tight', pad_inches=0)
def main():
experiments_dir = '/home/alex/all/projects/sample-factory/train_dir'
all_experiment_dirs_list = [join(experiments_dir, v['dir']) for k, v in EXPERIMENTS.items()]
for experiment_dir in all_experiment_dirs_list:
log.debug('Experiment dir: %s', experiment_dir)
log.debug('Total: %d', len(all_experiment_dirs_list))
for env, details in EXPERIMENTS.items():
env_dir = details['dir']
env_dir = join(experiments_dir, env_dir)
event_files = Path(env_dir).rglob('*.tfevents.*')
event_files = list(event_files)
log.info('Event files: %r', event_files)
env_dirs = set()
for event_file in event_files:
env_dirs.add(os.path.dirname(event_file))
EXPERIMENTS[env]['dirs'] = sorted(list(env_dirs))
log.info('Env dirs for env %s is %r', env, env_dirs)
EXPERIMENT_GROUPS = (('doom_battle', 'doom_battle2'), ('doom_deathmatch', 'doom_duel'))
for group_i, exp_group in enumerate(EXPERIMENT_GROUPS):
fig, (ax1, ax2) = plt.subplots(1, 2)
ax = (ax1, ax2)
count = 0
for env in exp_group:
experiments = EXPERIMENTS[env]['dirs']
aggregate(env, experiments, count, ax[count])
count += 1
if group_i != 0:
handles, labels = ax[-1].get_legend_handles_labels()
lgd = fig.legend(handles, labels, bbox_to_anchor=(0.1, 0.88, 0.8, 0.2), loc='lower left', ncol=4, mode="expand", prop={'size': 6})
lgd.set_in_layout(True)
# zhehui
# plt.show()
# plot_name = f'{env}_{key.replace("/", " ")}'
# plt.tight_layout()
# plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=1, wspace=0)
# plt.subplots_adjust(wspace=0.12, hspace=0.15)
plt.tight_layout(rect=(0, 0, 1.0, 0.9))
plt.margins(0, 0)
plot_name = f'complex_envs_{group_i}'
if group_i == 0:
plt.savefig(os.path.join(os.getcwd(), f'../final_plots/reward_{plot_name}.pdf'), format='pdf', bbox_inches='tight', pad_inches=0, )
else:
plt.savefig(os.path.join(os.getcwd(), f'../final_plots/reward_{plot_name}.pdf'), format='pdf', bbox_extra_artists=(lgd,))
return 0
if __name__ == '__main__':
sys.exit(main())
|
"""
Import and reconstruction of sketch and extrude designs
from the Reconstruction Subset
"""
import adsk.core
import adsk.fusion
import traceback
import json
import os
import sys
import time
import math
from pathlib import Path
from collections import OrderedDict
import deserialize
class SketchExtrudeImporter():
def __init__(self, json_data=None):
self.app = adsk.core.Application.get()
if json_data is not None:
if isinstance(json_data, dict):
self.data = json_data
else:
with open(json_data, encoding="utf8") as f:
self.data = json.load(f, object_pairs_hook=OrderedDict)
product = self.app.activeProduct
self.design = adsk.fusion.Design.cast(product)
# Callback during reconstruction
# called incrementally when the design changes
self.reconstruct_cb = None
# --------------------------------------------------------
# PUBLIC API CALLS
# --------------------------------------------------------
def reconstruct(self, reconstruct_cb=None, reconstruction=None):
"""Reconstruct the full design"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
timeline = self.data["timeline"]
entities = self.data["entities"]
# Get the profiles used in this design
profiles_used = self.get_extrude_profiles(timeline, entities)
# Keep track of the sketch profiles
sketch_profiles = {}
for timeline_object in timeline:
entity_uuid = timeline_object["entity"]
entity_index = timeline_object["index"]
entity = entities[entity_uuid]
if entity["type"] == "Sketch":
# Only reconstruct this sketch if it is used with an extrude
if entity_uuid in profiles_used["sketches"]:
sketch, sketch_profile_set = self.reconstruct_sketch_feature(
entity, sketch_profiles,
sketch_uuid=entity_uuid, sketch_index=entity_index
)
if sketch_profile_set:
sketch_profiles.update(**sketch_profile_set)
elif entity["type"] == "ExtrudeFeature":
self.reconstruct_extrude_feature(entity, entity_uuid, entity_index, sketch_profiles)
def reconstruct_sketch(self, sketch_data, sketch_uuid=None,
sketch_index=None, sketch_plane=None,
transform=None, reconstruct_cb=None,
reconstruction=None):
"""Reconstruct and return just a single sketch"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
sketch, sketch_profile_set = self.reconstruct_sketch_feature(
sketch_data, {},
sketch_uuid=sketch_uuid, sketch_index=sketch_index,
sketch_plane=sketch_plane, transform=transform
)
return sketch
def reconstruct_profile(self, sketch_data, sketch_name, profile_uuid,
transform=None, reconstruct_cb=None,
reconstruction=None):
"""Reconstruct a single profile from a given sketch"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
profile_data = sketch_data["profiles"][profile_uuid]
sketches = self.reconstruction.sketches
sketch = sketches.itemByName(sketch_name)
if transform is None:
transform = adsk.core.Matrix3D.create()
self.reconstruct_trimmed_curves(sketch, profile_data, transform)
return sketch
def reconstruct_curve(self, sketch_data, sketch_name, curve_uuid,
sketch_uuid=None, sketch_index=None,
transform=None, reconstruct_cb=None,
reconstruction=None):
"""Reconstruct a single curve in a given sketch"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
curve_data = sketch_data["curves"][curve_uuid]
points_data = sketch_data["points"]
sketches = self.reconstruction.sketches
sketch = sketches.itemByName(sketch_name)
if transform is None:
transform = adsk.core.Matrix3D.create()
self.reconstruct_sketch_curve(
sketch,
curve_data,
curve_uuid,
points_data,
transform=transform,
sketch_uuid=sketch_uuid,
sketch_index=sketch_index
)
adsk.doEvents()
return sketch
def reconstruct_curves(self, sketch_data, sketch_name,
sketch_uuid=None, sketch_index=None,
transform=None, reconstruct_cb=None,
reconstruction=None):
"""Reconstruct all curves in a given sketch"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
points_data = sketch_data["points"]
sketches = self.reconstruction.sketches
sketch = sketches.itemByName(sketch_name)
if transform is None:
transform = adsk.core.Matrix3D.create()
# Turn off sketch compute until we add all the curves
sketch.isComputeDeferred = True
for curve_uuid, curve_data in sketch_data["curves"].items():
self.reconstruct_sketch_curve(
sketch,
curve_data,
curve_uuid,
points_data,
transform=transform,
sketch_uuid=sketch_uuid,
sketch_index=sketch_index
)
sketch.isComputeDeferred = False
adsk.doEvents()
return sketch
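# Minimal usage sketch (illustrative only; it assumes this runs inside a Fusion 360
# add-in with an active design, and that 'design.json' is a file from the
# reconstruction subset):
#
#     importer = SketchExtrudeImporter('design.json')
#     importer.reconstruct()                                   # rebuild the full timeline
#
#     # or rebuild a single sketch from already-loaded json data:
#     sketch = importer.reconstruct_sketch(sketch_data, sketch_uuid=some_uuid)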
# --------------------------------------------------------
# SKETCH FEATURE
# --------------------------------------------------------
def get_extrude_profiles(self, timeline, entities):
"""Get the profiles used with extrude operations"""
profiles = set()
sketches = set()
for timeline_object in timeline:
entity_key = timeline_object["entity"]
entity = entities[entity_key]
if entity["type"] == "ExtrudeFeature":
for profile in entity["profiles"]:
profiles.add(profile["profile"])
sketches.add(profile["sketch"])
return {
"profiles": profiles,
"sketches": sketches
}
def find_profile(self, reconstructed_profiles, profile_uuid, profile_data, transform):
# Sketch profiles are automatically generated by Fusion
# After we have added the curves we have to traverse the profiles
# to find one with all of the curve uuids from the original
sorted_curve_uuids = self.get_curve_uuids(profile_data)
# print(f"Finding profile {profile_uuid} with {len(sorted_curve_uuids)} curves")
for index, profile_dict in enumerate(reconstructed_profiles):
profile = profile_dict["profile"]
profile_index = profile_dict["profile_index"]
sorted_found_curve_uuids = profile_dict["curve_uuids"]
if sorted_found_curve_uuids == sorted_curve_uuids and self.are_profile_properties_identical(profile, profile_data, transform):
# print(f"Profile found with {len(sorted_curve_uuids)} curve uuids")
return profile_dict, index
# print(f"Profile not found: {profile_uuid} with {len(sorted_curve_uuids)} curves")
return None, -1
def are_profile_properties_identical(self, profile, profile_data, transform):
profile_props = profile.areaProperties(adsk.fusion.CalculationAccuracy.HighCalculationAccuracy)
tolerance = 0.000001
if not math.isclose(profile_props.area, profile_data["properties"]["area"], abs_tol=tolerance):
# print("Profile area doesn't match")
return False
if not math.isclose(profile_props.perimeter, profile_data["properties"]["perimeter"], abs_tol=tolerance):
# print("Profile perimeter doesn't match")
return False
centroid_point = deserialize.point3d(profile_data["properties"]["centroid"])
centroid_point.transformBy(transform)
if not math.isclose(profile_props.centroid.x, centroid_point.x, abs_tol=tolerance):
# print("Centroid.x doesn't match")
return False
if not math.isclose(profile_props.centroid.y, centroid_point.y, abs_tol=tolerance):
# print("Centroid.y doesn't match")
return False
if not math.isclose(profile_props.centroid.z, centroid_point.z, abs_tol=tolerance):
# print("Centroid.z doesn't match")
return False
return True
def get_profile_curve_uuids(self, sketch, sketch_uuid):
reconstructed_profiles = []
for profile_index, profile in enumerate(sketch.profiles):
# We use a set as there can be duplicate curves in the list
found_curve_uuids = set()
for loop in profile.profileLoops:
for curve in loop.profileCurves:
sketch_ent = curve.sketchEntity
curve_uuid = self.get_uuid(sketch_ent)
if curve_uuid is not None:
found_curve_uuids.add(curve_uuid)
sorted_found_curve_uuids = sorted(list(found_curve_uuids))
reconstructed_profiles.append({
"profile": profile,
"profile_index": profile_index,
"sketch": sketch,
"sketch_uuid": sketch_uuid,
"curve_uuids": sorted_found_curve_uuids
})
return reconstructed_profiles
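    # Note: the uuids used for matching are stored on each sketch entity as
    # Fusion 360 attributes in the "Dataset" attribute group (see set_uuid),
    # which is what lets reconstructed profiles be matched back to the JSON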
def get_uuid(self, entity):
uuid_att = entity.attributes.itemByName("Dataset", "uuid")
if uuid_att is not None:
return uuid_att.value
else:
return None
def set_uuid(self, entity, unique_id):
uuid_att = entity.attributes.itemByName("Dataset", "uuid")
if uuid_att is None:
entity.attributes.add("Dataset", "uuid", unique_id)
def get_curve_uuids(self, profile_data):
loops = profile_data["loops"]
# Use a set to remove duplicates
curve_uuids = set()
for loop in loops:
profile_curves = loop["profile_curves"]
for profile_curve in profile_curves:
curve_uuids.add(profile_curve["curve"])
return sorted(list(curve_uuids))
def find_transform_for_sketch_geom(self, sketch_transform, original_transform_json):
# The sketch transform operates on a sketch point p_sketch and transforms it into
# world space (or at least the space of the assembly context)
#
# p_world = T * p_sketch
#
# Now we need to cope with the sketch plane having two different transforms when we
# extract and when we import it.
#
# We know the one thing which stays constant is the final point in world space, so
# we have
#
# p_world = T_extract * p_sketch = T_import * T_correction * p_sketch
#
# hence
#
# T_extract = T_import * T_correction
#
# Now premultiplying both sides by T_import^-1 gives us
#
# T_correction = T_import^-1 * T_extract
#
        # This function needs to compute T_correction
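        #
        # As a hypothetical worked example: if T_import is the identity and
        # T_extract is a translation by (1, 0, 0), then
        # T_correction = T_import^-1 * T_extract is simply that same
        # translation applied to every sketch point.
        #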
# sketch_transform is T_import. Here we find T_import^-1
ok = sketch_transform.invert()
assert ok
# Set xform = T_extract
xform = deserialize.matrix3d(original_transform_json)
        # The transformBy() function premultiplies,
        # so after this call we have
# xform = T_import^-1 * T_extract
xform.transformBy(sketch_transform)
return xform
def reconstruct_sketch_feature(self, sketch_data, sketch_profiles,
sketch_uuid=None, sketch_index=None,
sketch_plane=None, transform=None):
# Skip empty sketches
if ("curves" not in sketch_data or "profiles" not in sketch_data or
"points" not in sketch_data):
            # Callers unpack (sketch, profiles), so return a tuple here too
            return None, {}
sketches = self.reconstruction.sketches
# Find the right sketch plane to use
if sketch_plane is None:
sketch_plane = self.get_sketch_plane(sketch_data["reference_plane"], sketch_profiles)
sketch = sketches.addWithoutEdges(sketch_plane)
        # If we want to manually override the transform we can
# but the sketch may be flipped without the call to
# find_transform_for_sketch_geom()
if transform is not None:
transform_for_sketch_geom = transform
else:
# We need to apply some other transform to the sketch data
# as sketch geometry created via the UI has a slightly different
# coordinate system when created via the API
# This applies when the sketch plane references other geometry
# like a B-Rep face
transform_for_sketch_geom = adsk.core.Matrix3D.create()
sketch_transform = sketch.transform
transform_for_sketch_geom = self.find_transform_for_sketch_geom(sketch_transform, sketch_data["transform"])
if self.reconstruct_cb is not None:
cb_data = {
"sketch": sketch,
"sketch_name": sketch_data["name"],
"corrective_transform": transform_for_sketch_geom
}
if sketch_uuid is not None:
cb_data["sketch_uuid"] = sketch_uuid
self.reconstruct_cb(cb_data)
# Draw exactly what the user drew and then search for the profiles
new_sketch_profiles = self.reconstruct_curves_to_profiles(sketch, sketch_data, sketch_uuid, sketch_index, transform_for_sketch_geom)
adsk.doEvents()
return sketch, new_sketch_profiles
def get_sketch_plane(self, reference_plane, sketch_profiles):
# ConstructionPlane as reference plane
if reference_plane["type"] == "ConstructionPlane" and "name" in reference_plane:
sketch_plane = deserialize.construction_plane(reference_plane["name"])
if sketch_plane is not None:
return sketch_plane
# BRepFace as reference plane
elif reference_plane["type"] == "BRepFace" and "point_on_face" in reference_plane:
face = deserialize.face_by_point3d(reference_plane["point_on_face"])
if face is not None:
if face.geometry.surfaceType == adsk.core.SurfaceTypes.PlaneSurfaceType:
return face
else:
print(f"Sketch plane (BRepFace) - invalid surface type {face.geometry.surfaceType}")
else:
print("Sketch plane point on face not found!")
# Sketch Profile as reference plane
elif reference_plane["type"] == "Profile" and "profile" in reference_plane:
profile_uuid = reference_plane["profile"]
# We could reference the original sketch plane like this:
# return profile.parentSketch.referencePlane
# But the sketch plane can differ from the profile plane
# so we go ahead and find the actual profile plane
sketch_profile = self.get_sketch_profile_reference(profile_uuid, sketch_profiles)
if sketch_profile is not None:
# Note: The API doesn't support creating references
# to sketch profiles directly
# So instead we create a construction plane from the profile
# and use that
# This preserves the reference indirectly
# through the construction plane
planes = self.reconstruction.constructionPlanes
plane_input = planes.createInput()
offset_distance = adsk.core.ValueInput.createByReal(0)
plane_input.setByOffset(sketch_profile, offset_distance)
plane = planes.add(plane_input)
return plane
return self.reconstruction.xYConstructionPlane
def reconstruct_curves_to_profiles(self, sketch, sketch_data, sketch_uuid, sketch_index, transform):
# Turn off sketch compute until we add all the curves
sketch.isComputeDeferred = True
self.reconstruct_sketch_curves(sketch, sketch_data, sketch_uuid, sketch_index, transform)
sketch.isComputeDeferred = False
# If we draw the user curves
# we have to recover the profiles that Fusion generates
# First pull out the list of reconstructed profile curve uuids
reconstructed_profiles = self.get_profile_curve_uuids(sketch, sketch_uuid)
sketch_profiles = {}
missing_profiles = {}
# We first try and find exact matches
# i.e. a profile with the same set of (deduplicated) curve ids
# and with an area/perimeter/centroid that matches
for profile_uuid, profile_data in sketch_data["profiles"].items():
# print("Finding profile", profile_data["profile_uuid"])
sketch_profile_data, reconstructed_profile_index = self.find_profile(
reconstructed_profiles, profile_uuid, profile_data, transform
)
if sketch_profile_data is not None:
sketch_profiles[profile_uuid] = sketch_profile_data
# Remove the matched profile from the pool
del reconstructed_profiles[reconstructed_profile_index]
else:
missing_profiles[profile_uuid] = profile_data
# Sometimes the exact match will fail,
# so we search for the most 'similar' profile,
# with the most common curve uuids,
# remaining in the reconstructed profile set
missing_profile_count = len(missing_profiles)
if missing_profile_count > 0:
print(f"{missing_profile_count} Missing profiles and {len(reconstructed_profiles)} remaining reconstructed profiles")
matched_profiles = 0
for missing_profile_uuid, missing_profile_data in missing_profiles.items():
best_match_profile_data = self.get_closest_profile(
missing_profile_data, reconstructed_profiles, missing_profile_uuid
)
if best_match_profile_data is not None:
sketch_profiles[missing_profile_uuid] = best_match_profile_data
matched_profiles += 1
unmatched_profiles = missing_profile_count - matched_profiles
if unmatched_profiles > 0:
print(f"{unmatched_profiles} left over unmatched profiles!")
return sketch_profiles
def get_closest_profile(self, missing_profile_data, reconstructed_profiles, missing_profile_uuid):
"""Try and find the closest profile match based on overlap of curve ids"""
if len(reconstructed_profiles) == 1:
return reconstructed_profiles[0]
sorted_curve_uuids = self.get_curve_uuids(missing_profile_data)
sorted_curve_uuids_count = len(sorted_curve_uuids)
max_score = 0
best_match_index = -1
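        # Score each remaining reconstructed profile by how many curve uuids it
        # shares with the missing profile, penalised by the difference in curve
        # counts; the highest scoring profile, if any, is taken as the match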
for index, reconstructed_profile in enumerate(reconstructed_profiles):
overlap = self.get_profile_curve_overlap_count(sorted_curve_uuids, reconstructed_profile["curve_uuids"])
            reconstructed_profile_curve_uuids_count = len(reconstructed_profile["curve_uuids"])
            score = overlap - abs(reconstructed_profile_curve_uuids_count - sorted_curve_uuids_count)
if score > max_score:
best_match_index = index
max_score = score
if best_match_index >= 0:
print(f"""Matching profile {missing_profile_uuid} with {sorted_curve_uuids_count} curves
to a left over reconstructed profile with {len(reconstructed_profiles[best_match_index]["curve_uuids"])} curves""")
return reconstructed_profiles[best_match_index]
else:
return None
def get_profile_curve_overlap_count(self, original, reconstructed):
intersection = set(original) & set(reconstructed)
return len(intersection)
def reconstruct_sketch_curves(self, sketch, sketch_data, sketch_uuid, sketch_index, transform):
"""Reconstruct the sketch curves in profile order"""
curves_data = sketch_data["curves"]
points_data = sketch_data["points"]
profiles_data = sketch_data["profiles"]
current_curves_data = OrderedDict(curves_data)
# curve_keys = curves_data.keys()
# Redraw the curves in the order of the profiles
for profile_uuid, profile in profiles_data.items():
for loop in profile["loops"]:
for profile_curve in loop["profile_curves"]:
curve_uuid = profile_curve["curve"]
                    # Only draw the curves that haven't been drawn already
if curve_uuid in current_curves_data:
curve = curves_data[curve_uuid]
self.reconstruct_sketch_curve(
sketch,
curve,
curve_uuid,
points_data,
transform=transform,
sketch_uuid=sketch_uuid,
sketch_index=sketch_index
)
# Remove the curve from list of curves to draw
del current_curves_data[curve_uuid]
# Next add the remaining curves not used in profiles
for curve_uuid, curve in current_curves_data.items():
self.reconstruct_sketch_curve(
sketch,
curve,
curve_uuid,
points_data,
transform=transform,
sketch_uuid=sketch_uuid,
sketch_index=sketch_index
)
def get_sketch_profile_reference(self, profile_uuid, sketch_profiles):
"""Return a reference to the sketch profile from our stored dict"""
if profile_uuid not in sketch_profiles:
return None
# We have a reference we stored directly of the profile
# sketch_profile = sketch_profiles[profile_uuid]["profile"]
# But this reference to the profile fails if you toggle
# visibility of the sketch off after the reference is created
# as we do to generate image output of the sequence
# So instead we find the reference again via the sketch
sketch = sketch_profiles[profile_uuid]["sketch"]
sketch_profile_index = sketch_profiles[profile_uuid]["profile_index"]
sketch_profile = sketch.profiles[sketch_profile_index]
return sketch_profile
# --------------------------------------------------------
# PROFILE CURVES
# --------------------------------------------------------
def reconstruct_sketch_curve(self, sketch, curve_data, curve_uuid, points_data,
transform=None, sketch_uuid=None,
sketch_index=None):
"""Reconstruct a sketch curve"""
if curve_data["construction_geom"]:
return
if transform is None:
transform = adsk.core.Matrix3D.create()
if curve_data["type"] == "SketchLine":
curve_obj = self.reconstruct_sketch_line(
sketch.sketchCurves.sketchLines,
curve_data, curve_uuid, points_data, transform
)
elif curve_data["type"] == "SketchArc":
curve_obj = self.reconstruct_sketch_arc(
sketch.sketchCurves.sketchArcs,
curve_data, curve_uuid, points_data, transform
)
elif curve_data["type"] == "SketchCircle":
curve_obj = self.reconstruct_sketch_circle(
sketch.sketchCurves.sketchCircles,
curve_data, curve_uuid, points_data, transform
)
elif curve_data["type"] == "SketchEllipse":
curve_obj = self.reconstruct_sketch_ellipse(
sketch.sketchCurves.sketchEllipses,
curve_data, curve_uuid, points_data, transform
)
elif curve_data["type"] == "SketchFittedSpline":
curve_obj = self.reconstruct_sketch_fitted_spline(
sketch.sketchCurves.sketchFittedSplines,
curve_data, curve_uuid, transform
)
else:
raise Exception(f"Unsupported curve type: {curve_data["type"]}")
if self.reconstruct_cb is not None:
cb_data = {
"sketch": sketch,
"sketch_name": sketch.name,
"curve": curve_obj,
"curve_uuid": curve_uuid
}
if sketch_uuid is not None:
cb_data["sketch_id"] = sketch_uuid
if sketch_index is not None:
cb_data["sketch_index"] = sketch_index
self.reconstruct_cb(cb_data)
def reconstruct_sketch_line(self, sketch_lines, curve_data, curve_uuid, points_data, transform):
start_point_uuid = curve_data["start_point"]
end_point_uuid = curve_data["end_point"]
start_point = deserialize.point3d(points_data[start_point_uuid])
end_point = deserialize.point3d(points_data[end_point_uuid])
start_point.transformBy(transform)
end_point.transformBy(transform)
line = sketch_lines.addByTwoPoints(start_point, end_point)
self.set_uuid(line, curve_uuid)
return line
def reconstruct_sketch_arc(self, sketch_arcs, curve_data, curve_uuid, points_data, transform):
start_point_uuid = curve_data["start_point"]
center_point_uuid = curve_data["center_point"]
start_point = deserialize.point3d(points_data[start_point_uuid])
center_point = deserialize.point3d(points_data[center_point_uuid])
start_point.transformBy(transform)
center_point.transformBy(transform)
sweep_angle = curve_data["end_angle"] - curve_data["start_angle"]
arc = sketch_arcs.addByCenterStartSweep(center_point, start_point, sweep_angle)
self.set_uuid(arc, curve_uuid)
return arc
def reconstruct_sketch_circle(self, sketch_circles, curve_data, curve_uuid, points_data, transform):
center_point_uuid = curve_data["center_point"]
center_point = deserialize.point3d(points_data[center_point_uuid])
center_point.transformBy(transform)
radius = curve_data["radius"]
circle = sketch_circles.addByCenterRadius(center_point, radius)
self.set_uuid(circle, curve_uuid)
return circle
def reconstruct_sketch_ellipse(self, sketch_ellipses, curve_data, curve_uuid, points_data, transform):
# Ellipse reconstruction requires us to provide 3 points:
# - Center point
# - Major axis point
# - (Minor axis) point that the ellipse will pass through
# Center point
center_point_uuid = curve_data["center_point"]
center_point = deserialize.point3d(points_data[center_point_uuid])
center_point_vector = center_point.asVector()
# Major axis point
# Take the vector for the major axis
# then normalize it
# then scale it to the radius of the major axis
# and offset by the center point
major_axis = deserialize.vector3d(curve_data["major_axis"])
major_axis_radius = curve_data["major_axis_radius"]
major_axis.normalize()
major_axis_vector = major_axis.copy()
major_axis_vector.scaleBy(major_axis_radius)
major_axis_point = major_axis_vector.asPoint()
major_axis_point.translateBy(center_point_vector)
# Minor axis point
# Rotate 90 deg around z from the major axis
# then scale and offset by the center point
minor_axis_radius = curve_data["minor_axis_radius"]
rot_matrix = adsk.core.Matrix3D.create()
origin = adsk.core.Point3D.create()
axis = adsk.core.Vector3D.create(0.0, 0.0, 1.0)
rot_matrix.setToRotation(math.radians(90), axis, origin)
minor_axis = major_axis.copy()
minor_axis.transformBy(rot_matrix)
minor_axis_vector = minor_axis.copy()
minor_axis_vector.scaleBy(minor_axis_radius)
minor_axis_point = minor_axis_vector.asPoint()
minor_axis_point.translateBy(center_point_vector)
# Finally apply the sketch alignment matrix
major_axis_point.transformBy(transform)
minor_axis_point.transformBy(transform)
center_point.transformBy(transform)
ellipse = sketch_ellipses.add(center_point, major_axis_point, minor_axis_point)
self.set_uuid(ellipse, curve_uuid)
return ellipse
def reconstruct_sketch_fitted_spline(self, sketch_fitted_splines, curve_data, curve_uuid, transform):
nurbs_curve = self.get_nurbs_curve(curve_data, transform)
spline = sketch_fitted_splines.addByNurbsCurve(nurbs_curve)
self.set_uuid(spline, curve_uuid)
return spline
def get_nurbs_curve(self, curve_data, transform):
control_points = deserialize.point3d_list(curve_data["control_points"], transform)
nurbs_curve = None
if curve_data["rational"] is True:
nurbs_curve = adsk.core.NurbsCurve3D.createRational(
control_points, curve_data["degree"],
curve_data["knots"], curve_data["weights"],
curve_data["periodic"]
)
else:
nurbs_curve = adsk.core.NurbsCurve3D.createNonRational(
control_points, curve_data["degree"],
curve_data["knots"], curve_data["periodic"]
)
return nurbs_curve
# --------------------------------------------------------
# TRIMMED PROFILE CURVES
# --------------------------------------------------------
def reconstruct_trimmed_curves(self, sketch, profile_data, transform):
loops = profile_data["loops"]
for loop in loops:
profile_curves = loop["profile_curves"]
for curve_data in profile_curves:
self.reconstruct_trimmed_curve(sketch, curve_data, transform)
def reconstruct_trimmed_curve(self, sketch, curve_data, transform):
if curve_data["type"] == "Line3D":
self.reconstruct_line(
sketch.sketchCurves.sketchLines, curve_data, transform
)
elif curve_data["type"] == "Arc3D":
self.reconstruct_arc(
sketch.sketchCurves.sketchArcs, curve_data, transform
)
elif curve_data["type"] == "Circle3D":
self.reconstruct_circle(
sketch.sketchCurves.sketchCircles, curve_data, transform
)
elif curve_data["type"] == "Ellipse3D":
self.reconstruct_ellipse(
sketch.sketchCurves.sketchEllipses, curve_data, transform
)
elif curve_data["type"] == "NurbsCurve3D":
self.reconstruct_nurbs_curve(
sketch.sketchCurves.sketchFittedSplines, curve_data, transform
)
else:
raise Exception(f"Unsupported curve type: {curve_data["type"]}")
def reconstruct_line(self, sketch_lines, curve_data, transform):
start_point = deserialize.point3d(curve_data["start_point"])
start_point.transformBy(transform)
end_point = deserialize.point3d(curve_data["end_point"])
end_point.transformBy(transform)
line = sketch_lines.addByTwoPoints(start_point, end_point)
self.set_uuid(line, curve_data["curve"])
return line
def reconstruct_arc(self, sketch_arcs, curve_data, transform):
start_point = deserialize.point3d(curve_data["start_point"])
start_point.transformBy(transform)
center_point = deserialize.point3d(curve_data["center_point"])
center_point.transformBy(transform)
sweep_angle = curve_data["end_angle"] - curve_data["start_angle"]
arc = sketch_arcs.addByCenterStartSweep(center_point, start_point, sweep_angle)
self.set_uuid(arc, curve_data["curve"])
return arc
def reconstruct_circle(self, sketch_circles, curve_data, transform):
center_point = deserialize.point3d(curve_data["center_point"])
center_point.transformBy(transform)
radius = curve_data["radius"]
circle = sketch_circles.addByCenterRadius(center_point, radius)
self.set_uuid(circle, curve_data["curve"])
return circle
def reconstruct_ellipse(self, sketch_ellipses, curve_data, transform):
# Ellipse reconstruction requires us to provide 3 points:
# - Center point
# - Major axis point
# - (Minor axis) point that the ellipse will pass through
# Center point
center_point = deserialize.point3d(curve_data["center_point"])
center_point_vector = center_point.asVector()
# Major axis point
# Take the vector for the major axis
# then normalize it
# then scale it to the radius of the major axis
# and offset by the center point
major_axis = deserialize.vector3d(curve_data["major_axis"])
major_axis_radius = curve_data["major_axis_radius"]
major_axis.normalize()
major_axis_vector = major_axis.copy()
major_axis_vector.scaleBy(major_axis_radius)
major_axis_point = major_axis_vector.asPoint()
major_axis_point.translateBy(center_point_vector)
# Minor axis point
# Rotate 90 deg around z from the major axis
# then scale and offset by the center point
minor_axis_radius = curve_data["minor_axis_radius"]
rot_matrix = adsk.core.Matrix3D.create()
origin = adsk.core.Point3D.create()
axis = adsk.core.Vector3D.create(0.0, 0.0, 1.0)
rot_matrix.setToRotation(math.radians(90), axis, origin)
minor_axis = major_axis.copy()
minor_axis.transformBy(rot_matrix)
minor_axis_vector = minor_axis.copy()
minor_axis_vector.scaleBy(minor_axis_radius)
minor_axis_point = minor_axis_vector.asPoint()
minor_axis_point.translateBy(center_point_vector)
# Finally apply the sketch alignment matrix
major_axis_point.transformBy(transform)
minor_axis_point.transformBy(transform)
center_point.transformBy(transform)
ellipse = sketch_ellipses.add(center_point, major_axis_point, minor_axis_point)
self.set_uuid(ellipse, curve_data["curve"])
return ellipse
def reconstruct_nurbs_curve(self, sketch_fitted_splines, curve_data, transform):
nurbs_curve = self.get_nurbs_curve(curve_data, transform)
spline = sketch_fitted_splines.addByNurbsCurve(nurbs_curve)
self.set_uuid(spline, curve_data["curve"])
return spline
# --------------------------------------------------------
# EXTRUDE FEATURE
# --------------------------------------------------------
def reconstruct_extrude_feature(self, extrude_data, extrude_uuid, extrude_index, sketch_profiles):
extrudes = self.reconstruction.features.extrudeFeatures
# There can be more than one profile, so we create an object collection
extrude_profiles = adsk.core.ObjectCollection.create()
for profile in extrude_data["profiles"]:
profile_uuid = profile["profile"]
sketch_profile = self.get_sketch_profile_reference(profile_uuid, sketch_profiles)
extrude_profiles.add(sketch_profile)
        # The operation defines whether the extrusion becomes a new body,
        # a new component, or cuts/joins another body (i.e. a boolean operation)
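        # (deserialize.feature_operations maps the serialized string to an
        # adsk.fusion.FeatureOperations value, e.g. new body, join, cut,
        # intersect or new component)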
operation = deserialize.feature_operations(extrude_data["operation"])
extrude_input = extrudes.createInput(extrude_profiles, operation)
# Simple extrusion in one direction
if extrude_data["extent_type"] == "OneSideFeatureExtentType":
self.set_one_side_extrude_input(extrude_input, extrude_data["extent_one"])
# Extrusion in two directions with different distances
elif extrude_data["extent_type"] == "TwoSidesFeatureExtentType":
self.set_two_side_extrude_input(extrude_input, extrude_data["extent_one"], extrude_data["extent_two"])
# Symmetrical extrusion by the same distance on each side
elif extrude_data["extent_type"] == "SymmetricFeatureExtentType":
self.set_symmetric_extrude_input(extrude_input, extrude_data["extent_one"])
# The start extent is initialized to be the profile plane
# but we may need to change it to an offset
# after all other changes
self.set_start_extent(extrude_input, extrude_data["start_extent"])
extrude = extrudes.add(extrude_input)
if self.reconstruct_cb is not None:
self.reconstruct_cb({
"extrude": extrude,
"extrude_name": extrude_data["name"],
"extrude_id": extrude_uuid,
"extrude_index": extrude_index
})
return extrude
def set_start_extent(self, extrude_input, start_extent):
        # Only handle the offset case;
        # ProfilePlaneStartDefinition is already set up
        # and we don't handle the other cases
if start_extent["type"] == "OffsetStartDefinition":
offset_distance = adsk.core.ValueInput.createByReal(start_extent["offset"]["value"])
offset_start_def = adsk.fusion.OffsetStartDefinition.create(offset_distance)
extrude_input.startExtent = offset_start_def
def set_one_side_extrude_input(self, extrude_input, extent_one):
distance = adsk.core.ValueInput.createByReal(extent_one["distance"]["value"])
extent_distance = adsk.fusion.DistanceExtentDefinition.create(distance)
taper_angle = adsk.core.ValueInput.createByReal(0)
if "taper_angle" in extent_one:
taper_angle = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
extrude_input.setOneSideExtent(extent_distance, adsk.fusion.ExtentDirections.PositiveExtentDirection, taper_angle)
def set_two_side_extrude_input(self, extrude_input, extent_one, extent_two):
distance_one = adsk.core.ValueInput.createByReal(extent_one["distance"]["value"])
distance_two = adsk.core.ValueInput.createByReal(extent_two["distance"]["value"])
extent_distance_one = adsk.fusion.DistanceExtentDefinition.create(distance_one)
extent_distance_two = adsk.fusion.DistanceExtentDefinition.create(distance_two)
taper_angle_one = adsk.core.ValueInput.createByReal(0)
taper_angle_two = adsk.core.ValueInput.createByReal(0)
if "taper_angle" in extent_one:
taper_angle_one = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
if "taper_angle" in extent_two:
taper_angle_two = adsk.core.ValueInput.createByReal(extent_two["taper_angle"]["value"])
extrude_input.setTwoSidesExtent(extent_distance_one, extent_distance_two, taper_angle_one, taper_angle_two)
def set_symmetric_extrude_input(self, extrude_input, extent_one):
# SYMMETRIC EXTRUDE
# Symmetric extent is currently buggy when a taper is applied
# So instead we use a two sided extent with symmetry
# Note that the distance is not a DistanceExtentDefinition
# distance = adsk.core.ValueInput.createByReal(extent_one["distance"]["value"])
# taper_angle = adsk.core.ValueInput.createByReal(0)
# if "taper_angle" in extent_one:
# taper_angle = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
# is_full_length = extent_one["is_full_length"]
# extrude_input.setSymmetricExtent(distance, is_full_length, taper_angle)
#
# TWO SIDED EXTRUDE WORKAROUND
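        # For example, a symmetric extent flagged as full length with a
        # distance of 2.0 becomes two one-sided extents of 1.0 each,
        # extruded in opposite directions from the profile plane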
distance = extent_one["distance"]["value"]
if extent_one["is_full_length"]:
distance = distance * 0.5
distance_one = adsk.core.ValueInput.createByReal(distance)
distance_two = adsk.core.ValueInput.createByReal(distance)
extent_distance_one = adsk.fusion.DistanceExtentDefinition.create(distance_one)
extent_distance_two = adsk.fusion.DistanceExtentDefinition.create(distance_two)
taper_angle_one = adsk.core.ValueInput.createByReal(0)
taper_angle_two = adsk.core.ValueInput.createByReal(0)
if "taper_angle" in extent_one:
taper_angle_one = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
taper_angle_two = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
extrude_input.setTwoSidesExtent(extent_distance_one, extent_distance_two, taper_angle_one, taper_angle_two)
| """
Import and reconstruction of sketch and extrude designs
from the Reconstruction Subset
"""
import adsk.core
import adsk.fusion
import traceback
import json
import os
import sys
import time
import math
from pathlib import Path
from collections import OrderedDict
import deserialize
class SketchExtrudeImporter():
def __init__(self, json_data=None):
self.app = adsk.core.Application.get()
if json_data is not None:
if isinstance(json_data, dict):
self.data = json_data
else:
with open(json_data, encoding="utf8") as f:
self.data = json.load(f, object_pairs_hook=OrderedDict)
product = self.app.activeProduct
self.design = adsk.fusion.Design.cast(product)
# Callback during reconstruction
        # called incrementally when the design changes
self.reconstruct_cb = None
# --------------------------------------------------------
# PUBLIC API CALLS
# --------------------------------------------------------
def reconstruct(self, reconstruct_cb=None, reconstruction=None):
"""Reconstruct the full design"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
timeline = self.data["timeline"]
entities = self.data["entities"]
# Get the profiles used in this design
profiles_used = self.get_extrude_profiles(timeline, entities)
# Keep track of the sketch profiles
sketch_profiles = {}
for timeline_object in timeline:
entity_uuid = timeline_object["entity"]
entity_index = timeline_object["index"]
entity = entities[entity_uuid]
if entity["type"] == "Sketch":
# Only reconstruct this sketch if it is used with an extrude
if entity_uuid in profiles_used["sketches"]:
sketch, sketch_profile_set = self.reconstruct_sketch_feature(
entity, sketch_profiles,
sketch_uuid=entity_uuid, sketch_index=entity_index
)
if sketch_profile_set:
                        sketch_profiles.update(sketch_profile_set)
elif entity["type"] == "ExtrudeFeature":
self.reconstruct_extrude_feature(entity, entity_uuid, entity_index, sketch_profiles)
def reconstruct_sketch(self, sketch_data, sketch_uuid=None,
sketch_index=None, sketch_plane=None,
transform=None, reconstruct_cb=None,
reconstruction=None):
"""Reconstruct and return just a single sketch"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
sketch, sketch_profile_set = self.reconstruct_sketch_feature(
sketch_data, {},
sketch_uuid=sketch_uuid, sketch_index=sketch_index,
sketch_plane=sketch_plane, transform=transform
)
return sketch
def reconstruct_profile(self, sketch_data, sketch_name, profile_uuid,
transform=None, reconstruct_cb=None,
reconstruction=None):
"""Reconstruct a single profile from a given sketch"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
profile_data = sketch_data["profiles"][profile_uuid]
sketches = self.reconstruction.sketches
sketch = sketches.itemByName(sketch_name)
if transform is None:
transform = adsk.core.Matrix3D.create()
self.reconstruct_trimmed_curves(sketch, profile_data, transform)
return sketch
def reconstruct_curve(self, sketch_data, sketch_name, curve_uuid,
sketch_uuid=None, sketch_index=None,
transform=None, reconstruct_cb=None,
reconstruction=None):
"""Reconstruct a single curve in a given sketch"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
curve_data = sketch_data["curves"][curve_uuid]
points_data = sketch_data["points"]
sketches = self.reconstruction.sketches
sketch = sketches.itemByName(sketch_name)
if transform is None:
transform = adsk.core.Matrix3D.create()
self.reconstruct_sketch_curve(
sketch,
curve_data,
curve_uuid,
points_data,
transform=transform,
sketch_uuid=sketch_uuid,
sketch_index=sketch_index
)
adsk.doEvents()
return sketch
def reconstruct_curves(self, sketch_data, sketch_name,
sketch_uuid=None, sketch_index=None,
transform=None, reconstruct_cb=None,
reconstruction=None):
"""Reconstruct all curves in a given sketch"""
self.reconstruct_cb = reconstruct_cb
self.reconstruction = reconstruction
if self.reconstruction is None:
self.reconstruction = self.design.rootComponent
points_data = sketch_data["points"]
sketches = self.reconstruction.sketches
sketch = sketches.itemByName(sketch_name)
if transform is None:
transform = adsk.core.Matrix3D.create()
# Turn off sketch compute until we add all the curves
sketch.isComputeDeferred = True
for curve_uuid, curve_data in sketch_data["curves"].items():
self.reconstruct_sketch_curve(
sketch,
curve_data,
curve_uuid,
points_data,
transform=transform,
sketch_uuid=sketch_uuid,
sketch_index=sketch_index
)
sketch.isComputeDeferred = False
adsk.doEvents()
return sketch
# --------------------------------------------------------
# SKETCH FEATURE
# --------------------------------------------------------
def get_extrude_profiles(self, timeline, entities):
"""Get the profiles used with extrude operations"""
profiles = set()
sketches = set()
for timeline_object in timeline:
entity_key = timeline_object["entity"]
entity = entities[entity_key]
if entity["type"] == "ExtrudeFeature":
for profile in entity["profiles"]:
profiles.add(profile["profile"])
sketches.add(profile["sketch"])
return {
"profiles": profiles,
"sketches": sketches
}
def find_profile(self, reconstructed_profiles, profile_uuid, profile_data, transform):
# Sketch profiles are automatically generated by Fusion
# After we have added the curves we have to traverse the profiles
# to find one with all of the curve uuids from the original
sorted_curve_uuids = self.get_curve_uuids(profile_data)
# print(f"Finding profile {profile_uuid} with {len(sorted_curve_uuids)} curves")
for index, profile_dict in enumerate(reconstructed_profiles):
profile = profile_dict["profile"]
profile_index = profile_dict["profile_index"]
sorted_found_curve_uuids = profile_dict["curve_uuids"]
if sorted_found_curve_uuids == sorted_curve_uuids and self.are_profile_properties_identical(profile, profile_data, transform):
# print(f"Profile found with {len(sorted_curve_uuids)} curve uuids")
return profile_dict, index
# print(f"Profile not found: {profile_uuid} with {len(sorted_curve_uuids)} curves")
return None, -1
def are_profile_properties_identical(self, profile, profile_data, transform):
profile_props = profile.areaProperties(adsk.fusion.CalculationAccuracy.HighCalculationAccuracy)
tolerance = 0.000001
if not math.isclose(profile_props.area, profile_data["properties"]["area"], abs_tol=tolerance):
# print("Profile area doesn't match")
return False
if not math.isclose(profile_props.perimeter, profile_data["properties"]["perimeter"], abs_tol=tolerance):
# print("Profile perimeter doesn't match")
return False
centroid_point = deserialize.point3d(profile_data["properties"]["centroid"])
centroid_point.transformBy(transform)
if not math.isclose(profile_props.centroid.x, centroid_point.x, abs_tol=tolerance):
# print("Centroid.x doesn't match")
return False
if not math.isclose(profile_props.centroid.y, centroid_point.y, abs_tol=tolerance):
# print("Centroid.y doesn't match")
return False
if not math.isclose(profile_props.centroid.z, centroid_point.z, abs_tol=tolerance):
# print("Centroid.z doesn't match")
return False
return True
def get_profile_curve_uuids(self, sketch, sketch_uuid):
reconstructed_profiles = []
for profile_index, profile in enumerate(sketch.profiles):
# We use a set as there can be duplicate curves in the list
found_curve_uuids = set()
for loop in profile.profileLoops:
for curve in loop.profileCurves:
sketch_ent = curve.sketchEntity
curve_uuid = self.get_uuid(sketch_ent)
if curve_uuid is not None:
found_curve_uuids.add(curve_uuid)
sorted_found_curve_uuids = sorted(list(found_curve_uuids))
reconstructed_profiles.append({
"profile": profile,
"profile_index": profile_index,
"sketch": sketch,
"sketch_uuid": sketch_uuid,
"curve_uuids": sorted_found_curve_uuids
})
return reconstructed_profiles
def get_uuid(self, entity):
uuid_att = entity.attributes.itemByName("Dataset", "uuid")
if uuid_att is not None:
return uuid_att.value
else:
return None
def set_uuid(self, entity, unique_id):
uuid_att = entity.attributes.itemByName("Dataset", "uuid")
if uuid_att is None:
entity.attributes.add("Dataset", "uuid", unique_id)
def get_curve_uuids(self, profile_data):
loops = profile_data["loops"]
# Use a set to remove duplicates
curve_uuids = set()
for loop in loops:
profile_curves = loop["profile_curves"]
for profile_curve in profile_curves:
curve_uuids.add(profile_curve["curve"])
return sorted(list(curve_uuids))
def find_transform_for_sketch_geom(self, sketch_transform, original_transform_json):
# The sketch transform operates on a sketch point p_sketch and transforms it into
# world space (or at least the space of the assembly context)
#
# p_world = T * p_sketch
#
# Now we need to cope with the sketch plane having two different transforms when we
# extract and when we import it.
#
# We know the one thing which stays constant is the final point in world space, so
# we have
#
# p_world = T_extract * p_sketch = T_import * T_correction * p_sketch
#
# hence
#
# T_extract = T_import * T_correction
#
# Now premultiplying both sides by T_import^-1 gives us
#
# T_correction = T_import^-1 * T_extract
#
        # This function needs to compute T_correction
# sketch_transform is T_import. Here we find T_import^-1
ok = sketch_transform.invert()
assert ok
# Set xform = T_extract
xform = deserialize.matrix3d(original_transform_json)
        # The transformBy() function premultiplies,
        # so after this call we have
# xform = T_import^-1 * T_extract
xform.transformBy(sketch_transform)
return xform
def reconstruct_sketch_feature(self, sketch_data, sketch_profiles,
sketch_uuid=None, sketch_index=None,
sketch_plane=None, transform=None):
# Skip empty sketches
if ("curves" not in sketch_data or "profiles" not in sketch_data or
"points" not in sketch_data):
            # Callers unpack (sketch, profiles), so return a tuple here too
            return None, {}
sketches = self.reconstruction.sketches
# Find the right sketch plane to use
if sketch_plane is None:
sketch_plane = self.get_sketch_plane(sketch_data["reference_plane"], sketch_profiles)
sketch = sketches.addWithoutEdges(sketch_plane)
        # If we want to manually override the transform we can
# but the sketch may be flipped without the call to
# find_transform_for_sketch_geom()
if transform is not None:
transform_for_sketch_geom = transform
else:
# We need to apply some other transform to the sketch data
# as sketch geometry created via the UI has a slightly different
# coordinate system when created via the API
# This applies when the sketch plane references other geometry
# like a B-Rep face
transform_for_sketch_geom = adsk.core.Matrix3D.create()
sketch_transform = sketch.transform
transform_for_sketch_geom = self.find_transform_for_sketch_geom(sketch_transform, sketch_data["transform"])
if self.reconstruct_cb is not None:
cb_data = {
"sketch": sketch,
"sketch_name": sketch_data["name"],
"corrective_transform": transform_for_sketch_geom
}
if sketch_uuid is not None:
cb_data["sketch_uuid"] = sketch_uuid
self.reconstruct_cb(cb_data)
# Draw exactly what the user drew and then search for the profiles
new_sketch_profiles = self.reconstruct_curves_to_profiles(sketch, sketch_data, sketch_uuid, sketch_index, transform_for_sketch_geom)
adsk.doEvents()
return sketch, new_sketch_profiles
def get_sketch_plane(self, reference_plane, sketch_profiles):
# ConstructionPlane as reference plane
if reference_plane["type"] == "ConstructionPlane" and "name" in reference_plane:
sketch_plane = deserialize.construction_plane(reference_plane["name"])
if sketch_plane is not None:
return sketch_plane
# BRepFace as reference plane
elif reference_plane["type"] == "BRepFace" and "point_on_face" in reference_plane:
face = deserialize.face_by_point3d(reference_plane["point_on_face"])
if face is not None:
if face.geometry.surfaceType == adsk.core.SurfaceTypes.PlaneSurfaceType:
return face
else:
print(f"Sketch plane (BRepFace) - invalid surface type {face.geometry.surfaceType}")
else:
print("Sketch plane point on face not found!")
# Sketch Profile as reference plane
elif reference_plane["type"] == "Profile" and "profile" in reference_plane:
profile_uuid = reference_plane["profile"]
# We could reference the original sketch plane like this:
# return profile.parentSketch.referencePlane
# But the sketch plane can differ from the profile plane
# so we go ahead and find the actual profile plane
sketch_profile = self.get_sketch_profile_reference(profile_uuid, sketch_profiles)
if sketch_profile is not None:
# Note: The API doesn't support creating references
# to sketch profiles directly
# So instead we create a construction plane from the profile
# and use that
# This preserves the reference indirectly
# through the construction plane
planes = self.reconstruction.constructionPlanes
plane_input = planes.createInput()
offset_distance = adsk.core.ValueInput.createByReal(0)
plane_input.setByOffset(sketch_profile, offset_distance)
plane = planes.add(plane_input)
return plane
return self.reconstruction.xYConstructionPlane
def reconstruct_curves_to_profiles(self, sketch, sketch_data, sketch_uuid, sketch_index, transform):
# Turn off sketch compute until we add all the curves
sketch.isComputeDeferred = True
self.reconstruct_sketch_curves(sketch, sketch_data, sketch_uuid, sketch_index, transform)
sketch.isComputeDeferred = False
# If we draw the user curves
# we have to recover the profiles that Fusion generates
# First pull out the list of reconstructed profile curve uuids
reconstructed_profiles = self.get_profile_curve_uuids(sketch, sketch_uuid)
sketch_profiles = {}
missing_profiles = {}
# We first try and find exact matches
# i.e. a profile with the same set of (deduplicated) curve ids
# and with an area/perimeter/centroid that matches
for profile_uuid, profile_data in sketch_data["profiles"].items():
# print("Finding profile", profile_data["profile_uuid"])
sketch_profile_data, reconstructed_profile_index = self.find_profile(
reconstructed_profiles, profile_uuid, profile_data, transform
)
if sketch_profile_data is not None:
sketch_profiles[profile_uuid] = sketch_profile_data
# Remove the matched profile from the pool
del reconstructed_profiles[reconstructed_profile_index]
else:
missing_profiles[profile_uuid] = profile_data
# Sometimes the exact match will fail,
# so we search for the most 'similar' profile,
# with the most common curve uuids,
# remaining in the reconstructed profile set
missing_profile_count = len(missing_profiles)
if missing_profile_count > 0:
print(f"{missing_profile_count} Missing profiles and {len(reconstructed_profiles)} remaining reconstructed profiles")
matched_profiles = 0
for missing_profile_uuid, missing_profile_data in missing_profiles.items():
best_match_profile_data = self.get_closest_profile(
missing_profile_data, reconstructed_profiles, missing_profile_uuid
)
if best_match_profile_data is not None:
sketch_profiles[missing_profile_uuid] = best_match_profile_data
matched_profiles += 1
unmatched_profiles = missing_profile_count - matched_profiles
if unmatched_profiles > 0:
print(f"{unmatched_profiles} left over unmatched profiles!")
return sketch_profiles
def get_closest_profile(self, missing_profile_data, reconstructed_profiles, missing_profile_uuid):
"""Try and find the closest profile match based on overlap of curve ids"""
if len(reconstructed_profiles) == 1:
return reconstructed_profiles[0]
sorted_curve_uuids = self.get_curve_uuids(missing_profile_data)
sorted_curve_uuids_count = len(sorted_curve_uuids)
max_score = 0
best_match_index = -1
for index, reconstructed_profile in enumerate(reconstructed_profiles):
overlap = self.get_profile_curve_overlap_count(sorted_curve_uuids, reconstructed_profile["curve_uuids"])
            reconstructed_profile_curve_uuids_count = len(reconstructed_profile["curve_uuids"])
            score = overlap - abs(reconstructed_profile_curve_uuids_count - sorted_curve_uuids_count)
if score > max_score:
best_match_index = index
max_score = score
if best_match_index >= 0:
print(f"""Matching profile {missing_profile_uuid} with {sorted_curve_uuids_count} curves
to a left over reconstructed profile with {len(reconstructed_profiles[best_match_index]["curve_uuids"])} curves""")
return reconstructed_profiles[best_match_index]
else:
return None
def get_profile_curve_overlap_count(self, original, reconstructed):
intersection = set(original) & set(reconstructed)
return len(intersection)
def reconstruct_sketch_curves(self, sketch, sketch_data, sketch_uuid, sketch_index, transform):
"""Reconstruct the sketch curves in profile order"""
curves_data = sketch_data["curves"]
points_data = sketch_data["points"]
profiles_data = sketch_data["profiles"]
current_curves_data = OrderedDict(curves_data)
# curve_keys = curves_data.keys()
# Redraw the curves in the order of the profiles
for profile_uuid, profile in profiles_data.items():
for loop in profile["loops"]:
for profile_curve in loop["profile_curves"]:
curve_uuid = profile_curve["curve"]
                    # Only draw the curves that haven't been drawn already
if curve_uuid in current_curves_data:
curve = curves_data[curve_uuid]
self.reconstruct_sketch_curve(
sketch,
curve,
curve_uuid,
points_data,
transform=transform,
sketch_uuid=sketch_uuid,
sketch_index=sketch_index
)
# Remove the curve from list of curves to draw
del current_curves_data[curve_uuid]
# Next add the remaining curves not used in profiles
for curve_uuid, curve in current_curves_data.items():
self.reconstruct_sketch_curve(
sketch,
curve,
curve_uuid,
points_data,
transform=transform,
sketch_uuid=sketch_uuid,
sketch_index=sketch_index
)
def get_sketch_profile_reference(self, profile_uuid, sketch_profiles):
"""Return a reference to the sketch profile from our stored dict"""
if profile_uuid not in sketch_profiles:
return None
# We have a reference we stored directly of the profile
# sketch_profile = sketch_profiles[profile_uuid]["profile"]
# But this reference to the profile fails if you toggle
# visibility of the sketch off after the reference is created
# as we do to generate image output of the sequence
# So instead we find the reference again via the sketch
sketch = sketch_profiles[profile_uuid]["sketch"]
sketch_profile_index = sketch_profiles[profile_uuid]["profile_index"]
sketch_profile = sketch.profiles[sketch_profile_index]
return sketch_profile
# --------------------------------------------------------
# PROFILE CURVES
# --------------------------------------------------------
def reconstruct_sketch_curve(self, sketch, curve_data, curve_uuid, points_data,
transform=None, sketch_uuid=None,
sketch_index=None):
"""Reconstruct a sketch curve"""
if curve_data["construction_geom"]:
return
if transform is None:
transform = adsk.core.Matrix3D.create()
if curve_data["type"] == "SketchLine":
curve_obj = self.reconstruct_sketch_line(
sketch.sketchCurves.sketchLines,
curve_data, curve_uuid, points_data, transform
)
elif curve_data["type"] == "SketchArc":
curve_obj = self.reconstruct_sketch_arc(
sketch.sketchCurves.sketchArcs,
curve_data, curve_uuid, points_data, transform
)
elif curve_data["type"] == "SketchCircle":
curve_obj = self.reconstruct_sketch_circle(
sketch.sketchCurves.sketchCircles,
curve_data, curve_uuid, points_data, transform
)
elif curve_data["type"] == "SketchEllipse":
curve_obj = self.reconstruct_sketch_ellipse(
sketch.sketchCurves.sketchEllipses,
curve_data, curve_uuid, points_data, transform
)
elif curve_data["type"] == "SketchFittedSpline":
curve_obj = self.reconstruct_sketch_fitted_spline(
sketch.sketchCurves.sketchFittedSplines,
curve_data, curve_uuid, transform
)
else:
raise Exception(f"Unsupported curve type: {curve_data['type']}")
if self.reconstruct_cb is not None:
cb_data = {
"sketch": sketch,
"sketch_name": sketch.name,
"curve": curve_obj,
"curve_uuid": curve_uuid
}
if sketch_uuid is not None:
cb_data["sketch_id"] = sketch_uuid
if sketch_index is not None:
cb_data["sketch_index"] = sketch_index
self.reconstruct_cb(cb_data)
def reconstruct_sketch_line(self, sketch_lines, curve_data, curve_uuid, points_data, transform):
start_point_uuid = curve_data["start_point"]
end_point_uuid = curve_data["end_point"]
start_point = deserialize.point3d(points_data[start_point_uuid])
end_point = deserialize.point3d(points_data[end_point_uuid])
start_point.transformBy(transform)
end_point.transformBy(transform)
line = sketch_lines.addByTwoPoints(start_point, end_point)
self.set_uuid(line, curve_uuid)
return line
def reconstruct_sketch_arc(self, sketch_arcs, curve_data, curve_uuid, points_data, transform):
start_point_uuid = curve_data["start_point"]
center_point_uuid = curve_data["center_point"]
start_point = deserialize.point3d(points_data[start_point_uuid])
center_point = deserialize.point3d(points_data[center_point_uuid])
start_point.transformBy(transform)
center_point.transformBy(transform)
sweep_angle = curve_data["end_angle"] - curve_data["start_angle"]
arc = sketch_arcs.addByCenterStartSweep(center_point, start_point, sweep_angle)
self.set_uuid(arc, curve_uuid)
return arc
def reconstruct_sketch_circle(self, sketch_circles, curve_data, curve_uuid, points_data, transform):
center_point_uuid = curve_data["center_point"]
center_point = deserialize.point3d(points_data[center_point_uuid])
center_point.transformBy(transform)
radius = curve_data["radius"]
circle = sketch_circles.addByCenterRadius(center_point, radius)
self.set_uuid(circle, curve_uuid)
return circle
def reconstruct_sketch_ellipse(self, sketch_ellipses, curve_data, curve_uuid, points_data, transform):
# Ellipse reconstruction requires us to provide 3 points:
# - Center point
# - Major axis point
# - (Minor axis) point that the ellipse will pass through
# Center point
center_point_uuid = curve_data["center_point"]
center_point = deserialize.point3d(points_data[center_point_uuid])
center_point_vector = center_point.asVector()
# Major axis point
# Take the vector for the major axis
# then normalize it
# then scale it to the radius of the major axis
# and offset by the center point
major_axis = deserialize.vector3d(curve_data["major_axis"])
major_axis_radius = curve_data["major_axis_radius"]
major_axis.normalize()
major_axis_vector = major_axis.copy()
major_axis_vector.scaleBy(major_axis_radius)
major_axis_point = major_axis_vector.asPoint()
major_axis_point.translateBy(center_point_vector)
# Minor axis point
# Rotate 90 deg around z from the major axis
# then scale and offset by the center point
minor_axis_radius = curve_data["minor_axis_radius"]
rot_matrix = adsk.core.Matrix3D.create()
origin = adsk.core.Point3D.create()
axis = adsk.core.Vector3D.create(0.0, 0.0, 1.0)
rot_matrix.setToRotation(math.radians(90), axis, origin)
minor_axis = major_axis.copy()
minor_axis.transformBy(rot_matrix)
minor_axis_vector = minor_axis.copy()
minor_axis_vector.scaleBy(minor_axis_radius)
minor_axis_point = minor_axis_vector.asPoint()
minor_axis_point.translateBy(center_point_vector)
# Finally apply the sketch alignment matrix
major_axis_point.transformBy(transform)
minor_axis_point.transformBy(transform)
center_point.transformBy(transform)
ellipse = sketch_ellipses.add(center_point, major_axis_point, minor_axis_point)
self.set_uuid(ellipse, curve_uuid)
return ellipse
def reconstruct_sketch_fitted_spline(self, sketch_fitted_splines, curve_data, curve_uuid, transform):
nurbs_curve = self.get_nurbs_curve(curve_data, transform)
spline = sketch_fitted_splines.addByNurbsCurve(nurbs_curve)
self.set_uuid(spline, curve_uuid)
return spline
def get_nurbs_curve(self, curve_data, transform):
control_points = deserialize.point3d_list(curve_data["control_points"], transform)
nurbs_curve = None
if curve_data["rational"] is True:
nurbs_curve = adsk.core.NurbsCurve3D.createRational(
control_points, curve_data["degree"],
curve_data["knots"], curve_data["weights"],
curve_data["periodic"]
)
else:
nurbs_curve = adsk.core.NurbsCurve3D.createNonRational(
control_points, curve_data["degree"],
curve_data["knots"], curve_data["periodic"]
)
return nurbs_curve
# --------------------------------------------------------
# TRIMMED PROFILE CURVES
# --------------------------------------------------------
def reconstruct_trimmed_curves(self, sketch, profile_data, transform):
loops = profile_data["loops"]
for loop in loops:
profile_curves = loop["profile_curves"]
for curve_data in profile_curves:
self.reconstruct_trimmed_curve(sketch, curve_data, transform)
def reconstruct_trimmed_curve(self, sketch, curve_data, transform):
if curve_data["type"] == "Line3D":
self.reconstruct_line(
sketch.sketchCurves.sketchLines, curve_data, transform
)
elif curve_data["type"] == "Arc3D":
self.reconstruct_arc(
sketch.sketchCurves.sketchArcs, curve_data, transform
)
elif curve_data["type"] == "Circle3D":
self.reconstruct_circle(
sketch.sketchCurves.sketchCircles, curve_data, transform
)
elif curve_data["type"] == "Ellipse3D":
self.reconstruct_ellipse(
sketch.sketchCurves.sketchEllipses, curve_data, transform
)
elif curve_data["type"] == "NurbsCurve3D":
self.reconstruct_nurbs_curve(
sketch.sketchCurves.sketchFittedSplines, curve_data, transform
)
else:
raise Exception(f"Unsupported curve type: {curve_data['type']}")
def reconstruct_line(self, sketch_lines, curve_data, transform):
start_point = deserialize.point3d(curve_data["start_point"])
start_point.transformBy(transform)
end_point = deserialize.point3d(curve_data["end_point"])
end_point.transformBy(transform)
line = sketch_lines.addByTwoPoints(start_point, end_point)
self.set_uuid(line, curve_data["curve"])
return line
def reconstruct_arc(self, sketch_arcs, curve_data, transform):
start_point = deserialize.point3d(curve_data["start_point"])
start_point.transformBy(transform)
center_point = deserialize.point3d(curve_data["center_point"])
center_point.transformBy(transform)
sweep_angle = curve_data["end_angle"] - curve_data["start_angle"]
arc = sketch_arcs.addByCenterStartSweep(center_point, start_point, sweep_angle)
self.set_uuid(arc, curve_data["curve"])
return arc
def reconstruct_circle(self, sketch_circles, curve_data, transform):
center_point = deserialize.point3d(curve_data["center_point"])
center_point.transformBy(transform)
radius = curve_data["radius"]
circle = sketch_circles.addByCenterRadius(center_point, radius)
self.set_uuid(circle, curve_data["curve"])
return circle
def reconstruct_ellipse(self, sketch_ellipses, curve_data, transform):
# Ellipse reconstruction requires us to provide 3 points:
# - Center point
# - Major axis point
# - (Minor axis) point that the ellipse will pass through
# Center point
center_point = deserialize.point3d(curve_data["center_point"])
center_point_vector = center_point.asVector()
# Major axis point
# Take the vector for the major axis
# then normalize it
# then scale it to the radius of the major axis
# and offset by the center point
major_axis = deserialize.vector3d(curve_data["major_axis"])
major_axis_radius = curve_data["major_axis_radius"]
major_axis.normalize()
major_axis_vector = major_axis.copy()
major_axis_vector.scaleBy(major_axis_radius)
major_axis_point = major_axis_vector.asPoint()
major_axis_point.translateBy(center_point_vector)
# Minor axis point
# Rotate 90 deg around z from the major axis
# then scale and offset by the center point
minor_axis_radius = curve_data["minor_axis_radius"]
rot_matrix = adsk.core.Matrix3D.create()
origin = adsk.core.Point3D.create()
axis = adsk.core.Vector3D.create(0.0, 0.0, 1.0)
rot_matrix.setToRotation(math.radians(90), axis, origin)
minor_axis = major_axis.copy()
minor_axis.transformBy(rot_matrix)
minor_axis_vector = minor_axis.copy()
minor_axis_vector.scaleBy(minor_axis_radius)
minor_axis_point = minor_axis_vector.asPoint()
minor_axis_point.translateBy(center_point_vector)
# Finally apply the sketch alignment matrix
major_axis_point.transformBy(transform)
minor_axis_point.transformBy(transform)
center_point.transformBy(transform)
ellipse = sketch_ellipses.add(center_point, major_axis_point, minor_axis_point)
self.set_uuid(ellipse, curve_data["curve"])
return ellipse
def reconstruct_nurbs_curve(self, sketch_fitted_splines, curve_data, transform):
nurbs_curve = self.get_nurbs_curve(curve_data, transform)
spline = sketch_fitted_splines.addByNurbsCurve(nurbs_curve)
self.set_uuid(spline, curve_data["curve"])
return spline
# --------------------------------------------------------
# EXTRUDE FEATURE
# --------------------------------------------------------
def reconstruct_extrude_feature(self, extrude_data, extrude_uuid, extrude_index, sketch_profiles):
extrudes = self.reconstruction.features.extrudeFeatures
# There can be more than one profile, so we create an object collection
extrude_profiles = adsk.core.ObjectCollection.create()
for profile in extrude_data["profiles"]:
profile_uuid = profile["profile"]
sketch_profile = self.get_sketch_profile_reference(profile_uuid, sketch_profiles)
extrude_profiles.add(sketch_profile)
        # The operation defines whether the extrusion becomes a new body,
        # a new component, or cuts/joins another body (i.e. a boolean operation)
operation = deserialize.feature_operations(extrude_data["operation"])
extrude_input = extrudes.createInput(extrude_profiles, operation)
# Simple extrusion in one direction
if extrude_data["extent_type"] == "OneSideFeatureExtentType":
self.set_one_side_extrude_input(extrude_input, extrude_data["extent_one"])
# Extrusion in two directions with different distances
elif extrude_data["extent_type"] == "TwoSidesFeatureExtentType":
self.set_two_side_extrude_input(extrude_input, extrude_data["extent_one"], extrude_data["extent_two"])
# Symmetrical extrusion by the same distance on each side
elif extrude_data["extent_type"] == "SymmetricFeatureExtentType":
self.set_symmetric_extrude_input(extrude_input, extrude_data["extent_one"])
        # The start extent is initialized to the profile plane,
        # but we may need to change it to an offset.
        # This has to happen after all other changes to the extrude input
self.set_start_extent(extrude_input, extrude_data["start_extent"])
extrude = extrudes.add(extrude_input)
if self.reconstruct_cb is not None:
self.reconstruct_cb({
"extrude": extrude,
"extrude_name": extrude_data["name"],
"extrude_id": extrude_uuid,
"extrude_index": extrude_index
})
return extrude
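    # For reference, the fields of `extrude_data` consumed above (illustrative
    # sketch only, values are made up):
    #   {
    #     "name": "Extrude1",
    #     "profiles": [{"profile": "<profile-uuid>"}],
    #     "operation": "NewBodyFeatureOperation",
    #     "extent_type": "OneSideFeatureExtentType",
    #     "extent_one": {"distance": {"value": 1.0}, "taper_angle": {"value": 0.0}},
    #     "extent_two": {...},   # only present for TwoSidesFeatureExtentType
    #     "start_extent": {"type": "ProfilePlaneStartDefinition"}
    #   }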
def set_start_extent(self, extrude_input, start_extent):
        # Only handle the offset case here:
        # ProfilePlaneStartDefinition is already set up by default,
        # and the other start extent types are not handled
if start_extent["type"] == "OffsetStartDefinition":
offset_distance = adsk.core.ValueInput.createByReal(start_extent["offset"]["value"])
offset_start_def = adsk.fusion.OffsetStartDefinition.create(offset_distance)
extrude_input.startExtent = offset_start_def
def set_one_side_extrude_input(self, extrude_input, extent_one):
distance = adsk.core.ValueInput.createByReal(extent_one["distance"]["value"])
extent_distance = adsk.fusion.DistanceExtentDefinition.create(distance)
taper_angle = adsk.core.ValueInput.createByReal(0)
if "taper_angle" in extent_one:
taper_angle = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
extrude_input.setOneSideExtent(extent_distance, adsk.fusion.ExtentDirections.PositiveExtentDirection, taper_angle)
def set_two_side_extrude_input(self, extrude_input, extent_one, extent_two):
distance_one = adsk.core.ValueInput.createByReal(extent_one["distance"]["value"])
distance_two = adsk.core.ValueInput.createByReal(extent_two["distance"]["value"])
extent_distance_one = adsk.fusion.DistanceExtentDefinition.create(distance_one)
extent_distance_two = adsk.fusion.DistanceExtentDefinition.create(distance_two)
taper_angle_one = adsk.core.ValueInput.createByReal(0)
taper_angle_two = adsk.core.ValueInput.createByReal(0)
if "taper_angle" in extent_one:
taper_angle_one = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
if "taper_angle" in extent_two:
taper_angle_two = adsk.core.ValueInput.createByReal(extent_two["taper_angle"]["value"])
extrude_input.setTwoSidesExtent(extent_distance_one, extent_distance_two, taper_angle_one, taper_angle_two)
def set_symmetric_extrude_input(self, extrude_input, extent_one):
        # SYMMETRIC EXTRUDE
        # Symmetric extent is currently buggy when a taper is applied,
        # so instead we use a two-sided extent with symmetry.
        # Note that the distance is not a DistanceExtentDefinition
# distance = adsk.core.ValueInput.createByReal(extent_one["distance"]["value"])
# taper_angle = adsk.core.ValueInput.createByReal(0)
# if "taper_angle" in extent_one:
# taper_angle = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
# is_full_length = extent_one["is_full_length"]
# extrude_input.setSymmetricExtent(distance, is_full_length, taper_angle)
#
# TWO SIDED EXTRUDE WORKAROUND
distance = extent_one["distance"]["value"]
if extent_one["is_full_length"]:
distance = distance * 0.5
distance_one = adsk.core.ValueInput.createByReal(distance)
distance_two = adsk.core.ValueInput.createByReal(distance)
extent_distance_one = adsk.fusion.DistanceExtentDefinition.create(distance_one)
extent_distance_two = adsk.fusion.DistanceExtentDefinition.create(distance_two)
taper_angle_one = adsk.core.ValueInput.createByReal(0)
taper_angle_two = adsk.core.ValueInput.createByReal(0)
if "taper_angle" in extent_one:
taper_angle_one = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
taper_angle_two = adsk.core.ValueInput.createByReal(extent_one["taper_angle"]["value"])
extrude_input.setTwoSidesExtent(extent_distance_one, extent_distance_two, taper_angle_one, taper_angle_two)
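    # Note: for a full-length symmetric extent of total distance d, the two-sided
    # workaround above extrudes d/2 in each direction (e.g. d = 1.0 -> 0.5 per side).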
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
from numpy.fft import fft
from scipy.optimize import least_squares
from scipy.signal.lti_conversion import abcd_normalize
from scipy.signal.ltisys import dlsim
from pyvib.common import lm, mmul_weight, weightfcn
from .lti_conversion import discrete2cont, ss2phys
from .modal import modal_ac
def _atleast_2d_or_none(arg):
if arg is not None:
return np.atleast_2d(arg)
class StateSpace():
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
self.inputs = None
self.outputs = None
self._dt = None
self.T1, self.T2 = [None]*2
self.n, self.m, self.p = [0]*3
sys = system
dt = kwargs.pop('dt', True)
super().__init__(**kwargs)
self._A, self._B, self._C, self._D = [None]*4
self.Ac, self.Bc = [None]*2
self.dt = dt
if len(system) == 1: # TODO fix and isinstance(system[0], StateSpace):
sys = system[0]
if isinstance(sys, StateSpace):
sys = sys.A, sys.B, sys.C, sys.D
if len(sys) == 4:
self.A, self.B, self.C, self.D = abcd_normalize(*sys)
else:
pass
#raise ValueError(f'Wrong initialization of SS {type(system)}')
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return (f'{self.__class__.__name__},\n'
f'{repr(self.A)},\n'
f'{repr(self.B)},\n'
f'{repr(self.C)},\n'
f'{repr(self.D)},\n'
f'dt: {repr(self.dt)}')
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
self.n = self.A.shape[0]
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.m = self.inputs = self.B.shape[-1]
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.p = self.outputs = self.C.shape[0]
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
@property
def npar(self):
n, m, p = self.n, self.m, self.p
return n**2 + n*m + p*n + p*m
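        # e.g. a 2-state, single-input, single-output model (n=2, m=1, p=1)
        # has 2*2 + 2*1 + 1*2 + 1*1 = 9 free parameters.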
@property
def dt(self):
"""Return the sampling time of the system."""
return self._dt
@dt.setter
def dt(self, dt):
self._dt = dt
def _copy(self, *system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
        if len(system) == 1 and isinstance(system[0], StateSpace):
            sys = system[0]
            A, B, C, D, dt = sys.A, sys.B, sys.C, sys.D, sys.dt
elif len(system) == 4:
A, B, C, D = system
dt = self.dt
else:
raise ValueError('Cannot copy the given system')
self.A = A
self.B = B
self.C = C
self.D = D
self.dt = dt
def _get_shape(self):
# n, m, p
return self.A.shape[0], self.B.shape[1], self.C.shape[0]
def _get_system(self):
return (self.A, self.B, self.C, self.D, self.dt)
def extract(self, x0):
n, m, p = self.n, self.m, self.p
A = x0.flat[:n**2].reshape((n,n))
B = x0.flat[n**2 + np.r_[:n*m]].reshape((n,m))
C = x0.flat[n**2+n*m + np.r_[:p*n]].reshape((p,n))
D = x0.flat[n*(p+m+n):].reshape((p,m))
return A, B, C, D
def flatten(self):
"""Returns the state space as flattened array"""
n, m, p = self.n, self.m, self.p
npar = n**2 + n*m + p*n + p*m
x0 = np.empty(npar)
x0[:n**2] = self.A.ravel()
x0[n**2 + np.r_[:n*m]] = self.B.ravel()
x0[n**2 + n*m + np.r_[:n*p]] = self.C.ravel()
x0[n**2 + n*m + n*p:] = self.D.ravel()
return x0
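    # The flattened parameter vector is laid out as
    #   x0 = [A.ravel(), B.ravel(), C.ravel(), D.ravel()]
    # so `extract` is the inverse of `flatten`. Illustrative round trip:
    #   sys = StateSpace(np.eye(2), np.ones((2, 1)), np.ones((1, 2)), np.zeros((1, 1)))
    #   A, B, C, D = sys.extract(sys.flatten())   # recovers the same matrices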
def transient(self, T1=None, T2=None):
"""Transient handling. t1: periodic, t2: aperiodic
Get transient index. Only needed to run once
"""
self.T1 = T1
self.T2 = T2
sig = self.signal
ns = sig.R * sig.npp
if T1 is not None:
# Extract the transient part of the input
self.idx_trans = transient_indices_periodic(T1, ns)
self.idx_remtrans = remove_transient_indices_periodic(T1, ns,
self.p)
else:
self.idx_trans = np.s_[:ns]
self.idx_remtrans = np.s_[:ns]
if T2 is not None:
self.without_T2, NT = remove_transient_indices_nonperiodic(T2,ns,self.p)
else:
self.without_T2 = np.s_[:ns]
def output(self, u, t=None, x0=None):
system = self._get_system()
return dlsim(system, u, t=t, x0=x0)
def simulate(self, u, t=None, x0=None, T1=None, T2=None):
"""
Return the response of the discrete-time system to input `u` with
transient handling.
See :func:`scipy.signal.dlsim` for details.
"""
# Number of samples
u = np.atleast_1d(u)
if u.ndim == 1:
u = np.atleast_2d(u).T
ns = u.shape[0]
if T1 is None:
T1 = self.T1
T2 = self.T2
if T1 is not None:
idx = self.idx_trans
else:
idx = transient_indices_periodic(T1, ns)
if T1 is not None:
# Prepend transient samples to the input
u = u[idx]
t, y, x = self.output(u, t=t, x0=x0)
if T1 is not None:
# remove transient samples. p=1 is correct. TODO why?
idx = remove_transient_indices_periodic(T1, ns, p=1)
x = x[idx]
y = y[idx]
t = t[idx]
# save output
self.x_mod = x
self.y_mod = y
return t, y, x
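    # Minimal usage sketch (illustrative; with T1/T2 left as None no transient
    # samples are prepended or removed):
    #   sys = StateSpace(A, B, C, D, dt=1.0)   # discrete-time model
    #   t, y, x = sys.simulate(u)              # u has shape (ns,) or (ns, m)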
def to_cont(self, method='zoh', alpha=None):
"""convert to cont. time. Only A and B changes"""
self.Ac, self.Bc, *_ = \
discrete2cont(self.A, self.B, self.C, self.D, self.dt,
method=method, alpha=alpha)
@property
def modal(self, update=False):
"""Calculate modal properties using cont. time matrices"""
if self.Ac is None or update is True:
self.to_cont()
return modal_ac(self.Ac, self.C)
def to_phys(self, update=False):
"""Calculate state space matrices in physical domain using a similarity
transform T
"""
# returns A, B, C, T. T is similarity transform
if self.Ac is None or update is True:
self.to_cont()
return ss2phys(self.Ac, self.Bc, self.C)
class NonlinearStateSpace(StateSpace):
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
sys = system
E = np.array([])
F = np.array([])
if len(system) == 6:
E, F = system[4:6]
sys = system[0:4]
super().__init__(*sys,**kwargs)
self.E, self.F = E, F
def __repr__(self):
rep = super().__repr__()
idt = rep.rfind('dt')
inl = rep.find('\n')+1
return (f'{self.__class__.__name__},\n' +
rep[inl:idt] +
f'{repr(self.E)},\n'
f'{repr(self.F)},\n'
f'dt: {repr(self.dt)}')
@property
def E(self):
"""State matrix of the `StateSpace` system."""
return self._E
@E.setter
def E(self, E):
self._E = _atleast_2d_or_none(E)
@property
def F(self):
"""Input matrix of the `StateSpace` system."""
return self._F
@F.setter
def F(self, F):
self._F = _atleast_2d_or_none(F)
@property
def npar(self):
xact = self.xactive
yact = self.yactive
ne = len(xact)
nf = len(yact)
n, m, p = self.n, self.m, self.p
return n**2 + n*m + p*n + p*m + ne + nf
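        # i.e. the linear state-space parameters plus the ne + nf active
        # nonlinear coefficients selected from E and F.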
def _get_system(self):
return (self.A, self.B, self.C, self.D, self.E, self.F, self.dt)
def _copy(self, *system):
if len(system) == 1 and isinstance(system[0], NonlinearStateSpace):
            sys = system[0]
            A, B, C, D, E, F, dt = (sys.A, sys.B, sys.C, sys.D,
                                    sys.E, sys.F, sys.dt)
elif len(system) == 6:
A, B, C, D, E, F = system
dt = self.dt
else:
raise ValueError(f'Cannot copy the given system {type(system)}')
self.A, self.B, self.C, self.D, self.E, self.F, self.dt = \
A, B, C, D, E, F, dt
def to_cont(self, method='zoh', alpha=None):
"""convert to cont. time. Only A, B and E changes"""
Bext = np.hstack((self.B, self.E))
Dext = np.hstack((self.D, self.F))
self.Ac, Bcext, *_ = \
discrete2cont(self.A, Bext, self.C, Dext, self.dt,
method=method, alpha=alpha)
self.Bc = Bcext[:,:self.m]
self.Ec = Bcext[:,self.m:]
@property
def weight(self):
if self._weight is None:
self._weight = weightfcn(self.signal.covY)
return self._weight
def costfcn(self, x0=None, weight=False):
if weight is True:
weight = self.weight
if x0 is None:
x0 = self.flatten()
return costfcn_time(x0, self, weight=weight)
def extract(self, x0):
"""Extract state space from from flattened array"""
n, m, p = self.n, self.m, self.p
# index of active elements
xact = self.xactive
yact = self.yactive
ne = len(xact)
nf = len(yact)
E = self.E
F = self.F
A = x0.flat[:n**2].reshape((n,n))
B = x0.flat[n**2 + np.r_[:n*m]].reshape((n,m))
C = x0.flat[n**2+n*m + np.r_[:p*n]].reshape((p,n))
D = x0.flat[n*(p+m+n) + np.r_[:p*m]].reshape((p,m))
E.flat[xact] = x0.flat[n*(p+m+n)+p*m + np.r_[:ne]]
F.flat[yact] = x0.flat[n*(p+m+n)+p*m+ne + np.r_[:nf]]
return A, B, C, D, E, F
def flatten(self):
"""Returns the state space as flattened array"""
xact = self.xactive
yact = self.yactive
ne = len(xact)
nf = len(yact)
n, m, p = self.n, self.m, self.p
npar = n**2 + n*m + p*n + p*m + ne + nf
x0 = np.empty(npar)
x0[:n**2] = self.A.ravel()
x0[n**2 + np.r_[:n*m]] = self.B.ravel()
x0[n**2 + n*m + np.r_[:n*p]] = self.C.ravel()
x0[n*(p+m+n) + np.r_[:p*m]] = self.D.ravel()
x0[n*(p+m+n)+p*m + np.r_[:ne]] = self.E.flat[xact]
x0[n*(p+m+n)+p*m+ne + np.r_[:nf]] = self.F.flat[yact]
return x0
class StateSpaceIdent():
def __init__(self):
self._weight = None
def cost(self, x0=None, weight=False):
if weight is True:
weight = self.weight
if x0 is None:
x0 = self.flatten()
err = self.costfcn(x0, weight=weight)
# TODO maybe divide by 2 to match scipy's implementation of minpack
return np.dot(err, err)
def optimize(self, method=None, weight=True, info=2, nmax=50, lamb=None,
ftol=1e-12, xtol=1e-12, gtol=1e-12, copy=False):
"""Optimize the estimated the nonlinear state space matrices"""
if weight is True:
weight = self.weight
self.freq_weight = True
if weight is False:
self.freq_weight = False
if info:
print(f'\nStarting {self.__class__.__name__} optimization')
x0 = self.flatten()
kwargs = {'weight':weight}
if method is None:
res = lm(fun=self.costfcn, x0=x0, jac=self.jacobian, info=info,
nmax=nmax, lamb=lamb, ftol=ftol, xtol=xtol, gtol=gtol,
kwargs=kwargs)
else:
res = least_squares(self.costfcn, x0, self.jacobian, method='lm',
x_scale='jac', kwargs=kwargs)
if copy:
# restore state space matrices to original
self._copy(*self.extract(x0))
nmodel = deepcopy(self)
nmodel._copy(*self.extract(res['x']))
nmodel.res = res
return nmodel
# update the model with the optimized SS matrices
self._copy(*self.extract(res['x']))
self.res = res
def extract_model(self, y, u, t=None, x0=None, T1=None, T2=None,
info=2, copy=False):
"""extract the best model using validation data"""
models = self.res['x_mat']
nmodels = models.shape[0]
ss0 = self.flatten()
err_rms = np.empty(nmodels)
if info:
print(f"{"model":5} | {"rms":12} |")
for i, ss in enumerate(models):
self._copy(*self.extract(ss))
tout, yout, xout = self.simulate(u, t=t, x0=x0, T1=T1, T2=T2)
err_rms[i] = np.sqrt(np.mean((y - yout)**2))
if info:
print(f"{i:5d} | {err_rms[i]:12.8g}")
# best model on new data set
i = np.nanargmin(err_rms)
if info:
print(f"best model is {i} with RMS {err_rms[i]:12.8g}")
ss = models[i]
if copy:
# restore state space matrices to original
self._copy(*self.extract(ss0))
nmodel = deepcopy(self)
nmodel._copy(*self.extract(ss))
return nmodel, err_rms
self._copy(*self.extract(ss))
return err_rms
def costfcn_time(x0, system, weight=False):
"""Compute the vector of residuals such that the function to mimimize is
res = ∑ₖ e[k]ᴴ*e[k], where the error is given by
e = weight*(ŷ - y)
and the weight is the square inverse of the covariance matrix of `y`
"""
# TODO fix transient
# T2 = system.T2
    # p is the actual number of outputs in the signal, not the system output count
R, p, npp = system.signal.R, system.signal.p, system.signal.npp
p = system.p
nfd = npp//2
# without_T2 = system.without_T2
# update the state space matrices from x0
# TODO find a way to avoid explicitly updating the state space model.
# It is not the expected behavior that calculating the cost should change
# the model! Right now it is done because simulating is using the systems
# ss matrices
system._copy(*system.extract(x0))
# Compute the (transient-free) modeled output and the corresponding states
t_mod, y_mod, x_mod = system.simulate(system.signal.um)
# Compute the (weighted) error signal without transient
if system.signal._ydm is not None:
ym = np.hstack((system.signal.ym, system.signal._ydm))
else:
ym = system.signal.ym
err = y_mod - ym #[without_T2, :p] - system.signal.ym[without_T2]
if weight is not False and system.freq_weight:
err = err.reshape((npp,R,p),order='F').swapaxes(1,2)
# Select only the positive half of the spectrum
err = fft(err, axis=0)[:nfd]
err = mmul_weight(err, weight)
#cost = np.vdot(err, err).real
err = err.swapaxes(1,2).ravel(order='F')
err_w = np.hstack((err.real.squeeze(), err.imag.squeeze()))
elif weight is not False:
# TODO time domain weighting. Does not work
err_w = err * weight # [without_T2]
#cost = np.dot(err,err)
else:
# no weighting
# TODO are we sure this is the right order?
return err.ravel(order='F')
return err_w
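# In the frequency-weighted branch above, the residual is reshaped to (npp, R, p),
# FFT'd along the time (first) axis, truncated to the nfd = npp//2 positive-frequency
# lines, multiplied by the weight, and finally stacked as [real, imag], giving a
# real-valued residual vector of length 2*nfd*R*p for the least-squares solver.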
def transient_indices_periodic(T1,N):
"""Computes indices for transient handling of periodic signals.
Computes the indices to be used with a vector u of length N that contains
(several realizations of) a periodic signal, such that u[indices] has T1[0]
transient samples prepended to each realization. The starting samples of
each realization can be specified in T1[1:]. Like this, steady-state data
can be obtained from a PNLSS model by using u[indices] as an input signal
to a PNLSS model (see :meth:`pyvib.PNLSS.simulate`) and removing the
    transient samples afterwards (see :func:`remove_transient_indices_periodic`).
Parameters
----------
T1 : int | ndarray(int)
array that indicates how the transient is handled. The first element
T1[0] is the number of transient samples that should be prepended to
each realization. The other elements T1[1:] indicate the starting
sample of each realization in the signal. If T1 has only one element,
        T1[1] is put to zero, i.e. the first sample.
N : int
length of the signal containing all realizations
Returns
-------
indices : ndarray(int)
indices of a vector u that contains (several realizations of) a
periodic signal, such that u[indices] has a number of transient samples
added before each realization
Examples
--------
>>> npp = 1000 # Number of points per period
>>> R = 2 # Number of phase realizations
>>> T = 100 # Number of transient samples
>>> T1 = np.r_[T, np.r_[0:(R-1)*npp+1:npp]] # Transient handling vector
>>> N = R*npp # Total number of samples
>>> indices = transient_indices_periodic(T1,N)
indices = np.r_[900:1000, 0:1000, 1900:2000, 1000:2000]
= [transient samples realization 1, ...
realization 1, ...
transient samples realization 2, ...
realization 2]
"""
T1 = np.atleast_1d(np.asarray(T1, dtype=int))
ntrans = T1[0]
if ntrans != 0:
if len(T1) == 1:
# If starting samples of realizations not specified, then we assume
            # the realizations start at the first sample
T1 = np.append(T1, 0)
# starting index of each realization and length of signal
T1 = np.append(T1[1:], N)
indices = np.array([], dtype=int)
for i in range(len(T1)-1):
trans = T1[i+1] - 1 - np.mod(np.arange(ntrans)[::-1], T1[i+1]-T1[i])
normal = np.arange(T1[i],T1[i+1])
indices = np.hstack((indices, trans, normal))
else:
# No transient points => output = all indices of the signal
indices = np.arange(N)
return indices
def remove_transient_indices_periodic(T1,N,p):
"""Computes indices for transient handling for periodic signals after
filtering
Let u be a vector of length N containing (several realizations of) a
periodic signal. Let uTot be a vector containing the signal(s) in u with
T1[0] transient points prepended to each realization (see
:func:`transient_indices_periodic`). The starting samples of each
realization can be specified in T1[1:]. Let yTot be a vector/matrix
containing the p outputs of a PNLSS model after applying the input uTot.
Then this function computes the indices to be used with the vectorized form
of yTot such that the transient samples are removed from yTot, i.e. y =
yTot[indices] contains the steady-state output(s) stacked on top of each
other.
Parameters
----------
T1 : ndarray(int)
vector that indicates how the transient is handled. The first element
T1[0] is the number of transient samples that were prepended to each
realization. The other elements T1[1:] indicate the starting sample
of each realization in the input signal. If T1 has only one element,
T1[1] is put to zero.
N : int
length of the input signal containing all realizations
p : int
number of outputs
Returns
-------
indices : ndarray(int)
If uTot is a vector containing (several realizations of) a periodic
signal to which T1[0] transient points were added before each
realization, and if yTot is the corresponding output vector (or matrix
if more than one output), then indices is such that the transient
points are removed from y = yTot.flat[indices]. If p > 1, then indices
is a vector and y = yTot.flat[indices] is a vector with the steady
state outputs stacked after each other.
Examples
--------
>>> npp = 1000 # Number of points per period
>>> R = 2 # Number of phase realizations
>>> T = 100 # Number of transient samples
>>> T1 = np.r_[T, np.r_[0:(R-1)*npp+1:npp]] # Transient handling vector
>>> N = R*npp # Total number of samples
>>> indices_tot = transient_indices_periodic(T1,N)
indices_tot = np.r_[900:1000, 0:1000, 1900:2000, 1000:2000]
>>> p = 1 # One output
>>> indices_removal = remove_transient_indices_periodic(T1,N,p)
np.r_[100:1100, 1200:2200]
>>> indices_tot[indices_removal]
np.r_[:2000] # [realization 1, realization 2]
>>> p = 2 # More than one output
>>> indices_removal = remove_transient_indices_periodic(T1,N,p)
np.r_[100:1100, 1200:2200, 2300:3300, 3400:4400]
Let u be a vector containing `[input realization 1, input realization 2]`
then `uTot = u[indices_tot]` is a vector containing::
[transient samples realization 1, input realization 1,
transient samples realization 2, input realization 2]
Let y1 be a vector containing the first output and y2 be a vector
containing the second output when applying uTot as an input to a
PNLSS model, and let `yTot = [y1, y2].T` be a 2 x 2200 matrix with y1
and y2 in its first and second row, respectively.
Note that `y1 = yTot.flat[:2200]` and `y2 = yTot.flat[2200:4400]`
Then `yTot.flat[indices_removal] = np.r_[y1[100:1100], y1[1200:2200],
y2[100:1100], y2[1200:2200]]`::
[output 1 corresponding to input realization 1,
output 1 corresponding to input realization 2,
output 2 corresponding to input realization 1,
output 2 corresponding to input realization 2]
"""
T1 = np.atleast_1d(np.asarray(T1, dtype=int))
ntrans = T1[0]
if ntrans == 0:
return np.arange(N)
if len(T1) == 1:
# If starting samples of realizations not specified, then we assume
        # the realizations start at the first sample
T1 = np.append(T1, 0)
# starting index of each realization and length of signal
T1 = np.append(T1[1:], N)
indices = np.array([], dtype=int)
for i in range(len(T1)-1):
# Concatenate indices without transient samples
indices = np.hstack((indices,
np.r_[T1[i]:T1[i+1]] + (i+1)*ntrans))
# TODO This is not correct for p>1. We still store y.shape -> (N,p)
# UPDATE 25/02: maybe correct. Gives correct output, see examples
if p > 1:
# Total number of samples per output = number of samples without + with
# transients
nt = N + ntrans*(len(T1)-1)
tmp = np.empty(p*N, dtype=int)
for i in range(p):
# Stack indices without transient samples on top of each other
tmp[i*N:(i+1)*N] = indices + i*nt
indices = tmp
return indices
def remove_transient_indices_nonperiodic(T2,N,p):
"""Remove transients from arbitrary data.
Computes the indices to be used with a (N,p) matrix containing p output
signals of length N, such that y[indices] contains the transient-free
output(s) of length NT stacked on top of each other (if more than one
output). The transient samples to be removed are specified in T2 (T2 =
np.arange(T2) if T2 is scalar).
Parameters
----------
T2 : int
scalar indicating how many samples from the start are removed or array
indicating which samples are removed
N : int
length of the total signal
p : int
number of outputs
Returns
-------
indices : ndarray(int)
vector of indices, such that y[indices] contains the output(s) without
transients. If more than one output (p > 1), then y[indices] stacks the
transient-free outputs on top of each other.
nt : int
length of the signal without transients
Examples
--------
# One output, T2 scalar
>>> N = 1000 # Total number of samples
>>> T2 = 200 # First 200 samples should be removed after filtering
>>> p = 1 # One output
>>> indices, NT = remove_transient_indices_nonperiodic(T2,N,p)
np.r_[200:1000] # Indices of the transient-free output
NT = 800 # Number of samples in the transient-free output
# Two outputs, T2 scalar
>>> N = 1000 # Total number of samples
>>> T2 = 200 # First 200 samples should be removed after filtering
>>> p = 2 # Two outputs
>>> indices, NT = remove_transient_indices_nonperiodic(T2,N,p)
np.r_[200:1000, 1200:2000]
NT = 800
If y = [y1, y2] is a 1000 x 2 matrix with the two outputs y1 and y2, then
    y[indices] = [y1[200:1000],
                  y2[200:1000]]
is a vector with the transient-free outputs stacked on top of each other
    # One output, T2 is a vector
>>> N1 = 1000 # Number of samples in a first data set
>>> N2 = 500 # Number of samples in a second data set
>>> N = N1 + N2 # Total number of samples
>>> T2_1 = np.r_[:200] # Transient samples in first data set
>>> T2_2 = np.r_[:100] # Transient samples in second data set
>>> T2 = np.r_[T2_1, N1+T2_2] # Transient samples
>>> p = 1 # One output
>>> indices, NT = remove_transient_indices_nonperiodic(T2,N,p)
np.r_[200:1000, 1100:1500]
NT = 1200
"""
if T2 is None:
return np.s_[:N], N
if isinstance(T2, (int, np.integer)): # np.isscalar(T2):
# Remove all samples up to T2
T2 = np.arange(T2)
T2 = np.atleast_1d(np.asarray(T2, dtype=int))
# Remove transient samples from the total
without_T2 = np.delete(np.arange(N), T2)
# Length of the transient-free signal(s)
NT = len(without_T2)
if p > 1: # for multiple outputs
indices = np.zeros(p*NT, dtype=int)
for i in range(p):
# Stack indices for each output on top of each other
indices[i*NT:(i+1)*NT] = without_T2 + i*N
else:
indices = without_T2
return indices, NT
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
from numpy.fft import fft
from scipy.optimize import least_squares
from scipy.signal.lti_conversion import abcd_normalize
from scipy.signal.ltisys import dlsim
from pyvib.common import lm, mmul_weight, weightfcn
from .lti_conversion import discrete2cont, ss2phys
from .modal import modal_ac
def _atleast_2d_or_none(arg):
if arg is not None:
return np.atleast_2d(arg)
class StateSpace():
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
self.inputs = None
self.outputs = None
self._dt = None
self.T1, self.T2 = [None]*2
self.n, self.m, self.p = [0]*3
sys = system
dt = kwargs.pop('dt', True)
super().__init__(**kwargs)
self._A, self._B, self._C, self._D = [None]*4
self.Ac, self.Bc = [None]*2
self.dt = dt
if len(system) == 1: # TODO fix and isinstance(system[0], StateSpace):
sys = system[0]
if isinstance(sys, StateSpace):
sys = sys.A, sys.B, sys.C, sys.D
if len(sys) == 4:
self.A, self.B, self.C, self.D = abcd_normalize(*sys)
else:
pass
#raise ValueError(f'Wrong initialization of SS {type(system)}')
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return (f'{self.__class__.__name__},\n'
f'{repr(self.A)},\n'
f'{repr(self.B)},\n'
f'{repr(self.C)},\n'
f'{repr(self.D)},\n'
f'dt: {repr(self.dt)}')
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
self.n = self.A.shape[0]
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.m = self.inputs = self.B.shape[-1]
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.p = self.outputs = self.C.shape[0]
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
@property
def npar(self):
n, m, p = self.n, self.m, self.p
return n**2 + n*m + p*n + p*m
@property
def dt(self):
"""Return the sampling time of the system."""
return self._dt
@dt.setter
def dt(self, dt):
self._dt = dt
def _copy(self, *system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
        if len(system) == 1 and isinstance(system[0], StateSpace):
            sys = system[0]
            A, B, C, D, dt = sys.A, sys.B, sys.C, sys.D, sys.dt
elif len(system) == 4:
A, B, C, D = system
dt = self.dt
else:
raise ValueError('Cannot copy the given system')
self.A = A
self.B = B
self.C = C
self.D = D
self.dt = dt
def _get_shape(self):
# n, m, p
return self.A.shape[0], self.B.shape[1], self.C.shape[0]
def _get_system(self):
return (self.A, self.B, self.C, self.D, self.dt)
def extract(self, x0):
n, m, p = self.n, self.m, self.p
A = x0.flat[:n**2].reshape((n,n))
B = x0.flat[n**2 + np.r_[:n*m]].reshape((n,m))
C = x0.flat[n**2+n*m + np.r_[:p*n]].reshape((p,n))
D = x0.flat[n*(p+m+n):].reshape((p,m))
return A, B, C, D
def flatten(self):
"""Returns the state space as flattened array"""
n, m, p = self.n, self.m, self.p
npar = n**2 + n*m + p*n + p*m
x0 = np.empty(npar)
x0[:n**2] = self.A.ravel()
x0[n**2 + np.r_[:n*m]] = self.B.ravel()
x0[n**2 + n*m + np.r_[:n*p]] = self.C.ravel()
x0[n**2 + n*m + n*p:] = self.D.ravel()
return x0
def transient(self, T1=None, T2=None):
"""Transient handling. t1: periodic, t2: aperiodic
Get transient index. Only needed to run once
"""
self.T1 = T1
self.T2 = T2
sig = self.signal
ns = sig.R * sig.npp
if T1 is not None:
# Extract the transient part of the input
self.idx_trans = transient_indices_periodic(T1, ns)
self.idx_remtrans = remove_transient_indices_periodic(T1, ns,
self.p)
else:
self.idx_trans = np.s_[:ns]
self.idx_remtrans = np.s_[:ns]
if T2 is not None:
self.without_T2, NT = remove_transient_indices_nonperiodic(T2,ns,self.p)
else:
self.without_T2 = np.s_[:ns]
def output(self, u, t=None, x0=None):
system = self._get_system()
return dlsim(system, u, t=t, x0=x0)
def simulate(self, u, t=None, x0=None, T1=None, T2=None):
"""
Return the response of the discrete-time system to input `u` with
transient handling.
See :func:`scipy.signal.dlsim` for details.
"""
# Number of samples
u = np.atleast_1d(u)
if u.ndim == 1:
u = np.atleast_2d(u).T
ns = u.shape[0]
if T1 is None:
T1 = self.T1
T2 = self.T2
if T1 is not None:
idx = self.idx_trans
else:
idx = transient_indices_periodic(T1, ns)
if T1 is not None:
# Prepend transient samples to the input
u = u[idx]
t, y, x = self.output(u, t=t, x0=x0)
if T1 is not None:
# remove transient samples. p=1 is correct. TODO why?
idx = remove_transient_indices_periodic(T1, ns, p=1)
x = x[idx]
y = y[idx]
t = t[idx]
# save output
self.x_mod = x
self.y_mod = y
return t, y, x
def to_cont(self, method='zoh', alpha=None):
"""convert to cont. time. Only A and B changes"""
self.Ac, self.Bc, *_ = \
discrete2cont(self.A, self.B, self.C, self.D, self.dt,
method=method, alpha=alpha)
@property
def modal(self, update=False):
"""Calculate modal properties using cont. time matrices"""
if self.Ac is None or update is True:
self.to_cont()
return modal_ac(self.Ac, self.C)
def to_phys(self, update=False):
"""Calculate state space matrices in physical domain using a similarity
transform T
"""
# returns A, B, C, T. T is similarity transform
if self.Ac is None or update is True:
self.to_cont()
return ss2phys(self.Ac, self.Bc, self.C)
class NonlinearStateSpace(StateSpace):
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
sys = system
E = np.array([])
F = np.array([])
if len(system) == 6:
E, F = system[4:6]
sys = system[0:4]
super().__init__(*sys,**kwargs)
self.E, self.F = E, F
def __repr__(self):
rep = super().__repr__()
idt = rep.rfind('dt')
inl = rep.find('\n')+1
return (f'{self.__class__.__name__},\n' +
rep[inl:idt] +
f'{repr(self.E)},\n'
f'{repr(self.F)},\n'
f'dt: {repr(self.dt)}')
@property
def E(self):
"""State matrix of the `StateSpace` system."""
return self._E
@E.setter
def E(self, E):
self._E = _atleast_2d_or_none(E)
@property
def F(self):
"""Input matrix of the `StateSpace` system."""
return self._F
@F.setter
def F(self, F):
self._F = _atleast_2d_or_none(F)
@property
def npar(self):
xact = self.xactive
yact = self.yactive
ne = len(xact)
nf = len(yact)
n, m, p = self.n, self.m, self.p
return n**2 + n*m + p*n + p*m + ne + nf
def _get_system(self):
return (self.A, self.B, self.C, self.D, self.E, self.F, self.dt)
def _copy(self, *system):
if len(system) == 1 and isinstance(system[0], NonlinearStateSpace):
            sys = system[0]
            A, B, C, D, E, F, dt = (sys.A, sys.B, sys.C, sys.D,
                                    sys.E, sys.F, sys.dt)
elif len(system) == 6:
A, B, C, D, E, F = system
dt = self.dt
else:
raise ValueError(f'Cannot copy the given system {type(system)}')
self.A, self.B, self.C, self.D, self.E, self.F, self.dt = \
A, B, C, D, E, F, dt
def to_cont(self, method='zoh', alpha=None):
"""convert to cont. time. Only A, B and E changes"""
Bext = np.hstack((self.B, self.E))
Dext = np.hstack((self.D, self.F))
self.Ac, Bcext, *_ = \
discrete2cont(self.A, Bext, self.C, Dext, self.dt,
method=method, alpha=alpha)
self.Bc = Bcext[:,:self.m]
self.Ec = Bcext[:,self.m:]
@property
def weight(self):
if self._weight is None:
self._weight = weightfcn(self.signal.covY)
return self._weight
def costfcn(self, x0=None, weight=False):
if weight is True:
weight = self.weight
if x0 is None:
x0 = self.flatten()
return costfcn_time(x0, self, weight=weight)
def extract(self, x0):
"""Extract state space from from flattened array"""
n, m, p = self.n, self.m, self.p
# index of active elements
xact = self.xactive
yact = self.yactive
ne = len(xact)
nf = len(yact)
E = self.E
F = self.F
A = x0.flat[:n**2].reshape((n,n))
B = x0.flat[n**2 + np.r_[:n*m]].reshape((n,m))
C = x0.flat[n**2+n*m + np.r_[:p*n]].reshape((p,n))
D = x0.flat[n*(p+m+n) + np.r_[:p*m]].reshape((p,m))
E.flat[xact] = x0.flat[n*(p+m+n)+p*m + np.r_[:ne]]
F.flat[yact] = x0.flat[n*(p+m+n)+p*m+ne + np.r_[:nf]]
return A, B, C, D, E, F
def flatten(self):
"""Returns the state space as flattened array"""
xact = self.xactive
yact = self.yactive
ne = len(xact)
nf = len(yact)
n, m, p = self.n, self.m, self.p
npar = n**2 + n*m + p*n + p*m + ne + nf
x0 = np.empty(npar)
x0[:n**2] = self.A.ravel()
x0[n**2 + np.r_[:n*m]] = self.B.ravel()
x0[n**2 + n*m + np.r_[:n*p]] = self.C.ravel()
x0[n*(p+m+n) + np.r_[:p*m]] = self.D.ravel()
x0[n*(p+m+n)+p*m + np.r_[:ne]] = self.E.flat[xact]
x0[n*(p+m+n)+p*m+ne + np.r_[:nf]] = self.F.flat[yact]
return x0
class StateSpaceIdent():
def __init__(self):
self._weight = None
def cost(self, x0=None, weight=False):
if weight is True:
weight = self.weight
if x0 is None:
x0 = self.flatten()
err = self.costfcn(x0, weight=weight)
# TODO maybe divide by 2 to match scipy's implementation of minpack
return np.dot(err, err)
def optimize(self, method=None, weight=True, info=2, nmax=50, lamb=None,
ftol=1e-12, xtol=1e-12, gtol=1e-12, copy=False):
"""Optimize the estimated the nonlinear state space matrices"""
if weight is True:
weight = self.weight
self.freq_weight = True
if weight is False:
self.freq_weight = False
if info:
print(f'\nStarting {self.__class__.__name__} optimization')
x0 = self.flatten()
kwargs = {'weight':weight}
if method is None:
res = lm(fun=self.costfcn, x0=x0, jac=self.jacobian, info=info,
nmax=nmax, lamb=lamb, ftol=ftol, xtol=xtol, gtol=gtol,
kwargs=kwargs)
else:
res = least_squares(self.costfcn, x0, self.jacobian, method='lm',
x_scale='jac', kwargs=kwargs)
if copy:
# restore state space matrices to original
self._copy(*self.extract(x0))
nmodel = deepcopy(self)
nmodel._copy(*self.extract(res['x']))
nmodel.res = res
return nmodel
# update the model with the optimized SS matrices
self._copy(*self.extract(res['x']))
self.res = res
def extract_model(self, y, u, t=None, x0=None, T1=None, T2=None,
info=2, copy=False):
"""extract the best model using validation data"""
models = self.res['x_mat']
nmodels = models.shape[0]
ss0 = self.flatten()
err_rms = np.empty(nmodels)
if info:
print(f"{'model':5} | {'rms':12} |")
for i, ss in enumerate(models):
self._copy(*self.extract(ss))
tout, yout, xout = self.simulate(u, t=t, x0=x0, T1=T1, T2=T2)
err_rms[i] = np.sqrt(np.mean((y - yout)**2))
if info:
print(f"{i:5d} | {err_rms[i]:12.8g}")
# best model on new data set
i = np.nanargmin(err_rms)
if info:
print(f"best model is {i} with RMS {err_rms[i]:12.8g}")
ss = models[i]
if copy:
# restore state space matrices to original
self._copy(*self.extract(ss0))
nmodel = deepcopy(self)
nmodel._copy(*self.extract(ss))
return nmodel, err_rms
self._copy(*self.extract(ss))
return err_rms
def costfcn_time(x0, system, weight=False):
"""Compute the vector of residuals such that the function to mimimize is
res = ∑ₖ e[k]ᴴ*e[k], where the error is given by
e = weight*(ŷ - y)
and the weight is the square inverse of the covariance matrix of `y`
"""
# TODO fix transient
# T2 = system.T2
# p is the actual number of output in the signal, not the system output
R, p, npp = system.signal.R, system.signal.p, system.signal.npp
p = system.p
nfd = npp//2
# without_T2 = system.without_T2
# update the state space matrices from x0
# TODO find a way to avoid explicitly updating the state space model.
# It is not the expected behavior that calculating the cost should change
# the model! Right now it is done because simulating is using the systems
# ss matrices
system._copy(*system.extract(x0))
# Compute the (transient-free) modeled output and the corresponding states
t_mod, y_mod, x_mod = system.simulate(system.signal.um)
# Compute the (weighted) error signal without transient
if system.signal._ydm is not None:
ym = np.hstack((system.signal.ym, system.signal._ydm))
else:
ym = system.signal.ym
err = y_mod - ym #[without_T2, :p] - system.signal.ym[without_T2]
if weight is not False and system.freq_weight:
err = err.reshape((npp,R,p),order='F').swapaxes(1,2)
# Select only the positive half of the spectrum
err = fft(err, axis=0)[:nfd]
err = mmul_weight(err, weight)
#cost = np.vdot(err, err).real
err = err.swapaxes(1,2).ravel(order='F')
err_w = np.hstack((err.real.squeeze(), err.imag.squeeze()))
elif weight is not False:
# TODO time domain weighting. Does not work
err_w = err * weight # [without_T2]
#cost = np.dot(err,err)
else:
# no weighting
# TODO are we sure this is the right order?
return err.ravel(order='F')
return err_w
def transient_indices_periodic(T1,N):
"""Computes indices for transient handling of periodic signals.
Computes the indices to be used with a vector u of length N that contains
(several realizations of) a periodic signal, such that u[indices] has T1[0]
transient samples prepended to each realization. The starting samples of
each realization can be specified in T1[1:]. Like this, steady-state data
can be obtained from a PNLSS model by using u[indices] as an input signal
to a PNLSS model (see :meth:`pyvib.PNLSS.simulate`) and removing the
transient samples afterwards (see :func:`remove_transient_indices_periodic`
Parameters
----------
T1 : int | ndarray(int)
array that indicates how the transient is handled. The first element
T1[0] is the number of transient samples that should be prepended to
each realization. The other elements T1[1:] indicate the starting
sample of each realization in the signal. If T1 has only one element,
T1[1] is put to zero, ie. first element.
N : int
length of the signal containing all realizations
Returns
-------
indices : ndarray(int)
indices of a vector u that contains (several realizations of) a
periodic signal, such that u[indices] has a number of transient samples
added before each realization
Examples
--------
>>> npp = 1000 # Number of points per period
>>> R = 2 # Number of phase realizations
>>> T = 100 # Number of transient samples
>>> T1 = np.r_[T, np.r_[0:(R-1)*npp+1:npp]] # Transient handling vector
>>> N = R*npp # Total number of samples
>>> indices = transient_indices_periodic(T1,N)
indices = np.r_[900:1000, 0:1000, 1900:2000, 1000:2000]
= [transient samples realization 1, ...
realization 1, ...
transient samples realization 2, ...
realization 2]
"""
T1 = np.atleast_1d(np.asarray(T1, dtype=int))
ntrans = T1[0]
if ntrans != 0:
if len(T1) == 1:
# If starting samples of realizations not specified, then we assume
# the realization start at the first sample
T1 = np.append(T1, 0)
# starting index of each realization and length of signal
T1 = np.append(T1[1:], N)
indices = np.array([], dtype=int)
for i in range(len(T1)-1):
trans = T1[i+1] - 1 - np.mod(np.arange(ntrans)[::-1], T1[i+1]-T1[i])
normal = np.arange(T1[i],T1[i+1])
indices = np.hstack((indices, trans, normal))
else:
# No transient points => output = all indices of the signal
indices = np.arange(N)
return indices
def remove_transient_indices_periodic(T1,N,p):
"""Computes indices for transient handling for periodic signals after
filtering
Let u be a vector of length N containing (several realizations of) a
periodic signal. Let uTot be a vector containing the signal(s) in u with
T1[0] transient points prepended to each realization (see
:func:`transient_indices_periodic`). The starting samples of each
realization can be specified in T1[1:]. Let yTot be a vector/matrix
containing the p outputs of a PNLSS model after applying the input uTot.
Then this function computes the indices to be used with the vectorized form
of yTot such that the transient samples are removed from yTot, i.e. y =
yTot[indices] contains the steady-state output(s) stacked on top of each
other.
Parameters
----------
T1 : ndarray(int)
vector that indicates how the transient is handled. The first element
T1[0] is the number of transient samples that were prepended to each
realization. The other elements T1[1:] indicate the starting sample
of each realization in the input signal. If T1 has only one element,
T1[1] is put to zero.
N : int
length of the input signal containing all realizations
p : int
number of outputs
Returns
-------
indices : ndarray(int)
If uTot is a vector containing (several realizations of) a periodic
signal to which T1[0] transient points were added before each
realization, and if yTot is the corresponding output vector (or matrix
if more than one output), then indices is such that the transient
points are removed from y = yTot.flat[indices]. If p > 1, then indices
is a vector and y = yTot.flat[indices] is a vector with the steady
state outputs stacked after each other.
Examples
--------
>>> npp = 1000 # Number of points per period
>>> R = 2 # Number of phase realizations
>>> T = 100 # Number of transient samples
>>> T1 = np.r_[T, np.r_[0:(R-1)*npp+1:npp]] # Transient handling vector
>>> N = R*npp # Total number of samples
>>> indices_tot = transient_indices_periodic(T1,N)
indices_tot = np.r_[900:1000, 0:1000, 1900:2000, 1000:2000]
>>> p = 1 # One output
>>> indices_removal = remove_transient_indices_periodic(T1,N,p)
np.r_[100:1100, 1200:2200]
>>> indices_tot[indices_removal]
np.r_[:2000] # [realization 1, realization 2]
>>> p = 2 # More than one output
>>> indices_removal = remove_transient_indices_periodic(T1,N,p)
np.r_[100:1100, 1200:2200, 2300:3300, 3400:4400]
Let u be a vector containing `[input realization 1, input realization 2]`
then `uTot = u[indices_tot]` is a vector containing::
[transient samples realization 1, input realization 1,
transient samples realization 2, input realization 2]
Let y1 be a vector containing the first output and y2 be a vector
containing the second output when applying uTot as an input to a
PNLSS model, and let `yTot = [y1, y2].T` be a 2 x 2200 matrix with y1
and y2 in its first and second row, respectively.
Note that `y1 = yTot.flat[:2200]` and `y2 = yTot.flat[2200:4400]`
Then `yTot.flat[indices_removal] = np.r_[y1[100:1100], y1[1200:2200],
y2[100:1100], y2[1200:2200]]`::
[output 1 corresponding to input realization 1,
output 1 corresponding to input realization 2,
output 2 corresponding to input realization 1,
output 2 corresponding to input realization 2]
"""
T1 = np.atleast_1d(np.asarray(T1, dtype=int))
ntrans = T1[0]
if ntrans == 0:
return np.arange(N)
if len(T1) == 1:
# If starting samples of realizations not specified, then we assume
# the realization start at the first sample
T1 = np.append(T1, 0)
# starting index of each realization and length of signal
T1 = np.append(T1[1:], N)
indices = np.array([], dtype=int)
for i in range(len(T1)-1):
# Concatenate indices without transient samples
indices = np.hstack((indices,
np.r_[T1[i]:T1[i+1]] + (i+1)*ntrans))
# TODO This is not correct for p>1. We still store y.shape -> (N,p)
# UPDATE 25/02: maybe correct. Gives correct output, see examples
if p > 1:
# Total number of samples per output = number of samples without + with
# transients
nt = N + ntrans*(len(T1)-1)
tmp = np.empty(p*N, dtype=int)
for i in range(p):
# Stack indices without transient samples on top of each other
tmp[i*N:(i+1)*N] = indices + i*nt
indices = tmp
return indices
def remove_transient_indices_nonperiodic(T2,N,p):
"""Remove transients from arbitrary data.
Computes the indices to be used with a (N,p) matrix containing p output
signals of length N, such that y[indices] contains the transient-free
output(s) of length NT stacked on top of each other (if more than one
output). The transient samples to be removed are specified in T2 (T2 =
np.arange(T2) if T2 is scalar).
Parameters
----------
T2 : int
scalar indicating how many samples from the start are removed or array
indicating which samples are removed
N : int
length of the total signal
p : int
number of outputs
Returns
-------
indices : ndarray(int)
vector of indices, such that y[indices] contains the output(s) without
transients. If more than one output (p > 1), then y[indices] stacks the
transient-free outputs on top of each other.
nt : int
length of the signal without transients
Examples
--------
# One output, T2 scalar
>>> N = 1000 # Total number of samples
>>> T2 = 200 # First 200 samples should be removed after filtering
>>> p = 1 # One output
>>> indices, NT = remove_transient_indices_nonperiodic(T2,N,p)
np.r_[200:1000] # Indices of the transient-free output
NT = 800 # Number of samples in the transient-free output
# Two outputs, T2 scalar
>>> N = 1000 # Total number of samples
>>> T2 = 200 # First 200 samples should be removed after filtering
>>> p = 2 # Two outputs
>>> indices, NT = remove_transient_indices_nonperiodic(T2,N,p)
np.r_[200:1000, 1200:2000]
NT = 800
If y = [y1, y2] is a 1000 x 2 matrix with the two outputs y1 and y2, then
y[indices] = [y1(200:1000]
y2(200:1000)]
is a vector with the transient-free outputs stacked on top of each other
One output, T2 is a vector
>>> N1 = 1000 # Number of samples in a first data set
>>> N2 = 500 # Number of samples in a second data set
>>> N = N1 + N2 # Total number of samples
>>> T2_1 = np.r_[:200] # Transient samples in first data set
>>> T2_2 = np.r_[:100] # Transient samples in second data set
>>> T2 = np.r_[T2_1, N1+T2_2] # Transient samples
>>> p = 1 # One output
>>> indices, NT = remove_transient_indices_nonperiodic(T2,N,p)
np.r_[200:1000, 1100:1500]
NT = 1200
"""
if T2 is None:
return np.s_[:N], N
if isinstance(T2, (int, np.integer)): # np.isscalar(T2):
# Remove all samples up to T2
T2 = np.arange(T2)
T2 = np.atleast_1d(np.asarray(T2, dtype=int))
# Remove transient samples from the total
without_T2 = np.delete(np.arange(N), T2)
# Length of the transient-free signal(s)
NT = len(without_T2)
if p > 1: # for multiple outputs
indices = np.zeros(p*NT, dtype=int)
for i in range(p):
# Stack indices for each output on top of each other
indices[i*NT:(i+1)*NT] = without_T2 + i*N
else:
indices = without_T2
return indices, NT
|
"""Tests related to the spending process.
This includes Spend creation, announcement, broadcast, tracking, manager interactions,
etc.
"""
import pytest
import random
from fixtures import *
from test_framework import serializations
from test_framework.utils import (
COIN,
POSTGRES_IS_SETUP,
RpcError,
wait_for,
)
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_spendtx_management(revault_network, bitcoind):
CSV = 12
revault_network.deploy(2, 1, n_stkmanagers=1, csv=CSV)
man = revault_network.man(0)
amount = 0.24
vault = revault_network.fund(amount)
deposit = f"{vault["txid"]}:{vault["vout"]}"
addr = bitcoind.rpc.getnewaddress()
spent_vaults = [deposit]
feerate = 2
fees = revault_network.compute_spendtx_fees(feerate, len(spent_vaults), 1)
destination = {addr: vault["amount"] - fees}
revault_network.secure_vault(vault)
revault_network.activate_vault(vault)
spend_tx = man.rpc.getspendtx(spent_vaults, destination, feerate)["spend_tx"][
"psbt"
]
# If we are not a manager, it'll fail
with pytest.raises(RpcError, match="This is a manager command"):
revault_network.stk_wallets[0].rpc.updatespendtx(spend_tx)
# But it won't if we are a stakeholder-manager
revault_network.stkman_wallets[0].rpc.updatespendtx(spend_tx)
# It will not accept a spend_tx which spends an unknown Unvault
psbt = serializations.PSBT()
psbt.deserialize(spend_tx)
psbt.tx.vin[0].prevout.hash = 0
insane_spend_tx = psbt.serialize()
with pytest.raises(RpcError, match="Spend transaction refers an unknown Unvault"):
man.rpc.updatespendtx(insane_spend_tx)
# First time, it'll be stored
man.rpc.updatespendtx(spend_tx)
man.wait_for_log("Storing new Spend transaction")
    # We can update it again even if it is unchanged
man.rpc.updatespendtx(spend_tx)
man.wait_for_log("Updating Spend transaction")
assert len(man.rpc.listspendtxs()["spend_txs"]) == 1
# If we delete it..
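    # (The PSBT embeds the unsigned transaction; calc_sha256() computes its txid,
    # which is what delspendtx/setspendtx use to refer to a Spend.)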
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
man.rpc.delspendtx(spend_psbt.tx.hash)
assert len(man.rpc.listspendtxs()["spend_txs"]) == 0
    # When we update it again, it'll be treated as a new transaction
man.rpc.updatespendtx(spend_tx)
man.wait_for_log("Storing new Spend transaction")
assert len(man.rpc.listspendtxs()["spend_txs"]) == 1
# Create another Spend transaction spending two vaults
vault_b = revault_network.fund(amount)
deposit_b = f"{vault_b["txid"]}:{vault_b["vout"]}"
addr_b = bitcoind.rpc.getnewaddress()
spent_vaults = [deposit, deposit_b]
feerate = 50
fees = revault_network.compute_spendtx_fees(feerate, len(spent_vaults), 2)
destination = {
addr: (vault_b["amount"] - fees) // 2,
addr_b: (vault_b["amount"] - fees) // 2,
}
revault_network.secure_vault(vault_b)
revault_network.activate_vault(vault_b)
spend_tx_b = man.rpc.getspendtx(spent_vaults, destination, feerate)["spend_tx"][
"psbt"
]
man.rpc.updatespendtx(spend_tx_b)
man.wait_for_log("Storing new Spend transaction")
spend_txs = man.rpc.listspendtxs()["spend_txs"]
assert len(spend_txs) == 2
assert {
"deposit_outpoints": [deposit],
"deposit_amount": vault["amount"],
"cpfp_amount": 48224,
"psbt": spend_tx,
"change_index": None,
"cpfp_index": 0,
"status": "non_final",
} in spend_txs
assert {
"deposit_outpoints": [deposit_b, deposit],
"deposit_amount": vault["amount"] + vault_b["amount"],
"cpfp_amount": 95808,
"psbt": spend_tx_b,
"change_index": 3,
"cpfp_index": 0,
"status": "non_final",
} in spend_txs or {
"deposit_outpoints": [deposit, deposit_b],
"deposit_amount": vault["amount"] + vault_b["amount"],
"cpfp_amount": 95808,
"psbt": spend_tx_b,
"change_index": 3,
"cpfp_index": 0,
"status": "non_final",
} in spend_txs
# Now we could try to broadcast it..
# But we couldn't broadcast a random txid
with pytest.raises(RpcError, match="Unknown Spend transaction"):
man.rpc.setspendtx(
"d5eb741a31ebf4d2f5d6ae223900f1bd996e209150d3604fca7d9fa5d6136337"
)
# ..And even with an existing one we would have to sign it beforehand!
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_b)
spend_psbt.tx.calc_sha256()
with pytest.raises(
RpcError,
match=f"Not enough signatures, needed: {len(revault_network.mans())}, current: 0",
):
man.rpc.setspendtx(spend_psbt.tx.hash)
# Now, sign the Spend we are going to broadcast
deriv_indexes = [vault["derivation_index"], vault_b["derivation_index"]]
for man in revault_network.mans():
spend_tx_b = man.man_keychain.sign_spend_psbt(spend_tx_b, deriv_indexes)
# Just before broadcasting it, prepare a competing one to later try to make Cosigning Servers
# sign twice
vault_c = revault_network.fund(amount / 2)
deposit_c = f"{vault_c["txid"]}:{vault_c["vout"]}"
rogue_spent_vaults = [deposit, deposit_b, deposit_c]
feerate = 50
fees = revault_network.compute_spendtx_fees(feerate, len(rogue_spent_vaults), 2)
destination = {
addr: (vault_b["amount"] - fees) // 2,
addr_b: (vault_b["amount"] - fees) // 2,
}
revault_network.secure_vault(vault_c)
revault_network.activate_vault(vault_c)
rogue_spend_tx = man.rpc.getspendtx(rogue_spent_vaults, destination, feerate)[
"spend_tx"
]["psbt"]
deriv_indexes = deriv_indexes + [vault_c["derivation_index"]]
for man in revault_network.mans():
rogue_spend_tx = man.man_keychain.sign_spend_psbt(rogue_spend_tx, deriv_indexes)
man.rpc.updatespendtx(rogue_spend_tx)
# Then broadcast the actual Spend
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_b)
spend_psbt.tx.calc_sha256()
spend_tx_b = spend_psbt.serialize()
man.rpc.updatespendtx(spend_tx_b)
man.rpc.setspendtx(spend_psbt.tx.hash)
# If we show good faith (ask again for the same set of outpoints), Cosigning Servers will
# try to be helpful.
man.rpc.setspendtx(spend_psbt.tx.hash)
    # However, they won't let us try to sneak in another outpoint
rogue_spend_psbt = serializations.PSBT()
rogue_spend_psbt.deserialize(rogue_spend_tx)
rogue_spend_psbt.tx.calc_sha256()
with pytest.raises(
RpcError,
match="one Cosigning Server already signed a Spend transaction spending one of these vaults",
):
man.rpc.setspendtx(rogue_spend_psbt.tx.hash)
    # It gets marked as in the process of being unvaulted immediately (at the next
    # bitcoind poll), and will get marked as successfully unvaulted after a single confirmation.
wait_for(
lambda: len(man.rpc.listvaults(["unvaulting"], spent_vaults)["vaults"])
== len(spent_vaults)
)
bitcoind.generate_block(1, wait_for_mempool=len(spent_vaults))
wait_for(
lambda: len(man.rpc.listvaults(["unvaulted"], spent_vaults)["vaults"])
== len(spent_vaults)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV - 1)
man.wait_for_log(f"Succesfully broadcasted Spend tx '{spend_psbt.tx.hash}'")
wait_for(
lambda: len(man.rpc.listvaults(["spending"], spent_vaults)["vaults"])
== len(spent_vaults)
)
# And the vault we tried to sneak in wasn't even unvaulted
assert len(man.rpc.listvaults(["active"], [deposit_c])["vaults"]) == 1
bitcoind.generate_block(8)
wait_for(
lambda: len(man.rpc.listvaults(["spent"], spent_vaults)["vaults"])
== len(spent_vaults)
)
txs = man.rpc.listspendtxs()["spend_txs"]
txs.sort(key=lambda tx: tx["deposit_amount"])
# The spend is confirmed
spend_tx = txs[0]
assert deposit in spend_tx["deposit_outpoints"]
assert deposit_b in spend_tx["deposit_outpoints"]
assert spend_tx["deposit_amount"] == vault["amount"] + vault_b["amount"]
assert spend_tx["cpfp_amount"] == 95808
assert spend_tx["change_index"] == 3
assert spend_tx["cpfp_index"] == 0
assert spend_tx["status"] == "confirmed"
# The conflicting spend is deprecated
rogue_spend_tx = txs[1]
assert deposit in rogue_spend_tx["deposit_outpoints"]
assert deposit_b in rogue_spend_tx["deposit_outpoints"]
assert deposit_c in rogue_spend_tx["deposit_outpoints"]
assert (
rogue_spend_tx["deposit_amount"]
== vault["amount"] + vault_b["amount"] + vault_c["amount"]
)
assert spend_tx["cpfp_amount"] == 95808
assert rogue_spend_tx["change_index"] == 3
assert rogue_spend_tx["cpfp_index"] == 0
assert rogue_spend_tx["status"] == "deprecated"
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_spends_concurrent(revault_network, bitcoind):
"""
    Here we test the creation and successful broadcast of two Spend transactions
    handled concurrently but not conflicting.
"""
CSV = 1024
revault_network.deploy(3, 2, csv=CSV)
man = revault_network.man(1)
    # FIXME: there is something off with higher amounts and the test framework fee
    # computation
amounts = [0.22, 16, 3, 21]
vaults = revault_network.fundmany(amounts)
    # Edge case: bitcoind can actually mess up the amounts
amounts = []
deposits = []
deriv_indexes = []
for v in vaults:
revault_network.secure_vault(v)
revault_network.activate_vault(v)
deposits.append(f"{v["txid"]}:{v["vout"]}")
deriv_indexes.append(v["derivation_index"])
amounts.append(v["amount"])
(deposits_a, deposits_b) = (deposits[:2], deposits[2:])
(amounts_a, amounts_b) = (amounts[:2], amounts[2:])
(indexes_a, indexes_b) = (deriv_indexes[:2], deriv_indexes[2:])
# Spending to a P2WSH (effectively a change but hey), with a change output
destinations = {man.rpc.getdepositaddress()["address"]: sum(amounts_a) // 2}
spend_tx_a = man.rpc.getspendtx(deposits_a, destinations, 1)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx_a = man.man_keychain.sign_spend_psbt(spend_tx_a, indexes_a)
man.rpc.updatespendtx(spend_tx_a)
# Spending to a P2WPKH, with a change output
destinations = {bitcoind.rpc.getnewaddress(): sum(amounts_b) // 2}
spend_tx_b = man.rpc.getspendtx(deposits_b, destinations, 1)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx_b = man.man_keychain.sign_spend_psbt(spend_tx_b, indexes_b)
man.rpc.updatespendtx(spend_tx_b)
    # Of course, we can stop and restart the manager and still broadcast the Spend
man.stop()
man.proc.wait(10)
man.start()
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_a)
spend_psbt.tx.calc_sha256()
spend_txid_a = spend_psbt.tx.hash
man.rpc.setspendtx(spend_txid_a)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_b)
spend_psbt.tx.calc_sha256()
spend_txid_b = spend_psbt.tx.hash
man.rpc.setspendtx(spend_txid_b)
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["unvaulting"], deposits)["vaults"])
== len(deposits)
)
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits))
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV - 1)
man.wait_for_logs(
[
f"Succesfully broadcasted Spend tx '{spend_txid_a}'",
f"Succesfully broadcasted Spend tx '{spend_txid_b}'",
]
)
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["spending"], deposits)["vaults"])
== len(deposits)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_txid_a, spend_txid_b])
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["spent"], deposits)["vaults"])
== len(deposits)
)
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_spends_conflicting(revault_network, bitcoind):
"""
Here we test two Spends which each spend 2 vaults, with one vault shared and all
vaults created from the same Deposit transaction.
"""
# Get some more coins
bitcoind.generate_block(12)
CSV = 112
revault_network.deploy(5, 3, csv=CSV)
man = revault_network.man(0)
amounts = [0.1, 64, 410]
vaults = revault_network.fundmany(amounts)
assert len(vaults) == len(amounts)
# Edge case: bitcoind can actually mess up with the amounts
amounts = []
deposits = []
deriv_indexes = []
for v in vaults:
revault_network.secure_vault(v)
revault_network.activate_vault(v)
deposits.append(f"{v["txid"]}:{v["vout"]}")
deriv_indexes.append(v["derivation_index"])
amounts.append(v["amount"])
(deposits_a, deposits_b) = (deposits[:2], deposits[1:])
(amounts_a, amounts_b) = (amounts[:2], amounts[1:])
(indexes_a, indexes_b) = (deriv_indexes[:2], deriv_indexes[1:])
feerate = 5_000
fees = revault_network.compute_spendtx_fees(feerate, len(deposits_a), 1)
destinations = {bitcoind.rpc.getnewaddress(): sum(amounts_a) - fees}
spend_tx_a = man.rpc.getspendtx(deposits_a, destinations, 1)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx_a = man.man_keychain.sign_spend_psbt(spend_tx_a, indexes_a)
man.rpc.updatespendtx(spend_tx_a)
feerate = 10_000
fees = revault_network.compute_spendtx_fees(feerate, len(deposits_b), 1, True)
destinations = {bitcoind.rpc.getnewaddress(): (sum(amounts_b) - fees) // 2}
spend_tx_b = man.rpc.getspendtx(deposits_b, destinations, 1)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx_b = man.man_keychain.sign_spend_psbt(spend_tx_b, indexes_b)
man.rpc.updatespendtx(spend_tx_b)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_a)
spend_psbt.tx.calc_sha256()
spend_txid_a = spend_psbt.tx.hash
man.rpc.setspendtx(spend_txid_a)
# We can ask the Cosigning Servers their signature again for the very same Spend
man.rpc.setspendtx(spend_txid_a)
# The two Spend have conflicting inputs, therefore the Cosigning Server won't
# accept to sign the second one.
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_b)
spend_psbt.tx.calc_sha256()
with pytest.raises(
RpcError,
match="one Cosigning Server already signed a Spend transaction spending one of these vaults",
):
man.rpc.setspendtx(spend_psbt.tx.hash)
wait_for(
lambda: len(man.rpc.listvaults(["unvaulting"], deposits_a)["vaults"])
== len(deposits_a)
)
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits_a))
wait_for(
lambda: len(man.rpc.listvaults(["unvaulted"], deposits_a)["vaults"])
== len(deposits_a)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV - 1)
man.wait_for_log(
f"Succesfully broadcasted Spend tx '{spend_txid_a}'",
)
wait_for(
lambda: len(man.rpc.listvaults(["spending"], deposits_a)["vaults"])
== len(deposits_a)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_txid_a])
wait_for(
lambda: len(man.rpc.listvaults(["spent"], deposits)["vaults"])
== len(deposits_a)
)
for vault in man.rpc.listvaults(["spent"], deposits)["vaults"]:
assert vault["moved_at"] is not None
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_spend_threshold(revault_network, bitcoind, executor):
CSV = 20
managers_threshold = 2
revault_network.deploy(4, 3, csv=CSV, managers_threshold=managers_threshold)
man = revault_network.man(0)
# Get some more funds
bitcoind.generate_block(1)
vaults = []
deposits = []
deriv_indexes = []
total_amount = 0
for i in range(5):
amount = random.randint(5, 5000) / 100
vaults.append(revault_network.fund(amount))
deposits.append(f"{vaults[i]["txid"]}:{vaults[i]["vout"]}")
deriv_indexes.append(vaults[i]["derivation_index"])
total_amount += vaults[i]["amount"]
revault_network.activate_fresh_vaults(vaults)
feerate = 1
n_outputs = 3
fees = revault_network.compute_spendtx_fees(feerate, len(deposits), n_outputs)
destinations = {
bitcoind.rpc.getnewaddress(): (total_amount - fees) // n_outputs
for _ in range(n_outputs)
}
spend_tx = man.rpc.getspendtx(deposits, destinations, feerate)["spend_tx"]["psbt"]
# Trying to broadcast when managers_threshold - 1 managers signed
for man in revault_network.mans()[: managers_threshold - 1]:
spend_tx = man.man_keychain.sign_spend_psbt(spend_tx, deriv_indexes)
man.rpc.updatespendtx(spend_tx)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
# Revaultd didn't like it
with pytest.raises(
RpcError,
match=f"Not enough signatures, needed: {managers_threshold}, current: {managers_threshold - 1}'",
):
man.rpc.setspendtx(spend_psbt.tx.hash)
# Killing the daemon and restart shouldn't cause any issue
for m in revault_network.mans():
m.stop()
m.start()
# Alright, I'll make the last manager sign...
man = revault_network.mans()[managers_threshold]
spend_tx = man.man_keychain.sign_spend_psbt(spend_tx, deriv_indexes)
man.rpc.updatespendtx(spend_tx)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
# All good now?
man.rpc.setspendtx(spend_psbt.tx.hash)
for m in revault_network.mans():
wait_for(
lambda: len(m.rpc.listvaults(["unvaulting"], deposits)["vaults"])
== len(deposits)
)
# Killing the daemon and restart it while unvaulting shouldn't cause
# any issue
for m in revault_network.mans():
m.stop()
m.start()
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits))
for m in revault_network.mans():
wait_for(
lambda: len(m.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV)
man.wait_for_log(
f"Succesfully broadcasted Spend tx '{spend_psbt.tx.hash}'",
)
for m in revault_network.mans():
wait_for(
lambda: len(m.rpc.listvaults(["spending"], deposits)["vaults"])
== len(deposits)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_psbt.tx.hash])
for m in revault_network.mans():
wait_for(
lambda: len(m.rpc.listvaults(["spent"], deposits)["vaults"])
== len(deposits)
)
for vault in m.rpc.listvaults(["spent"], deposits)["vaults"]:
assert vault["moved_at"] is not None
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_large_spends(revault_network, bitcoind, executor):
CSV = 2016 # 2 weeks :tm:
revault_network.deploy(17, 8, csv=CSV)
man = revault_network.man(0)
# Get some more funds
bitcoind.generate_block(1)
deposits = []
deriv_indexes = []
total_amount = 0
for _ in range(5):
new_vaults = []
for i in range(2):
amount = random.randint(5, 5000) / 100
new_vaults.append(revault_network.fund(amount))
deposits.append(f"{new_vaults[i]["txid"]}:{new_vaults[i]["vout"]}")
deriv_indexes.append(new_vaults[i]["derivation_index"])
total_amount += new_vaults[i]["amount"]
revault_network.activate_fresh_vaults(new_vaults)
feerate = 1
n_outputs = random.randint(1, 3)
fees = revault_network.compute_spendtx_fees(feerate, len(deposits), n_outputs)
destinations = {
bitcoind.rpc.getnewaddress(): (total_amount - fees) // n_outputs
for _ in range(n_outputs)
}
spend_tx = man.rpc.getspendtx(deposits, destinations, feerate)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx = man.man_keychain.sign_spend_psbt(spend_tx, deriv_indexes)
man.rpc.updatespendtx(spend_tx)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
man.rpc.setspendtx(spend_psbt.tx.hash)
# Killing the daemon and restart it while unvaulting shouldn't cause
# any issue
for man in revault_network.mans():
man.stop()
man.start()
wait_for(
lambda: len(man.rpc.listvaults(["unvaulting"], deposits)["vaults"])
== len(deposits)
)
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits))
wait_for(
lambda: len(man.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# We'll broadcast the Spend transaction as soon as it's valid
# Note that bitcoind's RPC socket may timeout if it needs to generate too many
# blocks at once. So, spread them a bit.
for _ in range(10):
bitcoind.generate_block(CSV // 10)
bitcoind.generate_block(CSV % 10 - 1)
man.wait_for_log(
f"Succesfully broadcasted Spend tx '{spend_psbt.tx.hash}'",
)
wait_for(
lambda: len(man.rpc.listvaults(["spending"], deposits)["vaults"])
== len(deposits)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_psbt.tx.hash])
wait_for(
lambda: len(man.rpc.listvaults(["spent"], deposits)["vaults"]) == len(deposits)
)
for vault in man.rpc.listvaults(["spent"], deposits)["vaults"]:
assert vault["moved_at"] is not None
# Tests that getspendtx returns an error when trying to build a spend too big
# (it wouldn't be possible to announce it to the coordinator when fully signed)
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_not_announceable_spend(revault_network, bitcoind, executor):
CSV = 4
revault_network.deploy(5, 7, csv=CSV)
man = revault_network.man(0)
vaults = []
for _ in range(5):
amounts = [(i + 1) / 100 for i in range(4)]
new_vaults = revault_network.fundmany(amounts)
revault_network.activate_fresh_vaults(new_vaults)
vaults += new_vaults
total_amount = sum(v["amount"] for v in vaults)
deposits = [f"{v["txid"]}:{v["vout"]}" for v in vaults]
deriv_indexes = [v["derivation_index"] for v in vaults]
feerate = 1
n_outputs = 588
fees = revault_network.compute_spendtx_fees(feerate, len(deposits), n_outputs)
output_value = int((total_amount - fees) // n_outputs)
destinations = {
bitcoind.rpc.getnewaddress(): output_value for _ in range(n_outputs)
}
# Hey, this spend is huge!
with pytest.raises(
RpcError, match="Spend transaction is too large, try spending less outpoints'"
):
man.rpc.getspendtx(deposits, destinations, feerate)
# One less spent outpoint is ok though
deposits.pop()
deriv_indexes.pop()
amounts.pop()
total_amount = sum(amounts) * COIN
fees = revault_network.compute_spendtx_fees(feerate, len(deposits), n_outputs)
output_value = int((total_amount - fees) // n_outputs)
for addr in destinations:
destinations[addr] = output_value
spend_tx = man.rpc.getspendtx(deposits, destinations, feerate)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx = man.man_keychain.sign_spend_psbt(spend_tx, deriv_indexes)
man.rpc.updatespendtx(spend_tx)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
spend_txid = spend_psbt.tx.hash
man.rpc.setspendtx(spend_txid)
wait_for(
lambda: len(man.rpc.listvaults(["unvaulting"], deposits)["vaults"])
== len(deposits)
)
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits))
wait_for(
lambda: len(man.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV - 1)
man.wait_for_log(f"Succesfully broadcasted Spend tx '{spend_txid}'")
wait_for(
lambda: len(man.rpc.listvaults(["spending"], deposits)["vaults"])
== len(deposits)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_psbt.tx.hash])
wait_for(
lambda: len(man.rpc.listvaults(["spent"], deposits)["vaults"]) == len(deposits)
)
for vault in man.rpc.listvaults(["spent"], deposits)["vaults"]:
assert vault["moved_at"] is not None
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_revaulted_spend(revault_network, bitcoind, executor):
"""
Revault an ongoing Spend transaction carried out by the managers, under misc
circumstances.
"""
CSV = 12
revault_network.deploy(2, 2, n_stkmanagers=1, csv=CSV)
mans = revault_network.mans()
stks = revault_network.stks()
# Simple case. Managers Spend a single vault.
vault = revault_network.fund(0.05)
revault_network.secure_vault(vault)
revault_network.activate_vault(vault)
revault_network.spend_vaults_anyhow_unconfirmed([vault])
revault_network.cancel_vault(vault)
# Managers spend two vaults, both are canceled.
vaults = [revault_network.fund(0.05), revault_network.fund(0.1)]
for v in vaults:
revault_network.secure_vault(v)
revault_network.activate_vault(v)
revault_network.unvault_vaults_anyhow(vaults)
for vault in vaults:
revault_network.cancel_vault(vault)
# Managers spend three vaults, only a single one is canceled. Two of them were
# created by the same Deposit transaction.
vaults = revault_network.fundmany([0.2, 0.08])
vaults.append(revault_network.fund(0.03))
for v in vaults:
revault_network.secure_vault(v)
revault_network.activate_vault(v)
revault_network.unvault_vaults_anyhow(vaults)
revault_network.cancel_vault(vaults[0])
# vaults[0] is canceled, therefore the Spend transaction is now invalid. The vaults
# should be marked as unvaulted since they are not being spent anymore.
deposits = [f"{v["txid"]}:{v["vout"]}" for v in vaults[1:]]
for w in mans + stks:
wait_for(
lambda: len(w.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# Test that the coordinator will broadcast our spends
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_coordinator_broadcast(revault_network, bitcoind, executor):
"""
Test that the coordinator broadcasts spend transactions when they become valid
"""
CSV = 12
revault_network.deploy(2, 2, n_stkmanagers=1, csv=CSV)
vault = revault_network.fund(0.05)
revault_network.secure_vault(vault)
revault_network.activate_vault(vault)
revault_network.unvault_vaults_anyhow([vault])
revault_network.stop_wallets()
bitcoind.generate_block(CSV - 1)
bitcoind.generate_block(1, wait_for_mempool=1)
revault_network.start_wallets()
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["spent"])["vaults"]) == 1,
)
| """Tests related to the spending process.
This includes the Spend creation, announcement, broadcast, tracking, managers interaction,
etc..
"""
import pytest
import random
from fixtures import *
from test_framework import serializations
from test_framework.utils import (
COIN,
POSTGRES_IS_SETUP,
RpcError,
wait_for,
)
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_spendtx_management(revault_network, bitcoind):
CSV = 12
revault_network.deploy(2, 1, n_stkmanagers=1, csv=CSV)
man = revault_network.man(0)
amount = 0.24
vault = revault_network.fund(amount)
deposit = f"{vault['txid']}:{vault['vout']}"
addr = bitcoind.rpc.getnewaddress()
spent_vaults = [deposit]
feerate = 2
fees = revault_network.compute_spendtx_fees(feerate, len(spent_vaults), 1)
destination = {addr: vault["amount"] - fees}
revault_network.secure_vault(vault)
revault_network.activate_vault(vault)
spend_tx = man.rpc.getspendtx(spent_vaults, destination, feerate)["spend_tx"][
"psbt"
]
# If we are not a manager, it'll fail
with pytest.raises(RpcError, match="This is a manager command"):
revault_network.stk_wallets[0].rpc.updatespendtx(spend_tx)
# But it won't if we are a stakeholder-manager
revault_network.stkman_wallets[0].rpc.updatespendtx(spend_tx)
# It will not accept a spend_tx which spends an unknown Unvault
psbt = serializations.PSBT()
psbt.deserialize(spend_tx)
psbt.tx.vin[0].prevout.hash = 0
insane_spend_tx = psbt.serialize()
with pytest.raises(RpcError, match="Spend transaction refers an unknown Unvault"):
man.rpc.updatespendtx(insane_spend_tx)
# First time, it'll be stored
man.rpc.updatespendtx(spend_tx)
man.wait_for_log("Storing new Spend transaction")
# We can actually update it no matter if it's the same
man.rpc.updatespendtx(spend_tx)
man.wait_for_log("Updating Spend transaction")
assert len(man.rpc.listspendtxs()["spend_txs"]) == 1
# If we delete it..
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
man.rpc.delspendtx(spend_psbt.tx.hash)
assert len(man.rpc.listspendtxs()["spend_txs"]) == 0
# When we update it it'll be treated as a new transaction
man.rpc.updatespendtx(spend_tx)
man.wait_for_log("Storing new Spend transaction")
assert len(man.rpc.listspendtxs()["spend_txs"]) == 1
# Create another Spend transaction spending two vaults
vault_b = revault_network.fund(amount)
deposit_b = f"{vault_b['txid']}:{vault_b['vout']}"
addr_b = bitcoind.rpc.getnewaddress()
spent_vaults = [deposit, deposit_b]
feerate = 50
fees = revault_network.compute_spendtx_fees(feerate, len(spent_vaults), 2)
destination = {
addr: (vault_b["amount"] - fees) // 2,
addr_b: (vault_b["amount"] - fees) // 2,
}
revault_network.secure_vault(vault_b)
revault_network.activate_vault(vault_b)
spend_tx_b = man.rpc.getspendtx(spent_vaults, destination, feerate)["spend_tx"][
"psbt"
]
man.rpc.updatespendtx(spend_tx_b)
man.wait_for_log("Storing new Spend transaction")
spend_txs = man.rpc.listspendtxs()["spend_txs"]
assert len(spend_txs) == 2
assert {
"deposit_outpoints": [deposit],
"deposit_amount": vault["amount"],
"cpfp_amount": 48224,
"psbt": spend_tx,
"change_index": None,
"cpfp_index": 0,
"status": "non_final",
} in spend_txs
assert {
"deposit_outpoints": [deposit_b, deposit],
"deposit_amount": vault["amount"] + vault_b["amount"],
"cpfp_amount": 95808,
"psbt": spend_tx_b,
"change_index": 3,
"cpfp_index": 0,
"status": "non_final",
} in spend_txs or {
"deposit_outpoints": [deposit, deposit_b],
"deposit_amount": vault["amount"] + vault_b["amount"],
"cpfp_amount": 95808,
"psbt": spend_tx_b,
"change_index": 3,
"cpfp_index": 0,
"status": "non_final",
} in spend_txs
# Now we could try to broadcast it..
# But we couldn't broadcast a random txid
with pytest.raises(RpcError, match="Unknown Spend transaction"):
man.rpc.setspendtx(
"d5eb741a31ebf4d2f5d6ae223900f1bd996e209150d3604fca7d9fa5d6136337"
)
# ..And even with an existing one we would have to sign it beforehand!
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_b)
spend_psbt.tx.calc_sha256()
with pytest.raises(
RpcError,
match=f"Not enough signatures, needed: {len(revault_network.mans())}, current: 0",
):
man.rpc.setspendtx(spend_psbt.tx.hash)
# Now, sign the Spend we are going to broadcast
deriv_indexes = [vault["derivation_index"], vault_b["derivation_index"]]
for man in revault_network.mans():
spend_tx_b = man.man_keychain.sign_spend_psbt(spend_tx_b, deriv_indexes)
# Just before broadcasting it, prepare a competing one to later try to make Cosigning Servers
# sign twice
vault_c = revault_network.fund(amount / 2)
deposit_c = f"{vault_c['txid']}:{vault_c['vout']}"
rogue_spent_vaults = [deposit, deposit_b, deposit_c]
feerate = 50
fees = revault_network.compute_spendtx_fees(feerate, len(rogue_spent_vaults), 2)
destination = {
addr: (vault_b["amount"] - fees) // 2,
addr_b: (vault_b["amount"] - fees) // 2,
}
revault_network.secure_vault(vault_c)
revault_network.activate_vault(vault_c)
rogue_spend_tx = man.rpc.getspendtx(rogue_spent_vaults, destination, feerate)[
"spend_tx"
]["psbt"]
deriv_indexes = deriv_indexes + [vault_c["derivation_index"]]
for man in revault_network.mans():
rogue_spend_tx = man.man_keychain.sign_spend_psbt(rogue_spend_tx, deriv_indexes)
man.rpc.updatespendtx(rogue_spend_tx)
# Then broadcast the actual Spend
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_b)
spend_psbt.tx.calc_sha256()
spend_tx_b = spend_psbt.serialize()
man.rpc.updatespendtx(spend_tx_b)
man.rpc.setspendtx(spend_psbt.tx.hash)
# If we show good faith (ask again for the same set of outpoints), Cosigning Servers will
# try to be helpful.
man.rpc.setspendtx(spend_psbt.tx.hash)
# However, they won't let us trying to sneak in another outpoint
rogue_spend_psbt = serializations.PSBT()
rogue_spend_psbt.deserialize(rogue_spend_tx)
rogue_spend_psbt.tx.calc_sha256()
with pytest.raises(
RpcError,
match="one Cosigning Server already signed a Spend transaction spending one of these vaults",
):
man.rpc.setspendtx(rogue_spend_psbt.tx.hash)
# It gets marked as in the process of being unvaulted immediately (next bitcoind
poll), and will get marked as successfully unvaulted after a single confirmation.
wait_for(
lambda: len(man.rpc.listvaults(["unvaulting"], spent_vaults)["vaults"])
== len(spent_vaults)
)
bitcoind.generate_block(1, wait_for_mempool=len(spent_vaults))
wait_for(
lambda: len(man.rpc.listvaults(["unvaulted"], spent_vaults)["vaults"])
== len(spent_vaults)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV - 1)
man.wait_for_log(f"Succesfully broadcasted Spend tx '{spend_psbt.tx.hash}'")
wait_for(
lambda: len(man.rpc.listvaults(["spending"], spent_vaults)["vaults"])
== len(spent_vaults)
)
# And the vault we tried to sneak in wasn't even unvaulted
assert len(man.rpc.listvaults(["active"], [deposit_c])["vaults"]) == 1
bitcoind.generate_block(8)
wait_for(
lambda: len(man.rpc.listvaults(["spent"], spent_vaults)["vaults"])
== len(spent_vaults)
)
txs = man.rpc.listspendtxs()["spend_txs"]
txs.sort(key=lambda tx: tx["deposit_amount"])
# The spend is confirmed
spend_tx = txs[0]
assert deposit in spend_tx["deposit_outpoints"]
assert deposit_b in spend_tx["deposit_outpoints"]
assert spend_tx["deposit_amount"] == vault["amount"] + vault_b["amount"]
assert spend_tx["cpfp_amount"] == 95808
assert spend_tx["change_index"] == 3
assert spend_tx["cpfp_index"] == 0
assert spend_tx["status"] == "confirmed"
# The conflicting spend is deprecated
rogue_spend_tx = txs[1]
assert deposit in rogue_spend_tx["deposit_outpoints"]
assert deposit_b in rogue_spend_tx["deposit_outpoints"]
assert deposit_c in rogue_spend_tx["deposit_outpoints"]
assert (
rogue_spend_tx["deposit_amount"]
== vault["amount"] + vault_b["amount"] + vault_c["amount"]
)
assert spend_tx["cpfp_amount"] == 95808
assert rogue_spend_tx["change_index"] == 3
assert rogue_spend_tx["cpfp_index"] == 0
assert rogue_spend_tx["status"] == "deprecated"
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_spends_concurrent(revault_network, bitcoind):
"""
Here we test the creation and successful broadcast of two Spend transactions
handled concurrently but not conflicting.
"""
CSV = 1024
revault_network.deploy(3, 2, csv=CSV)
man = revault_network.man(1)
# FIXME: there is something up with higher numbers and the test framework fee
# computation
amounts = [0.22, 16, 3, 21]
vaults = revault_network.fundmany(amounts)
# Edge case: bitcoind can actually mess up with the amounts
amounts = []
deposits = []
deriv_indexes = []
for v in vaults:
revault_network.secure_vault(v)
revault_network.activate_vault(v)
deposits.append(f"{v['txid']}:{v['vout']}")
deriv_indexes.append(v["derivation_index"])
amounts.append(v["amount"])
(deposits_a, deposits_b) = (deposits[:2], deposits[2:])
(amounts_a, amounts_b) = (amounts[:2], amounts[2:])
(indexes_a, indexes_b) = (deriv_indexes[:2], deriv_indexes[2:])
# Spending to a P2WSH (effectively a change but hey), with a change output
destinations = {man.rpc.getdepositaddress()["address"]: sum(amounts_a) // 2}
spend_tx_a = man.rpc.getspendtx(deposits_a, destinations, 1)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx_a = man.man_keychain.sign_spend_psbt(spend_tx_a, indexes_a)
man.rpc.updatespendtx(spend_tx_a)
# Spending to a P2WPKH, with a change output
destinations = {bitcoind.rpc.getnewaddress(): sum(amounts_b) // 2}
spend_tx_b = man.rpc.getspendtx(deposits_b, destinations, 1)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx_b = man.man_keychain.sign_spend_psbt(spend_tx_b, indexes_b)
man.rpc.updatespendtx(spend_tx_b)
# Of course, we can just stop and still broadcast the Spend
man.stop()
man.proc.wait(10)
man.start()
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_a)
spend_psbt.tx.calc_sha256()
spend_txid_a = spend_psbt.tx.hash
man.rpc.setspendtx(spend_txid_a)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_b)
spend_psbt.tx.calc_sha256()
spend_txid_b = spend_psbt.tx.hash
man.rpc.setspendtx(spend_txid_b)
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["unvaulting"], deposits)["vaults"])
== len(deposits)
)
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits))
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV - 1)
man.wait_for_logs(
[
f"Succesfully broadcasted Spend tx '{spend_txid_a}'",
f"Succesfully broadcasted Spend tx '{spend_txid_b}'",
]
)
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["spending"], deposits)["vaults"])
== len(deposits)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_txid_a, spend_txid_b])
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["spent"], deposits)["vaults"])
== len(deposits)
)
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_spends_conflicting(revault_network, bitcoind):
"""
Here we test two Spends which each spend 2 vaults, with one vault shared and all
vaults created from the same Deposit transaction.
"""
# Get some more coins
bitcoind.generate_block(12)
CSV = 112
revault_network.deploy(5, 3, csv=CSV)
man = revault_network.man(0)
amounts = [0.1, 64, 410]
vaults = revault_network.fundmany(amounts)
assert len(vaults) == len(amounts)
# Edge case: bitcoind can actually mess up with the amounts
amounts = []
deposits = []
deriv_indexes = []
for v in vaults:
revault_network.secure_vault(v)
revault_network.activate_vault(v)
deposits.append(f"{v['txid']}:{v['vout']}")
deriv_indexes.append(v["derivation_index"])
amounts.append(v["amount"])
(deposits_a, deposits_b) = (deposits[:2], deposits[1:])
(amounts_a, amounts_b) = (amounts[:2], amounts[1:])
(indexes_a, indexes_b) = (deriv_indexes[:2], deriv_indexes[1:])
feerate = 5_000
fees = revault_network.compute_spendtx_fees(feerate, len(deposits_a), 1)
destinations = {bitcoind.rpc.getnewaddress(): sum(amounts_a) - fees}
spend_tx_a = man.rpc.getspendtx(deposits_a, destinations, 1)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx_a = man.man_keychain.sign_spend_psbt(spend_tx_a, indexes_a)
man.rpc.updatespendtx(spend_tx_a)
feerate = 10_000
fees = revault_network.compute_spendtx_fees(feerate, len(deposits_b), 1, True)
destinations = {bitcoind.rpc.getnewaddress(): (sum(amounts_b) - fees) // 2}
spend_tx_b = man.rpc.getspendtx(deposits_b, destinations, 1)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx_b = man.man_keychain.sign_spend_psbt(spend_tx_b, indexes_b)
man.rpc.updatespendtx(spend_tx_b)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_a)
spend_psbt.tx.calc_sha256()
spend_txid_a = spend_psbt.tx.hash
man.rpc.setspendtx(spend_txid_a)
# We can ask the Cosigning Servers their signature again for the very same Spend
man.rpc.setspendtx(spend_txid_a)
# The two Spend have conflicting inputs, therefore the Cosigning Server won't
# accept to sign the second one.
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx_b)
spend_psbt.tx.calc_sha256()
with pytest.raises(
RpcError,
match="one Cosigning Server already signed a Spend transaction spending one of these vaults",
):
man.rpc.setspendtx(spend_psbt.tx.hash)
wait_for(
lambda: len(man.rpc.listvaults(["unvaulting"], deposits_a)["vaults"])
== len(deposits_a)
)
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits_a))
wait_for(
lambda: len(man.rpc.listvaults(["unvaulted"], deposits_a)["vaults"])
== len(deposits_a)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV - 1)
man.wait_for_log(
f"Succesfully broadcasted Spend tx '{spend_txid_a}'",
)
wait_for(
lambda: len(man.rpc.listvaults(["spending"], deposits_a)["vaults"])
== len(deposits_a)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_txid_a])
wait_for(
lambda: len(man.rpc.listvaults(["spent"], deposits)["vaults"])
== len(deposits_a)
)
for vault in man.rpc.listvaults(["spent"], deposits)["vaults"]:
assert vault["moved_at"] is not None
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_spend_threshold(revault_network, bitcoind, executor):
CSV = 20
managers_threshold = 2
revault_network.deploy(4, 3, csv=CSV, managers_threshold=managers_threshold)
man = revault_network.man(0)
# Get some more funds
bitcoind.generate_block(1)
vaults = []
deposits = []
deriv_indexes = []
total_amount = 0
for i in range(5):
amount = random.randint(5, 5000) / 100
vaults.append(revault_network.fund(amount))
deposits.append(f"{vaults[i]['txid']}:{vaults[i]['vout']}")
deriv_indexes.append(vaults[i]["derivation_index"])
total_amount += vaults[i]["amount"]
revault_network.activate_fresh_vaults(vaults)
feerate = 1
n_outputs = 3
fees = revault_network.compute_spendtx_fees(feerate, len(deposits), n_outputs)
destinations = {
bitcoind.rpc.getnewaddress(): (total_amount - fees) // n_outputs
for _ in range(n_outputs)
}
spend_tx = man.rpc.getspendtx(deposits, destinations, feerate)["spend_tx"]["psbt"]
# Trying to broadcast when managers_threshold - 1 managers signed
for man in revault_network.mans()[: managers_threshold - 1]:
spend_tx = man.man_keychain.sign_spend_psbt(spend_tx, deriv_indexes)
man.rpc.updatespendtx(spend_tx)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
# Revaultd didn't like it
with pytest.raises(
RpcError,
match=f"Not enough signatures, needed: {managers_threshold}, current: {managers_threshold - 1}'",
):
man.rpc.setspendtx(spend_psbt.tx.hash)
# Killing the daemon and restart shouldn't cause any issue
for m in revault_network.mans():
m.stop()
m.start()
# Alright, I'll make the last manager sign...
man = revault_network.mans()[managers_threshold]
spend_tx = man.man_keychain.sign_spend_psbt(spend_tx, deriv_indexes)
man.rpc.updatespendtx(spend_tx)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
# All good now?
man.rpc.setspendtx(spend_psbt.tx.hash)
for m in revault_network.mans():
wait_for(
lambda: len(m.rpc.listvaults(["unvaulting"], deposits)["vaults"])
== len(deposits)
)
# Killing the daemon and restart it while unvaulting shouldn't cause
# any issue
for m in revault_network.mans():
m.stop()
m.start()
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits))
for m in revault_network.mans():
wait_for(
lambda: len(m.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV)
man.wait_for_log(
f"Succesfully broadcasted Spend tx '{spend_psbt.tx.hash}'",
)
for m in revault_network.mans():
wait_for(
lambda: len(m.rpc.listvaults(["spending"], deposits)["vaults"])
== len(deposits)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_psbt.tx.hash])
for m in revault_network.mans():
wait_for(
lambda: len(m.rpc.listvaults(["spent"], deposits)["vaults"])
== len(deposits)
)
for vault in m.rpc.listvaults(["spent"], deposits)["vaults"]:
assert vault["moved_at"] is not None
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_large_spends(revault_network, bitcoind, executor):
CSV = 2016 # 2 weeks :tm:
revault_network.deploy(17, 8, csv=CSV)
man = revault_network.man(0)
# Get some more funds
bitcoind.generate_block(1)
deposits = []
deriv_indexes = []
total_amount = 0
for _ in range(5):
new_vaults = []
for i in range(2):
amount = random.randint(5, 5000) / 100
new_vaults.append(revault_network.fund(amount))
deposits.append(f"{new_vaults[i]['txid']}:{new_vaults[i]['vout']}")
deriv_indexes.append(new_vaults[i]["derivation_index"])
total_amount += new_vaults[i]["amount"]
revault_network.activate_fresh_vaults(new_vaults)
feerate = 1
n_outputs = random.randint(1, 3)
fees = revault_network.compute_spendtx_fees(feerate, len(deposits), n_outputs)
destinations = {
bitcoind.rpc.getnewaddress(): (total_amount - fees) // n_outputs
for _ in range(n_outputs)
}
spend_tx = man.rpc.getspendtx(deposits, destinations, feerate)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx = man.man_keychain.sign_spend_psbt(spend_tx, deriv_indexes)
man.rpc.updatespendtx(spend_tx)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
man.rpc.setspendtx(spend_psbt.tx.hash)
# Killing the daemon and restart it while unvaulting shouldn't cause
# any issue
for man in revault_network.mans():
man.stop()
man.start()
wait_for(
lambda: len(man.rpc.listvaults(["unvaulting"], deposits)["vaults"])
== len(deposits)
)
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits))
wait_for(
lambda: len(man.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# We'll broadcast the Spend transaction as soon as it's valid
# Note that bitcoind's RPC socket may timeout if it needs to generate too many
# blocks at once. So, spread them a bit.
for _ in range(10):
bitcoind.generate_block(CSV // 10)
bitcoind.generate_block(CSV % 10 - 1)
man.wait_for_log(
f"Succesfully broadcasted Spend tx '{spend_psbt.tx.hash}'",
)
wait_for(
lambda: len(man.rpc.listvaults(["spending"], deposits)["vaults"])
== len(deposits)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_psbt.tx.hash])
wait_for(
lambda: len(man.rpc.listvaults(["spent"], deposits)["vaults"]) == len(deposits)
)
for vault in man.rpc.listvaults(["spent"], deposits)["vaults"]:
assert vault["moved_at"] is not None
# Tests that getspendtx returns an error when trying to build a spend too big
# (it wouldn't be possible to announce it to the coordinator when fully signed)
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_not_announceable_spend(revault_network, bitcoind, executor):
CSV = 4
revault_network.deploy(5, 7, csv=CSV)
man = revault_network.man(0)
vaults = []
for _ in range(5):
amounts = [(i + 1) / 100 for i in range(4)]
new_vaults = revault_network.fundmany(amounts)
revault_network.activate_fresh_vaults(new_vaults)
vaults += new_vaults
total_amount = sum(v["amount"] for v in vaults)
deposits = [f"{v['txid']}:{v['vout']}" for v in vaults]
deriv_indexes = [v["derivation_index"] for v in vaults]
feerate = 1
n_outputs = 588
fees = revault_network.compute_spendtx_fees(feerate, len(deposits), n_outputs)
output_value = int((total_amount - fees) // n_outputs)
destinations = {
bitcoind.rpc.getnewaddress(): output_value for _ in range(n_outputs)
}
# Hey, this spend is huge!
with pytest.raises(
RpcError, match="Spend transaction is too large, try spending less outpoints'"
):
man.rpc.getspendtx(deposits, destinations, feerate)
# One less spent outpoint is ok though
deposits.pop()
deriv_indexes.pop()
amounts.pop()
total_amount = sum(amounts) * COIN
fees = revault_network.compute_spendtx_fees(feerate, len(deposits), n_outputs)
output_value = int((total_amount - fees) // n_outputs)
for addr in destinations:
destinations[addr] = output_value
spend_tx = man.rpc.getspendtx(deposits, destinations, feerate)["spend_tx"]["psbt"]
for man in revault_network.mans():
spend_tx = man.man_keychain.sign_spend_psbt(spend_tx, deriv_indexes)
man.rpc.updatespendtx(spend_tx)
spend_psbt = serializations.PSBT()
spend_psbt.deserialize(spend_tx)
spend_psbt.tx.calc_sha256()
spend_txid = spend_psbt.tx.hash
man.rpc.setspendtx(spend_txid)
wait_for(
lambda: len(man.rpc.listvaults(["unvaulting"], deposits)["vaults"])
== len(deposits)
)
# We need a single confirmation to consider the Unvault transaction confirmed
bitcoind.generate_block(1, wait_for_mempool=len(deposits))
wait_for(
lambda: len(man.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# We'll broadcast the Spend transaction as soon as it's valid
bitcoind.generate_block(CSV - 1)
man.wait_for_log(f"Succesfully broadcasted Spend tx '{spend_txid}'")
wait_for(
lambda: len(man.rpc.listvaults(["spending"], deposits)["vaults"])
== len(deposits)
)
# And will mark it as spent after a single confirmation of the Spend tx
bitcoind.generate_block(1, wait_for_mempool=[spend_psbt.tx.hash])
wait_for(
lambda: len(man.rpc.listvaults(["spent"], deposits)["vaults"]) == len(deposits)
)
for vault in man.rpc.listvaults(["spent"], deposits)["vaults"]:
assert vault["moved_at"] is not None
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_revaulted_spend(revault_network, bitcoind, executor):
"""
Revault an ongoing Spend transaction carried out by the managers, under misc
circumstances.
"""
CSV = 12
revault_network.deploy(2, 2, n_stkmanagers=1, csv=CSV)
mans = revault_network.mans()
stks = revault_network.stks()
# Simple case. Managers Spend a single vault.
vault = revault_network.fund(0.05)
revault_network.secure_vault(vault)
revault_network.activate_vault(vault)
revault_network.spend_vaults_anyhow_unconfirmed([vault])
revault_network.cancel_vault(vault)
# Managers spend two vaults, both are canceled.
vaults = [revault_network.fund(0.05), revault_network.fund(0.1)]
for v in vaults:
revault_network.secure_vault(v)
revault_network.activate_vault(v)
revault_network.unvault_vaults_anyhow(vaults)
for vault in vaults:
revault_network.cancel_vault(vault)
# Managers spend three vaults, only a single one is canceled. Two of them were
# created by the same Deposit transaction.
vaults = revault_network.fundmany([0.2, 0.08])
vaults.append(revault_network.fund(0.03))
for v in vaults:
revault_network.secure_vault(v)
revault_network.activate_vault(v)
revault_network.unvault_vaults_anyhow(vaults)
revault_network.cancel_vault(vaults[0])
# vaults[0] is canceled, therefore the Spend transaction is now invalid. The vaults
# should be marked as unvaulted since they are not being spent anymore.
deposits = [f"{v['txid']}:{v['vout']}" for v in vaults[1:]]
for w in mans + stks:
wait_for(
lambda: len(w.rpc.listvaults(["unvaulted"], deposits)["vaults"])
== len(deposits)
)
# Test that the coordinator will broadcast our spends
@pytest.mark.skipif(not POSTGRES_IS_SETUP, reason="Needs Postgres for servers db")
def test_coordinator_broadcast(revault_network, bitcoind, executor):
"""
Test that the coordinator broadcasts spend transactions when they become valid
"""
CSV = 12
revault_network.deploy(2, 2, n_stkmanagers=1, csv=CSV)
vault = revault_network.fund(0.05)
revault_network.secure_vault(vault)
revault_network.activate_vault(vault)
revault_network.unvault_vaults_anyhow([vault])
revault_network.stop_wallets()
bitcoind.generate_block(CSV - 1)
bitcoind.generate_block(1, wait_for_mempool=1)
revault_network.start_wallets()
for w in revault_network.participants():
wait_for(
lambda: len(w.rpc.listvaults(["spent"])["vaults"]) == 1,
)
|
import js2py, re, asyncio, random
from pyquery import PyQuery as pq
from bs4 import BeautifulSoup
from aiohttp import ClientSession
data = []
def getData():
global data
return data
def infoCallback(future):
global data
result = future.result()
src = result[0]
title = result[1]
time = result[2]
data.append(src + "----" + time + "|" + title)
# print(f"{len(data)} done | {title}")
async def crawlInfos(urls):
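# NOTE: the semaphore below wraps the whole batch rather than each request, so it
# does not actually cap concurrency; every URL is fetched at once via gather().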
async with asyncio.Semaphore(500) as semaphore:
session = ClientSession()
tasks = []
for url in urls:
task = asyncio.ensure_future(getAndParseInfo(session, url))
task.add_done_callback(infoCallback)
tasks.append(task)
await asyncio.gather(*tasks)
await session.close()
return tasks
def listCallback(future):
global data
result = future.result()
data.extend(result)
async def crawlLists(urls):
async with asyncio.Semaphore(500) as semaphore:
async with ClientSession() as session:
tasks = []
for url in urls:
task = asyncio.ensure_future(getAndParseList(session, url))
task.add_done_callback(listCallback)
tasks.append(task)
await asyncio.gather(*tasks)
return tasks
async def getAndParseInfo(session, url):
html = await visit(session, url)
d = pq(html)
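# The video URL is hidden in an obfuscated (jsjiami.com.v5) inline script inside
# #player_one; the slice below strips the fixed-length wrapper text around the
# encoded expression, which is then evaluated with the page's own decoder via js2py.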
src = d("#player_one script").text()
src = src[20:-8]
context = js2py.EvalJs()
js_code = ''';var encode_version = 'jsjiami.com.v5', eexda = '__0x9ff10', __0x9ff10=['w7FkXcKcwqs=','VMKAw7Fhw6Q=','w5nDlTY7w4A=','wqQ5w4pKwok=','dcKnwrTCtBg=','w45yHsO3woU=','54u75py15Y6177y0PcKk5L665a2j5pyo5b2156i677yg6L+S6K2D5pW65o6D5oqo5Lmn55i/5bSn5L21','RsOzwq5fGQ==','woHDiMK0w7HDiA==','54uS5pyR5Y6r7764wr3DleS+ouWtgeaesOW/sOeooe+/nei/ruitteaWsuaOmeaKiuS4o+eateW2i+S8ng==','bMOKwqA=','V8Knwpo=','csOIwoVsG1rCiUFU','5YmL6ZiV54qm5pyC5Y2i776Lw4LCrOS+muWssOacteW8lOeqtg==','w75fMA==','YsOUwpU=','wqzDtsKcw5fDvQ==','wqNMOGfCn13DmjTClg==','wozDisOlHHI=','GiPConNN','XcKzwrDCvSg=','U8K+wofCmcO6'];(function(_0x1f2e93,_0x60307d){var _0x1f9a0b=function(_0x35f19b){while(--_0x35f19b){_0x1f2e93['push'](_0x1f2e93['shift']());}};_0x1f9a0b(++_0x60307d);}(__0x9ff10,0x152));var _0x43d9=function(_0x13228a,_0x2ce452){_0x13228a=_0x13228a-0x0;var _0x424175=__0x9ff10[_0x13228a];if(_0x43d9['initialized']===undefined){(function(){var _0x270d2c=typeof window!=='undefined'?window:typeof process==='object'&&typeof require==='function'&&typeof global==='object'?global:this;var _0x58680b='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=';_0x270d2c['atob']||(_0x270d2c['atob']=function(_0x5536e1){var _0x15e9d3=String(_0x5536e1)['replace'](/=+$/,'');for(var _0x4e6299=0x0,_0x3590d2,_0x48c90b,_0x557f6a=0x0,_0x2b086d='';_0x48c90b=_0x15e9d3['charAt'](_0x557f6a++);~_0x48c90b&&(_0x3590d2=_0x4e6299%0x4?_0x3590d2*0x40+_0x48c90b:_0x48c90b,_0x4e6299++%0x4)?_0x2b086d+=String['fromCharCode'](0xff&_0x3590d2>>(-0x2*_0x4e6299&0x6)):0x0){_0x48c90b=_0x58680b['indexOf'](_0x48c90b);}return _0x2b086d;});}());var _0x4a2d38=function(_0x1f120d,_0x1d6e11){var _0x4c36f9=[],_0x1c4b64=0x0,_0x18ce5c,_0x39c9fa='',_0x6d02b2='';_0x1f120d=atob(_0x1f120d);for(var _0x13b203=0x0,_0x24d88b=_0x1f120d['length'];_0x13b203<_0x24d88b;_0x13b203++){_0x6d02b2+='%'+('00'+_0x1f120d['charCodeAt'](_0x13b203)['toString'](0x10))['slice'](-0x2);}_0x1f120d=decodeURIComponent(_0x6d02b2);for(var _0x1f76f3=0x0;_0x1f76f3<0x100;_0x1f76f3++){_0x4c36f9[_0x1f76f3]=_0x1f76f3;}for(_0x1f76f3=0x0;_0x1f76f3<0x100;_0x1f76f3++){_0x1c4b64=(_0x1c4b64+_0x4c36f9[_0x1f76f3]+_0x1d6e11['charCodeAt'](_0x1f76f3%_0x1d6e11['length']))%0x100;_0x18ce5c=_0x4c36f9[_0x1f76f3];_0x4c36f9[_0x1f76f3]=_0x4c36f9[_0x1c4b64];_0x4c36f9[_0x1c4b64]=_0x18ce5c;}_0x1f76f3=0x0;_0x1c4b64=0x0;for(var _0x2b6a92=0x0;_0x2b6a92<_0x1f120d['length'];_0x2b6a92++){_0x1f76f3=(_0x1f76f3+0x1)%0x100;_0x1c4b64=(_0x1c4b64+_0x4c36f9[_0x1f76f3])%0x100;_0x18ce5c=_0x4c36f9[_0x1f76f3];_0x4c36f9[_0x1f76f3]=_0x4c36f9[_0x1c4b64];_0x4c36f9[_0x1c4b64]=_0x18ce5c;_0x39c9fa+=String['fromCharCode'](_0x1f120d['charCodeAt'](_0x2b6a92)^_0x4c36f9[(_0x4c36f9[_0x1f76f3]+_0x4c36f9[_0x1c4b64])%0x100]);}return _0x39c9fa;};_0x43d9['rc4']=_0x4a2d38;_0x43d9['data']={};_0x43d9['initialized']=!![];}var _0x302f80=_0x43d9['data'][_0x13228a];if(_0x302f80===undefined){if(_0x43d9['once']===undefined){_0x43d9['once']=!![];}_0x424175=_0x43d9['rc4'](_0x424175,_0x2ce452);_0x43d9['data'][_0x13228a]=_0x424175;}else{_0x424175=_0x302f80;}return _0x424175;};function strencode2(_0x4f0d7a){var _0x4c6de5={'Anfny':function _0x4f6a21(_0x51d0ce,_0x5a5f36){return _0x51d0ce(_0x5a5f36);}};return _0x4c6de5[_0x43d9('0x0','fo#E')](unescape,_0x4f0d7a);};(function(_0x17883e,_0x4a42d3,_0xe4080c){var _0x301ffc={'lPNHL':function _0x1c947e(_0x4d57b6,_0x51f6a5){return _0x4d57b6!==_0x51f6a5;},'EPdUx':function _0x55f4cc(_0x34b7bc,_0x9f930c){return _0x34b7bc===_0x9f930c;},'kjFfJ':'jsjiami.com.v5','DFsBH':function _0x5f08ac(_0x1e6fa1,_0x4c0aef){return 
_0x1e6fa1+_0x4c0aef;},'akiuH':_0x43d9('0x1','KYjt'),'VtfeI':function _0x4f3b7b(_0x572344,_0x5f0cde){return _0x572344(_0x5f0cde);},'Deqmq':_0x43d9('0x2','oYRG'),'oKQDc':_0x43d9('0x3','i^vo'),'UMyIE':_0x43d9('0x4','oYRG'),'lRwKx':function _0x5b71b4(_0x163a75,_0x4d3998){return _0x163a75===_0x4d3998;},'TOBCR':function _0x314af8(_0x3e6efe,_0x275766){return _0x3e6efe+_0x275766;},'AUOVd':_0x43d9('0x5','lALy')};_0xe4080c='al';try{if('EqF'!==_0x43d9('0x6','xSW]')){_0xe4080c+=_0x43d9('0x7','oYRG');_0x4a42d3=encode_version;if(!(_0x301ffc[_0x43d9('0x8','fo#E')](typeof _0x4a42d3,_0x43d9('0x9','*oMH'))&&_0x301ffc[_0x43d9('0xa','ov6D')](_0x4a42d3,_0x301ffc[_0x43d9('0xb','3k]D')]))){_0x17883e[_0xe4080c](_0x301ffc[_0x43d9('0xc','@&#[')]('ɾ��',_0x301ffc[_0x43d9('0xd','i^vo')]));}}else{return _0x301ffc[_0x43d9('0xe','rvlM')](unescape,input);}}catch(_0x23e6c5){if('svo'!==_0x301ffc[_0x43d9('0xf','TpCD')]){_0x17883e[_0xe4080c]('ɾ���汾�ţ�js�ᶨ�ڵ���');}else{_0xe4080c='al';try{_0xe4080c+=_0x301ffc[_0x43d9('0x10','doK*')];_0x4a42d3=encode_version;if(!(_0x301ffc[_0x43d9('0x11','ZRZ4')](typeof _0x4a42d3,_0x301ffc['UMyIE'])&&_0x301ffc[_0x43d9('0x12','@&#[')](_0x4a42d3,_0x301ffc['kjFfJ']))){_0x17883e[_0xe4080c](_0x301ffc[_0x43d9('0x13','KYjt')]('ɾ��',_0x43d9('0x14','xSW]')));}}catch(_0x4202f6){_0x17883e[_0xe4080c](_0x301ffc[_0x43d9('0x15','oYRG')]);}}}}(window));;encode_version = 'jsjiami.com.v5';'''
context.execute(js_code)
src = context.eval(src)
if src is not None:
src = pq(src)
src = src("source").attr("src")
title = d("h4.login_register_header:first").text()
time = d("span.title-yakov:first").text()
print(f'[Done] {time} | {title.strip()}')
return src, title, time
async def getAndParseList(session, url):
m = await visit(session, url)
soup = BeautifulSoup(m, 'html.parser')
urls = soup.find_all(name='a', attrs={
"href": re.compile(r'^http(.*)view_video(.*)')
})
return [url.get('href') for url in urls]
async def visit(session, url):
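# Each request spoofs a random X-Forwarded-For address and a fixed desktop
# User-Agent; note that a fresh ClientSession is opened here, so the session
# argument passed in is effectively unused.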
randomIP = str(random.randint(0, 255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255))
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept-Language': 'zh-cn,zh;q=0.9',
'X-Forwarded-For': randomIP
}
async with ClientSession() as session:
async with session.get(url, headers=headers) as response:
response = await response.text()
return response | import js2py, re, asyncio, random
from pyquery import PyQuery as pq
from bs4 import BeautifulSoup
from aiohttp import ClientSession
data = []
def getData():
global data
return data
def infoCallback(future):
global data
result = future.result()
src = result[0]
title = result[1]
time = result[2]
data.append(src + "----" + time + "|" + title)
# print(f"{len(data)} done | {title}")
async def crawlInfos(urls):
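# NOTE: the semaphore below wraps the whole batch rather than each request, so it
# does not actually cap concurrency; every URL is fetched at once via gather().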
async with asyncio.Semaphore(500) as semaphore:
session = ClientSession()
tasks = []
for url in urls:
task = asyncio.ensure_future(getAndParseInfo(session, url))
task.add_done_callback(infoCallback)
tasks.append(task)
await asyncio.gather(*tasks)
await session.close()
return tasks
def listCallback(future):
global data
result = future.result()
data.extend(result)
async def crawlLists(urls):
async with asyncio.Semaphore(500) as semaphore:
async with ClientSession() as session:
tasks = []
for url in urls:
task = asyncio.ensure_future(getAndParseList(session, url))
task.add_done_callback(listCallback)
tasks.append(task)
await asyncio.gather(*tasks)
return tasks
async def getAndParseInfo(session, url):
html = await visit(session, url)
d = pq(html)
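# The video URL is hidden in an obfuscated (jsjiami.com.v5) inline script inside
# #player_one; the slice below strips the fixed-length wrapper text around the
# encoded expression, which is then evaluated with the page's own decoder via js2py.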
src = d("#player_one script").text()
src = src[20:-8]
context = js2py.EvalJs()
js_code = ''';var encode_version = 'jsjiami.com.v5', eexda = '__0x9ff10', __0x9ff10=['w7FkXcKcwqs=','VMKAw7Fhw6Q=','w5nDlTY7w4A=','wqQ5w4pKwok=','dcKnwrTCtBg=','w45yHsO3woU=','54u75py15Y6177y0PcKk5L665a2j5pyo5b2156i677yg6L+S6K2D5pW65o6D5oqo5Lmn55i/5bSn5L21','RsOzwq5fGQ==','woHDiMK0w7HDiA==','54uS5pyR5Y6r7764wr3DleS+ouWtgeaesOW/sOeooe+/nei/ruitteaWsuaOmeaKiuS4o+eateW2i+S8ng==','bMOKwqA=','V8Knwpo=','csOIwoVsG1rCiUFU','5YmL6ZiV54qm5pyC5Y2i776Lw4LCrOS+muWssOacteW8lOeqtg==','w75fMA==','YsOUwpU=','wqzDtsKcw5fDvQ==','wqNMOGfCn13DmjTClg==','wozDisOlHHI=','GiPConNN','XcKzwrDCvSg=','U8K+wofCmcO6'];(function(_0x1f2e93,_0x60307d){var _0x1f9a0b=function(_0x35f19b){while(--_0x35f19b){_0x1f2e93['push'](_0x1f2e93['shift']());}};_0x1f9a0b(++_0x60307d);}(__0x9ff10,0x152));var _0x43d9=function(_0x13228a,_0x2ce452){_0x13228a=_0x13228a-0x0;var _0x424175=__0x9ff10[_0x13228a];if(_0x43d9['initialized']===undefined){(function(){var _0x270d2c=typeof window!=='undefined'?window:typeof process==='object'&&typeof require==='function'&&typeof global==='object'?global:this;var _0x58680b='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=';_0x270d2c['atob']||(_0x270d2c['atob']=function(_0x5536e1){var _0x15e9d3=String(_0x5536e1)['replace'](/=+$/,'');for(var _0x4e6299=0x0,_0x3590d2,_0x48c90b,_0x557f6a=0x0,_0x2b086d='';_0x48c90b=_0x15e9d3['charAt'](_0x557f6a++);~_0x48c90b&&(_0x3590d2=_0x4e6299%0x4?_0x3590d2*0x40+_0x48c90b:_0x48c90b,_0x4e6299++%0x4)?_0x2b086d+=String['fromCharCode'](0xff&_0x3590d2>>(-0x2*_0x4e6299&0x6)):0x0){_0x48c90b=_0x58680b['indexOf'](_0x48c90b);}return _0x2b086d;});}());var _0x4a2d38=function(_0x1f120d,_0x1d6e11){var _0x4c36f9=[],_0x1c4b64=0x0,_0x18ce5c,_0x39c9fa='',_0x6d02b2='';_0x1f120d=atob(_0x1f120d);for(var _0x13b203=0x0,_0x24d88b=_0x1f120d['length'];_0x13b203<_0x24d88b;_0x13b203++){_0x6d02b2+='%'+('00'+_0x1f120d['charCodeAt'](_0x13b203)['toString'](0x10))['slice'](-0x2);}_0x1f120d=decodeURIComponent(_0x6d02b2);for(var _0x1f76f3=0x0;_0x1f76f3<0x100;_0x1f76f3++){_0x4c36f9[_0x1f76f3]=_0x1f76f3;}for(_0x1f76f3=0x0;_0x1f76f3<0x100;_0x1f76f3++){_0x1c4b64=(_0x1c4b64+_0x4c36f9[_0x1f76f3]+_0x1d6e11['charCodeAt'](_0x1f76f3%_0x1d6e11['length']))%0x100;_0x18ce5c=_0x4c36f9[_0x1f76f3];_0x4c36f9[_0x1f76f3]=_0x4c36f9[_0x1c4b64];_0x4c36f9[_0x1c4b64]=_0x18ce5c;}_0x1f76f3=0x0;_0x1c4b64=0x0;for(var _0x2b6a92=0x0;_0x2b6a92<_0x1f120d['length'];_0x2b6a92++){_0x1f76f3=(_0x1f76f3+0x1)%0x100;_0x1c4b64=(_0x1c4b64+_0x4c36f9[_0x1f76f3])%0x100;_0x18ce5c=_0x4c36f9[_0x1f76f3];_0x4c36f9[_0x1f76f3]=_0x4c36f9[_0x1c4b64];_0x4c36f9[_0x1c4b64]=_0x18ce5c;_0x39c9fa+=String['fromCharCode'](_0x1f120d['charCodeAt'](_0x2b6a92)^_0x4c36f9[(_0x4c36f9[_0x1f76f3]+_0x4c36f9[_0x1c4b64])%0x100]);}return _0x39c9fa;};_0x43d9['rc4']=_0x4a2d38;_0x43d9['data']={};_0x43d9['initialized']=!![];}var _0x302f80=_0x43d9['data'][_0x13228a];if(_0x302f80===undefined){if(_0x43d9['once']===undefined){_0x43d9['once']=!![];}_0x424175=_0x43d9['rc4'](_0x424175,_0x2ce452);_0x43d9['data'][_0x13228a]=_0x424175;}else{_0x424175=_0x302f80;}return _0x424175;};function strencode2(_0x4f0d7a){var _0x4c6de5={'Anfny':function _0x4f6a21(_0x51d0ce,_0x5a5f36){return _0x51d0ce(_0x5a5f36);}};return _0x4c6de5[_0x43d9('0x0','fo#E')](unescape,_0x4f0d7a);};(function(_0x17883e,_0x4a42d3,_0xe4080c){var _0x301ffc={'lPNHL':function _0x1c947e(_0x4d57b6,_0x51f6a5){return _0x4d57b6!==_0x51f6a5;},'EPdUx':function _0x55f4cc(_0x34b7bc,_0x9f930c){return _0x34b7bc===_0x9f930c;},'kjFfJ':'jsjiami.com.v5','DFsBH':function _0x5f08ac(_0x1e6fa1,_0x4c0aef){return 
_0x1e6fa1+_0x4c0aef;},'akiuH':_0x43d9('0x1','KYjt'),'VtfeI':function _0x4f3b7b(_0x572344,_0x5f0cde){return _0x572344(_0x5f0cde);},'Deqmq':_0x43d9('0x2','oYRG'),'oKQDc':_0x43d9('0x3','i^vo'),'UMyIE':_0x43d9('0x4','oYRG'),'lRwKx':function _0x5b71b4(_0x163a75,_0x4d3998){return _0x163a75===_0x4d3998;},'TOBCR':function _0x314af8(_0x3e6efe,_0x275766){return _0x3e6efe+_0x275766;},'AUOVd':_0x43d9('0x5','lALy')};_0xe4080c='al';try{if('EqF'!==_0x43d9('0x6','xSW]')){_0xe4080c+=_0x43d9('0x7','oYRG');_0x4a42d3=encode_version;if(!(_0x301ffc[_0x43d9('0x8','fo#E')](typeof _0x4a42d3,_0x43d9('0x9','*oMH'))&&_0x301ffc[_0x43d9('0xa','ov6D')](_0x4a42d3,_0x301ffc[_0x43d9('0xb','3k]D')]))){_0x17883e[_0xe4080c](_0x301ffc[_0x43d9('0xc','@&#[')]('ɾ��',_0x301ffc[_0x43d9('0xd','i^vo')]));}}else{return _0x301ffc[_0x43d9('0xe','rvlM')](unescape,input);}}catch(_0x23e6c5){if('svo'!==_0x301ffc[_0x43d9('0xf','TpCD')]){_0x17883e[_0xe4080c]('ɾ���汾�ţ�js�ᶨ�ڵ���');}else{_0xe4080c='al';try{_0xe4080c+=_0x301ffc[_0x43d9('0x10','doK*')];_0x4a42d3=encode_version;if(!(_0x301ffc[_0x43d9('0x11','ZRZ4')](typeof _0x4a42d3,_0x301ffc['UMyIE'])&&_0x301ffc[_0x43d9('0x12','@&#[')](_0x4a42d3,_0x301ffc['kjFfJ']))){_0x17883e[_0xe4080c](_0x301ffc[_0x43d9('0x13','KYjt')]('ɾ��',_0x43d9('0x14','xSW]')));}}catch(_0x4202f6){_0x17883e[_0xe4080c](_0x301ffc[_0x43d9('0x15','oYRG')]);}}}}(window));;encode_version = 'jsjiami.com.v5';'''
context.execute(js_code)
src = context.eval(src)
if src is not None:
src = pq(src)
src = src("source").attr("src")
title = d("h4.login_register_header:first").text()
time = d("span.title-yakov:first").text()
print(f'[Done] {time} | {title.strip()}')
return src, title, time
async def getAndParseList(session, url):
m = await visit(session, url)
soup = BeautifulSoup(m, 'html.parser')
urls = soup.find_all(name='a', attrs={
"href": re.compile(r'^http(.*)view_video(.*)')
})
return [url.get('href') for url in urls]
async def visit(session, url):
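# Each request spoofs a random X-Forwarded-For address and a fixed desktop
# User-Agent; note that a fresh ClientSession is opened here, so the session
# argument passed in is effectively unused.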
randomIP = str(random.randint(0, 255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255))
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept-Language': 'zh-cn,zh;q=0.9',
'X-Forwarded-For': randomIP
}
async with ClientSession() as session:
async with session.get(url, headers=headers) as response:
response = await response.text()
return response |
from typing import Optional
from cleo.helpers import argument
from poetry.console.commands.command import Command
class SourceShowCommand(Command):
name = "source show"
description = "Show information about sources configured for the project."
arguments = [
argument(
"source",
"Source(s) to show information for. Defaults to showing all sources.",
optional=True,
multiple=True,
),
]
def handle(self) -> Optional[int]:
sources = self.poetry.get_sources()
names = self.argument("source")
if not sources:
self.line("No sources configured for this project.")
return 0
if names and not any(map(lambda s: s.name in names, sources)):
self.line_error(f"No source found with name(s): {", ".join(names)}")
return 1
bool_string = {
True: "yes",
False: "no",
}
for source in sources:
if names and source.name not in names:
continue
table = self.table(style="compact")
rows = [
["<info>name</>", " : <c1>{}</>".format(source.name)],
["<info>url</>", " : {}".format(source.url)],
[
"<info>default</>",
" : {}".format(bool_string.get(source.default, False)),
],
[
"<info>secondary</>",
" : {}".format(bool_string.get(source.secondary, False)),
],
[
"<info>targeted</>",
" : {}".format(bool_string.get(source.targeted, False)),
],
]
table.add_rows(rows)
table.render()
self.line("")
return 0
| from typing import Optional
from cleo.helpers import argument
from poetry.console.commands.command import Command
class SourceShowCommand(Command):
name = "source show"
description = "Show information about sources configured for the project."
arguments = [
argument(
"source",
"Source(s) to show information for. Defaults to showing all sources.",
optional=True,
multiple=True,
),
]
def handle(self) -> Optional[int]:
sources = self.poetry.get_sources()
names = self.argument("source")
if not sources:
self.line("No sources configured for this project.")
return 0
if names and not any(map(lambda s: s.name in names, sources)):
self.line_error(f"No source found with name(s): {', '.join(names)}")
return 1
bool_string = {
True: "yes",
False: "no",
}
for source in sources:
if names and source.name not in names:
continue
table = self.table(style="compact")
rows = [
["<info>name</>", " : <c1>{}</>".format(source.name)],
["<info>url</>", " : {}".format(source.url)],
[
"<info>default</>",
" : {}".format(bool_string.get(source.default, False)),
],
[
"<info>secondary</>",
" : {}".format(bool_string.get(source.secondary, False)),
],
[
"<info>targeted</>",
" : {}".format(bool_string.get(source.targeted, False)),
],
]
table.add_rows(rows)
table.render()
self.line("")
return 0
|
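# Illustrative only: the command above is invoked as "poetry source show" to list every
# configured source, or "poetry source show <name> ..." to filter by name. For a
# hypothetical source named "internal", the compact table would render roughly as:
#   name      : internal
#   url       : https://example.com/simple
#   default   : no
#   secondary : yes
#   targeted  : no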
import torch
import torch.cuda
import logging
import numpy as np
import pandas as pd
import random
from datetime import datetime
import os
import sys
sys.path.append('../../')
import click
from src.datasets.MURADataset import MURA_TrainValidTestSplitter, MURA_Dataset
from src.models.AE_DMSAD import AE_DMSAD
from src.models.networks.AE_network import AE_net, Encoder
from src.utils.utils import summary_string
from src.utils.Config import Config
@click.command()
@click.argument('config_path', type=click.Path(exists=True))
def main(config_path):
"""
Train a DMSAD on the MURA dataset using an AE pretraining.
"""
# Load config file
cfg = Config(settings=None)
cfg.load_config(config_path)
# Get path to output
OUTPUT_PATH = cfg.settings['PATH']['OUTPUT'] + cfg.settings['Experiment_Name'] + '/'#+ datetime.today().strftime('%Y_%m_%d_%Hh%M')+'/'
# make output dir
if not os.path.isdir(OUTPUT_PATH+'model/'): os.makedirs(OUTPUT_PATH+'model/', exist_ok=True)
if not os.path.isdir(OUTPUT_PATH+'results/'): os.makedirs(OUTPUT_PATH+'results/', exist_ok=True)
if not os.path.isdir(OUTPUT_PATH+'logs/'): os.makedirs(OUTPUT_PATH+'logs/', exist_ok=True)
for seed_i, seed in enumerate(cfg.settings['seeds']):
############################### Set Up #################################
# initialize logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
try:
logger.handlers[1].stream.close()
logger.removeHandler(logger.handlers[1])
except IndexError:
pass
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
log_file = OUTPUT_PATH + 'logs/' + f'log_{seed_i+1}.txt'
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# print path
logger.info(f"Log file : {log_file}")
logger.info(f"Data path : {cfg.settings["PATH"]["DATA"]}")
logger.info(f"Outputs path : {OUTPUT_PATH}" + "\n")
# Set seed
if seed != -1:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
logger.info(f"Set seed {seed_i+1:02}/{len(cfg.settings["seeds"]):02} to {seed}")
# set number of threads
if cfg.settings['n_thread'] > 0:
torch.set_num_threads(cfg.settings['n_thread'])
# check if GPU available
cfg.settings['device'] = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Print technical info in logger
logger.info(f"Device : {cfg.settings["device"]}")
logger.info(f"Number of threads : {cfg.settings["n_thread"]}")
############################### Split Data #############################
# Load data information
df_info = pd.read_csv(cfg.settings['PATH']['DATA_INFO'])
df_info = df_info.drop(df_info.columns[0], axis=1)
# remove low contrast images (all black)
df_info = df_info[df_info.low_contrast == 0]
# Train Validation Test Split
spliter = MURA_TrainValidTestSplitter(df_info, train_frac=cfg.settings['Split']['train_frac'],
ratio_known_normal=cfg.settings['Split']['known_normal'],
ratio_known_abnormal=cfg.settings['Split']['known_abnormal'],
random_state=42)
spliter.split_data(verbose=False)
train_df = spliter.get_subset('train')
valid_df = spliter.get_subset('valid')
test_df = spliter.get_subset('test')
# print info to logger
for key, value in cfg.settings['Split'].items():
logger.info(f"Split param {key} : {value}")
logger.info("Split Summary \n" + str(spliter.print_stat(returnTable=True)))
############################# Build Model #############################
# make networks
net_AE = AE_net(MLP_Neurons_layer_enc=cfg.settings['AE']['MLP_head_enc'], MLP_Neurons_layer_dec=cfg.settings['AE']['MLP_head_dec'], output_channels=1)
net_AE = net_AE.to(cfg.settings['device'])
net_DMSAD = Encoder(MLP_Neurons_layer=cfg.settings['DMSAD']['MLP_head'])
net_DMSAD = net_DMSAD.to(cfg.settings['device'])
# print network architecture
net_architecture = summary_string(net_AE, (1, cfg.settings['Split']['img_size'], cfg.settings['Split']['img_size']),
batch_size=cfg.settings['AE']['batch_size'], device=str(cfg.settings['device']))
logger.info("AE net architecture: \n" + net_architecture + '\n')
net_architecture = summary_string(net_DMSAD, (1, cfg.settings['Split']['img_size'], cfg.settings['Split']['img_size']),
batch_size=cfg.settings['DMSAD']['batch_size'], device=str(cfg.settings['device']))
logger.info("DMSAD net architecture: \n" + net_architecture + '\n')
# make model
ae_DMSAD = AE_DMSAD(net_AE, net_DMSAD, eta=cfg.settings['DMSAD']['eta'], gamma=cfg.settings['DMSAD']['gamma'])
############################### Train AE ###############################
# make dataset
train_dataset_AD = MURA_Dataset(train_df, data_path=cfg.settings['PATH']['DATA'], load_mask=True,
load_semilabels=True, output_size=cfg.settings['Split']['img_size'])
valid_dataset_AD = MURA_Dataset(valid_df, data_path=cfg.settings['PATH']['DATA'], load_mask=True,
load_semilabels=True, output_size=cfg.settings['Split']['img_size'])
test_dataset_AD = MURA_Dataset(test_df, data_path=cfg.settings['PATH']['DATA'], load_mask=True,
load_semilabels=True, output_size=cfg.settings['Split']['img_size'])
logger.info("Online preprocessing pipeline : \n" + str(train_dataset_AD.transform) + "\n")
# Load model if required
if cfg.settings['AE']['model_path_to_load']:
ae_DMSAD.load_ae_net(cfg.settings['AE']['model_path_to_load'][seed_i], map_location=cfg.settings['device'])
logger.info(f"AE Model Loaded from {cfg.settings["AE"]["model_path_to_load"][seed_i]}" + "\n")
# print Train parameters
for key, value in cfg.settings['AE'].items():
logger.info(f"AE {key} : {value}")
# Train AE
ae_DMSAD.train_AE(train_dataset_AD, valid_dataset=None,
n_epoch=cfg.settings['AE']['n_epoch'],
batch_size=cfg.settings['AE']['batch_size'],
lr=cfg.settings['AE']['lr'],
weight_decay=cfg.settings['AE']['weight_decay'],
lr_milestone=cfg.settings['AE']['lr_milestone'],
n_job_dataloader=cfg.settings['AE']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'])
# Evaluate AE to get embeddings
ae_DMSAD.evaluate_AE(valid_dataset_AD, batch_size=cfg.settings['AE']['batch_size'],
n_job_dataloader=cfg.settings['AE']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
set='valid')
ae_DMSAD.evaluate_AE(test_dataset_AD, batch_size=cfg.settings['AE']['batch_size'],
n_job_dataloader=cfg.settings['AE']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
set='test')
# save repr net
ae_DMSAD.save_ae_net(OUTPUT_PATH + f'model/AE_net_{seed_i+1}.pt')
logger.info("AE model saved at " + OUTPUT_PATH + f"model/AE_net_{seed_i+1}.pt")
# save Results
ae_DMSAD.save_results(OUTPUT_PATH + f'results/results_{seed_i+1}.json')
logger.info("Results saved at " + OUTPUT_PATH + f"results/results_{seed_i+1}.json")
######################## Transfer Encoder Weight #######################
ae_DMSAD.transfer_encoder()
############################## Train DMSAD #############################
# Load model if required
if cfg.settings['DMSAD']['model_path_to_load']:
ae_DMSAD.load_AD(cfg.settings['DMSAD']['model_path_to_load'], map_location=cfg.settings['device'])
logger.info(f"DMSAD Model Loaded from {cfg.settings["DMSAD"]["model_path_to_load"]} \n")
# print Train parameters
for key, value in cfg.settings['DMSAD'].items():
logger.info(f"DMSAD {key} : {value}")
# Train DMSAD
ae_DMSAD.train_AD(train_dataset_AD, valid_dataset=valid_dataset_AD,
n_sphere_init=cfg.settings['DMSAD']['n_sphere_init'],
n_epoch=cfg.settings['DMSAD']['n_epoch'],
batch_size=cfg.settings['DMSAD']['batch_size'],
lr=cfg.settings['DMSAD']['lr'],
weight_decay=cfg.settings['DMSAD']['weight_decay'],
lr_milestone=cfg.settings['DMSAD']['lr_milestone'],
n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
checkpoint_path=OUTPUT_PATH + f'DMSAD_checkpoint_{seed_i+1}.pt')
logger.info('--- Validation')
ae_DMSAD.evaluate_AD(valid_dataset_AD, batch_size=cfg.settings['DMSAD']['batch_size'],
n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
set='valid')
logger.info('--- Test')
ae_DMSAD.evaluate_AD(test_dataset_AD, batch_size=cfg.settings['DMSAD']['batch_size'],
n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
set='test')
# save DMSAD
ae_DMSAD.save_AD(OUTPUT_PATH + f'model/DMSAD_{seed_i+1}.pt')
logger.info("model saved at " + OUTPUT_PATH + f"model/DMSAD_{seed_i+1}.pt")
########################## Save Results ################################
# save Results
ae_DMSAD.save_results(OUTPUT_PATH + f'results/results_{seed_i+1}.json')
logger.info("Results saved at " + OUTPUT_PATH + f"results/results_{seed_i+1}.json")
# save config file
cfg.settings['device'] = str(cfg.settings['device'])
cfg.save_config(OUTPUT_PATH + 'config.json')
logger.info("Config saved at " + OUTPUT_PATH + "config.json")
if __name__ == '__main__':
main()
| import torch
import torch.cuda
import logging
import numpy as np
import pandas as pd
import random
from datetime import datetime
import os
import sys
sys.path.append('../../')
import click
from src.datasets.MURADataset import MURA_TrainValidTestSplitter, MURA_Dataset
from src.models.AE_DMSAD import AE_DMSAD
from src.models.networks.AE_network import AE_net, Encoder
from src.utils.utils import summary_string
from src.utils.Config import Config
@click.command()
@click.argument('config_path', type=click.Path(exists=True))
def main(config_path):
"""
Train a DMSAD on the MURA dataset using an AE pretraining.
"""
# Load config file
cfg = Config(settings=None)
cfg.load_config(config_path)
# Get path to output
OUTPUT_PATH = cfg.settings['PATH']['OUTPUT'] + cfg.settings['Experiment_Name'] + '/'#+ datetime.today().strftime('%Y_%m_%d_%Hh%M')+'/'
# make output dir
if not os.path.isdir(OUTPUT_PATH+'model/'): os.makedirs(OUTPUT_PATH+'model/', exist_ok=True)
if not os.path.isdir(OUTPUT_PATH+'results/'): os.makedirs(OUTPUT_PATH+'results/', exist_ok=True)
if not os.path.isdir(OUTPUT_PATH+'logs/'): os.makedirs(OUTPUT_PATH+'logs/', exist_ok=True)
for seed_i, seed in enumerate(cfg.settings['seeds']):
############################### Set Up #################################
# initialize logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
try:
logger.handlers[1].stream.close()
logger.removeHandler(logger.handlers[1])
except IndexError:
pass
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
log_file = OUTPUT_PATH + 'logs/' + f'log_{seed_i+1}.txt'
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# print path
logger.info(f"Log file : {log_file}")
logger.info(f"Data path : {cfg.settings['PATH']['DATA']}")
logger.info(f"Outputs path : {OUTPUT_PATH}" + "\n")
# Set seed
if seed != -1:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
logger.info(f"Set seed {seed_i+1:02}/{len(cfg.settings['seeds']):02} to {seed}")
# set number of threads
if cfg.settings['n_thread'] > 0:
torch.set_num_threads(cfg.settings['n_thread'])
# check if GPU available
cfg.settings['device'] = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Print technical info in logger
logger.info(f"Device : {cfg.settings['device']}")
logger.info(f"Number of threads : {cfg.settings['n_thread']}")
############################### Split Data #############################
# Load data information
df_info = pd.read_csv(cfg.settings['PATH']['DATA_INFO'])
df_info = df_info.drop(df_info.columns[0], axis=1)
# remove low contrast images (all black)
df_info = df_info[df_info.low_contrast == 0]
# Train Validation Test Split
spliter = MURA_TrainValidTestSplitter(df_info, train_frac=cfg.settings['Split']['train_frac'],
ratio_known_normal=cfg.settings['Split']['known_normal'],
ratio_known_abnormal=cfg.settings['Split']['known_abnormal'],
random_state=42)
spliter.split_data(verbose=False)
train_df = spliter.get_subset('train')
valid_df = spliter.get_subset('valid')
test_df = spliter.get_subset('test')
# print info to logger
for key, value in cfg.settings['Split'].items():
logger.info(f"Split param {key} : {value}")
logger.info("Split Summary \n" + str(spliter.print_stat(returnTable=True)))
############################# Build Model #############################
# make networks
net_AE = AE_net(MLP_Neurons_layer_enc=cfg.settings['AE']['MLP_head_enc'], MLP_Neurons_layer_dec=cfg.settings['AE']['MLP_head_dec'], output_channels=1)
net_AE = net_AE.to(cfg.settings['device'])
net_DMSAD = Encoder(MLP_Neurons_layer=cfg.settings['DMSAD']['MLP_head'])
net_DMSAD = net_DMSAD.to(cfg.settings['device'])
# print network architecture
net_architecture = summary_string(net_AE, (1, cfg.settings['Split']['img_size'], cfg.settings['Split']['img_size']),
batch_size=cfg.settings['AE']['batch_size'], device=str(cfg.settings['device']))
logger.info("AE net architecture: \n" + net_architecture + '\n')
net_architecture = summary_string(net_DMSAD, (1, cfg.settings['Split']['img_size'], cfg.settings['Split']['img_size']),
batch_size=cfg.settings['DMSAD']['batch_size'], device=str(cfg.settings['device']))
logger.info("DMSAD net architecture: \n" + net_architecture + '\n')
# make model
ae_DMSAD = AE_DMSAD(net_AE, net_DMSAD, eta=cfg.settings['DMSAD']['eta'], gamma=cfg.settings['DMSAD']['gamma'])
############################### Train AE ###############################
# make dataset
train_dataset_AD = MURA_Dataset(train_df, data_path=cfg.settings['PATH']['DATA'], load_mask=True,
load_semilabels=True, output_size=cfg.settings['Split']['img_size'])
valid_dataset_AD = MURA_Dataset(valid_df, data_path=cfg.settings['PATH']['DATA'], load_mask=True,
load_semilabels=True, output_size=cfg.settings['Split']['img_size'])
test_dataset_AD = MURA_Dataset(test_df, data_path=cfg.settings['PATH']['DATA'], load_mask=True,
load_semilabels=True, output_size=cfg.settings['Split']['img_size'])
logger.info("Online preprocessing pipeline : \n" + str(train_dataset_AD.transform) + "\n")
# Load model if required
if cfg.settings['AE']['model_path_to_load']:
ae_DMSAD.load_ae_net(cfg.settings['AE']['model_path_to_load'][seed_i], map_location=cfg.settings['device'])
logger.info(f"AE Model Loaded from {cfg.settings['AE']['model_path_to_load'][seed_i]}" + "\n")
# print Train parameters
for key, value in cfg.settings['AE'].items():
logger.info(f"AE {key} : {value}")
# Train AE
ae_DMSAD.train_AE(train_dataset_AD, valid_dataset=None,
n_epoch=cfg.settings['AE']['n_epoch'],
batch_size=cfg.settings['AE']['batch_size'],
lr=cfg.settings['AE']['lr'],
weight_decay=cfg.settings['AE']['weight_decay'],
lr_milestone=cfg.settings['AE']['lr_milestone'],
n_job_dataloader=cfg.settings['AE']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'])
# Evaluate AE to get embeddings
ae_DMSAD.evaluate_AE(valid_dataset_AD, batch_size=cfg.settings['AE']['batch_size'],
n_job_dataloader=cfg.settings['AE']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
set='valid')
ae_DMSAD.evaluate_AE(test_dataset_AD, batch_size=cfg.settings['AE']['batch_size'],
n_job_dataloader=cfg.settings['AE']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
set='test')
# save repr net
ae_DMSAD.save_ae_net(OUTPUT_PATH + f'model/AE_net_{seed_i+1}.pt')
logger.info("AE model saved at " + OUTPUT_PATH + f"model/AE_net_{seed_i+1}.pt")
# save Results
ae_DMSAD.save_results(OUTPUT_PATH + f'results/results_{seed_i+1}.json')
logger.info("Results saved at " + OUTPUT_PATH + f"results/results_{seed_i+1}.json")
######################## Transfer Encoder Weight #######################
ae_DMSAD.transfer_encoder()
############################## Train DMSAD #############################
# Load model if required
if cfg.settings['DMSAD']['model_path_to_load']:
ae_DMSAD.load_AD(cfg.settings['DMSAD']['model_path_to_load'], map_location=cfg.settings['device'])
logger.info(f"DMSAD Model Loaded from {cfg.settings['DMSAD']['model_path_to_load']} \n")
# print Train parameters
for key, value in cfg.settings['DMSAD'].items():
logger.info(f"DMSAD {key} : {value}")
# Train DMSAD
ae_DMSAD.train_AD(train_dataset_AD, valid_dataset=valid_dataset_AD,
n_sphere_init=cfg.settings['DMSAD']['n_sphere_init'],
n_epoch=cfg.settings['DMSAD']['n_epoch'],
batch_size=cfg.settings['DMSAD']['batch_size'],
lr=cfg.settings['DMSAD']['lr'],
weight_decay=cfg.settings['DMSAD']['weight_decay'],
lr_milestone=cfg.settings['DMSAD']['lr_milestone'],
n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
checkpoint_path=OUTPUT_PATH + f'DMSAD_checkpoint_{seed_i+1}.pt')
logger.info('--- Validation')
ae_DMSAD.evaluate_AD(valid_dataset_AD, batch_size=cfg.settings['DMSAD']['batch_size'],
n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
set='valid')
logger.info('--- Test')
ae_DMSAD.evaluate_AD(test_dataset_AD, batch_size=cfg.settings['DMSAD']['batch_size'],
n_job_dataloader=cfg.settings['DMSAD']['num_worker'],
device=cfg.settings['device'],
print_batch_progress=cfg.settings['print_batch_progress'],
set='test')
# save DMSAD
ae_DMSAD.save_AD(OUTPUT_PATH + f'model/DMSAD_{seed_i+1}.pt')
logger.info("model saved at " + OUTPUT_PATH + f"model/DMSAD_{seed_i+1}.pt")
########################## Save Results ################################
# save Results
ae_DMSAD.save_results(OUTPUT_PATH + f'results/results_{seed_i+1}.json')
logger.info("Results saved at " + OUTPUT_PATH + f"results/results_{seed_i+1}.json")
# save config file
cfg.settings['device'] = str(cfg.settings['device'])
cfg.save_config(OUTPUT_PATH + 'config.json')
logger.info("Config saved at " + OUTPUT_PATH + "config.json")
if __name__ == '__main__':
main()
|
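# Illustrative sketch (not the authors' settings) of the config consumed by the MURA
# training script above. The keys mirror the cfg.settings[...] accesses in that script;
# every value here is a placeholder.
example_mura_config = {
    'Experiment_Name': 'AE_DMSAD_demo',
    'seeds': [42],
    'n_thread': 0,
    'print_batch_progress': False,
    'PATH': {
        'OUTPUT': 'outputs/',
        'DATA': 'data/MURA/',
        'DATA_INFO': 'data/data_info.csv',
    },
    'Split': {'train_frac': 0.5, 'known_normal': 0.05, 'known_abnormal': 0.05, 'img_size': 512},
    'AE': {
        'MLP_head_enc': [512, 256, 128], 'MLP_head_dec': [128, 256, 512],
        'n_epoch': 100, 'batch_size': 16, 'lr': 1e-4, 'weight_decay': 1e-6,
        'lr_milestone': [60], 'num_worker': 8,
        'model_path_to_load': [],  # one checkpoint per seed; empty to train from scratch
    },
    'DMSAD': {
        'MLP_head': [512, 256, 128], 'eta': 1.0, 'gamma': 0.05, 'n_sphere_init': 100,
        'n_epoch': 100, 'batch_size': 16, 'lr': 1e-4, 'weight_decay': 1e-6,
        'lr_milestone': [60], 'num_worker': 8,
        'model_path_to_load': None,  # optional checkpoint to resume from
    },
}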
"""Load metadata from CSV files and export in JSON format."""
import os
import logging
from collections import namedtuple
from functools import lru_cache
from ons_csv_to_ctb_json_bilingual import BilingualDict, Bilingual
from ons_csv_to_ctb_json_read import Reader, required, optional
from ons_csv_to_ctb_json_geo import read_geo_cats
from ons_csv_to_ctb_json_ds_vars import DatasetVarsBuilder, DatasetVariables
PUBLIC_SECURITY_MNEMONIC = 'PUB'
GEOGRAPHIC_VARIABLE_TYPE = 'GEOG'
DatabaseVariables = namedtuple('DatabaseVariables', 'variables lowest_geog_variable')
def isnumeric(string):
"""Check whether the string is numeric."""
return string.isnumeric()
def is_y_or_n(string):
"""Return true if the string is either 'Y' or 'N'."""
return string in ['Y', 'N']
def isoneof(valid_values):
"""Return a function that checks whether the value is in the specified set of values."""
valid_values_set = set(valid_values)
def validate_fn(value):
"""Check if value is in set."""
return value in valid_values_set
return validate_fn
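# For example, a validator restricted to a known set of mnemonics is built once and then
# called once per cell value by the CSV reader:
#     is_valid = isoneof(['PUB', 'PRI'])
#     is_valid('PUB')  # True
#     is_valid('XXX')  # False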
def append_to_list_in_dict(dictionary, key, value):
"""
Append a value to the list at dictionary[key].
An empty list is first created if the key is not in dictionary.
"""
if key not in dictionary:
dictionary[key] = []
dictionary[key].append(value)
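# Note: this is equivalent to dictionary.setdefault(key, []).append(value).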
class Loader:
"""
Loader contains methods for loading metadata objects from CSV files.
Some of the CSV source files contain information on metadata objects. Basic validation is
performed on each row, e.g. to verify that required fields are populated, foreign keys are valid
etc. The raw data is then modified and relationships to other objects are resolved to create
a hierarchical representation of the metadata. Other files contain relationships between
objects. The data in these files is also validated and relationships between objects are
created.
Many of the fields in this class are cached properties, with the data loaded on first access.
"""
def __init__(self, input_directory, geography_file, best_effort=False):
"""Initialise MetadataLoader object."""
self.input_directory = input_directory
self.geography_file = geography_file
self._error_count = 0
def raise_value_error(msg):
"""Raise a ValueError exception."""
raise ValueError(msg)
def log_error(msg):
"""Log the error."""
self._error_count += 1
logging.warning(msg)
self.recoverable_error = log_error if best_effort else raise_value_error
def error_count(self):
"""Return number of errors."""
return self._error_count
def read_file(self, filename, columns, unique_combo_fields=None):
"""
Read data from a CSV file.
A list of ons_csv_to_ctb_json_read.Row objects is returned. Each Row contains the data
and corresponding line number.
"""
full_filename = self.full_filename(filename)
return Reader(full_filename, columns, self.recoverable_error, unique_combo_fields).read()
def full_filename(self, filename):
"""Add the input_directory path to the filename."""
return os.path.join(self.input_directory, filename)
@property
@lru_cache(maxsize=1)
def contacts(self):
"""Load contacts."""
columns = [
required('Contact_Id', unique=True),
required('Contact_Name'),
required('Contact_Email'),
optional('Contact_Phone'),
optional('Contact_Website'),
]
contact_rows = self.read_file('Contact.csv', columns)
contacts = {}
for contact, _ in contact_rows:
contacts[contact['Contact_Id']] = BilingualDict(contact)
return contacts
@property
@lru_cache(maxsize=1)
def sources(self):
"""Load sources."""
columns = [
required('Source_Mnemonic', unique=True),
required('Source_Description'),
required('Id'),
required('Version'),
optional('Source_Description_Welsh'),
optional('Copyright_Statement'),
optional('Licence'),
optional('Nationals_Statistic_Certified'),
optional('Methodology_Link'),
optional('Methodology_Statement'),
optional('Methodology_Statement_Welsh'),
optional('SDC_Link'),
optional('SDC_Statement'),
optional('SDC_Statement_Welsh'),
optional('Contact_Id', validate_fn=isoneof(self.contacts.keys())),
]
source_rows = self.read_file('Source.csv', columns)
sources = {}
for source, _ in source_rows:
source['Source_Description'] = Bilingual(
source.pop('Source_Description'),
source.pop('Source_Description_Welsh'))
source['Methodology_Statement'] = Bilingual(
source.pop('Methodology_Statement'),
source.pop('Methodology_Statement_Welsh'))
source['SDC_Statement'] = Bilingual(
source.pop('SDC_Statement'),
source.pop('SDC_Statement_Welsh'))
source['Contact'] = self.contacts.get(source.pop('Contact_Id'), None)
del source['Id']
sources[source['Source_Mnemonic']] = BilingualDict(source)
return sources
@property
@lru_cache(maxsize=1)
def census_releases(self):
"""Load census releases."""
columns = [
required('Census_Release_Number', unique=True),
required('Census_Release_Description'),
required('Release_Date'),
required('Id'),
]
census_release_rows = self.read_file('Census_Release.csv', columns)
census_releases = {}
for census_release, _ in census_release_rows:
del census_release['Id']
census_releases[census_release['Census_Release_Number']] = BilingualDict(
census_release)
return census_releases
@property
@lru_cache(maxsize=1)
def security_classifications(self):
"""
Load security classifications.
Security classifications are not explicitly exported in the JSON output. Only datasets
and classifications with a public security classification are exported.
"""
filename = 'Security_Classification.csv'
columns = [
required('Security_Mnemonic', unique=True),
required('Id'),
required('Security_Description'),
optional('Security_Description_Welsh'),
]
security_classification_rows = self.read_file(filename, columns)
security_classifications = {sc.data['Security_Mnemonic'] for sc in
security_classification_rows}
# PUBLIC_SECURITY_MNEMONIC is used to identify datasets, variables and classifications that
# should be included in the JSON output. Ensure that it is one of the Security_Mnemonic
# values in the source file.
if PUBLIC_SECURITY_MNEMONIC not in security_classifications:
raise ValueError(f'{PUBLIC_SECURITY_MNEMONIC} not found as Security_Mnemonic for any '
f'entry in {self.full_filename(filename)}')
return security_classifications
@property
@lru_cache(maxsize=1)
def statistical_units(self):
"""Load statistical units."""
columns = [
required('Statistical_Unit', unique=True),
required('Statistical_Unit_Description'),
required('Id'),
optional('Statistical_Unit_Description_Welsh'),
]
statistical_unit_rows = self.read_file('Statistical_Unit.csv', columns)
statistical_units = {}
for stat_unit, _ in statistical_unit_rows:
stat_unit['Statistical_Unit_Description'] = Bilingual(
stat_unit.pop('Statistical_Unit_Description'),
stat_unit.pop('Statistical_Unit_Description_Welsh'))
del stat_unit['Id']
statistical_units[stat_unit['Statistical_Unit']] = BilingualDict(stat_unit)
return statistical_units
@property
@lru_cache(maxsize=1)
def datasets(self):
"""Load datasets."""
filename = 'Dataset.csv'
columns = [
required('Dataset_Mnemonic', unique=True),
required('Security_Mnemonic', validate_fn=isoneof(self.security_classifications)),
required('Database_Mnemonic', validate_fn=isoneof(self.databases.keys())),
required('Dataset_Title'),
required('Id'),
required('Geographic_Coverage'),
required('Dataset_Population'),
required('Statistical_Unit', validate_fn=isoneof(self.statistical_units.keys())),
required('Version'),
required('Dataset_Description'),
required('Signed_Off_Flag', validate_fn=is_y_or_n),
optional('Dataset_Title_Welsh'),
optional('Dataset_Description_Welsh'),
optional('Dataset_Mnemonic_2011'),
optional('Geographic_Coverage_Welsh'),
optional('Dataset_Population_Welsh'),
optional('Last_Updated'),
optional('Unique_Url'),
optional('Contact_Id', validate_fn=isoneof(self.contacts.keys())),
]
dataset_rows = self.read_file(filename, columns)
dataset_mnemonics = [d.data['Dataset_Mnemonic'] for d in dataset_rows]
dataset_to_related_datasets = self.load_dataset_to_related(dataset_mnemonics)
dataset_to_keywords = self.load_dataset_to_keywords(dataset_mnemonics)
dataset_to_publications = self.load_dataset_to_publications(dataset_mnemonics)
dataset_to_releases = self.load_dataset_to_releases(dataset_mnemonics)
dataset_to_variables = self.load_dataset_to_variables(dataset_mnemonics)
datasets = {}
for dataset, row_num in dataset_rows:
dataset_mnemonic = dataset.pop('Dataset_Mnemonic')
database_mnemonic = dataset.pop('Database_Mnemonic')
dataset['Geographic_Coverage'] = Bilingual(dataset.pop('Geographic_Coverage'),
dataset.pop('Geographic_Coverage_Welsh'))
dataset['Dataset_Population'] = Bilingual(dataset.pop('Dataset_Population'),
dataset.pop('Dataset_Population_Welsh'))
dataset['Statistical_Unit'] = self.statistical_units.get(
dataset.pop('Statistical_Unit'), None)
dataset['Contact'] = self.contacts.get(dataset.pop('Contact_Id'), None)
dataset['Keywords'] = dataset_to_keywords.get(dataset_mnemonic, [])
dataset['Related_Datasets'] = dataset_to_related_datasets.get(dataset_mnemonic, [])
dataset['Census_Releases'] = dataset_to_releases.get(dataset_mnemonic, [])
dataset['Publications'] = dataset_to_publications.get(dataset_mnemonic, [])
dataset_variables = dataset_to_variables.get(
dataset_mnemonic, DatasetVariables([], []))
alternate_geog_variables = (dataset_variables.alternate_geog_variables if
dataset_variables.alternate_geog_variables else [])
dataset['Alternate_Geographic_Variables'] = alternate_geog_variables
all_classifications = dataset_variables.classifications + alternate_geog_variables
# If the dataset is public then ensure that there is at least one classification and
# that all the classifications are also public.
if dataset['Security_Mnemonic'] == PUBLIC_SECURITY_MNEMONIC:
drop_dataset = False
if not dataset_variables.classifications:
self.recoverable_error(
f'Reading {self.full_filename(filename)}:{row_num} {dataset_mnemonic} '
'has no associated classifications or geographic variable')
drop_dataset = True
for classification in all_classifications:
if self.classifications[classification].private['Security_Mnemonic'] != \
PUBLIC_SECURITY_MNEMONIC:
self.recoverable_error(
f'Reading {self.full_filename(filename)}:{row_num} Public ONS '
f'dataset {dataset_mnemonic} has non-public classification '
f'{classification}')
drop_dataset = True
if classification not in \
self.databases[database_mnemonic].private['Classifications']:
self.recoverable_error(
f'Reading {self.full_filename(filename)}:{row_num} '
f'{dataset_mnemonic} has classification {classification} '
f'that is not in database {database_mnemonic}')
drop_dataset = True
if drop_dataset:
logging.warning(
f'Reading {self.full_filename(filename)}:{row_num} dropping record')
continue
del dataset['Id']
del dataset['Signed_Off_Flag']
datasets[dataset_mnemonic] = BilingualDict(
dataset,
# The private fields are not included in the English/Welsh variants of datasets.
# They are used later in processing and included in different parts of the metadata
# hierarchy.
private={
'Security_Mnemonic': dataset.pop('Security_Mnemonic'),
'Dataset_Title': Bilingual(dataset.pop('Dataset_Title'),
dataset.pop('Dataset_Title_Welsh')),
'Dataset_Description': Bilingual(dataset.pop('Dataset_Description'),
dataset.pop('Dataset_Description_Welsh')),
'Database_Mnemonic': database_mnemonic,
'Classifications': dataset_variables.classifications,
})
return datasets
@property
@lru_cache(maxsize=1)
def databases(self):
"""Load databases."""
columns = [
required('Database_Mnemonic', unique=True),
required('Source_Mnemonic', validate_fn=isoneof(self.sources.keys())),
required('Database_Title'),
required('Id'),
required('Database_Description'),
required('Version'),
# This should be mandatory but is not yet populated
optional('Cantabular_DB_Flag', validate_fn=is_y_or_n),
optional('Database_Title_Welsh'),
optional('Database_Description_Welsh'),
optional('IAR_Asset_Id'),
]
database_rows = self.read_file('Database.csv', columns)
database_mnemonics = [d.data['Database_Mnemonic'] for d in database_rows]
database_to_variables = self.load_database_to_variables(database_mnemonics)
databases = {}
for database, _ in database_rows:
database['Source'] = self.sources.get(database.pop('Source_Mnemonic'), None)
del database['Id']
del database['IAR_Asset_Id']
database_mnemonic = database.pop('Database_Mnemonic')
db_vars = database_to_variables.get(database_mnemonic, DatabaseVariables([], None))
classifications = [k for k, v in self.classifications.items() if
v.private['Variable_Mnemonic'] in db_vars.variables]
database['Lowest_Geog_Variable'] = db_vars.lowest_geog_variable
databases[database_mnemonic] = BilingualDict(
database,
# Database_Title is used to populate a Cantabular built-in field.
private={'Database_Title': Bilingual(database.pop('Database_Title'),
database.pop('Database_Title_Welsh')),
'Database_Description': Bilingual(
database.pop('Database_Description'),
database.pop('Database_Description_Welsh')),
'Classifications': classifications})
return databases
@property
@lru_cache(maxsize=1)
def categories(self):
"""
Load categories.
Cantabular has a built-in catLabels concept. This is a dictionary of category codes to
category labels. The data from the categories file are converted to this format.
English values are excluded. These will be present in the codebook. Unpopulated Welsh
values are also excluded.
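For example (hypothetical codes and labels), a classification whose categories have Welsh
labels is stored as Bilingual(None, {'1': 'Welsh label 1', '2': 'Welsh label 2'}); the
English side is left as None because those labels are already present in the codebook.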
"""
filename = 'Category.csv'
columns = [
required('Category_Code'),
required('Classification_Mnemonic', validate_fn=isoneof(self.classifications.keys())),
required('Internal_Category_Label_English'),
required('Id'),
required('Variable_Mnemonic'),
required('Version'),
# Sort_Order values are not validated as this is an optional field.
optional('Sort_Order'),
optional('External_Category_Label_English'),
optional('External_Category_Label_Welsh'),
]
category_rows = self.read_file(
filename, columns,
# There can only be one row for each Category_Code/Classification_Mnemonic combination.
unique_combo_fields=['Category_Code', 'Classification_Mnemonic'])
classification_to_cats = {}
for cat, row_num in category_rows:
classification_mnemonic = cat['Classification_Mnemonic']
if self.classifications[classification_mnemonic].private['Is_Geographic']:
raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
'found category for geographic classification '
f'{classification_mnemonic}: all categories for geographic '
'classifications must be in a separate lookup file')
append_to_list_in_dict(classification_to_cats, classification_mnemonic, cat)
categories = {}
for classification_mnemonic, one_var_categories in classification_to_cats.items():
num_cat_items = \
self.classifications[classification_mnemonic].private['Number_Of_Category_Items']
if num_cat_items and len(one_var_categories) != num_cat_items:
self.recoverable_error(
f'Reading {self.full_filename(filename)} '
f'Unexpected number of categories for {classification_mnemonic}: '
f'expected {num_cat_items} but found {len(one_var_categories)}')
welsh_cats = {cat['Category_Code']: cat['External_Category_Label_Welsh']
for cat in one_var_categories if cat['External_Category_Label_Welsh']}
if welsh_cats:
categories[classification_mnemonic] = Bilingual(None, welsh_cats)
# Categories for geographic variables are supplied in a separate file.
if not self.geography_file:
logging.info('No geography file specified')
return categories
for class_name, geo_cats in read_geo_cats(self.geography_file).items():
if class_name not in self.classifications:
logging.info(f'Reading {self.geography_file}: found Welsh labels for unknown '
f'classification: {class_name}')
continue
if not self.classifications[class_name].private['Is_Geographic']:
self.recoverable_error(f'Reading {self.geography_file}: found Welsh labels for '
f'non geographic classification: {class_name}')
continue
welsh_names = {cd: nm.welsh_name for cd, nm in geo_cats.items() if nm.welsh_name}
if geo_cats:
categories[class_name] = Bilingual(None, welsh_names if welsh_names else None)
return categories
@property
@lru_cache(maxsize=1)
def topics(self):
"""Load topics."""
columns = [
required('Topic_Mnemonic', unique=True),
required('Topic_Title'),
required('Topic_Description'),
required('Id'),
optional('Topic_Description_Welsh'),
optional('Topic_Title_Welsh'),
]
topic_rows = self.read_file('Topic.csv', columns)
topics = {}
for topic, _ in topic_rows:
topic['Topic_Description'] = Bilingual(topic.pop('Topic_Description'),
topic.pop('Topic_Description_Welsh'))
topic['Topic_Title'] = Bilingual(topic.pop('Topic_Title'),
topic.pop('Topic_Title_Welsh'))
del topic['Id']
topics[topic['Topic_Mnemonic']] = BilingualDict(topic)
return topics
@property
@lru_cache(maxsize=1)
def questions(self):
"""Load questions."""
columns = [
required('Question_Code', unique=True),
required('Question_Label'),
required('Version'),
required('Id'),
optional('Question_Label_Welsh'),
optional('Reason_For_Asking_Question'),
optional('Reason_For_Asking_Question_Welsh'),
optional('Question_First_Asked_In_Year'),
]
question_rows = self.read_file('Question.csv', columns)
questions = {}
for question, _ in question_rows:
question['Question_Label'] = Bilingual(
question.pop('Question_Label'),
question.pop('Question_Label_Welsh'))
question['Reason_For_Asking_Question'] = Bilingual(
question.pop('Reason_For_Asking_Question'),
question.pop('Reason_For_Asking_Question_Welsh'))
del question['Id']
questions[question['Question_Code']] = BilingualDict(question)
return questions
@property
@lru_cache(maxsize=1)
def variable_types(self):
"""Load variable types."""
filename = 'Variable_Type.csv'
columns = [
required('Variable_Type_Code', unique=True),
required('Variable_Type_Description'),
required('Id'),
optional('Variable_Type_Description_Welsh'),
]
variable_type_rows = self.read_file(filename, columns)
variable_types = {}
for var_type, _ in variable_type_rows:
var_type['Variable_Type_Description'] = Bilingual(
var_type.pop('Variable_Type_Description'),
var_type.pop('Variable_Type_Description_Welsh'))
del var_type['Id']
variable_types[var_type['Variable_Type_Code']] = BilingualDict(var_type)
# GEOGRAPHIC_VARIABLE_TYPE is used to identify geographic variables. Ensure that it is
# one of the Variable_Type_Code values in the source file.
if GEOGRAPHIC_VARIABLE_TYPE not in variable_types:
raise ValueError(f'{GEOGRAPHIC_VARIABLE_TYPE} not found as Variable_Type_Code for any '
f'entry in {self.full_filename(filename)}')
return variable_types
@property
@lru_cache(maxsize=1)
def variables(self):
"""Load variables."""
filename = 'Variable.csv'
columns = [
required('Variable_Mnemonic', unique=True),
required('Security_Mnemonic', validate_fn=isoneof(self.security_classifications)),
required('Variable_Type_Code', validate_fn=isoneof(self.variable_types.keys())),
required('Variable_Title'),
required('Variable_Description'),
required('Id'),
required('Version'),
required('Signed_Off_Flag', validate_fn=is_y_or_n),
# Required for non-geographic variables but not always populated in source files
optional('Statistical_Unit', validate_fn=isoneof(self.statistical_units.keys())),
# Required for geographic variables but not yet populated
optional('Geographic_Abbreviation'),
optional('Geographic_Theme'),
optional('Geographic_Coverage'),
optional('Variable_Title_Welsh'),
optional('Variable_Description_Welsh'),
optional('Variable_Mnemonic_2011'),
optional('Comparability_Comments'),
optional('Comparability_Comments_Welsh'),
optional('Uk_Comparison_Comments'),
optional('Uk_Comparison_Comments_Welsh'),
optional('Geographic_Abbreviation_Welsh'),
optional('Geographic_Theme_Welsh'),
optional('Geographic_Coverage_Welsh'),
optional('Topic_Mnemonic', validate_fn=isoneof(self.topics.keys())),
optional('Number_Of_Classifications'),
optional('Quality_Statement_Text'),
optional('Quality_Summary_URL'),
]
variable_rows = self.read_file(filename, columns)
variable_mnemonics = [v.data['Variable_Mnemonic'] for v in variable_rows]
variable_to_keywords = self.load_variable_to_keywords(variable_mnemonics)
variable_to_source_questions = self.load_variable_to_questions(variable_mnemonics)
en_geo_fields = {'Geographic_Abbreviation', 'Geographic_Theme', 'Geographic_Coverage'}
all_geo_fields = en_geo_fields | {'Geographic_Abbreviation_Welsh',
'Geographic_Theme_Welsh',
'Geographic_Coverage_Welsh'}
variables = {}
for variable, row_num in variable_rows:
# Ensure that non-geographic variables do not have geographic values set.
is_geographic = variable['Variable_Type_Code'] == GEOGRAPHIC_VARIABLE_TYPE
if not is_geographic:
# This value is not always populated in source files
# if not variable['Statistical_Unit']:
# raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
# f'no Statistical_Unit specified for non geographic variable: '
# f'{variable['Variable_Mnemonic']}')
for geo_field in all_geo_fields:
if variable[geo_field]:
self.recoverable_error(f'Reading {self.full_filename(filename)}:{row_num} '
f'{geo_field} specified for non geographic '
f'variable: {variable['Variable_Mnemonic']}')
# These values are not yet populated in source files
# else:
# for geo_field in en_geo_fields:
# if not variable[geo_field]:
# raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
# f'no {geo_field} specified for geographic variable: '
# f'{variable['Variable_Mnemonic']}')
variable_title = Bilingual(
variable.pop('Variable_Title'),
variable.pop('Variable_Title_Welsh'))
variable['Variable_Title'] = variable_title
variable['Comparability_Comments'] = Bilingual(
variable.pop('Comparability_Comments'),
variable.pop('Comparability_Comments_Welsh'))
variable['Uk_Comparison_Comments'] = Bilingual(
variable.pop('Uk_Comparison_Comments'),
variable.pop('Uk_Comparison_Comments_Welsh'))
variable['Geographic_Abbreviation'] = Bilingual(
variable.pop('Geographic_Abbreviation'),
variable.pop('Geographic_Abbreviation_Welsh'))
variable['Geographic_Theme'] = Bilingual(
variable.pop('Geographic_Theme'),
variable.pop('Geographic_Theme_Welsh'))
variable['Geographic_Coverage'] = Bilingual(
variable.pop('Geographic_Coverage'),
variable.pop('Geographic_Coverage_Welsh'))
variable['Variable_Type'] = self.variable_types.get(variable.pop('Variable_Type_Code'),
None)
variable['Statistical_Unit'] = self.statistical_units.get(
variable.pop('Statistical_Unit'), None)
variable['Topic'] = self.topics.get(variable.pop('Topic_Mnemonic'), None)
variable['Keywords'] = variable_to_keywords.get(variable['Variable_Mnemonic'], [])
variable['Questions'] = variable_to_source_questions.get(
variable['Variable_Mnemonic'], [])
del variable['Id']
del variable['Signed_Off_Flag']
# Number_Of_Classifications is not validated
del variable['Number_Of_Classifications']
variables[variable['Variable_Mnemonic']] = BilingualDict(
variable,
# A check is performed elsewhere to ensure that public classifications have public
# variables. Is_Geographic is used to check whether variables are geographic.
# Variable_Title and Version are used when creating classifications for geographic
# variables.
private={'Security_Mnemonic': variable.pop('Security_Mnemonic'),
'Is_Geographic': is_geographic,
'Variable_Title': variable_title,
'Version': variable['Version'],
'Variable_Description': Bilingual(
variable.pop('Variable_Description'),
variable.pop('Variable_Description_Welsh'))})
return variables
@property
@lru_cache(maxsize=1)
def classifications(self):
"""Load classifications."""
filename = 'Classification.csv'
columns = [
required('Id'),
required('Classification_Mnemonic', unique=True),
required('Variable_Mnemonic', validate_fn=isoneof(self.variables.keys())),
required('Internal_Classification_Label_English'),
required('Security_Mnemonic', validate_fn=isoneof(self.security_classifications)),
required('Version'),
required('Signed_Off_Flag', validate_fn=is_y_or_n),
optional('Number_Of_Category_Items', validate_fn=isnumeric),
optional('External_Classification_Label_English'),
optional('External_Classification_Label_Welsh'),
optional('Mnemonic_2011'),
optional('Parent_Classification_Mnemonic'),
optional('Default_Classification_Flag'),
optional('Flat_Classification_Flag'),
]
classification_rows = self.read_file(filename, columns)
classification_mnemonics = [c.data['Classification_Mnemonic'] for c in classification_rows]
classification_to_topics = self.load_classification_to_topics(classification_mnemonics)
classifications = {}
for classification, row_num in classification_rows:
variable_mnemonic = classification.pop('Variable_Mnemonic')
classification_mnemonic = classification.pop('Classification_Mnemonic')
if self.variables[variable_mnemonic].private['Is_Geographic']:
raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
f'{classification_mnemonic} has a geographic variable '
f'{variable_mnemonic} which is not allowed')
ons_variable = self.variables[variable_mnemonic]
classification['ONS_Variable'] = ons_variable
classification['Topics'] = classification_to_topics.get(classification_mnemonic, [])
internal_label = classification.pop('Internal_Classification_Label_English')
external_label = classification.pop('External_Classification_Label_English')
if not external_label:
external_label = internal_label
# Ensure that if a classification is public that the associated variable is public.
if classification['Security_Mnemonic'] == PUBLIC_SECURITY_MNEMONIC:
variable = classification['ONS_Variable']
if variable.private['Security_Mnemonic'] != PUBLIC_SECURITY_MNEMONIC:
raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
f'Public classification {classification_mnemonic} has '
f'non-public variable {variable_mnemonic}')
del classification['Signed_Off_Flag']
del classification['Flat_Classification_Flag']
del classification['Id']
num_cat_items = classification.pop('Number_Of_Category_Items')
num_cat_items = int(num_cat_items) if num_cat_items else 0
classifications[classification_mnemonic] = BilingualDict(
classification,
# The private fields are not included in the English/Welsh variants of datasets.
# They are used later in processing and included in different parts of the metadata
# hierarchy.
private={
'Number_Of_Category_Items': num_cat_items,
'Security_Mnemonic': classification.pop('Security_Mnemonic'),
'Classification_Label': Bilingual(
external_label,
classification.pop('External_Classification_Label_Welsh')),
'Variable_Mnemonic': variable_mnemonic,
'Variable_Description': ons_variable.private['Variable_Description'],
'Is_Geographic': False})
# Every geographic variable must have a corresponding classification with the same
# mnemonic. This is due to the fact that the dataset specifies a geographic variable
# rather than a classification. Automatically create a classification for each geographic
# variable.
for variable_mnemonic, variable in self.variables.items():
if variable.private['Is_Geographic']:
logging.debug('Creating classification for geographic variable: '
f'{variable_mnemonic}')
classifications[variable_mnemonic] = BilingualDict(
{
'Mnemonic_2011': None,
'Parent_Classification_Mnemonic': variable_mnemonic,
'Default_Classification_Flag': None,
'Version': variable.private['Version'],
'ONS_Variable': variable,
'Topics': [],
},
private={
'Number_Of_Category_Items': 0,
'Security_Mnemonic': variable.private['Security_Mnemonic'],
'Classification_Label': variable.private['Variable_Title'],
'Variable_Mnemonic': variable_mnemonic,
'Variable_Description': variable.private['Variable_Description'],
'Is_Geographic': True})
return classifications
def load_database_to_variables(self, database_mnemonics):
"""
Load the variables associated with each database.
This involves reading the Database_Variable.csv file which identifies the variables
associated with each database, identifying the classifications associated with
each variable and identifying the geographic variable with Lowest_Geog_Variable_Flag set
to Y.
"""
filename = 'Database_Variable.csv'
columns = [
required('Variable_Mnemonic', validate_fn=isoneof(self.variables.keys())),
required('Database_Mnemonic', validate_fn=isoneof(database_mnemonics)),
required('Id'),
required('Version'),
optional('Lowest_Geog_Variable_Flag', validate_fn=isoneof(['Y', 'N'])),
]
database_variable_rows = self.read_file(
filename, columns,
# There can only be one row for each Variable_Mnemonic/Database_Mnemonic combination.
unique_combo_fields=['Variable_Mnemonic', 'Database_Mnemonic'])
db_to_raw_vars = {}
for db_var, _ in database_variable_rows:
append_to_list_in_dict(db_to_raw_vars, db_var['Database_Mnemonic'], db_var)
database_to_variables = {}
for database_mnemonic, db_vars in db_to_raw_vars.items():
lowest_geog_var = None
variables = []
contains_geo_vars = False
for db_var in db_vars:
database_mnemonic = db_var['Database_Mnemonic']
variable_mnemonic = db_var['Variable_Mnemonic']
is_geographic = self.variables[variable_mnemonic].private['Is_Geographic']
if is_geographic:
contains_geo_vars = True
if db_var['Lowest_Geog_Variable_Flag'] == 'Y':
if not is_geographic:
self.recoverable_error(
f'Reading {self.full_filename(filename)} '
'Lowest_Geog_Variable_Flag set on non-geographic variable'
f' {variable_mnemonic} for database {database_mnemonic}')
elif lowest_geog_var:
self.recoverable_error(
f'Reading {self.full_filename(filename)} '
f'Lowest_Geog_Variable_Flag set on {variable_mnemonic} '
f'and {lowest_geog_var} for database {database_mnemonic}')
else:
lowest_geog_var = variable_mnemonic
variables.append(variable_mnemonic)
if not lowest_geog_var and contains_geo_vars:
self.recoverable_error(f'Reading {self.full_filename(filename)} '
'Lowest_Geog_Variable_Flag not set on any geographic '
f'variable for database {database_mnemonic}')
database_to_variables[database_mnemonic] = DatabaseVariables(
variables=variables, lowest_geog_variable=lowest_geog_var)
return database_to_variables
def load_dataset_to_related(self, dataset_mnemonics):
"""Load the related datasets relationships."""
filename = 'Related_Datasets.csv'
columns = [
required('Related_Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
]
related_dataset_rows = self.read_file(
filename, columns,
# There can only be one row for each Related_Dataset_Mnemonic/Dataset_Mnemonic
# combination.
unique_combo_fields=['Related_Dataset_Mnemonic', 'Dataset_Mnemonic'])
ds_to_related_ds_mnemonics = {}
for rel_ds, _ in related_dataset_rows:
append_to_list_in_dict(ds_to_related_ds_mnemonics, rel_ds['Dataset_Mnemonic'],
rel_ds['Related_Dataset_Mnemonic'])
return ds_to_related_ds_mnemonics
def load_dataset_to_keywords(self, dataset_mnemonics):
"""Load keywords associated with each dataset."""
columns = [
required('Dataset_Keyword'),
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
optional('Dataset_Keyword_Welsh'),
]
dataset_keyword_rows = self.read_file(
'Dataset_Keyword.csv', columns,
# There can only be one row for each Dataset_Mnemonic/Dataset_Keyword combination.
unique_combo_fields=['Dataset_Mnemonic', 'Dataset_Keyword'])
dataset_to_keywords = {}
for ds_key, _ in dataset_keyword_rows:
append_to_list_in_dict(dataset_to_keywords, ds_key['Dataset_Mnemonic'],
Bilingual(ds_key['Dataset_Keyword'],
ds_key['Dataset_Keyword_Welsh']))
return dataset_to_keywords
def load_dataset_to_publications(self, dataset_mnemonics):
"""Load publications associated with each dataset."""
columns = [
required('Publication_Mnemonic', unique=True),
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
optional('Publication_Title'),
optional('Publisher_Name'),
optional('Publisher_Website'),
]
publication_dataset_rows = self.read_file('Publication_Dataset.csv', columns)
dataset_to_pubs = {}
for ds_pub, _ in publication_dataset_rows:
del ds_pub['Id']
append_to_list_in_dict(dataset_to_pubs, ds_pub.pop('Dataset_Mnemonic'),
BilingualDict(ds_pub))
return dataset_to_pubs
def load_dataset_to_releases(self, dataset_mnemonics):
"""Load releases associated with each dataset."""
columns = [
required('Census_Release_Number', validate_fn=isoneof(self.census_releases.keys())),
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
]
release_dataset_rows = self.read_file(
'Release_Dataset.csv', columns,
# There can only be one row for each Dataset_Mnemonic/Census_Release_Number
# combination.
unique_combo_fields=['Dataset_Mnemonic', 'Census_Release_Number'])
dataset_to_releases = {}
for rel_ds, _ in release_dataset_rows:
append_to_list_in_dict(dataset_to_releases, rel_ds['Dataset_Mnemonic'],
self.census_releases[rel_ds['Census_Release_Number']])
return dataset_to_releases
def load_variable_to_keywords(self, variable_mnemonics):
"""Load keywords associated with each variable."""
columns = [
required('Variable_Mnemonic', validate_fn=isoneof(variable_mnemonics)),
required('Variable_Keyword'),
required('Id'),
optional('Variable_Keyword_Welsh'),
]
variable_keyword_rows = self.read_file(
'Variable_Keyword.csv', columns,
# There can only be one row for each Variable_Mnemonic/Variable_Keyword combination.
unique_combo_fields=['Variable_Mnemonic', 'Variable_Keyword'])
variable_to_keywords = {}
for var_key, _ in variable_keyword_rows:
append_to_list_in_dict(variable_to_keywords, var_key['Variable_Mnemonic'],
Bilingual(var_key['Variable_Keyword'],
var_key['Variable_Keyword_Welsh']))
return variable_to_keywords
def load_variable_to_questions(self, variable_mnemonics):
"""Load questions associated with each variable."""
columns = [
required('Source_Question_Code', validate_fn=isoneof(self.questions.keys())),
required('Variable_Mnemonic', validate_fn=isoneof(variable_mnemonics)),
required('Id'),
]
variable_source_question_rows = self.read_file(
'Variable_Source_Question.csv', columns,
# There can only be one row for each Variable_Mnemonic/Source_Question_Code
# combination.
unique_combo_fields=['Variable_Mnemonic', 'Source_Question_Code'])
var_to_src_questions = {}
for src_q, _ in variable_source_question_rows:
append_to_list_in_dict(var_to_src_questions, src_q['Variable_Mnemonic'],
self.questions[src_q['Source_Question_Code']])
return var_to_src_questions
def load_dataset_to_variables(self, dataset_mnemonics):
"""
Load variables associated with each dataset.
Variables can be geographic or non-geographic. Geographic variables will not have
Classification_Mnemonic or Processing_Priority set. If there are geographic variables then
one of them will have Lowest_Geog_Variable_Flag set to Y.
Each non-geographic variable will also have Classification_Mnemonic and Processing_Priority
set. The processing priorities indicate the order of the classifications. The priorities
for each dataset must be unique and sequential, starting from 1. The classifications are
ordered based on the priorities.
The geographic variable with Lowest_Geog_Variable_Flag set to Y (if present) is placed
at the start of the classifications list. There will be a classification with the same
mnemonic as each geographic variable.
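For example (hypothetical mnemonics): rows with non-geographic classifications CLASS_A
(Processing_Priority 1) and CLASS_B (Processing_Priority 2) plus a geographic variable
GEO_LOW flagged as the lowest geography yield the classification list
[GEO_LOW, CLASS_A, CLASS_B].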
"""
filename = 'Dataset_Variable.csv'
columns = [
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
required('Variable_Mnemonic', validate_fn=isoneof(self.variables.keys())),
optional('Classification_Mnemonic', validate_fn=isoneof(self.classifications.keys())),
optional('Processing_Priority', validate_fn=isnumeric),
optional('Lowest_Geog_Variable_Flag', validate_fn=isoneof({'Y', 'N'})),
]
dataset_variable_rows = self.read_file(
filename, columns,
# There can only be one row for each Dataset_Mnemonic/Variable_Mnemonic
# combination.
unique_combo_fields=['Dataset_Mnemonic', 'Variable_Mnemonic'])
ds_to_vars_builder = {}
for ds_variable, row_num in dataset_variable_rows:
dataset_mnemonic = ds_variable['Dataset_Mnemonic']
variable_mnemonic = ds_variable['Variable_Mnemonic']
if dataset_mnemonic not in ds_to_vars_builder:
ds_to_vars_builder[dataset_mnemonic] = DatasetVarsBuilder(
dataset_mnemonic, self.full_filename(filename), self.classifications,
self.recoverable_error)
vars_builder = ds_to_vars_builder[dataset_mnemonic]
if self.variables[variable_mnemonic].private['Is_Geographic']:
vars_builder.add_geographic_variable(ds_variable, row_num)
else:
vars_builder.add_non_geographic_variable(ds_variable, row_num)
ds_to_variables = {}
for dataset_mnemonic, vars_builder in ds_to_vars_builder.items():
ds_to_variables[dataset_mnemonic] = vars_builder.dataset_variables()
return ds_to_variables
def load_classification_to_topics(self, classification_mnemonics):
"""Load topics associated with each classification."""
columns = [
required('Topic_Mnemonic', validate_fn=isoneof(self.topics.keys())),
required('Classification_Mnemonic', validate_fn=isoneof(classification_mnemonics)),
required('Id'),
]
topic_classification_rows = self.read_file(
'Topic_Classification.csv', columns,
# There can only be one row for each Classification_Mnemonic/Topic_Mnemonic
# combination.
unique_combo_fields=['Classification_Mnemonic', 'Topic_Mnemonic'])
classification_to_topics = {}
for topic_class, _ in topic_classification_rows:
append_to_list_in_dict(classification_to_topics,
topic_class['Classification_Mnemonic'],
self.topics[topic_class['Topic_Mnemonic']])
return classification_to_topics
| """Load metadata from CSV files and export in JSON format."""
import os
import logging
from collections import namedtuple
from functools import lru_cache
from ons_csv_to_ctb_json_bilingual import BilingualDict, Bilingual
from ons_csv_to_ctb_json_read import Reader, required, optional
from ons_csv_to_ctb_json_geo import read_geo_cats
from ons_csv_to_ctb_json_ds_vars import DatasetVarsBuilder, DatasetVariables
PUBLIC_SECURITY_MNEMONIC = 'PUB'
GEOGRAPHIC_VARIABLE_TYPE = 'GEOG'
DatabaseVariables = namedtuple('DatabaseVariables', 'variables lowest_geog_variable')
def isnumeric(string):
"""Check whether the string is numeric."""
return string.isnumeric()
def is_y_or_n(string):
"""Return true if the string is either 'Y' or 'N'."""
return string in ['Y', 'N']
def isoneof(valid_values):
"""Return a function that checks whether the value is in the specified set of values."""
valid_values_set = set(valid_values)
def validate_fn(value):
"""Check if value is in set."""
return value in valid_values_set
return validate_fn
def append_to_list_in_dict(dictionary, key, value):
"""
Append a value to the list at dictionary[key].
An empty list is first created if the key is not in dictionary.
"""
if key not in dictionary:
dictionary[key] = []
dictionary[key].append(value)
class Loader:
"""
Loader contains methods for loading metadata objects from CSV files.
Some of the CSV source files contain information on metadata objects. Basic validation is
performed on each row, e.g. to verify that required fields are populated, foreign keys are valid
etc. The raw data is then modified and relationships to other objects are resolved to create
a hierarchical representation of the metadata. Other files contain relationships between
objects. The data in these files is also validated and relationships between objects are
created.
Many of the fields in this class are cached properties, with the data loaded on first access.
"""
def __init__(self, input_directory, geography_file, best_effort=False):
"""Initialise MetadataLoader object."""
self.input_directory = input_directory
self.geography_file = geography_file
self._error_count = 0
def raise_value_error(msg):
"""Raise a ValueError exception."""
raise ValueError(msg)
def log_error(msg):
"""Log the error."""
self._error_count += 1
logging.warning(msg)
self.recoverable_error = log_error if best_effort else raise_value_error
def error_count(self):
"""Return number of errors."""
return self._error_count
def read_file(self, filename, columns, unique_combo_fields=None):
"""
Read data from a CSV file.
A list of ons_csv_to_ctb_json_read.Row objects is returned. Each Row contains the data
and corresponding line number.
"""
full_filename = self.full_filename(filename)
return Reader(full_filename, columns, self.recoverable_error, unique_combo_fields).read()
def full_filename(self, filename):
"""Add the input_directory path to the filename."""
return os.path.join(self.input_directory, filename)
@property
@lru_cache(maxsize=1)
def contacts(self):
"""Load contacts."""
columns = [
required('Contact_Id', unique=True),
required('Contact_Name'),
required('Contact_Email'),
optional('Contact_Phone'),
optional('Contact_Website'),
]
contact_rows = self.read_file('Contact.csv', columns)
contacts = {}
for contact, _ in contact_rows:
contacts[contact['Contact_Id']] = BilingualDict(contact)
return contacts
@property
@lru_cache(maxsize=1)
def sources(self):
"""Load sources."""
columns = [
required('Source_Mnemonic', unique=True),
required('Source_Description'),
required('Id'),
required('Version'),
optional('Source_Description_Welsh'),
optional('Copyright_Statement'),
optional('Licence'),
optional('Nationals_Statistic_Certified'),
optional('Methodology_Link'),
optional('Methodology_Statement'),
optional('Methodology_Statement_Welsh'),
optional('SDC_Link'),
optional('SDC_Statement'),
optional('SDC_Statement_Welsh'),
optional('Contact_Id', validate_fn=isoneof(self.contacts.keys())),
]
source_rows = self.read_file('Source.csv', columns)
sources = {}
for source, _ in source_rows:
source['Source_Description'] = Bilingual(
source.pop('Source_Description'),
source.pop('Source_Description_Welsh'))
source['Methodology_Statement'] = Bilingual(
source.pop('Methodology_Statement'),
source.pop('Methodology_Statement_Welsh'))
source['SDC_Statement'] = Bilingual(
source.pop('SDC_Statement'),
source.pop('SDC_Statement_Welsh'))
source['Contact'] = self.contacts.get(source.pop('Contact_Id'), None)
del source['Id']
sources[source['Source_Mnemonic']] = BilingualDict(source)
return sources
@property
@lru_cache(maxsize=1)
def census_releases(self):
"""Load census releases."""
columns = [
required('Census_Release_Number', unique=True),
required('Census_Release_Description'),
required('Release_Date'),
required('Id'),
]
census_release_rows = self.read_file('Census_Release.csv', columns)
census_releases = {}
for census_release, _ in census_release_rows:
del census_release['Id']
census_releases[census_release['Census_Release_Number']] = BilingualDict(
census_release)
return census_releases
@property
@lru_cache(maxsize=1)
def security_classifications(self):
"""
Load security classifications.
Security classifications are not explicitly exported in the JSON output. Only datasets
and classifications with a public security classification are exported.
"""
filename = 'Security_Classification.csv'
columns = [
required('Security_Mnemonic', unique=True),
required('Id'),
required('Security_Description'),
optional('Security_Description_Welsh'),
]
security_classification_rows = self.read_file(filename, columns)
security_classifications = {sc.data['Security_Mnemonic'] for sc in
security_classification_rows}
# PUBLIC_SECURITY_MNEMONIC is used to identify datasets, variables and classifications that
# should be included in the JSON output. Ensure that it is one of the Security_Mnemonic
# values in the source file.
if PUBLIC_SECURITY_MNEMONIC not in security_classifications:
raise ValueError(f'{PUBLIC_SECURITY_MNEMONIC} not found as Security_Mnemonic for any '
f'entry in {self.full_filename(filename)}')
return security_classifications
@property
@lru_cache(maxsize=1)
def statistical_units(self):
"""Load statistical units."""
columns = [
required('Statistical_Unit', unique=True),
required('Statistical_Unit_Description'),
required('Id'),
optional('Statistical_Unit_Description_Welsh'),
]
statistical_unit_rows = self.read_file('Statistical_Unit.csv', columns)
statistical_units = {}
for stat_unit, _ in statistical_unit_rows:
stat_unit['Statistical_Unit_Description'] = Bilingual(
stat_unit.pop('Statistical_Unit_Description'),
stat_unit.pop('Statistical_Unit_Description_Welsh'))
del stat_unit['Id']
statistical_units[stat_unit['Statistical_Unit']] = BilingualDict(stat_unit)
return statistical_units
@property
@lru_cache(maxsize=1)
def datasets(self):
"""Load datasets."""
filename = 'Dataset.csv'
columns = [
required('Dataset_Mnemonic', unique=True),
required('Security_Mnemonic', validate_fn=isoneof(self.security_classifications)),
required('Database_Mnemonic', validate_fn=isoneof(self.databases.keys())),
required('Dataset_Title'),
required('Id'),
required('Geographic_Coverage'),
required('Dataset_Population'),
required('Statistical_Unit', validate_fn=isoneof(self.statistical_units.keys())),
required('Version'),
required('Dataset_Description'),
required('Signed_Off_Flag', validate_fn=is_y_or_n),
optional('Dataset_Title_Welsh'),
optional('Dataset_Description_Welsh'),
optional('Dataset_Mnemonic_2011'),
optional('Geographic_Coverage_Welsh'),
optional('Dataset_Population_Welsh'),
optional('Last_Updated'),
optional('Unique_Url'),
optional('Contact_Id', validate_fn=isoneof(self.contacts.keys())),
]
dataset_rows = self.read_file(filename, columns)
dataset_mnemonics = [d.data['Dataset_Mnemonic'] for d in dataset_rows]
dataset_to_related_datasets = self.load_dataset_to_related(dataset_mnemonics)
dataset_to_keywords = self.load_dataset_to_keywords(dataset_mnemonics)
dataset_to_publications = self.load_dataset_to_publications(dataset_mnemonics)
dataset_to_releases = self.load_dataset_to_releases(dataset_mnemonics)
dataset_to_variables = self.load_dataset_to_variables(dataset_mnemonics)
datasets = {}
for dataset, row_num in dataset_rows:
dataset_mnemonic = dataset.pop('Dataset_Mnemonic')
database_mnemonic = dataset.pop('Database_Mnemonic')
dataset['Geographic_Coverage'] = Bilingual(dataset.pop('Geographic_Coverage'),
dataset.pop('Geographic_Coverage_Welsh'))
dataset['Dataset_Population'] = Bilingual(dataset.pop('Dataset_Population'),
dataset.pop('Dataset_Population_Welsh'))
dataset['Statistical_Unit'] = self.statistical_units.get(
dataset.pop('Statistical_Unit'), None)
dataset['Contact'] = self.contacts.get(dataset.pop('Contact_Id'), None)
dataset['Keywords'] = dataset_to_keywords.get(dataset_mnemonic, [])
dataset['Related_Datasets'] = dataset_to_related_datasets.get(dataset_mnemonic, [])
dataset['Census_Releases'] = dataset_to_releases.get(dataset_mnemonic, [])
dataset['Publications'] = dataset_to_publications.get(dataset_mnemonic, [])
dataset_variables = dataset_to_variables.get(
dataset_mnemonic, DatasetVariables([], []))
alternate_geog_variables = (dataset_variables.alternate_geog_variables if
dataset_variables.alternate_geog_variables else [])
dataset['Alternate_Geographic_Variables'] = alternate_geog_variables
all_classifications = dataset_variables.classifications + alternate_geog_variables
# If the dataset is public then ensure that there is at least one classification and
# that all the classifications are also public.
if dataset['Security_Mnemonic'] == PUBLIC_SECURITY_MNEMONIC:
drop_dataset = False
if not dataset_variables.classifications:
self.recoverable_error(
f'Reading {self.full_filename(filename)}:{row_num} {dataset_mnemonic} '
'has no associated classifications or geographic variable')
drop_dataset = True
for classification in all_classifications:
if self.classifications[classification].private['Security_Mnemonic'] != \
PUBLIC_SECURITY_MNEMONIC:
self.recoverable_error(
f'Reading {self.full_filename(filename)}:{row_num} Public ONS '
f'dataset {dataset_mnemonic} has non-public classification '
f'{classification}')
drop_dataset = True
if classification not in \
self.databases[database_mnemonic].private['Classifications']:
self.recoverable_error(
f'Reading {self.full_filename(filename)}:{row_num} '
f'{dataset_mnemonic} has classification {classification} '
f'that is not in database {database_mnemonic}')
drop_dataset = True
if drop_dataset:
logging.warning(
f'Reading {self.full_filename(filename)}:{row_num} dropping record')
continue
del dataset['Id']
del dataset['Signed_Off_Flag']
datasets[dataset_mnemonic] = BilingualDict(
dataset,
# The private fields are not included in the English/Welsh variants of datasets.
# They are used later in processing and included in different parts of the metadata
# hierarchy.
private={
'Security_Mnemonic': dataset.pop('Security_Mnemonic'),
'Dataset_Title': Bilingual(dataset.pop('Dataset_Title'),
dataset.pop('Dataset_Title_Welsh')),
'Dataset_Description': Bilingual(dataset.pop('Dataset_Description'),
dataset.pop('Dataset_Description_Welsh')),
'Database_Mnemonic': database_mnemonic,
'Classifications': dataset_variables.classifications,
})
return datasets
@property
@lru_cache(maxsize=1)
def databases(self):
"""Load databases."""
columns = [
required('Database_Mnemonic', unique=True),
required('Source_Mnemonic', validate_fn=isoneof(self.sources.keys())),
required('Database_Title'),
required('Id'),
required('Database_Description'),
required('Version'),
# This should be mandatory but is not yet populated
optional('Cantabular_DB_Flag', validate_fn=is_y_or_n),
optional('Database_Title_Welsh'),
optional('Database_Description_Welsh'),
optional('IAR_Asset_Id'),
]
database_rows = self.read_file('Database.csv', columns)
database_mnemonics = [d.data['Database_Mnemonic'] for d in database_rows]
database_to_variables = self.load_database_to_variables(database_mnemonics)
databases = {}
for database, _ in database_rows:
database['Source'] = self.sources.get(database.pop('Source_Mnemonic'), None)
del database['Id']
del database['IAR_Asset_Id']
database_mnemonic = database.pop('Database_Mnemonic')
db_vars = database_to_variables.get(database_mnemonic, DatabaseVariables([], None))
classifications = [k for k, v in self.classifications.items() if
v.private['Variable_Mnemonic'] in db_vars.variables]
database['Lowest_Geog_Variable'] = db_vars.lowest_geog_variable
databases[database_mnemonic] = BilingualDict(
database,
# Database_Title is used to populate a Cantabular built-in field.
private={'Database_Title': Bilingual(database.pop('Database_Title'),
database.pop('Database_Title_Welsh')),
'Database_Description': Bilingual(
database.pop('Database_Description'),
database.pop('Database_Description_Welsh')),
'Classifications': classifications})
return databases
@property
@lru_cache(maxsize=1)
def categories(self):
"""
Load categories.
Cantabular has a built-in catLabels concept. This is a dictionary of category codes to
category labels. The data from the categories file are converted to this format.
English values are excluded. These will be present in the codebook. Unpopulated Welsh
values are also excluded.
"""
filename = 'Category.csv'
columns = [
required('Category_Code'),
required('Classification_Mnemonic', validate_fn=isoneof(self.classifications.keys())),
required('Internal_Category_Label_English'),
required('Id'),
required('Variable_Mnemonic'),
required('Version'),
# Sort_Order values are not validated as this is an optional field.
optional('Sort_Order'),
optional('External_Category_Label_English'),
optional('External_Category_Label_Welsh'),
]
category_rows = self.read_file(
filename, columns,
# There can only be one row for each Category_Code/Classification_Mnemonic combination.
unique_combo_fields=['Category_Code', 'Classification_Mnemonic'])
classification_to_cats = {}
for cat, row_num in category_rows:
classification_mnemonic = cat['Classification_Mnemonic']
if self.classifications[classification_mnemonic].private['Is_Geographic']:
raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
'found category for geographic classification '
f'{classification_mnemonic}: all categories for geographic '
'classifications must be in a separate lookup file')
append_to_list_in_dict(classification_to_cats, classification_mnemonic, cat)
categories = {}
for classification_mnemonic, one_var_categories in classification_to_cats.items():
num_cat_items = \
self.classifications[classification_mnemonic].private['Number_Of_Category_Items']
if num_cat_items and len(one_var_categories) != num_cat_items:
self.recoverable_error(
f'Reading {self.full_filename(filename)} '
f'Unexpected number of categories for {classification_mnemonic}: '
f'expected {num_cat_items} but found {len(one_var_categories)}')
welsh_cats = {cat['Category_Code']: cat['External_Category_Label_Welsh']
for cat in one_var_categories if cat['External_Category_Label_Welsh']}
if welsh_cats:
categories[classification_mnemonic] = Bilingual(None, welsh_cats)
# Categories for geographic variables are supplied in a separate file.
if not self.geography_file:
logging.info('No geography file specified')
return categories
for class_name, geo_cats in read_geo_cats(self.geography_file).items():
if class_name not in self.classifications:
logging.info(f'Reading {self.geography_file}: found Welsh labels for unknown '
f'classification: {class_name}')
continue
if not self.classifications[class_name].private['Is_Geographic']:
self.recoverable_error(f'Reading {self.geography_file}: found Welsh labels for '
f'non geographic classification: {class_name}')
continue
welsh_names = {cd: nm.welsh_name for cd, nm in geo_cats.items() if nm.welsh_name}
if geo_cats:
categories[class_name] = Bilingual(None, welsh_names if welsh_names else None)
return categories
@property
@lru_cache(maxsize=1)
def topics(self):
"""Load topics."""
columns = [
required('Topic_Mnemonic', unique=True),
required('Topic_Title'),
required('Topic_Description'),
required('Id'),
optional('Topic_Description_Welsh'),
optional('Topic_Title_Welsh'),
]
topic_rows = self.read_file('Topic.csv', columns)
topics = {}
for topic, _ in topic_rows:
topic['Topic_Description'] = Bilingual(topic.pop('Topic_Description'),
topic.pop('Topic_Description_Welsh'))
topic['Topic_Title'] = Bilingual(topic.pop('Topic_Title'),
topic.pop('Topic_Title_Welsh'))
del topic['Id']
topics[topic['Topic_Mnemonic']] = BilingualDict(topic)
return topics
@property
@lru_cache(maxsize=1)
def questions(self):
"""Load questions."""
columns = [
required('Question_Code', unique=True),
required('Question_Label'),
required('Version'),
required('Id'),
optional('Question_Label_Welsh'),
optional('Reason_For_Asking_Question'),
optional('Reason_For_Asking_Question_Welsh'),
optional('Question_First_Asked_In_Year'),
]
question_rows = self.read_file('Question.csv', columns)
questions = {}
for question, _ in question_rows:
question['Question_Label'] = Bilingual(
question.pop('Question_Label'),
question.pop('Question_Label_Welsh'))
question['Reason_For_Asking_Question'] = Bilingual(
question.pop('Reason_For_Asking_Question'),
question.pop('Reason_For_Asking_Question_Welsh'))
del question['Id']
questions[question['Question_Code']] = BilingualDict(question)
return questions
@property
@lru_cache(maxsize=1)
def variable_types(self):
"""Load variable types."""
filename = 'Variable_Type.csv'
columns = [
required('Variable_Type_Code', unique=True),
required('Variable_Type_Description'),
required('Id'),
optional('Variable_Type_Description_Welsh'),
]
variable_type_rows = self.read_file(filename, columns)
variable_types = {}
for var_type, _ in variable_type_rows:
var_type['Variable_Type_Description'] = Bilingual(
var_type.pop('Variable_Type_Description'),
var_type.pop('Variable_Type_Description_Welsh'))
del var_type['Id']
variable_types[var_type['Variable_Type_Code']] = BilingualDict(var_type)
# GEOGRAPHIC_VARIABLE_TYPE is used to identify geographic variables. Ensure that it is
# one of the Variable_Type_Code values in the source file.
if GEOGRAPHIC_VARIABLE_TYPE not in variable_types:
raise ValueError(f'{GEOGRAPHIC_VARIABLE_TYPE} not found as Variable_Type_Code for any '
f'entry in {self.full_filename(filename)}')
return variable_types
@property
@lru_cache(maxsize=1)
def variables(self):
"""Load variables."""
filename = 'Variable.csv'
columns = [
required('Variable_Mnemonic', unique=True),
required('Security_Mnemonic', validate_fn=isoneof(self.security_classifications)),
required('Variable_Type_Code', validate_fn=isoneof(self.variable_types.keys())),
required('Variable_Title'),
required('Variable_Description'),
required('Id'),
required('Version'),
required('Signed_Off_Flag', validate_fn=is_y_or_n),
# Required for non-geographic variables but not always populated in source files
optional('Statistical_Unit', validate_fn=isoneof(self.statistical_units.keys())),
# Required for geographic variables but not yet populated
optional('Geographic_Abbreviation'),
optional('Geographic_Theme'),
optional('Geographic_Coverage'),
optional('Variable_Title_Welsh'),
optional('Variable_Description_Welsh'),
optional('Variable_Mnemonic_2011'),
optional('Comparability_Comments'),
optional('Comparability_Comments_Welsh'),
optional('Uk_Comparison_Comments'),
optional('Uk_Comparison_Comments_Welsh'),
optional('Geographic_Abbreviation_Welsh'),
optional('Geographic_Theme_Welsh'),
optional('Geographic_Coverage_Welsh'),
optional('Topic_Mnemonic', validate_fn=isoneof(self.topics.keys())),
optional('Number_Of_Classifications'),
optional('Quality_Statement_Text'),
optional('Quality_Summary_URL'),
]
variable_rows = self.read_file(filename, columns)
variable_mnemonics = [v.data['Variable_Mnemonic'] for v in variable_rows]
variable_to_keywords = self.load_variable_to_keywords(variable_mnemonics)
variable_to_source_questions = self.load_variable_to_questions(variable_mnemonics)
en_geo_fields = {'Geographic_Abbreviation', 'Geographic_Theme', 'Geographic_Coverage'}
all_geo_fields = en_geo_fields | {'Geographic_Abbreviation_Welsh',
'Geographic_Theme_Welsh',
'Geographic_Coverage_Welsh'}
variables = {}
for variable, row_num in variable_rows:
# Ensure that non-geographic variables do not have geographic values set.
is_geographic = variable['Variable_Type_Code'] == GEOGRAPHIC_VARIABLE_TYPE
if not is_geographic:
# This value is not always populated in source files
# if not variable['Statistical_Unit']:
# raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
# f'no Statistical_Unit specified for non geographic variable: '
# f'{variable["Variable_Mnemonic"]}')
for geo_field in all_geo_fields:
if variable[geo_field]:
self.recoverable_error(f'Reading {self.full_filename(filename)}:{row_num} '
f'{geo_field} specified for non geographic '
f'variable: {variable["Variable_Mnemonic"]}')
# These values are not yet populated in source files
# else:
# for geo_field in en_geo_fields:
# if not variable[geo_field]:
# raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
# f'no {geo_field} specified for geographic variable: '
# f'{variable["Variable_Mnemonic"]}')
variable_title = Bilingual(
variable.pop('Variable_Title'),
variable.pop('Variable_Title_Welsh'))
variable['Variable_Title'] = variable_title
variable['Comparability_Comments'] = Bilingual(
variable.pop('Comparability_Comments'),
variable.pop('Comparability_Comments_Welsh'))
variable['Uk_Comparison_Comments'] = Bilingual(
variable.pop('Uk_Comparison_Comments'),
variable.pop('Uk_Comparison_Comments_Welsh'))
variable['Geographic_Abbreviation'] = Bilingual(
variable.pop('Geographic_Abbreviation'),
variable.pop('Geographic_Abbreviation_Welsh'))
variable['Geographic_Theme'] = Bilingual(
variable.pop('Geographic_Theme'),
variable.pop('Geographic_Theme_Welsh'))
variable['Geographic_Coverage'] = Bilingual(
variable.pop('Geographic_Coverage'),
variable.pop('Geographic_Coverage_Welsh'))
variable['Variable_Type'] = self.variable_types.get(variable.pop('Variable_Type_Code'),
None)
variable['Statistical_Unit'] = self.statistical_units.get(
variable.pop('Statistical_Unit'), None)
variable['Topic'] = self.topics.get(variable.pop('Topic_Mnemonic'), None)
variable['Keywords'] = variable_to_keywords.get(variable['Variable_Mnemonic'], [])
variable['Questions'] = variable_to_source_questions.get(
variable['Variable_Mnemonic'], [])
del variable['Id']
del variable['Signed_Off_Flag']
# Number_Of_Classifications is not validated
del variable['Number_Of_Classifications']
variables[variable['Variable_Mnemonic']] = BilingualDict(
variable,
# A check is performed elsewhere to ensure that public classifications have public
# variables. Is_Geographic is used to check whether variables are geographic.
# Variable_Title and Version are used when creating classifications for geographic
# variables.
private={'Security_Mnemonic': variable.pop('Security_Mnemonic'),
'Is_Geographic': is_geographic,
'Variable_Title': variable_title,
'Version': variable['Version'],
'Variable_Description': Bilingual(
variable.pop('Variable_Description'),
variable.pop('Variable_Description_Welsh'))})
return variables
@property
@lru_cache(maxsize=1)
def classifications(self):
"""Load classifications."""
filename = 'Classification.csv'
columns = [
required('Id'),
required('Classification_Mnemonic', unique=True),
required('Variable_Mnemonic', validate_fn=isoneof(self.variables.keys())),
required('Internal_Classification_Label_English'),
required('Security_Mnemonic', validate_fn=isoneof(self.security_classifications)),
required('Version'),
required('Signed_Off_Flag', validate_fn=is_y_or_n),
optional('Number_Of_Category_Items', validate_fn=isnumeric),
optional('External_Classification_Label_English'),
optional('External_Classification_Label_Welsh'),
optional('Mnemonic_2011'),
optional('Parent_Classification_Mnemonic'),
optional('Default_Classification_Flag'),
optional('Flat_Classification_Flag'),
]
classification_rows = self.read_file(filename, columns)
classification_mnemonics = [c.data['Classification_Mnemonic'] for c in classification_rows]
classification_to_topics = self.load_classification_to_topics(classification_mnemonics)
classifications = {}
for classification, row_num in classification_rows:
variable_mnemonic = classification.pop('Variable_Mnemonic')
classification_mnemonic = classification.pop('Classification_Mnemonic')
if self.variables[variable_mnemonic].private['Is_Geographic']:
raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
f'{classification_mnemonic} has a geographic variable '
f'{variable_mnemonic} which is not allowed')
ons_variable = self.variables[variable_mnemonic]
classification['ONS_Variable'] = ons_variable
classification['Topics'] = classification_to_topics.get(classification_mnemonic, [])
internal_label = classification.pop('Internal_Classification_Label_English')
external_label = classification.pop('External_Classification_Label_English')
if not external_label:
external_label = internal_label
# Ensure that if a classification is public that the associated variable is public.
if classification['Security_Mnemonic'] == PUBLIC_SECURITY_MNEMONIC:
variable = classification['ONS_Variable']
if variable.private['Security_Mnemonic'] != PUBLIC_SECURITY_MNEMONIC:
raise ValueError(f'Reading {self.full_filename(filename)}:{row_num} '
f'Public classification {classification_mnemonic} has '
f'non-public variable {variable_mnemonic}')
del classification['Signed_Off_Flag']
del classification['Flat_Classification_Flag']
del classification['Id']
num_cat_items = classification.pop('Number_Of_Category_Items')
num_cat_items = int(num_cat_items) if num_cat_items else 0
classifications[classification_mnemonic] = BilingualDict(
classification,
# The private fields are not included in the English/Welsh variants of datasets.
# They are used later in processing and included in different parts of the metadata
# hierarchy.
private={
'Number_Of_Category_Items': num_cat_items,
'Security_Mnemonic': classification.pop('Security_Mnemonic'),
'Classification_Label': Bilingual(
external_label,
classification.pop('External_Classification_Label_Welsh')),
'Variable_Mnemonic': variable_mnemonic,
'Variable_Description': ons_variable.private['Variable_Description'],
'Is_Geographic': False})
# Every geographic variable must have a corresponding classification with the same
# mnemonic. This is due to the fact that the dataset specifies a geographic variable
# rather than a classification. Automatically create a classification for each geographic
# variable.
for variable_mnemonic, variable in self.variables.items():
if variable.private['Is_Geographic']:
logging.debug('Creating classification for geographic variable: '
f'{variable_mnemonic}')
classifications[variable_mnemonic] = BilingualDict(
{
'Mnemonic_2011': None,
'Parent_Classification_Mnemonic': variable_mnemonic,
'Default_Classification_Flag': None,
'Version': variable.private['Version'],
'ONS_Variable': variable,
'Topics': [],
},
private={
'Number_Of_Category_Items': 0,
'Security_Mnemonic': variable.private['Security_Mnemonic'],
'Classification_Label': variable.private['Variable_Title'],
'Variable_Mnemonic': variable_mnemonic,
'Variable_Description': variable.private['Variable_Description'],
'Is_Geographic': True})
return classifications
def load_database_to_variables(self, database_mnemonics):
"""
Load the variables associated with each database.
This involves reading the Database_Variable.csv file which identifies the variables
associated with each database, identifying the classifications associated with
each variable and identifying the geographic variable with Lowest_Geog_Variable_Flag set
to Y.
"""
filename = 'Database_Variable.csv'
columns = [
required('Variable_Mnemonic', validate_fn=isoneof(self.variables.keys())),
required('Database_Mnemonic', validate_fn=isoneof(database_mnemonics)),
required('Id'),
required('Version'),
optional('Lowest_Geog_Variable_Flag', validate_fn=isoneof(['Y', 'N'])),
]
database_variable_rows = self.read_file(
filename, columns,
# There can only be one row for each Variable_Mnemonic/Database_Mnemonic combination.
unique_combo_fields=['Variable_Mnemonic', 'Database_Mnemonic'])
db_to_raw_vars = {}
for db_var, _ in database_variable_rows:
append_to_list_in_dict(db_to_raw_vars, db_var['Database_Mnemonic'], db_var)
database_to_variables = {}
for database_mnemonic, db_vars in db_to_raw_vars.items():
lowest_geog_var = None
variables = []
contains_geo_vars = False
for db_var in db_vars:
database_mnemonic = db_var['Database_Mnemonic']
variable_mnemonic = db_var['Variable_Mnemonic']
is_geographic = self.variables[variable_mnemonic].private['Is_Geographic']
if is_geographic:
contains_geo_vars = True
if db_var['Lowest_Geog_Variable_Flag'] == 'Y':
if not is_geographic:
self.recoverable_error(
f'Reading {self.full_filename(filename)} '
'Lowest_Geog_Variable_Flag set on non-geographic variable'
f' {variable_mnemonic} for database {database_mnemonic}')
elif lowest_geog_var:
self.recoverable_error(
f'Reading {self.full_filename(filename)} '
f'Lowest_Geog_Variable_Flag set on {variable_mnemonic} '
f'and {lowest_geog_var} for database {database_mnemonic}')
else:
lowest_geog_var = variable_mnemonic
variables.append(variable_mnemonic)
if not lowest_geog_var and contains_geo_vars:
self.recoverable_error(f'Reading {self.full_filename(filename)} '
'Lowest_Geog_Variable_Flag not set on any geographic '
f'variable for database {database_mnemonic}')
database_to_variables[database_mnemonic] = DatabaseVariables(
variables=variables, lowest_geog_variable=lowest_geog_var)
return database_to_variables
def load_dataset_to_related(self, dataset_mnemonics):
"""Load the related datasets relationships."""
filename = 'Related_Datasets.csv'
columns = [
required('Related_Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
]
related_dataset_rows = self.read_file(
filename, columns,
# There can only be one row for each Related_Dataset_Mnemonic/Dataset_Mnemonic
# combination.
unique_combo_fields=['Related_Dataset_Mnemonic', 'Dataset_Mnemonic'])
ds_to_related_ds_mnemonics = {}
for rel_ds, _ in related_dataset_rows:
append_to_list_in_dict(ds_to_related_ds_mnemonics, rel_ds['Dataset_Mnemonic'],
rel_ds['Related_Dataset_Mnemonic'])
return ds_to_related_ds_mnemonics
def load_dataset_to_keywords(self, dataset_mnemonics):
"""Load keywords associated with each dataset."""
columns = [
required('Dataset_Keyword'),
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
optional('Dataset_Keyword_Welsh'),
]
dataset_keyword_rows = self.read_file(
'Dataset_Keyword.csv', columns,
# There can only be one row for each Dataset_Mnemonic/Dataset_Keyword combination.
unique_combo_fields=['Dataset_Mnemonic', 'Dataset_Keyword'])
dataset_to_keywords = {}
for ds_key, _ in dataset_keyword_rows:
append_to_list_in_dict(dataset_to_keywords, ds_key['Dataset_Mnemonic'],
Bilingual(ds_key['Dataset_Keyword'],
ds_key['Dataset_Keyword_Welsh']))
return dataset_to_keywords
def load_dataset_to_publications(self, dataset_mnemonics):
"""Load publications associated with each dataset."""
columns = [
required('Publication_Mnemonic', unique=True),
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
optional('Publication_Title'),
optional('Publisher_Name'),
optional('Publisher_Website'),
]
publication_dataset_rows = self.read_file('Publication_Dataset.csv', columns)
dataset_to_pubs = {}
for ds_pub, _ in publication_dataset_rows:
del ds_pub['Id']
append_to_list_in_dict(dataset_to_pubs, ds_pub.pop('Dataset_Mnemonic'),
BilingualDict(ds_pub))
return dataset_to_pubs
def load_dataset_to_releases(self, dataset_mnemonics):
"""Load releases associated with each dataset."""
columns = [
required('Census_Release_Number', validate_fn=isoneof(self.census_releases.keys())),
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
]
release_dataset_rows = self.read_file(
'Release_Dataset.csv', columns,
# There can only be one row for each Dataset_Mnemonic/Census_Release_Number
# combination.
unique_combo_fields=['Dataset_Mnemonic', 'Census_Release_Number'])
dataset_to_releases = {}
for rel_ds, _ in release_dataset_rows:
append_to_list_in_dict(dataset_to_releases, rel_ds['Dataset_Mnemonic'],
self.census_releases[rel_ds['Census_Release_Number']])
return dataset_to_releases
def load_variable_to_keywords(self, variable_mnemonics):
"""Load keywords associated with each variable."""
columns = [
required('Variable_Mnemonic', validate_fn=isoneof(variable_mnemonics)),
required('Variable_Keyword'),
required('Id'),
optional('Variable_Keyword_Welsh'),
]
variable_keyword_rows = self.read_file(
'Variable_Keyword.csv', columns,
# There can only be one row for each Variable_Mnemonic/Variable_Keyword combination.
unique_combo_fields=['Variable_Mnemonic', 'Variable_Keyword'])
variable_to_keywords = {}
for var_key, _ in variable_keyword_rows:
append_to_list_in_dict(variable_to_keywords, var_key['Variable_Mnemonic'],
Bilingual(var_key['Variable_Keyword'],
var_key['Variable_Keyword_Welsh']))
return variable_to_keywords
def load_variable_to_questions(self, variable_mnemonics):
"""Load questions associated with each variable."""
columns = [
required('Source_Question_Code', validate_fn=isoneof(self.questions.keys())),
required('Variable_Mnemonic', validate_fn=isoneof(variable_mnemonics)),
required('Id'),
]
variable_source_question_rows = self.read_file(
'Variable_Source_Question.csv', columns,
# There can only be one row for each Variable_Mnemonic/Source_Question_Code
# combination.
unique_combo_fields=['Variable_Mnemonic', 'Source_Question_Code'])
var_to_src_questions = {}
for src_q, _ in variable_source_question_rows:
append_to_list_in_dict(var_to_src_questions, src_q['Variable_Mnemonic'],
self.questions[src_q['Source_Question_Code']])
return var_to_src_questions
def load_dataset_to_variables(self, dataset_mnemonics):
"""
Load variables associated with each dataset.
Variables can be geographic or non-geographic. Geographic variables will not have
Classification_Mnemonic or Processing_Priority set. If there are geographic variables then
one of them will have Lowest_Geog_Variable_Flag set to Y.
Each non-geographic variable will also have Classification_Mnemonic and Processing_Priority
set. The processing priorities indicate the order of the classifications. The priorities
for each dataset must be unique and sequential, starting from 1. The classifications are
ordered based on the priorities.
The geographic variable with Lowest_Geog_Variable_Flag set to Y (if present) is placed
at the start of the classifications list. There will be a classification with the same
mnemonic as each geographic variable.
"""
filename = 'Dataset_Variable.csv'
columns = [
required('Dataset_Mnemonic', validate_fn=isoneof(dataset_mnemonics)),
required('Id'),
required('Variable_Mnemonic', validate_fn=isoneof(self.variables.keys())),
optional('Classification_Mnemonic', validate_fn=isoneof(self.classifications.keys())),
optional('Processing_Priority', validate_fn=isnumeric),
optional('Lowest_Geog_Variable_Flag', validate_fn=isoneof({'Y', 'N'})),
]
dataset_variable_rows = self.read_file(
filename, columns,
# There can only be one row for each Dataset_Mnemonic/Variable_Mnemonic
# combination.
unique_combo_fields=['Dataset_Mnemonic', 'Variable_Mnemonic'])
ds_to_vars_builder = {}
for ds_variable, row_num in dataset_variable_rows:
dataset_mnemonic = ds_variable['Dataset_Mnemonic']
variable_mnemonic = ds_variable['Variable_Mnemonic']
if dataset_mnemonic not in ds_to_vars_builder:
ds_to_vars_builder[dataset_mnemonic] = DatasetVarsBuilder(
dataset_mnemonic, self.full_filename(filename), self.classifications,
self.recoverable_error)
vars_builder = ds_to_vars_builder[dataset_mnemonic]
if self.variables[variable_mnemonic].private['Is_Geographic']:
vars_builder.add_geographic_variable(ds_variable, row_num)
else:
vars_builder.add_non_geographic_variable(ds_variable, row_num)
ds_to_variables = {}
for dataset_mnemonic, vars_builder in ds_to_vars_builder.items():
ds_to_variables[dataset_mnemonic] = vars_builder.dataset_variables()
return ds_to_variables
def load_classification_to_topics(self, classification_mnemonics):
"""Load topics associated with each classification."""
columns = [
required('Topic_Mnemonic', validate_fn=isoneof(self.topics.keys())),
required('Classification_Mnemonic', validate_fn=isoneof(classification_mnemonics)),
required('Id'),
]
topic_classification_rows = self.read_file(
'Topic_Classification.csv', columns,
# There can only be one row for each Classification_Mnemonic/Topic_Mnemonic
# combination.
unique_combo_fields=['Classification_Mnemonic', 'Topic_Mnemonic'])
classification_to_topics = {}
for topic_class, _ in topic_classification_rows:
append_to_list_in_dict(classification_to_topics,
topic_class['Classification_Mnemonic'],
self.topics[topic_class['Topic_Mnemonic']])
return classification_to_topics
|
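A minimal usage sketch for the Loader class in the cell above; the module name (ons_csv_to_ctb_json_load) and the CSV directory are assumptions for illustration, while the constructor arguments, cached properties and error_count() are taken from the source.
# Hypothetical driver; module and directory names are assumptions, not part of the source cell.
from ons_csv_to_ctb_json_load import Loader  # assumed module name for the Loader class above
# best_effort=True downgrades recoverable validation errors to logged warnings instead of ValueError.
loader = Loader('metadata_csvs', geography_file=None, best_effort=True)
# The properties are lru_cached, so each CSV file is parsed at most once per Loader instance.
print(f'loaded {len(loader.datasets)} datasets and {len(loader.databases)} databases')
print(f'{loader.error_count()} recoverable errors were logged')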
from collections import OrderedDict
from datetime import datetime
from os import PathLike
from pathlib import Path
from typing import Collection
import fiona.crs
import numpy
import rasterio
from rasterio.crs import CRS
from rasterio.enums import Resampling
import rasterio.features
import shapely
import shapely.geometry
import shapely.wkt
import xarray
import PyOFS
from PyOFS import (
CRS_EPSG,
DATA_DIRECTORY,
LEAFLET_NODATA_VALUE,
NoDataError,
TIFF_CREATION_OPTIONS,
get_logger,
utilities,
)
LOGGER = get_logger('PyOFS.SMAP')
STUDY_AREA_POLYGON_FILENAME = DATA_DIRECTORY / 'reference' / 'wcofs.gpkg:study_area'
OUTPUT_CRS = fiona.crs.from_epsg(CRS_EPSG)
SOURCE_URLS = OrderedDict(
{
'OpenDAP': OrderedDict(
{
'JPL': 'https://thredds.jpl.nasa.gov/thredds/dodsC/ncml_aggregation/SalinityDensity/smap/aggregate__SMAP_JPL_L3_SSS_CAP_MONTHLY_V42.ncml',
}
)
}
)
class SMAPDataset:
"""
Soil Moisture Active Passive (SMAP) satellite sea-surface salinity.
"""
study_area_transform = None
study_area_extent = None
study_area_bounds = None
study_area_coordinates = None
def __init__(self, study_area_polygon_filename: PathLike = STUDY_AREA_POLYGON_FILENAME):
"""
Open the monthly SMAP sea-surface salinity dataset from the first available OpenDAP source
and clip it to the study area.
:param study_area_polygon_filename: filename of vector file containing study area boundary
:raises NoDataError: if no source dataset could be opened
"""
if not isinstance(study_area_polygon_filename, Path):
study_area_polygon_filename = Path(study_area_polygon_filename)
self.study_area_polygon_filename = study_area_polygon_filename
for source, source_url in SOURCE_URLS['OpenDAP'].items():
try:
self.dataset = xarray.open_dataset(source_url)
break
except Exception as error:
LOGGER.warning(f'{error.__class__.__name__}: {error}')
else:
raise NoDataError(f'dataset creation error: no data found in sources')
# construct rectangular polygon of granule extent
lon_min = float(self.dataset.geospatial_lon_min)
lon_max = float(self.dataset.geospatial_lon_max)
lat_min = float(self.dataset.geospatial_lat_min)
lat_max = float(self.dataset.geospatial_lat_max)
if lon_min < lon_max:
self.data_extent = shapely.geometry.Polygon(
[
(lon_min, lat_max),
(lon_max, lat_max),
(lon_max, lat_min),
(lon_min, lat_min),
]
)
else:
# geospatial bounds cross the antimeridian, so we create a multipolygon
self.data_extent = shapely.geometry.MultiPolygon(
[
shapely.geometry.Polygon(
[
(lon_min, lat_max),
(180, lat_max),
(180, lat_min),
(lon_min, lat_min),
]
),
shapely.geometry.Polygon(
[
(-180, lat_max),
(lon_max, lat_max),
(lon_max, lat_min),
(-180, lat_min),
]
),
]
)
lon_pixel_size = numpy.mean(numpy.diff(self.dataset['longitude'].values))
lat_pixel_size = numpy.mean(numpy.diff(self.dataset['latitude'].values))
if SMAPDataset.study_area_extent is None:
# get first record in layer
SMAPDataset.study_area_extent = shapely.geometry.MultiPolygon(
[
shapely.geometry.Polygon(polygon[0])
for polygon in utilities.get_first_record(
self.study_area_polygon_filename
)['geometry']['coordinates']
]
)
SMAPDataset.study_area_bounds = SMAPDataset.study_area_extent.bounds
SMAPDataset.study_area_transform = rasterio.transform.from_origin(
SMAPDataset.study_area_bounds[0],
SMAPDataset.study_area_bounds[3],
lon_pixel_size,
lat_pixel_size,
)
if SMAPDataset.study_area_bounds is not None:
self.dataset = self.dataset.sel(
longitude=slice(
SMAPDataset.study_area_bounds[0], SMAPDataset.study_area_bounds[2]
),
latitude=slice(
SMAPDataset.study_area_bounds[3], SMAPDataset.study_area_bounds[1]
),
)
if SMAPDataset.study_area_coordinates is None:
SMAPDataset.study_area_coordinates = {
'lon': self.dataset['longitude'],
'lat': self.dataset['latitude'],
}
def bounds(self) -> tuple:
"""
Get coordinate bounds of observation.
:return: tuple of bounds (west, south, east, north)
"""
return self.data_extent.bounds
def cell_size(self) -> tuple:
"""
Get cell sizes of observation.
:return: tuple of cell sizes (x_size, y_size)
"""
return self.dataset.geospatial_lon_resolution, self.dataset.geospatial_lat_resolution
def data(self, data_time: datetime, variable: str = 'sss') -> numpy.array:
"""
Retrieve SMAP SSS data.
:param data_time: datetime to retrieve (only uses month)
:param variable: SMAP variable to retrieve
:return: array of data
"""
output_data = None
if variable == 'sss':
output_data = self._sss(data_time)
return output_data
def _sss(self, data_time: datetime) -> numpy.array:
"""
Retrieve SMAP SSS data.
:param data_time: datetime to retrieve (only uses month)
:return: array of data
"""
# the monthly SMAP product has one timestep per month, timestamped on the 16th
data_time = datetime(data_time.year, data_time.month, 16)
if numpy.datetime64(data_time) in self.dataset['times'].values:
return self.dataset['smap_sss'].sel(times=data_time).values
else:
raise PyOFS.NoDataError(f'No data exists for {data_time:%Y%m%dT%H%M%S}.')
def write_rasters(
self,
output_dir: PathLike,
data_time: datetime,
variables: Collection[str] = tuple(['sss']),
filename_prefix: str = 'smos',
fill_value: float = LEAFLET_NODATA_VALUE,
driver: str = 'GTiff',
):
"""
Write SMAP rasters to file using data from given variables.
:param output_dir: path to output directory
:param data_time: datetime to retrieve (only uses month)
:param variables: variable names to write
:param filename_prefix: prefix for output filenames
:param fill_value: desired fill value of output
:param driver: string naming a valid GDAL driver (currently one of 'GTiff', 'GPKG', or 'AAIGrid')
"""
if not isinstance(output_dir, Path):
output_dir = Path(output_dir)
for variable in variables:
input_data = self.data(data_time, variable)
if input_data is not None and not numpy.isnan(input_data).all():
if fill_value is not None:
input_data[numpy.isnan(input_data)] = fill_value
gdal_args = {
'height': input_data.shape[0],
'width': input_data.shape[1],
'count': 1,
'dtype': rasterio.float32,
'crs': CRS.from_dict(OUTPUT_CRS),
'transform': SMAPDataset.study_area_transform,
'nodata': fill_value,
}
if driver == 'AAIGrid':
file_extension = 'asc'
gdal_args.update({'FORCE_CELLSIZE': 'YES'})
elif driver == 'GPKG':
file_extension = 'gpkg'
else:
file_extension = 'tiff'
gdal_args.update(TIFF_CREATION_OPTIONS)
output_filename = output_dir / f'{filename_prefix}_{variable}.{file_extension}'
# use rasterio to write to raster with GDAL args
LOGGER.info(f'Writing to {output_filename}')
with rasterio.open(output_filename, 'w', driver, **gdal_args) as output_raster:
output_raster.write(input_data, 1)
if driver == 'GTiff':
output_raster.build_overviews(
PyOFS.overview_levels(input_data.shape), Resampling['average']
)
output_raster.update_tags(ns='rio_overview', resampling='average')
def __repr__(self):
used_params = []
optional_params = [self.study_area_polygon_filename]
for param in optional_params:
if param is not None:
if 'str' in str(type(param)):
param = f'"{param}"'
else:
param = str(param)
used_params.append(param)
return f'{self.__class__.__name__}({str(', '.join(used_params))})'
if __name__ == '__main__':
output_dir = DATA_DIRECTORY / 'output' / 'test'
smap_dataset = SMAPDataset()
smap_dataset.write_rasters(output_dir, datetime(2018, 12, 1))
print('done')
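The SMAPDataset constructor above splits the granule extent into a shapely MultiPolygon when the geospatial bounds cross the antimeridian (lon_min > lon_max); a single polygon whose x range ran from 170 to -170 would instead cover the wrong 340 degrees of longitude. A minimal sketch with made-up bounds (an assumption, not values from the dataset) showing how the split extent behaves for containment tests:
import shapely.geometry
# Illustrative bounds only: a granule spanning 170E..170W crosses the antimeridian.
lon_min, lon_max, lat_min, lat_max = 170.0, -170.0, 30.0, 50.0
# Same split as in SMAPDataset.__init__: two half-extents meeting at +/-180 degrees.
extent = shapely.geometry.MultiPolygon(
    [
        shapely.geometry.Polygon(
            [(lon_min, lat_max), (180, lat_max), (180, lat_min), (lon_min, lat_min)]
        ),
        shapely.geometry.Polygon(
            [(-180, lat_max), (lon_max, lat_max), (lon_max, lat_min), (-180, lat_min)]
        ),
    ]
)
print(extent.contains(shapely.geometry.Point(175.0, 40.0)))   # True: just west of the antimeridian
print(extent.contains(shapely.geometry.Point(-175.0, 40.0)))  # True: just east of the antimeridian
print(extent.contains(shapely.geometry.Point(0.0, 40.0)))     # False: outside the granule extent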
| from collections import OrderedDict
from datetime import datetime
from os import PathLike
from pathlib import Path
from typing import Collection
import fiona.crs
import numpy
import rasterio
from rasterio.crs import CRS
from rasterio.enums import Resampling
import rasterio.features
import shapely
import shapely.geometry
import shapely.wkt
import xarray
import PyOFS
from PyOFS import (
CRS_EPSG,
DATA_DIRECTORY,
LEAFLET_NODATA_VALUE,
NoDataError,
TIFF_CREATION_OPTIONS,
get_logger,
utilities,
)
LOGGER = get_logger('PyOFS.SMAP')
STUDY_AREA_POLYGON_FILENAME = DATA_DIRECTORY / 'reference' / 'wcofs.gpkg:study_area'
OUTPUT_CRS = fiona.crs.from_epsg(CRS_EPSG)
SOURCE_URLS = OrderedDict(
{
'OpenDAP': OrderedDict(
{
'JPL': 'https://thredds.jpl.nasa.gov/thredds/dodsC/ncml_aggregation/SalinityDensity/smap/aggregate__SMAP_JPL_L3_SSS_CAP_MONTHLY_V42.ncml',
}
)
}
)
class SMAPDataset:
"""
Soil Moisture Active Passive (SMAP) satellite sea-surface salinity.
"""
study_area_transform = None
study_area_extent = None
study_area_bounds = None
study_area_coordinates = None
def __init__(self, study_area_polygon_filename: PathLike = STUDY_AREA_POLYGON_FILENAME):
"""
Open the monthly SMAP sea-surface salinity dataset from the first available OpenDAP source
and clip it to the study area.
:param study_area_polygon_filename: filename of vector file containing study area boundary
:raises NoDataError: if no source dataset could be opened
"""
if not isinstance(study_area_polygon_filename, Path):
study_area_polygon_filename = Path(study_area_polygon_filename)
self.study_area_polygon_filename = study_area_polygon_filename
for source, source_url in SOURCE_URLS['OpenDAP'].items():
try:
self.dataset = xarray.open_dataset(source_url)
break
except Exception as error:
LOGGER.warning(f'{error.__class__.__name__}: {error}')
else:
raise NoDataError(f'dataset creation error: no data found in sources')
# construct rectangular polygon of granule extent
lon_min = float(self.dataset.geospatial_lon_min)
lon_max = float(self.dataset.geospatial_lon_max)
lat_min = float(self.dataset.geospatial_lat_min)
lat_max = float(self.dataset.geospatial_lat_max)
if lon_min < lon_max:
self.data_extent = shapely.geometry.Polygon(
[
(lon_min, lat_max),
(lon_max, lat_max),
(lon_max, lat_min),
(lon_min, lat_min),
]
)
else:
# geospatial bounds cross the antimeridian, so we create a multipolygon
self.data_extent = shapely.geometry.MultiPolygon(
[
shapely.geometry.Polygon(
[
(lon_min, lat_max),
(180, lat_max),
(180, lat_min),
(lon_min, lat_min),
]
),
shapely.geometry.Polygon(
[
(-180, lat_max),
(lon_max, lat_max),
(lon_max, lat_min),
(-180, lat_min),
]
),
]
)
lon_pixel_size = numpy.mean(numpy.diff(self.dataset['longitude'].values))
lat_pixel_size = numpy.mean(numpy.diff(self.dataset['latitude'].values))
if SMAPDataset.study_area_extent is None:
# get first record in layer
SMAPDataset.study_area_extent = shapely.geometry.MultiPolygon(
[
shapely.geometry.Polygon(polygon[0])
for polygon in utilities.get_first_record(
self.study_area_polygon_filename
)['geometry']['coordinates']
]
)
SMAPDataset.study_area_bounds = SMAPDataset.study_area_extent.bounds
SMAPDataset.study_area_transform = rasterio.transform.from_origin(
SMAPDataset.study_area_bounds[0],
SMAPDataset.study_area_bounds[3],
lon_pixel_size,
lat_pixel_size,
)
if SMAPDataset.study_area_bounds is not None:
self.dataset = self.dataset.sel(
longitude=slice(
SMAPDataset.study_area_bounds[0], SMAPDataset.study_area_bounds[2]
),
latitude=slice(
SMAPDataset.study_area_bounds[3], SMAPDataset.study_area_bounds[1]
),
)
if SMAPDataset.study_area_coordinates is None:
SMAPDataset.study_area_coordinates = {
'lon': self.dataset['longitude'],
'lat': self.dataset['latitude'],
}
def bounds(self) -> tuple:
"""
Get coordinate bounds of observation.
:return: tuple of bounds (west, south, east, north)
"""
return self.data_extent.bounds
def cell_size(self) -> tuple:
"""
Get cell sizes of observation.
:return: tuple of cell sizes (x_size, y_size)
"""
return self.dataset.geospatial_lon_resolution, self.dataset.geospatial_lat_resolution
def data(self, data_time: datetime, variable: str = 'sss') -> numpy.array:
"""
Retrieve SMAP SSS data.
:param data_time: datetime to retrieve (only uses month)
:param variable: SMAP variable to retrieve
:return: array of data
"""
output_data = None
if variable == 'sss':
output_data = self._sss(data_time)
return output_data
def _sss(self, data_time: datetime) -> numpy.array:
"""
Retrieve SMAP SSS data.
:param data_time: datetime to retrieve (only uses month)
:return: array of data
"""
# the monthly SMAP product has one timestep per month, timestamped on the 16th
data_time = datetime(data_time.year, data_time.month, 16)
if numpy.datetime64(data_time) in self.dataset['times'].values:
return self.dataset['smap_sss'].sel(times=data_time).values
else:
raise PyOFS.NoDataError(f'No data exists for {data_time:%Y%m%dT%H%M%S}.')
def write_rasters(
self,
output_dir: PathLike,
data_time: datetime,
variables: Collection[str] = tuple(['sss']),
filename_prefix: str = 'smos',
fill_value: float = LEAFLET_NODATA_VALUE,
driver: str = 'GTiff',
):
"""
Write SMAP rasters to file using data from given variables.
:param output_dir: path to output directory
:param data_time: datetime to retrieve (only uses month)
:param variables: variable names to write
:param filename_prefix: prefix for output filenames
:param fill_value: desired fill value of output
:param driver: string naming a valid GDAL driver (currently one of 'GTiff', 'GPKG', or 'AAIGrid')
"""
if not isinstance(output_dir, Path):
output_dir = Path(output_dir)
for variable in variables:
input_data = self.data(data_time, variable)
if input_data is not None and not numpy.isnan(input_data).all():
if fill_value is not None:
input_data[numpy.isnan(input_data)] = fill_value
gdal_args = {
'height': input_data.shape[0],
'width': input_data.shape[1],
'count': 1,
'dtype': rasterio.float32,
'crs': CRS.from_dict(OUTPUT_CRS),
'transform': SMAPDataset.study_area_transform,
'nodata': fill_value,
}
if driver == 'AAIGrid':
file_extension = 'asc'
gdal_args.update({'FORCE_CELLSIZE': 'YES'})
elif driver == 'GPKG':
file_extension = 'gpkg'
else:
file_extension = 'tiff'
gdal_args.update(TIFF_CREATION_OPTIONS)
output_filename = output_dir / f'{filename_prefix}_{variable}.{file_extension}'
# use rasterio to write to raster with GDAL args
LOGGER.info(f'Writing to {output_filename}')
with rasterio.open(output_filename, 'w', driver, **gdal_args) as output_raster:
output_raster.write(input_data, 1)
if driver == 'GTiff':
output_raster.build_overviews(
PyOFS.overview_levels(input_data.shape), Resampling['average']
)
output_raster.update_tags(ns='rio_overview', resampling='average')
def __repr__(self):
used_params = []
optional_params = [self.study_area_polygon_filename]
for param in optional_params:
if param is not None:
if 'str' in str(type(param)):
param = f'"{param}"'
else:
param = str(param)
used_params.append(param)
return f'{self.__class__.__name__}({str(", ".join(used_params))})'
if __name__ == '__main__':
output_dir = DATA_DIRECTORY / 'output' / 'test'
smap_dataset = SMAPDataset()
smap_dataset.write_rasters(output_dir, datetime(2018, 12, 1))
print('done')
|
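The edited_code and original_code cells in this row differ only in the quoting inside the __repr__ f-string. A short sketch (assuming CPython) of why that matters: reusing the f-string's own quote character inside a replacement field is a SyntaxError before Python 3.12, where PEP 701 first permits it, while a different inner quote works on all versions.
# Sketch only; the filename is a made-up example, not data from either cell.
used_params = ['"study_area.gpkg"']
# Portable form (as in the original_code cell): inner quotes differ from the outer quotes.
portable = f'SMAPDataset({", ".join(used_params)})'
# The edited_code cell reuses the single quote inside the replacement field. compile() keeps
# this sketch importable on older interpreters and makes the pre-3.12 failure observable.
try:
    reused_quotes = eval(compile("f'SMAPDataset({', '.join(used_params)})'", '<demo>', 'eval'))
except SyntaxError:
    reused_quotes = None  # Python < 3.12 rejects the reused quote at compile time
print(portable, reused_quotes)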