hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5d567f9f6df6126465681cfaa1ad690dc7718551 | 43 | py | Python | Classifier/preprocess/edge_enhancement.py | withanageyasiru/diabetic-retinopathy-detection | 40b0dfe3d8010b475ff243d8f575a003ba01b004 | [
"MIT"
] | null | null | null | Classifier/preprocess/edge_enhancement.py | withanageyasiru/diabetic-retinopathy-detection | 40b0dfe3d8010b475ff243d8f575a003ba01b004 | [
"MIT"
] | null | null | null | Classifier/preprocess/edge_enhancement.py | withanageyasiru/diabetic-retinopathy-detection | 40b0dfe3d8010b475ff243d8f575a003ba01b004 | [
"MIT"
] | null | null | null |
def remove_noise():
pass
# TODO | 6.142857 | 19 | 0.534884 | 5 | 43 | 4.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.372093 | 43 | 7 | 20 | 6.142857 | 0.814815 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 0 | 1 | 0.5 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
5d59f65bf92afbe0d8d917a761494d91a7d8d3ba | 2,756 | py | Python | voxel_globe/meta/migrations/0002_optional_serivce_instance.py | ngageoint/voxel-globe | 91f386de652b704942165889c10468b2c4cf4eec | [
"MIT"
] | 28 | 2015-07-27T23:57:24.000Z | 2020-04-05T15:10:52.000Z | voxel_globe/meta/migrations/0002_optional_serivce_instance.py | VisionSystemsInc/voxel_globe | 6eb3fca5586726428e9d914f7b730ca164c64a52 | [
"MIT"
] | 50 | 2016-02-11T15:50:22.000Z | 2016-10-27T22:38:27.000Z | voxel_globe/meta/migrations/0002_optional_serivce_instance.py | ngageoint/voxel-globe | 91f386de652b704942165889c10468b2c4cf4eec | [
"MIT"
] | 8 | 2015-07-27T19:22:03.000Z | 2021-01-04T09:44:48.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('meta', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='camera',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='cameraset',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='controlpoint',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='coordinatesystem',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='coordinatetransform',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='image',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='imageset',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='pointcloud',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='sattelsite',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='scene',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='tiepoint',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='tiepointset',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
migrations.AlterField(
model_name='voxelworld',
name='service',
field=models.ForeignKey(blank=True, to='meta.ServiceInstance', null=True),
),
]
| 34.45 | 86 | 0.573295 | 244 | 2,756 | 6.397541 | 0.17623 | 0.16656 | 0.2082 | 0.241512 | 0.805894 | 0.805894 | 0.805894 | 0.805894 | 0.805894 | 0.805894 | 0 | 0.002585 | 0.298258 | 2,756 | 79 | 87 | 34.886076 | 0.80455 | 0.00762 | 0 | 0.712329 | 0 | 0 | 0.181486 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027397 | 0 | 0.068493 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
53802bb36c2e9738c1638170a183fc79c996f1fc | 11,794 | py | Python | moabb/paradigms/motor_imagery.py | mirfaridmusavian/moabb | d042be7b658d22dc160312a852c3c8918fa53797 | [
"BSD-3-Clause"
] | null | null | null | moabb/paradigms/motor_imagery.py | mirfaridmusavian/moabb | d042be7b658d22dc160312a852c3c8918fa53797 | [
"BSD-3-Clause"
] | null | null | null | moabb/paradigms/motor_imagery.py | mirfaridmusavian/moabb | d042be7b658d22dc160312a852c3c8918fa53797 | [
"BSD-3-Clause"
] | null | null | null | """Motor Imagery Paradigms"""
import abc
import logging
from moabb.paradigms.base import BaseParadigm
from moabb.datasets import utils
from moabb.datasets.fake import FakeDataset
log = logging.getLogger()
class BaseMotorImagery(BaseParadigm):
"""Base Motor imagery paradigm.
Please use one of the child classes
Parameters
----------
filters: list of list (defaults [[7, 35]])
bank of bandpass filter to apply.
events: List of str | None (default None)
event to use for epoching. If None, default to all events defined in
the dataset.
tmin: float (default 0.0)
Start time (in second) of the epoch, relative to the dataset specific
task interval e.g. tmin = 1 would mean the epoch will start 1 second
after the begining of the task as defined by the dataset.
tmax: float | None, (default None)
End time (in second) of the epoch, relative to the begining of the
dataset specific task interval. tmax = 5 would mean the epoch will end
5 second after the begining of the task as defined in the dataset. If
None, use the dataset value.
channels: list of str | None (default None)
list of channel to select. If None, use all EEG channels available in
the dataset.
resample: float | None (default None)
If not None, resample the eeg data with the sampling rate provided.
"""
def __init__(self, filters=([7, 35],), events=None, tmin=0.0, tmax=None,
channels=None, resample=None):
super().__init__()
self.filters = filters
self.channels = channels
self.events = events
self.resample = resample
if (tmax is not None):
if tmin >= tmax:
raise(ValueError("tmax must be greater than tmin"))
self.tmin = tmin
self.tmax = tmax
def is_valid(self, dataset):
ret = True
if not (dataset.paradigm == 'imagery'):
ret = False
# check if dataset has required events
if self.events:
if not set(self.events) <= set(dataset.event_id.keys()):
ret = False
# we should verify list of channels, somehow
return ret
@abc.abstractmethod
def used_events(self, dataset):
pass
@property
def datasets(self):
if self.tmax is None:
interval = None
else:
interval = self.tmax - self.tmin
return utils.dataset_search(paradigm='imagery',
events=self.events,
interval=interval,
has_all_events=True)
@property
def scoring(self):
return 'accuracy'
class SinglePass(BaseMotorImagery):
"""Single Bandpass filter motot Imagery.
Motor imagery paradigm with only one bandpass filter (default 8 to 32 Hz)
Parameters
----------
fmin: float (default 8)
cutoff frequency (Hz) for the high pass filter
fmax: float (default 32)
cutoff frequency (Hz) for the low pass filter
events: List of str | None (default None)
event to use for epoching. If None, default to all events defined in
the dataset.
tmin: float (default 0.0)
Start time (in second) of the epoch, relative to the dataset specific
task interval e.g. tmin = 1 would mean the epoch will start 1 second
after the begining of the task as defined by the dataset.
tmax: float | None, (default None)
End time (in second) of the epoch, relative to the begining of the
dataset specific task interval. tmax = 5 would mean the epoch will end
5 second after the begining of the task as defined in the dataset. If
None, use the dataset value.
channels: list of str | None (default None)
list of channel to select. If None, use all EEG channels available in
the dataset.
resample: float | None (default None)
If not None, resample the eeg data with the sampling rate provided.
"""
def __init__(self, fmin=8, fmax=32, **kwargs):
if 'filters' in kwargs.keys():
raise(ValueError("MotorImagery does not take argument filters"))
super().__init__(filters=[[fmin, fmax]], **kwargs)
class FilterBank(BaseMotorImagery):
"""Filter Bank MI."""
def __init__(self, filters=([8, 12], [12, 16], [16, 20], [20, 24],
[24, 28], [28, 32]), **kwargs):
"""init"""
super().__init__(filters=filters, **kwargs)
class LeftRightImagery(SinglePass):
"""Motor Imagery for left hand/right hand classification
Metric is 'roc_auc'
"""
def __init__(self, **kwargs):
if 'events' in kwargs.keys():
raise(ValueError('LeftRightImagery dont accept events'))
super().__init__(events=['left_hand', 'right_hand'], **kwargs)
def used_events(self, dataset):
return {ev: dataset.event_id[ev] for ev in self.events}
@property
def scoring(self):
return 'roc_auc'
class FilterBankLeftRightImagery(FilterBank):
"""Filter Bank Motor Imagery for left hand/right hand classification
Metric is 'roc_auc'
"""
def __init__(self, **kwargs):
if 'events' in kwargs.keys():
raise(ValueError('LeftRightImagery dont accept events'))
super().__init__(events=['left_hand', 'right_hand'], **kwargs)
def used_events(self, dataset):
return {ev: dataset.event_id[ev] for ev in self.events}
@property
def scoring(self):
return 'roc_auc'
class FilterBankMotorImagery(FilterBank):
"""
Filter bank n-class motor imagery.
Metric is 'roc-auc' if 2 classes and 'accuracy' if more
Parameters
-----------
events: List of str
event labels used to filter datasets (e.g. if only motor imagery is
desired).
n_classes: int,
number of classes each dataset must have. If events is given,
requires all imagery sorts to be within the events list.
"""
def __init__(self, n_classes=2, **kwargs):
"docstring"
super().__init__(**kwargs)
self.n_classes = n_classes
if self.events is None:
log.warning("Choosing from all possible events")
else:
assert n_classes <= len(
self.events), 'More classes than events specified'
def is_valid(self, dataset):
ret = True
if not dataset.paradigm == 'imagery':
ret = False
if self.events is None:
if not len(dataset.event_id) >= self.n_classes:
ret = False
else:
overlap = len(set(self.events) & set(dataset.event_id.keys()))
if not overlap >= self.n_classes:
ret = False
return ret
def used_events(self, dataset):
out = {}
if self.events is None:
for k, v in dataset.event_id.items():
out[k] = v
if len(out) == self.n_classes:
break
else:
for event in self.events:
if event in dataset.event_id.keys():
out[event] = dataset.event_id[event]
if len(out) == self.n_classes:
break
if len(out) < self.n_classes:
raise(ValueError(f"Dataset {dataset.code} did not have enough "
f"events in {self.events} to run analysis"))
return out
@property
def datasets(self):
if self.tmax is None:
interval = None
else:
interval = self.tmax - self.tmin
return utils.dataset_search(paradigm='imagery',
events=self.events,
total_classes=self.n_classes,
interval=interval,
has_all_events=False)
@property
def scoring(self):
if self.n_classes == 2:
return 'roc_auc'
else:
return 'accuracy'
class MotorImagery(SinglePass):
"""
N-class motor imagery.
Metric is 'roc-auc' if 2 classes and 'accuracy' if more
Parameters
-----------
events: List of str
event labels used to filter datasets (e.g. if only motor imagery is
desired).
n_classes: int,
number of classes each dataset must have. If events is given,
requires all imagery sorts to be within the events list.
fmin: float (default 8)
cutoff frequency (Hz) for the high pass filter
fmax: float (default 32)
cutoff frequency (Hz) for the low pass filter
tmin: float (default 0.0)
Start time (in second) of the epoch, relative to the dataset specific
task interval e.g. tmin = 1 would mean the epoch will start 1 second
after the begining of the task as defined by the dataset.
tmax: float | None, (default None)
End time (in second) of the epoch, relative to the begining of the
dataset specific task interval. tmax = 5 would mean the epoch will end
5 second after the begining of the task as defined in the dataset. If
None, use the dataset value.
channels: list of str | None (default None)
list of channel to select. If None, use all EEG channels available in
the dataset.
resample: float | None (default None)
If not None, resample the eeg data with the sampling rate provided.
"""
def __init__(self, n_classes=2, **kwargs):
super().__init__(**kwargs)
self.n_classes = n_classes
if self.events is None:
log.warning("Choosing from all possible events")
else:
assert n_classes <= len(
self.events), 'More classes than events specified'
def is_valid(self, dataset):
ret = True
if not dataset.paradigm == 'imagery':
ret = False
if self.events is None:
if not len(dataset.event_id) >= self.n_classes:
ret = False
else:
overlap = len(set(self.events) & set(dataset.event_id.keys()))
if not overlap >= self.n_classes:
ret = False
return ret
def used_events(self, dataset):
out = {}
if self.events is None:
for k, v in dataset.event_id.items():
out[k] = v
if len(out) == self.n_classes:
break
else:
for event in self.events:
if event in dataset.event_id.keys():
out[event] = dataset.event_id[event]
if len(out) == self.n_classes:
break
if len(out) < self.n_classes:
raise(ValueError(f"Dataset {dataset.code} did not have enough "
f"events in {self.events} to run analysis"))
return out
@property
def datasets(self):
if self.tmax is None:
interval = None
else:
interval = self.tmax - self.tmin
return utils.dataset_search(paradigm='imagery',
events=self.events,
interval=interval,
has_all_events=False)
@property
def scoring(self):
if self.n_classes == 2:
return 'roc_auc'
else:
return 'accuracy'
class FakeImageryParadigm(LeftRightImagery):
"""Fake Imagery for left hand/right hand classification.
"""
@property
def datasets(self):
return [FakeDataset(['left_hand', 'right_hand'], paradigm='imagery')]
| 31.367021 | 78 | 0.583263 | 1,471 | 11,794 | 4.593474 | 0.131203 | 0.027231 | 0.030191 | 0.021311 | 0.805239 | 0.793547 | 0.793547 | 0.781412 | 0.77638 | 0.77638 | 0 | 0.008491 | 0.330931 | 11,794 | 375 | 79 | 31.450667 | 0.847801 | 0.362049 | 0 | 0.774194 | 0 | 0 | 0.088273 | 0 | 0 | 0 | 0 | 0 | 0.010753 | 1 | 0.129032 | false | 0.021505 | 0.026882 | 0.032258 | 0.295699 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
538292cabc1a87593ffc50993f90c717a39d53aa | 2,970 | py | Python | day08.py | andreassjoberg/advent-of-code-2017 | cc982f37da5e4c50f076e65dc3b9d074b40facce | [
"MIT"
] | 2 | 2019-02-06T07:48:00.000Z | 2020-04-12T09:53:10.000Z | day08.py | andreassjoberg/advent-of-code-2017 | cc982f37da5e4c50f076e65dc3b9d074b40facce | [
"MIT"
] | null | null | null | day08.py | andreassjoberg/advent-of-code-2017 | cc982f37da5e4c50f076e65dc3b9d074b40facce | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Day 08 of advent of code"""
def part_one(data):
"""Part one"""
variables = {}
for line in data.splitlines():
commands = line.split(' ')
variables[commands[0]] = 0
for line in data.splitlines():
commands = line.split(' ')
operator = commands[5]
if commands[1] == 'inc':
value = 1
else:
value = -1
if operator == '>':
if variables[commands[4]] > int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '>=':
if variables[commands[4]] >= int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '<':
if variables[commands[4]] < int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '<=':
if variables[commands[4]] <= int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '==':
if variables[commands[4]] == int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '!=':
if variables[commands[4]] != int(commands[6]):
variables[commands[0]] += value * int(commands[2])
return variables[max(variables, key=variables.get)]
def part_two(data):
"""Part two"""
variables = {}
maximum = 0
for line in data.splitlines():
commands = line.split(' ')
variables[commands[0]] = 0
for line in data.splitlines():
commands = line.split(' ')
operator = commands[5]
if commands[1] == 'inc':
value = 1
else:
value = -1
if operator == '>':
if variables[commands[4]] > int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '>=':
if variables[commands[4]] >= int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '<':
if variables[commands[4]] < int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '<=':
if variables[commands[4]] <= int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '==':
if variables[commands[4]] == int(commands[6]):
variables[commands[0]] += value * int(commands[2])
elif operator == '!=':
if variables[commands[4]] != int(commands[6]):
variables[commands[0]] += value * int(commands[2])
maximum = max(variables[max(variables, key=variables.get)], maximum)
return maximum
if __name__ == '__main__':
with open('day08.input', 'r') as f:
INPUT_DATA = f.read()
print part_one(INPUT_DATA)
print part_two(INPUT_DATA)
| 37.125 | 76 | 0.521549 | 324 | 2,970 | 4.734568 | 0.154321 | 0.288136 | 0.164276 | 0.211213 | 0.853325 | 0.853325 | 0.806389 | 0.806389 | 0.806389 | 0.805737 | 0 | 0.031816 | 0.312121 | 2,970 | 79 | 77 | 37.594937 | 0.719041 | 0.006734 | 0 | 0.84058 | 0 | 0 | 0.017295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.028986 | 0 | 0 | 0 | null | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
53aedef05dee1285db85ae664630321f90181797 | 580,126 | py | Python | tests/test_vpcbuilder_all.py | ElendelOSS/VPCBuilder | 4cb0cc08571b3f8b5e0579165a6cc13be805b1f5 | [
"MIT"
] | 7 | 2018-11-12T04:59:28.000Z | 2021-08-19T06:14:44.000Z | tests/test_vpcbuilder_all.py | ElendelOSS/VPCBuilder | 4cb0cc08571b3f8b5e0579165a6cc13be805b1f5 | [
"MIT"
] | null | null | null | tests/test_vpcbuilder_all.py | ElendelOSS/VPCBuilder | 4cb0cc08571b3f8b5e0579165a6cc13be805b1f5 | [
"MIT"
] | 4 | 2018-11-11T22:46:35.000Z | 2021-05-04T04:01:27.000Z | import unittest
import json
import yaml
import src.macro
from mock import MagicMock
class TestVPCBuilderCoreLogicSetup(unittest.TestCase):
identifier = "TEST"
def setUp(self):
self.maxDiff = 10
class TestVPCBuilderCoreLogic(TestVPCBuilderCoreLogicSetup):
def test_macro_all_objects(self):
transform_call = {
"transformId": "801604450668::VPC",
"templateParameterValues": {
"VGW": "vgw-06bbcf429c1cb0eed"
},
"fragment": {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"VPC": {
"Type": "Versent::Network::VPC",
"Properties": {
"Subnets": {
"ReservedMgmt1": {
"CIDR": "172.16.0.0/26",
"AZ": 0,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT1",
"IPv6Iter": 0
},
"ReservedMgmt2": {
"CIDR": "172.16.1.0/26",
"AZ": 1,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT2",
"IPv6Iter": 1
},
"ReservedMgmt3": {
"CIDR": "172.16.2.0/26",
"AZ": 2,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT3",
"IPv6Iter": 2
},
"Internal1": {
"CIDR": "172.16.3.0/24",
"AZ": 0,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT1",
"IPv6Iter": 6
},
"Internal2": {
"CIDR": "172.16.4.0/24",
"AZ": 1,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT2",
"IPv6Iter": 7
},
"Internal3": {
"CIDR": "172.16.5.0/24",
"AZ": 2,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT3",
"IPv6Iter": 8
},
"ReservedNet3": {
"CIDR": "172.16.2.192/26",
"AZ": 2,
"NetACL": "RestrictedSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 9
},
"ReservedNet2": {
"CIDR": "172.16.1.192/26",
"AZ": 1,
"NetACL": "RestrictedSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 10
},
"ReservedNet1": {
"CIDR": "172.16.0.192/26",
"AZ": 0,
"NetACL": "RestrictedSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 11
},
"PerimeterInternal1": {
"CIDR": "172.16.6.0/24",
"AZ": 0,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT1",
"IPv6Iter": 3
},
"PerimeterInternal2": {
"CIDR": "172.16.7.0/24",
"AZ": 1,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT2",
"IPv6Iter": 4
},
"PerimeterInternal3": {
"CIDR": "172.16.8.0/24",
"AZ": 2,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT3",
"IPv6Iter": 5
}
},
"TransitGateways": {
"Test1": {
"Subnets": [
"Internal1",
"Internal2",
"Internal3"
],
"TransitGatewayId": "tgw-01234567890123456",
"Tags": {
"Name": "PRIVATE-EGRESS-VPC-TGW1",
"Purpose": "Gateway Attach 1"
}
},
"Test2": {
"Subnets": [
"Internal1",
"Internal2",
"Internal3"
],
"TransitGatewayId": "tgw-98765432109876543",
"Tags": {
"Name": "PRIVATE-EGRESS-VPC-TGW2",
"Purpose": "Gateway Attach 2"
},
"RouteTables": {
"InternalRT1": [
{
"RouteName": "Internal1",
"RouteCIDR": "10.0.0.0/8"
},
{
"RouteName": "Internal2",
"RouteCIDR": "192.168.0.0/16"
}
],
"InternalRT2": [
{
"RouteName": "Internal1",
"RouteCIDR": "10.0.0.0/8"
},
{
"RouteName": "Internal2",
"RouteCIDR": "192.168.0.0/16"
}
],
"InternalRT3": [
{
"RouteName": "Internal1",
"RouteCIDR": "10.0.0.0/8"
},
{
"RouteName": "Internal2",
"RouteCIDR": "192.168.0.0/16"
}
]
}
}
},
"Tags": {
"Name": "PRIVATE-EGRESS-VPC",
"Template": "VPC for private endpoints egress only"
},
"NATGateways": {
"NATGW3": {
"Subnet": "ReservedNet3",
"Routetable": "InternalRT3"
},
"NATGW2": {
"Subnet": "ReservedNet2",
"Routetable": "InternalRT2"
},
"NATGW1": {
"Subnet": "ReservedNet1",
"Routetable": "InternalRT1"
}
},
"NetworkACLs": {
"InternalSubnetAcl": {
"InternalSubnetAclEntryOutTCPUnreserved": "106,6,allow,true,172.16.0.0/16,1024,65535",
"InternalSubnetAclEntryOutUDPDNSIPv6": "113,17,allow,true,::/0,53,53",
"InternalSubnetAclEntryOutUDPUnreserved": "107,6,allow,true,172.16.0.0/16,1024,65535",
"InternalSubnetAclEntryOut": "100,-1,allow,true,172.16.0.0/16,1,65535",
"InternalSubnetAclEntryOutSSH": "150,6,allow,true,0.0.0.0/0,22,22",
"InternalSubnetAclEntryInUDPUnreservedIPv6": "105,17,allow,false,::/0,1024,65535",
"InternalSubnetAclEntryOutTCPDNSIPv6": "112,6,allow,true,::/0,53,53",
"InternalSubnetAclEntryOutTCPDNS": "110,6,allow,true,0.0.0.0/0,53,53",
"InternalSubnetAclEntryOutHTTPS": "103,6,allow,true,0.0.0.0/0,443,443",
"InternalSubnetAclEntryOutHTTP": "102,6,allow,true,0.0.0.0/0,80,80",
"InternalSubnetAclEntryOutHTTPIPv6": "104,6,allow,true,::/0,80,80",
"InternalSubnetAclEntryOutHTTPSIPv6": "105,6,allow,true,::/0,443,443",
"InternalSubnetAclEntryInTCPUnreservedIPv6": "104,6,allow,false,::/0,1024,65535",
"InternalSubnetAclEntryOutUDPDNS": "111,17,allow,true,0.0.0.0/0,53,53",
"InternalSubnetAclEntryIn": "100,-1,allow,false,172.16.0.0/16,1,65535",
"InternalSubnetAclEntryInTCPUnreserved": "102,6,allow,false,0.0.0.0/0,1024,65535",
"InternalSubnetAclEntryInUDPUnreserved": "103,17,allow,false,0.0.0.0/0,1024,65535"
},
"RestrictedSubnetAcl": {
"RestrictedSubnetAclEntryInUDPUnReserved": "91,17,allow,false,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryOutSSH": "103,6,allow,true,0.0.0.0/0,22,22",
"RestrictedSubnetAclEntryOutDNSTCPIPv6": "151,6,allow,true,::/0,53,53",
"RestrictedSubnetAclEntryOutHTTPSIPv6": "105,6,allow,true,::/0,443,443",
"RestrictedSubnetAclEntryInTCPUnReservedIPv6": "92,6,allow,false,::/0,1024,65535",
"RestrictedSubnetAclEntryNTP": "120,6,allow,true,0.0.0.0/0,123,123",
"RestrictedSubnetAclEntryOutPuppet": "94,6,allow,true,172.16.0.0/16,8140,8140",
"RestrictedSubnetAclEntryIn": "110,-1,allow,false,172.16.0.0/16,1,65535",
"RestrictedSubnetAclEntryOutHTTP": "101,6,allow,true,0.0.0.0/0,80,80",
"RestrictedSubnetAclEntryInHTTPSIPv6": "104,6,allow,false,::/0,443,443",
"RestrictedSubnetAclEntryInNetBios": "170,6,allow,false,172.16.0.0/16,389,389",
"RestrictedSubnetAclEntryOutDNSTCP": "150,6,allow,true,0.0.0.0/0,53,53",
"RestrictedSubnetAclEntryInUDPUnReservedIPv6": "93,17,allow,false,::/0,1024,65535",
"RestrictedSubnetAclEntryInHTTP": "101,6,allow,false,0.0.0.0/0,80,80",
"RestrictedSubnetAclEntryInHTTPIPv6": "103,6,allow,false,::/0,80,80",
"RestrictedSubnetAclEntryOutDNSUDP": "160,17,allow,true,0.0.0.0/0,53,53",
"RestrictedSubnetAclEntryInTCPUnReserved": "90,6,allow,false,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryOutTCPUnReserved": "90,6,allow,true,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryInDNSTCP": "150,6,allow,false,172.16.0.0/16,53,53",
"RestrictedSubnetAclEntryOutUDPUnReservedIPv6": "93,17,allow,true,::/0,1024,65535",
"RestrictedSubnetAclEntryOutNetBios1": "180,6,allow,true,172.16.0.0/16,137,139",
"RestrictedSubnetAclEntryOut": "110,-1,allow,true,172.16.0.0/16,1,65535",
"RestrictedSubnetAclEntryOutHTTPIPv6": "104,6,allow,true,::/0,80,80",
"RestrictedSubnetAclEntryOutHTTPS": "102,6,allow,true,0.0.0.0/0,443,443",
"RestrictedSubnetAclEntryOutNetBios": "170,6,allow,true,172.16.0.0/16,389,389",
"RestrictedSubnetAclEntryOutTCPUnReservedIPv6": "92,6,allow,true,::/0,1024,65535",
"RestrictedSubnetAclEntryOutUDPUnReserved": "91,17,allow,true,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryInNetBios1": "80,6,allow,false,172.16.0.0/16,137,139",
"RestrictedSubnetAclEntryOutSSHIPv6": "106,6,allow,true,::/0,22,22",
"RestrictedSubnetAclEntryInHTTPS": "102,6,allow,false,0.0.0.0/0,443,443",
"RestrictedSubnetAclEntryInDNSUDP": "160,17,allow,false,172.16.0.0/16,53,53",
"RestrictedSubnetAclEntryOutDNSUDPIPv6": "161,17,allow,true,::/0,53,53",
"RestrictedSubnetAclEntryInSquid2": "140,6,allow,false,172.16.0.0/16,3128,3128"
}
},
"SecurityGroups": {
"VPCEndpoint": {
"SecurityGroupIngress": [
[
"icmp",
-1,
-1,
"172.16.0.0/20",
"All ICMP Traffic"
],
[
"tcp",
0,
65535,
"172.16.0.0/20",
"All TCP Traffic"
],
[
"udp",
0,
65535,
"172.16.0.0/20",
"All UDP Traffic"
]
],
"Tags": {
"Name": "VPCEndpoint"
},
"GroupDescription": "VPC Endpoint Interface Firewall Rules",
"SecurityGroupEgress": [
[
"icmp",
-1,
-1,
"172.16.0.0/20",
"All ICMP Traffic"
],
[
"tcp",
0,
65535,
"172.16.0.0/20",
"All TCP Traffic"
],
[
"udp",
0,
65535,
"172.16.0.0/20",
"All UDP Traffic"
]
]
}
},
"Details": {
"VPCDesc": "Private Egress VPC",
"Region": "ap-southeast-2",
"VPCName": "PRIVATEEGRESSVPC",
"IPv6": "true"
},
"DHCP": {
"NTPServers": "169.254.169.123",
"NTBType": 2,
"Name": "DhcpOptions",
"DNSServers": "172.16.0.2"
},
"CIDR": "172.16.0.0/20",
"Endpoints": {
"kinesis-streams": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"cloudtrail": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"cloudformation": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"elasticloadbalancing": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ec2": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"logs": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"monitoring": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"s3": {
"RouteTableIds": [
"PublicRT",
"InternalRT1",
"InternalRT2",
"InternalRT3"
],
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"Type": "Gateway"
},
"dynamodb": {
"RouteTableIds": [
"PublicRT",
"InternalRT1",
"InternalRT2",
"InternalRT3"
],
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"Type": "Gateway"
},
"ec2messages": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"kms": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"config": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"events": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sagemaker.api": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ssm": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sns": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sagemaker.runtime": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"codebuild": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"servicecatalog": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"execute-api": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"secretsmanager": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ssmmessages": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
}
},
"RouteTables": {
"InternalRT3": "",
"PublicRT": [
{
"RouteName": "PublicRoute",
"RouteCIDR": "0.0.0.0/0",
"RouteGW": "InternetGateway"
},
{
"RouteName": "PublicRouteIPv6",
"RouteCIDR": "::/0",
"RouteGW": "InternetGateway"
}
],
"InternalRT2": "",
"InternalRT1": ""
}
}
}
},
"Description": "Private VPC Template",
"Parameters": {
"VGW": {
"Default": "vgw-012345678",
"Type": "String",
"Description": "VPC Gateway"
}
},
"Mappings": {}
},
"region": "us-east-1",
"params": {},
"requestId": "508122ef-6442-46eb-b2fc-5fab1f4f7064",
"accountId": "012345678901"
}
test_assert = {
"requestId": "508122ef-6442-46eb-b2fc-5fab1f4f7064",
"status": "success",
"fragment": {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"PRIVATEEGRESSVPC": {
"Type": "AWS::EC2::VPC",
"Properties": {
"CidrBlock": "172.16.0.0/20",
"EnableDnsHostnames": True,
"EnableDnsSupport": True,
"InstanceTenancy": "default",
"Tags": [
{
"Key": "Name",
"Value": "PRIVATEEGRESSVPC"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"IPv6Block": {
"Type": "AWS::EC2::VPCCidrBlock",
"Properties": {
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AmazonProvidedIpv6CidrBlock": True
}
},
"EgressGateway": {
"Type": "AWS::EC2::EgressOnlyInternetGateway",
"Properties": {
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"DhcpOptions": {
"Type": "AWS::EC2::DHCPOptions",
"Properties": {
"DomainNameServers": [
"172.16.0.2"
],
"NtpServers": [
"169.254.169.123"
],
"NetbiosNodeType": 2,
"Tags": [
{
"Key": "Name",
"Value": "DhcpOptions"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"DhcpOptionsAssociation": {
"Type": "AWS::EC2::VPCDHCPOptionsAssociation",
"Properties": {
"DhcpOptionsId": {
"Ref": "DhcpOptions"
},
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"InternetGateway": {
"Type": "AWS::EC2::InternetGateway",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternetGateway"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"IGWVPCGatewayAttachment": {
"Type": "AWS::EC2::VPCGatewayAttachment",
"Properties": {
"InternetGatewayId": {
"Ref": "InternetGateway"
},
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"VPCGatewayAttachment": {
"Type": "AWS::EC2::VPCGatewayAttachment",
"Properties": {
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"VpnGatewayId": {
"Ref": "VGW"
}
}
},
"VPCFlowLogsRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"vpc-flow-logs.amazonaws.com"
]
},
"Action": [
"sts:AssumeRole"
]
}
]
},
"Path": "/",
"Policies": [
{
"PolicyName": "root",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
}
]
}
}
]
}
},
"VPCFlowLogs": {
"Type": "AWS::EC2::FlowLog",
"Properties": {
"DeliverLogsPermissionArn": {
"Fn::GetAtt": [
"VPCFlowLogsRole",
"Arn"
]
},
"LogGroupName": "FlowLogsGroup",
"ResourceId": {
"Ref": "PRIVATEEGRESSVPC"
},
"ResourceType": "VPC",
"TrafficType": "ALL"
}
},
"Test1TransitGWAttach": {
"Type": "AWS::EC2::TransitGatewayAttachment",
"Properties": {
"TransitGatewayId": "tgw-01234567890123456",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"SubnetIds": [
{
"Ref": "Internal1"
},
{
"Ref": "Internal2"
},
{
"Ref": "Internal3"
}
],
"Tags": [
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC-TGW1"
},
{
"Key": "Purpose",
"Value": "Gateway Attach 1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"Test2TransitGWAttach": {
"Type": "AWS::EC2::TransitGatewayAttachment",
"Properties": {
"TransitGatewayId": "tgw-98765432109876543",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"SubnetIds": [
{
"Ref": "Internal1"
},
{
"Ref": "Internal2"
},
{
"Ref": "Internal3"
}
],
"Tags": [
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC-TGW2"
},
{
"Key": "Purpose",
"Value": "Gateway Attach 2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"Test2InternalRT1Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT1"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT1Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT1"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT2Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT2"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT2Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT2"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT3Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT3"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT3Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT3"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"InternalRT3": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalRT3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"InternalRT3RoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "InternalRT3"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"PublicRT": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "PublicRT"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"PublicRTRoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "PublicRT"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"PublicRoute": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"GatewayId": {
"Ref": "InternetGateway"
},
"RouteTableId": {
"Ref": "PublicRT"
}
}
},
"PublicRouteIPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"GatewayId": {
"Ref": "InternetGateway"
},
"RouteTableId": {
"Ref": "PublicRT"
}
}
},
"InternalRT2": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalRT2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"InternalRT2RoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "InternalRT2"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"InternalRT1": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalRT1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"InternalRT1RoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "InternalRT1"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"ReservedMgmt1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.0.0/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedMgmt1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
0,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedMgmt1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT1"
},
"SubnetId": {
"Ref": "ReservedMgmt1"
}
}
},
"ReservedMgmt1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedMgmt1"
}
}
},
"ReservedMgmt2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.1.0/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedMgmt2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
1,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedMgmt2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT2"
},
"SubnetId": {
"Ref": "ReservedMgmt2"
}
}
},
"ReservedMgmt2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedMgmt2"
}
}
},
"ReservedMgmt3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.2.0/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedMgmt3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
2,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedMgmt3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT3"
},
"SubnetId": {
"Ref": "ReservedMgmt3"
}
}
},
"ReservedMgmt3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedMgmt3"
}
}
},
"Internal1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.3.0/24",
"Tags": [
{
"Key": "Name",
"Value": "Internal1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
6,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Internal1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT1"
},
"SubnetId": {
"Ref": "Internal1"
}
}
},
"Internal1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Internal1"
}
}
},
"Internal2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.4.0/24",
"Tags": [
{
"Key": "Name",
"Value": "Internal2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
7,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Internal2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT2"
},
"SubnetId": {
"Ref": "Internal2"
}
}
},
"Internal2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Internal2"
}
}
},
"Internal3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.5.0/24",
"Tags": [
{
"Key": "Name",
"Value": "Internal3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
8,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Internal3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT3"
},
"SubnetId": {
"Ref": "Internal3"
}
}
},
"Internal3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Internal3"
}
}
},
"ReservedNet3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.2.192/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedNet3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
9,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedNet3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "ReservedNet3"
}
}
},
"ReservedNet3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedNet3"
}
}
},
"ReservedNet2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.1.192/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedNet2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
10,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedNet2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "ReservedNet2"
}
}
},
"ReservedNet2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedNet2"
}
}
},
"ReservedNet1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.0.192/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedNet1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
11,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedNet1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "ReservedNet1"
}
}
},
"ReservedNet1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedNet1"
}
}
},
"PerimeterInternal1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.6.0/24",
"Tags": [
{
"Key": "Name",
"Value": "PerimeterInternal1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
3,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"PerimeterInternal1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT1"
},
"SubnetId": {
"Ref": "PerimeterInternal1"
}
}
},
"PerimeterInternal1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "PerimeterInternal1"
}
}
},
"PerimeterInternal2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.7.0/24",
"Tags": [
{
"Key": "Name",
"Value": "PerimeterInternal2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
4,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"PerimeterInternal2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT2"
},
"SubnetId": {
"Ref": "PerimeterInternal2"
}
}
},
"PerimeterInternal2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "PerimeterInternal2"
}
}
},
"PerimeterInternal3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.8.0/24",
"Tags": [
{
"Key": "Name",
"Value": "PerimeterInternal3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
5,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"PerimeterInternal3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT3"
},
"SubnetId": {
"Ref": "PerimeterInternal3"
}
}
},
"PerimeterInternal3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "PerimeterInternal3"
}
}
},
"InternalSubnetAcl": {
"Type": "AWS::EC2::NetworkAcl",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalSubnetAcl"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"InternalSubnetAclEntryOutTCPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 106
}
},
"InternalSubnetAclEntryOutUDPDNSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 113
}
},
"InternalSubnetAclEntryOutUDPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 107
}
},
"InternalSubnetAclEntryOut": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 100
}
},
"InternalSubnetAclEntryOutSSH": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"InternalSubnetAclEntryInUDPUnreservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"InternalSubnetAclEntryOutTCPDNSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 112
}
},
"InternalSubnetAclEntryOutTCPDNS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"InternalSubnetAclEntryOutHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"InternalSubnetAclEntryOutHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"InternalSubnetAclEntryOutHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"InternalSubnetAclEntryOutHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"InternalSubnetAclEntryInTCPUnreservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"InternalSubnetAclEntryOutUDPDNS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 111
}
},
"InternalSubnetAclEntryIn": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 100
}
},
"InternalSubnetAclEntryInTCPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"InternalSubnetAclEntryInUDPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"RestrictedSubnetAcl": {
"Type": "AWS::EC2::NetworkAcl",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "RestrictedSubnetAcl"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"RestrictedSubnetAclEntryInUDPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 91
}
},
"RestrictedSubnetAclEntryOutSSH": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"RestrictedSubnetAclEntryOutDNSTCPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 151
}
},
"RestrictedSubnetAclEntryOutHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"RestrictedSubnetAclEntryInTCPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 92
}
},
"RestrictedSubnetAclEntryNTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 123,
"To": 123
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 120
}
},
"RestrictedSubnetAclEntryOutPuppet": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 8140,
"To": 8140
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 94
}
},
"RestrictedSubnetAclEntryIn": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"RestrictedSubnetAclEntryOutHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 101
}
},
"RestrictedSubnetAclEntryInHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"RestrictedSubnetAclEntryInNetBios": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 389,
"To": 389
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 170
}
},
"RestrictedSubnetAclEntryOutDNSTCP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"RestrictedSubnetAclEntryInUDPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 93
}
},
"RestrictedSubnetAclEntryInHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 101
}
},
"RestrictedSubnetAclEntryInHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"RestrictedSubnetAclEntryOutDNSUDP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 160
}
},
"RestrictedSubnetAclEntryInTCPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 90
}
},
"RestrictedSubnetAclEntryOutTCPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 90
}
},
"RestrictedSubnetAclEntryInDNSTCP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"RestrictedSubnetAclEntryOutUDPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 93
}
},
"RestrictedSubnetAclEntryOutNetBios1": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 137,
"To": 139
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 180
}
},
"RestrictedSubnetAclEntryOut": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"RestrictedSubnetAclEntryOutHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"RestrictedSubnetAclEntryOutHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"RestrictedSubnetAclEntryOutNetBios": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 389,
"To": 389
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 170
}
},
"RestrictedSubnetAclEntryOutTCPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 92
}
},
"RestrictedSubnetAclEntryOutUDPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 91
}
},
"RestrictedSubnetAclEntryInNetBios1": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 137,
"To": 139
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 80
}
},
"RestrictedSubnetAclEntryOutSSHIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 106
}
},
"RestrictedSubnetAclEntryInHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"RestrictedSubnetAclEntryInDNSUDP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 160
}
},
"RestrictedSubnetAclEntryOutDNSUDPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 161
}
},
"RestrictedSubnetAclEntryInSquid2": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 3128,
"To": 3128
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 140
}
},
"EIPNATGW3": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW3": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW3",
"AllocationId"
]
},
"SubnetId": {
"Ref": "ReservedNet3"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"InternalRT3NATGW3": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW3"
},
"RouteTableId": {
"Ref": "InternalRT3"
}
}
},
"InternalRT3NATGW3IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "InternalRT3"
}
}
},
"EIPNATGW2": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW2": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW2",
"AllocationId"
]
},
"SubnetId": {
"Ref": "ReservedNet2"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"InternalRT2NATGW2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW2"
},
"RouteTableId": {
"Ref": "InternalRT2"
}
}
},
"InternalRT2NATGW2IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "InternalRT2"
}
}
},
"EIPNATGW1": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW1": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW1",
"AllocationId"
]
},
"SubnetId": {
"Ref": "ReservedNet1"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"InternalRT1NATGW1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW1"
},
"RouteTableId": {
"Ref": "InternalRT1"
}
}
},
"InternalRT1NATGW1IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "InternalRT1"
}
}
},
"VPCEndpoint": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"GroupName": "VPCEndpoint",
"GroupDescription": "VPC Endpoint Interface Firewall Rules",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"SecurityGroupIngress": [
{
"IpProtocol": "icmp",
"FromPort": -1,
"ToPort": -1,
"CidrIp": "172.16.0.0/20",
"Description": "All ICMP Traffic"
},
{
"IpProtocol": "tcp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All TCP Traffic"
},
{
"IpProtocol": "udp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All UDP Traffic"
}
],
"SecurityGroupEgress": [
{
"IpProtocol": "icmp",
"FromPort": -1,
"ToPort": -1,
"CidrIp": "172.16.0.0/20",
"Description": "All ICMP Traffic"
},
{
"IpProtocol": "tcp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All TCP Traffic"
},
{
"IpProtocol": "udp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All UDP Traffic"
}
],
"Tags": [
{
"Key": "Name",
"Value": "VPCEndpoint"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"kinesisstreamsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".kinesis-streams"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"cloudtrailEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".cloudtrail"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"cloudformationEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".cloudformation"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"elasticloadbalancingEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".elasticloadbalancing"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ec2EndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ec2"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"logsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".logs"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"monitoringEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".monitoring"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"s3EndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".s3"
]
]
},
"VpcEndpointType": "Gateway",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"RouteTableIds": [
{
"Ref": "PublicRT"
},
{
"Ref": "InternalRT1"
},
{
"Ref": "InternalRT2"
},
{
"Ref": "InternalRT3"
}
]
}
},
"dynamodbEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".dynamodb"
]
]
},
"VpcEndpointType": "Gateway",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"RouteTableIds": [
{
"Ref": "PublicRT"
},
{
"Ref": "InternalRT1"
},
{
"Ref": "InternalRT2"
},
{
"Ref": "InternalRT3"
}
]
}
},
"ec2messagesEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ec2messages"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"kmsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".kms"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"configEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".config"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"eventsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".events"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"sagemakerapiEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sagemaker.api"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ssmEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ssm"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"snsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sns"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"sagemakerruntimeEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sagemaker.runtime"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"codebuildEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".codebuild"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"servicecatalogEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".servicecatalog"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"executeapiEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".execute-api"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"secretsmanagerEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".secretsmanager"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ssmmessagesEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ssmmessages"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
}
},
"Description": "Private VPC Template",
"Parameters": {
"VGW": {
"Default": "vgw-012345678",
"Type": "String",
"Description": "VPC Gateway"
}
},
"Mappings": {},
"Outputs": {
"PRIVATEEGRESSVPC": {
"Description": "PRIVATEEGRESSVPC",
"Value": {
"Ref": "PRIVATEEGRESSVPC"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-VPCid"
}
}
},
"InternalRT3": {
"Description": "InternalRT3",
"Value": {
"Ref": "InternalRT3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-InternalRT3"
}
}
},
"PublicRT": {
"Description": "PublicRT",
"Value": {
"Ref": "PublicRT"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-PublicRT"
}
}
},
"InternalRT2": {
"Description": "InternalRT2",
"Value": {
"Ref": "InternalRT2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-InternalRT2"
}
}
},
"InternalRT1": {
"Description": "InternalRT1",
"Value": {
"Ref": "InternalRT1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-InternalRT1"
}
}
},
"ReservedMgmt1": {
"Description": "ReservedMgmt1",
"Value": {
"Ref": "ReservedMgmt1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedMgmt1"
}
}
},
"ReservedMgmt2": {
"Description": "ReservedMgmt2",
"Value": {
"Ref": "ReservedMgmt2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedMgmt2"
}
}
},
"ReservedMgmt3": {
"Description": "ReservedMgmt3",
"Value": {
"Ref": "ReservedMgmt3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedMgmt3"
}
}
},
"Internal1": {
"Description": "Internal1",
"Value": {
"Ref": "Internal1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Internal1"
}
}
},
"Internal2": {
"Description": "Internal2",
"Value": {
"Ref": "Internal2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Internal2"
}
}
},
"Internal3": {
"Description": "Internal3",
"Value": {
"Ref": "Internal3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Internal3"
}
}
},
"ReservedNet3": {
"Description": "ReservedNet3",
"Value": {
"Ref": "ReservedNet3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedNet3"
}
}
},
"ReservedNet2": {
"Description": "ReservedNet2",
"Value": {
"Ref": "ReservedNet2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedNet2"
}
}
},
"ReservedNet1": {
"Description": "ReservedNet1",
"Value": {
"Ref": "ReservedNet1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedNet1"
}
}
},
"PerimeterInternal1": {
"Description": "PerimeterInternal1",
"Value": {
"Ref": "PerimeterInternal1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-PerimeterInternal1"
}
}
},
"PerimeterInternal2": {
"Description": "PerimeterInternal2",
"Value": {
"Ref": "PerimeterInternal2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-PerimeterInternal2"
}
}
},
"PerimeterInternal3": {
"Description": "PerimeterInternal3",
"Value": {
"Ref": "PerimeterInternal3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-PerimeterInternal3"
}
}
},
"InternalSubnetAcl": {
"Description": "InternalSubnetAcl",
"Value": {
"Ref": "InternalSubnetAcl"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NACL-InternalSubnetAcl"
}
}
},
"RestrictedSubnetAcl": {
"Description": "RestrictedSubnetAcl",
"Value": {
"Ref": "RestrictedSubnetAcl"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NACL-RestrictedSubnetAcl"
}
}
},
"EIPNATGW3": {
"Description": "EIP for NATGW3",
"Value": {
"Ref": "EIPNATGW3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW3"
}
}
},
"NATGW3": {
"Description": "NATGW3",
"Value": {
"Ref": "NATGW3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW3"
}
}
},
"EIPNATGW2": {
"Description": "EIP for NATGW2",
"Value": {
"Ref": "EIPNATGW2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW2"
}
}
},
"NATGW2": {
"Description": "NATGW2",
"Value": {
"Ref": "NATGW2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW2"
}
}
},
"EIPNATGW1": {
"Description": "EIP for NATGW1",
"Value": {
"Ref": "EIPNATGW1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW1"
}
}
},
"NATGW1": {
"Description": "NATGW1",
"Value": {
"Ref": "NATGW1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW1"
}
}
}
}
}
}
actual = src.macro.handler(transform_call, "")
print(json.dumps(actual))
self.assertEquals(test_assert, actual)
def test_macro_all_objects_by_ref(self):
transform_call = {
"transformId": "801604450668::VPC",
"templateParameterValues": {
"VGW": "vgw-06bbcf429c1cb0eed",
"Environment": "Dev",
"VpcCidr": "10.0.0.0/20",
"DnsServer": "10.0.0.2",
"PublicCidr1": "10.0.0.0/24",
"PublicCidr2": "10.0.0.1/24",
"PublicCidr3": "10.0.0.2/24",
"PrivateCidr1": "10.0.0.3/24",
"PrivateCidr2": "10.0.0.4/24",
"PrivateCidr3": "10.0.0.5/24",
"ProtectedCidr1": "10.0.0.6/24",
"ProtectedCidr2": "10.0.0.7/24",
"ProtectedCidr3": "10.0.0.8/24",
},
"fragment": {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"VPC": {
"Type": "Versent::Network::VPC",
"Properties": {
"Subnets": {
"Public1": {
"CIDR": {
"Ref": "PublicCidr1"
},
"AZ": 0,
"NetACL": "PublicSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 0
},
"Public2": {
"CIDR": {
"Ref": "PublicCidr2"
},
"AZ": 1,
"NetACL": "PublicSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 1
},
"Public3": {
"CIDR": {
"Ref": "PublicCidr3"
},
"AZ": 2,
"NetACL": "PublicSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 2
},
"Private1": {
"CIDR": {
"Ref": "PrivateCidr1"
},
"AZ": 0,
"NetACL": "InternalSubnetAcl",
"RouteTable": "PrivateRT1",
"IPv6Iter": 0
},
"Private2": {
"CIDR": {
"Ref": "PrivateCidr2"
},
"AZ": 1,
"NetACL": "InternalSubnetAcl",
"RouteTable": "PrivateRT2",
"IPv6Iter": 1
},
"Private3": {
"CIDR": {
"Ref": "PrivateCidr3"
},
"AZ": 2,
"NetACL": "InternalSubnetAcl",
"RouteTable": "PrivateRT3",
"IPv6Iter": 2
},
"Protected1": {
"CIDR": {
"Ref": "ProtectedCidr1"
},
"AZ": 0,
"NetACL": "InternalSubnetAcl",
"RouteTable": "RestrictedRT",
"IPv6Iter": 0
},
"Protected2": {
"CIDR": {
"Ref": "ProtectedCidr2"
},
"AZ": 1,
"NetACL": "InternalSubnetAcl",
"RouteTable": "RestrictedRT",
"IPv6Iter": 1
},
"Protected3": {
"CIDR": {
"Ref": "ProtectedCidr3"
},
"AZ": 2,
"NetACL": "InternalSubnetAcl",
"RouteTable": "RestrictedRT",
"IPv6Iter": 2
}
},
"TransitGateways": {
"Test1": {
"Subnets": [
"Private1",
"Private2",
"Private3"
],
"TransitGatewayId": "tgw-01234567890123456",
"Tags": {
"Name": "PRIVATE-EGRESS-VPC-TGW1",
"Purpose": "Gateway Attach 1"
}
},
"Test2": {
"Subnets": [
"Private1",
"Private2",
"Private3"
],
"TransitGatewayId": "tgw-98765432109876543",
"Tags": {
"Name": "PRIVATE-EGRESS-VPC-TGW2",
"Purpose": "Gateway Attach 2"
}
}
},
"Tags": {
"Name": "PRIVATE-EGRESS-VPC",
"Template": "VPC for private endpoints egress only",
"info:environment": "Staging",
"info:owner": "Versent"
},
"NATGateways": {
"NATGW3": {
"Subnet": "Public1",
"Routetable": "PrivateRT1"
},
"NATGW2": {
"Subnet": "Public2",
"Routetable": "PrivateRT2"
},
"NATGW1": {
"Subnet": "Public3",
"Routetable": "PrivateRT3"
}
},
"NetworkACLs": {
"InternalSubnetAcl": {
"InternalSubnetAclEntryOutTCPUnreserved": "106,6,allow,true,172.16.0.0/16,1024,65535",
"InternalSubnetAclEntryOutUDPDNSIPv6": "113,17,allow,true,::/0,53,53",
"InternalSubnetAclEntryOutUDPUnreserved": "107,6,allow,true,172.16.0.0/16,1024,65535",
"InternalSubnetAclEntryOut": "100,-1,allow,true,172.16.0.0/16,1,65535",
"InternalSubnetAclEntryOutSSH": "150,6,allow,true,0.0.0.0/0,22,22",
"InternalSubnetAclEntryInUDPUnreservedIPv6": "105,17,allow,false,::/0,1024,65535",
"InternalSubnetAclEntryOutTCPDNSIPv6": "112,6,allow,true,::/0,53,53",
"InternalSubnetAclEntryOutTCPDNS": "110,6,allow,true,0.0.0.0/0,53,53",
"InternalSubnetAclEntryOutHTTPS": "103,6,allow,true,0.0.0.0/0,443,443",
"InternalSubnetAclEntryOutHTTP": "102,6,allow,true,0.0.0.0/0,80,80",
"InternalSubnetAclEntryOutHTTPIPv6": "104,6,allow,true,::/0,80,80",
"InternalSubnetAclEntryOutHTTPSIPv6": "105,6,allow,true,::/0,443,443",
"InternalSubnetAclEntryInTCPUnreservedIPv6": "104,6,allow,false,::/0,1024,65535",
"InternalSubnetAclEntryOutUDPDNS": "111,17,allow,true,0.0.0.0/0,53,53",
"InternalSubnetAclEntryIn": "100,-1,allow,false,172.16.0.0/16,1,65535",
"InternalSubnetAclEntryInTCPUnreserved": "102,6,allow,false,0.0.0.0/0,1024,65535",
"InternalSubnetAclEntryInUDPUnreserved": "103,17,allow,false,0.0.0.0/0,1024,65535"
},
"PublicSubnetAcl": {
"RestrictedSubnetAclEntryInUDPUnReserved": "91,17,allow,false,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryOutSSH": "103,6,allow,true,0.0.0.0/0,22,22",
"RestrictedSubnetAclEntryOutDNSTCPIPv6": "151,6,allow,true,::/0,53,53",
"RestrictedSubnetAclEntryOutHTTPSIPv6": "105,6,allow,true,::/0,443,443",
"RestrictedSubnetAclEntryInTCPUnReservedIPv6": "92,6,allow,false,::/0,1024,65535",
"RestrictedSubnetAclEntryNTP": "120,6,allow,true,0.0.0.0/0,123,123",
"RestrictedSubnetAclEntryOutPuppet": "94,6,allow,true,172.16.0.0/16,8140,8140",
"RestrictedSubnetAclEntryIn": "110,-1,allow,false,172.16.0.0/16,1,65535",
"RestrictedSubnetAclEntryOutHTTP": "101,6,allow,true,0.0.0.0/0,80,80",
"RestrictedSubnetAclEntryInHTTPSIPv6": "104,6,allow,false,::/0,443,443",
"RestrictedSubnetAclEntryInNetBios": "170,6,allow,false,172.16.0.0/16,389,389",
"RestrictedSubnetAclEntryOutDNSTCP": "150,6,allow,true,0.0.0.0/0,53,53",
"RestrictedSubnetAclEntryInUDPUnReservedIPv6": "93,17,allow,false,::/0,1024,65535",
"RestrictedSubnetAclEntryInHTTP": "101,6,allow,false,0.0.0.0/0,80,80",
"RestrictedSubnetAclEntryInHTTPIPv6": "103,6,allow,false,::/0,80,80",
"RestrictedSubnetAclEntryOutDNSUDP": "160,17,allow,true,0.0.0.0/0,53,53",
"RestrictedSubnetAclEntryInTCPUnReserved": "90,6,allow,false,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryOutTCPUnReserved": "90,6,allow,true,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryInDNSTCP": "150,6,allow,false,172.16.0.0/16,53,53",
"RestrictedSubnetAclEntryOutUDPUnReservedIPv6": "93,17,allow,true,::/0,1024,65535",
"RestrictedSubnetAclEntryOutNetBios1": "180,6,allow,true,172.16.0.0/16,137,139",
"RestrictedSubnetAclEntryOut": "110,-1,allow,true,172.16.0.0/16,1,65535",
"RestrictedSubnetAclEntryOutHTTPIPv6": "104,6,allow,true,::/0,80,80",
"RestrictedSubnetAclEntryOutHTTPS": "102,6,allow,true,0.0.0.0/0,443,443",
"RestrictedSubnetAclEntryOutNetBios": "170,6,allow,true,172.16.0.0/16,389,389",
"RestrictedSubnetAclEntryOutTCPUnReservedIPv6": "92,6,allow,true,::/0,1024,65535",
"RestrictedSubnetAclEntryOutUDPUnReserved": "91,17,allow,true,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryInNetBios1": "80,6,allow,false,172.16.0.0/16,137,139",
"RestrictedSubnetAclEntryOutSSHIPv6": "106,6,allow,true,::/0,22,22",
"RestrictedSubnetAclEntryInHTTPS": "102,6,allow,false,0.0.0.0/0,443,443",
"RestrictedSubnetAclEntryInDNSUDP": "160,17,allow,false,172.16.0.0/16,53,53",
"RestrictedSubnetAclEntryOutDNSUDPIPv6": "161,17,allow,true,::/0,53,53",
"RestrictedSubnetAclEntryInSquid2": "140,6,allow,false,172.16.0.0/16,3128,3128"
}
},
"SecurityGroups": {
"VPCEndpoint": {
"SecurityGroupIngress": [
[
"icmp",
-1,
-1,
"172.16.0.0/20",
"All ICMP Traffic"
],
[
"tcp",
0,
65535,
"172.16.0.0/20",
"All TCP Traffic"
],
[
"udp",
0,
65535,
"172.16.0.0/20",
"All UDP Traffic"
]
],
"Tags": {
"Name": "VPCEndpoint"
},
"GroupDescription": "VPC Endpoint Interface Firewall Rules",
"SecurityGroupEgress": [
[
"icmp",
-1,
-1,
"172.16.0.0/20",
"All ICMP Traffic"
],
[
"tcp",
0,
65535,
"172.16.0.0/20",
"All TCP Traffic"
],
[
"udp",
0,
65535,
"172.16.0.0/20",
"All UDP Traffic"
]
]
}
},
"Details": {
"VPCDesc": "PrivateVPC",
"Region": "ap-southeast-2",
"VPCName": {
"Fn::Sub": "VPC${Environment}"
},
"IPv6": "false"
},
"DHCP": {
"NTPServers": "169.254.169.123",
"NTBType": 2,
"Name": "DhcpOptions",
"DNSServers": "172.16.0.2"
},
"CIDR": "172.16.0.0/20",
"Endpoints": {
"kinesis-streams": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"cloudtrail": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"cloudformation": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"elasticloadbalancing": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ec2": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"logs": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"monitoring": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"s3": {
"RouteTableIds": [
"PublicRT",
"PrivateRT1",
"PrivateRT2",
"PrivateRT3",
"RestrictedRT"
],
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"Type": "Gateway"
},
"dynamodb": {
"RouteTableIds": [
"PublicRT",
"PrivateRT1",
"PrivateRT2",
"PrivateRT3",
"RestrictedRT"
],
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"Type": "Gateway"
},
"ec2messages": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"kms": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"config": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"events": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sagemaker.api": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ssm": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sns": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sagemaker.runtime": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"codebuild": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"servicecatalog": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"execute-api": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"secretsmanager": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ssmmessages": {
"SubnetIds": [
"Private1",
"Private2",
"Private3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
}
},
"RouteTables": {
"PrivateRT3": "",
"PublicRT": [
{
"RouteName": "PublicRoute",
"RouteCIDR": "0.0.0.0/0",
"RouteGW": "InternetGateway"
}
],
"PrivateRT2": "",
"PrivateRT1": "",
"RestrictedRT": ""
}
}
}
},
"Description": "Private VPC Template",
"Parameters": {
"VGW": {
"Default": "vgw-012345678",
"Type": "String",
"Description": "VPC Gateway"
},
"Environment": {
"Description": "Name of target environment",
"Type": "String",
"Default": "foo"
},
"VpcCidr": {
"Description": "CIDR range for the complete VPC",
"Type": "String",
"Default": "10.0.0.0/20",
},
"DnsServer": {
"Description": "DNS server",
"Type": "String",
"Default": "10.0.0.2"
},
"PublicCidr1": {
"Description": "CIDR range for Public1",
"Type": "String",
"Default": "10.0.0.0/24"
},
"PublicCidr2": {
"Description": "CIDR range for Public2",
"Type": "String",
"Default": "10.0.0.1/24"
},
"PublicCidr3": {
"Description": "CIDR range for Public3",
"Type": "String",
"Default": "10.0.0.2/24"
},
"PrivateCidr1": {
"Description": "CIDR range for Private1",
"Type": "String",
"Default": "10.0.0.3/24"
},
"PrivateCidr2": {
"Description": "CIDR range for Private2",
"Type": "String",
"Default": "10.0.0.4/24"
},
"PrivateCidr3": {
"Description": "CIDR range for Private3",
"Type": "String",
"Default": "10.0.0.5/24"
},
"ProtectedCidr1": {
"Description": "CIDR range for Protected1",
"Type": "String",
"Default": "10.0.0.6/24"
},
"ProtectedCidr2": {
"Description": "CIDR range for Protected2",
"Type": "String",
"Default": "10.0.0.7/24"
},
"ProtectedCidr3": {
"Description": "CIDR range for Protected3",
"Type": "String",
"Default": "10.0.0.8/24"
}
},
"Mappings": {}
},
"region": "us-east-1",
"params": {},
"requestId": "508122ef-6442-46eb-b2fc-5fab1f4f7064",
"accountId": "012345678901"
}
test_assert = {
"requestId": "508122ef-6442-46eb-b2fc-5fab1f4f7064",
"status": "success",
"fragment": {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"VPCDev": {
"Type": "AWS::EC2::VPC",
"Properties": {
"CidrBlock": "172.16.0.0/20",
"EnableDnsHostnames": True,
"EnableDnsSupport": True,
"InstanceTenancy": "default",
"Tags": [
{
"Key": "Name",
"Value": "VPCDev"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"IPv6Block": {
"Type": "AWS::EC2::VPCCidrBlock",
"Properties": {
"VpcId": {
"Ref": "VPCDev"
},
"AmazonProvidedIpv6CidrBlock": True
}
},
"EgressGateway": {
"Type": "AWS::EC2::EgressOnlyInternetGateway",
"Properties": {
"VpcId": {
"Ref": "VPCDev"
}
}
},
"DhcpOptions": {
"Type": "AWS::EC2::DHCPOptions",
"Properties": {
"DomainNameServers": [
"172.16.0.2"
],
"NtpServers": [
"169.254.169.123"
],
"NetbiosNodeType": 2,
"Tags": [
{
"Key": "Name",
"Value": "DhcpOptions"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"DhcpOptionsAssociation": {
"Type": "AWS::EC2::VPCDHCPOptionsAssociation",
"Properties": {
"DhcpOptionsId": {
"Ref": "DhcpOptions"
},
"VpcId": {
"Ref": "VPCDev"
}
}
},
"InternetGateway": {
"Type": "AWS::EC2::InternetGateway",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternetGateway"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"IGWVPCGatewayAttachment": {
"Type": "AWS::EC2::VPCGatewayAttachment",
"Properties": {
"InternetGatewayId": {
"Ref": "InternetGateway"
},
"VpcId": {
"Ref": "VPCDev"
}
}
},
"VPCGatewayAttachment": {
"Type": "AWS::EC2::VPCGatewayAttachment",
"Properties": {
"VpcId": {
"Ref": "VPCDev"
},
"VpnGatewayId": {
"Ref": "VGW"
}
}
},
"VPCFlowLogsRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"vpc-flow-logs.amazonaws.com"
]
},
"Action": [
"sts:AssumeRole"
]
}
]
},
"Path": "/",
"Policies": [
{
"PolicyName": "root",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
}
]
}
}
]
}
},
"VPCFlowLogs": {
"Type": "AWS::EC2::FlowLog",
"Properties": {
"DeliverLogsPermissionArn": {
"Fn::GetAtt": [
"VPCFlowLogsRole",
"Arn"
]
},
"LogGroupName": "FlowLogsGroup",
"ResourceId": {
"Ref": "VPCDev"
},
"ResourceType": "VPC",
"TrafficType": "ALL"
}
},
"Test1TransitGWAttach": {
"Type": "AWS::EC2::TransitGatewayAttachment",
"Properties": {
"TransitGatewayId": "tgw-01234567890123456",
"VpcId": {
"Ref": "VPCDev"
},
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"Tags": [
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC-TGW1"
},
{
"Key": "Purpose",
"Value": "Gateway Attach 1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"Test2TransitGWAttach": {
"Type": "AWS::EC2::TransitGatewayAttachment",
"Properties": {
"TransitGatewayId": "tgw-98765432109876543",
"VpcId": {
"Ref": "VPCDev"
},
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"Tags": [
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC-TGW2"
},
{
"Key": "Purpose",
"Value": "Gateway Attach 2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"PrivateRT3": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "PrivateRT3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
}
}
},
"PrivateRT3RoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "PrivateRT3"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"PublicRT": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "PublicRT"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
}
}
},
"PublicRTRoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "PublicRT"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"PublicRoute": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"GatewayId": {
"Ref": "InternetGateway"
},
"RouteTableId": {
"Ref": "PublicRT"
}
}
},
"PrivateRT2": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "PrivateRT2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
}
}
},
"PrivateRT2RoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "PrivateRT2"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"PrivateRT1": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "PrivateRT1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
}
}
},
"PrivateRT1RoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "PrivateRT1"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"RestrictedRT": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "RestrictedRT"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
}
}
},
"RestrictedRTRoutePropagation": {
"Type": "AWS::EC2::VPNGatewayRoutePropagation",
"Properties": {
"RouteTableIds": [
{
"Ref": "RestrictedRT"
}
],
"VpnGatewayId": {
"Ref": "VGW"
}
},
"DependsOn": [
"VPCGatewayAttachment"
]
},
"Public1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PublicCidr1"
},
"Tags": [
{
"Key": "Name",
"Value": "Public1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
0,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Public1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "Public1"
}
}
},
"Public1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"SubnetId": {
"Ref": "Public1"
}
}
},
"Public2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PublicCidr2"
},
"Tags": [
{
"Key": "Name",
"Value": "Public2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
1,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Public2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "Public2"
}
}
},
"Public2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"SubnetId": {
"Ref": "Public2"
}
}
},
"Public3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PublicCidr3"
},
"Tags": [
{
"Key": "Name",
"Value": "Public3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
2,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Public3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "Public3"
}
}
},
"Public3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"SubnetId": {
"Ref": "Public3"
}
}
},
"Private1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PrivateCidr1"
},
"Tags": [
{
"Key": "Name",
"Value": "Private1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
0,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Private1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PrivateRT1"
},
"SubnetId": {
"Ref": "Private1"
}
}
},
"Private1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Private1"
}
}
},
"Private2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PrivateCidr2"
},
"Tags": [
{
"Key": "Name",
"Value": "Private2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
1,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Private2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PrivateRT2"
},
"SubnetId": {
"Ref": "Private2"
}
}
},
"Private2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Private2"
}
}
},
"Private3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "PrivateCidr3"
},
"Tags": [
{
"Key": "Name",
"Value": "Private3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
2,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Private3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PrivateRT3"
},
"SubnetId": {
"Ref": "Private3"
}
}
},
"Private3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Private3"
}
}
},
"Protected1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "ProtectedCidr1"
},
"Tags": [
{
"Key": "Name",
"Value": "Protected1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
0,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Protected1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "RestrictedRT"
},
"SubnetId": {
"Ref": "Protected1"
}
}
},
"Protected1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Protected1"
}
}
},
"Protected2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "ProtectedCidr2"
},
"Tags": [
{
"Key": "Name",
"Value": "Protected2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
1,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Protected2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "RestrictedRT"
},
"SubnetId": {
"Ref": "Protected2"
}
}
},
"Protected2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Protected2"
}
}
},
"Protected3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": {
"Ref": "ProtectedCidr3"
},
"Tags": [
{
"Key": "Name",
"Value": "Protected3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
2,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"VPCDev",
"Ipv6CidrBlocks"
]
}
]
},
9,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Protected3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "RestrictedRT"
},
"SubnetId": {
"Ref": "Protected3"
}
}
},
"Protected3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Protected3"
}
}
},
"InternalSubnetAcl": {
"Type": "AWS::EC2::NetworkAcl",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalSubnetAcl"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
}
}
},
"InternalSubnetAclEntryOutTCPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 106
}
},
"InternalSubnetAclEntryOutUDPDNSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 113
}
},
"InternalSubnetAclEntryOutUDPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 107
}
},
"InternalSubnetAclEntryOut": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 100
}
},
"InternalSubnetAclEntryOutSSH": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"InternalSubnetAclEntryInUDPUnreservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"InternalSubnetAclEntryOutTCPDNSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 112
}
},
"InternalSubnetAclEntryOutTCPDNS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"InternalSubnetAclEntryOutHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"InternalSubnetAclEntryOutHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"InternalSubnetAclEntryOutHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"InternalSubnetAclEntryOutHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"InternalSubnetAclEntryInTCPUnreservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"InternalSubnetAclEntryOutUDPDNS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 111
}
},
"InternalSubnetAclEntryIn": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 100
}
},
"InternalSubnetAclEntryInTCPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"InternalSubnetAclEntryInUDPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"PublicSubnetAcl": {
"Type": "AWS::EC2::NetworkAcl",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "PublicSubnetAcl"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
],
"VpcId": {
"Ref": "VPCDev"
}
}
},
"RestrictedSubnetAclEntryInUDPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 91
}
},
"RestrictedSubnetAclEntryOutSSH": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"RestrictedSubnetAclEntryOutDNSTCPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 151
}
},
"RestrictedSubnetAclEntryOutHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"RestrictedSubnetAclEntryInTCPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 92
}
},
"RestrictedSubnetAclEntryNTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 123,
"To": 123
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 120
}
},
"RestrictedSubnetAclEntryOutPuppet": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 8140,
"To": 8140
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 94
}
},
"RestrictedSubnetAclEntryIn": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"RestrictedSubnetAclEntryOutHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 101
}
},
"RestrictedSubnetAclEntryInHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"RestrictedSubnetAclEntryInNetBios": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 389,
"To": 389
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 170
}
},
"RestrictedSubnetAclEntryOutDNSTCP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"RestrictedSubnetAclEntryInUDPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 93
}
},
"RestrictedSubnetAclEntryInHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 101
}
},
"RestrictedSubnetAclEntryInHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"RestrictedSubnetAclEntryOutDNSUDP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 160
}
},
"RestrictedSubnetAclEntryInTCPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 90
}
},
"RestrictedSubnetAclEntryOutTCPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 90
}
},
"RestrictedSubnetAclEntryInDNSTCP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"RestrictedSubnetAclEntryOutUDPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 93
}
},
"RestrictedSubnetAclEntryOutNetBios1": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 137,
"To": 139
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 180
}
},
"RestrictedSubnetAclEntryOut": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"RestrictedSubnetAclEntryOutHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"RestrictedSubnetAclEntryOutHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"RestrictedSubnetAclEntryOutNetBios": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 389,
"To": 389
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 170
}
},
"RestrictedSubnetAclEntryOutTCPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 92
}
},
"RestrictedSubnetAclEntryOutUDPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 91
}
},
"RestrictedSubnetAclEntryInNetBios1": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 137,
"To": 139
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 80
}
},
"RestrictedSubnetAclEntryOutSSHIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 106
}
},
"RestrictedSubnetAclEntryInHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"RestrictedSubnetAclEntryInDNSUDP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 160
}
},
"RestrictedSubnetAclEntryOutDNSUDPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 161
}
},
"RestrictedSubnetAclEntryInSquid2": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "PublicSubnetAcl"
},
"PortRange": {
"From": 3128,
"To": 3128
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 140
}
},
"EIPNATGW3": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW3": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW3",
"AllocationId"
]
},
"SubnetId": {
"Ref": "Public1"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"PrivateRT1NATGW3": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW3"
},
"RouteTableId": {
"Ref": "PrivateRT1"
}
}
},
"PrivateRT1NATGW3IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "PrivateRT1"
}
}
},
"EIPNATGW2": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW2": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW2",
"AllocationId"
]
},
"SubnetId": {
"Ref": "Public2"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"PrivateRT2NATGW2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW2"
},
"RouteTableId": {
"Ref": "PrivateRT2"
}
}
},
"PrivateRT2NATGW2IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "PrivateRT2"
}
}
},
"EIPNATGW1": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW1": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW1",
"AllocationId"
]
},
"SubnetId": {
"Ref": "Public3"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"PrivateRT3NATGW1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW1"
},
"RouteTableId": {
"Ref": "PrivateRT3"
}
}
},
"PrivateRT3NATGW1IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "PrivateRT3"
}
}
},
"VPCEndpoint": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"GroupName": "VPCEndpoint",
"GroupDescription": "VPC Endpoint Interface Firewall Rules",
"VpcId": {
"Ref": "VPCDev"
},
"SecurityGroupIngress": [
{
"IpProtocol": "icmp",
"FromPort": -1,
"ToPort": -1,
"CidrIp": "172.16.0.0/20",
"Description": "All ICMP Traffic"
},
{
"IpProtocol": "tcp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All TCP Traffic"
},
{
"IpProtocol": "udp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All UDP Traffic"
}
],
"SecurityGroupEgress": [
{
"IpProtocol": "icmp",
"FromPort": -1,
"ToPort": -1,
"CidrIp": "172.16.0.0/20",
"Description": "All ICMP Traffic"
},
{
"IpProtocol": "tcp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All TCP Traffic"
},
{
"IpProtocol": "udp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All UDP Traffic"
}
],
"Tags": [
{
"Key": "Name",
"Value": "VPCEndpoint"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
},
{
"Key": "info:environment",
"Value": "Staging"
},
{
"Key": "info:owner",
"Value": "Versent"
}
]
}
},
"kinesisstreamsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".kinesis-streams"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"cloudtrailEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".cloudtrail"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"cloudformationEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".cloudformation"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"elasticloadbalancingEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".elasticloadbalancing"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ec2EndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ec2"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"logsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".logs"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"monitoringEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".monitoring"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"s3EndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".s3"
]
]
},
"VpcEndpointType": "Gateway",
"VpcId": {
"Ref": "VPCDev"
},
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"RouteTableIds": [
{
"Ref": "PublicRT"
},
{
"Ref": "PrivateRT1"
},
{
"Ref": "PrivateRT2"
},
{
"Ref": "PrivateRT3"
},
{
"Ref": "RestrictedRT"
}
]
}
},
"dynamodbEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".dynamodb"
]
]
},
"VpcEndpointType": "Gateway",
"VpcId": {
"Ref": "VPCDev"
},
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"RouteTableIds": [
{
"Ref": "PublicRT"
},
{
"Ref": "PrivateRT1"
},
{
"Ref": "PrivateRT2"
},
{
"Ref": "PrivateRT3"
},
{
"Ref": "RestrictedRT"
}
]
}
},
"ec2messagesEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ec2messages"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"kmsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".kms"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"configEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".config"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"eventsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".events"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"sagemakerapiEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sagemaker.api"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ssmEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ssm"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"snsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sns"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"sagemakerruntimeEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sagemaker.runtime"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"codebuildEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".codebuild"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"servicecatalogEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".servicecatalog"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"executeapiEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".execute-api"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"secretsmanagerEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".secretsmanager"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ssmmessagesEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ssmmessages"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "VPCDev"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "Private1"
},
{
"Ref": "Private2"
},
{
"Ref": "Private3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
}
},
"Description": "Private VPC Template",
"Parameters": {
"VGW": {
"Default": "vgw-012345678",
"Type": "String",
"Description": "VPC Gateway"
},
"Environment": {
"Description": "Name of target environment",
"Type": "String",
"Default": "foo"
},
"VpcCidr": {
"Description": "CIDR range for the complete VPC",
"Type": "String",
"Default": "10.0.0.0/20"
},
"DnsServer": {
"Description": "DNS server",
"Type": "String",
"Default": "10.0.0.2"
},
"PublicCidr1": {
"Description": "CIDR range for Public1",
"Type": "String",
"Default": "10.0.0.0/24"
},
"PublicCidr2": {
"Description": "CIDR range for Public2",
"Type": "String",
"Default": "10.0.0.1/24"
},
"PublicCidr3": {
"Description": "CIDR range for Public3",
"Type": "String",
"Default": "10.0.0.2/24"
},
"PrivateCidr1": {
"Description": "CIDR range for Private1",
"Type": "String",
"Default": "10.0.0.3/24"
},
"PrivateCidr2": {
"Description": "CIDR range for Private2",
"Type": "String",
"Default": "10.0.0.4/24"
},
"PrivateCidr3": {
"Description": "CIDR range for Private3",
"Type": "String",
"Default": "10.0.0.5/24"
},
"ProtectedCidr1": {
"Description": "CIDR range for Protected1",
"Type": "String",
"Default": "10.0.0.6/24"
},
"ProtectedCidr2": {
"Description": "CIDR range for Protected2",
"Type": "String",
"Default": "10.0.0.7/24"
},
"ProtectedCidr3": {
"Description": "CIDR range for Protected3",
"Type": "String",
"Default": "10.0.0.8/24"
}
},
"Mappings": {},
"Outputs": {
"VPCDev": {
"Description": "VPCDev",
"Value": {
"Fn::Sub": "VPCDev"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-VPCid"
}
}
},
"PrivateRT3": {
"Description": "PrivateRT3",
"Value": {
"Ref": "PrivateRT3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-PrivateRT3"
}
}
},
"PublicRT": {
"Description": "PublicRT",
"Value": {
"Ref": "PublicRT"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-PublicRT"
}
}
},
"PrivateRT2": {
"Description": "PrivateRT2",
"Value": {
"Ref": "PrivateRT2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-PrivateRT2"
}
}
},
"PrivateRT1": {
"Description": "PrivateRT1",
"Value": {
"Ref": "PrivateRT1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-PrivateRT1"
}
}
},
"RestrictedRT": {
"Description": "RestrictedRT",
"Value": {
"Ref": "RestrictedRT"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-RestrictedRT"
}
}
},
"Public1": {
"Description": "Public1",
"Value": {
"Ref": "Public1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Public1"
}
}
},
"Public2": {
"Description": "Public2",
"Value": {
"Ref": "Public2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Public2"
}
}
},
"Public3": {
"Description": "Public3",
"Value": {
"Ref": "Public3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Public3"
}
}
},
"Private1": {
"Description": "Private1",
"Value": {
"Ref": "Private1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Private1"
}
}
},
"Private2": {
"Description": "Private2",
"Value": {
"Ref": "Private2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Private2"
}
}
},
"Private3": {
"Description": "Private3",
"Value": {
"Ref": "Private3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Private3"
}
}
},
"Protected1": {
"Description": "Protected1",
"Value": {
"Ref": "Protected1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Protected1"
}
}
},
"Protected2": {
"Description": "Protected2",
"Value": {
"Ref": "Protected2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Protected2"
}
}
},
"Protected3": {
"Description": "Protected3",
"Value": {
"Ref": "Protected3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Protected3"
}
}
},
"InternalSubnetAcl": {
"Description": "InternalSubnetAcl",
"Value": {
"Ref": "InternalSubnetAcl"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NACL-InternalSubnetAcl"
}
}
},
"PublicSubnetAcl": {
"Description": "PublicSubnetAcl",
"Value": {
"Ref": "PublicSubnetAcl"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NACL-PublicSubnetAcl"
}
}
},
"EIPNATGW3": {
"Description": "EIP for NATGW3",
"Value": {
"Ref": "EIPNATGW3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW3"
}
}
},
"NATGW3": {
"Description": "NATGW3",
"Value": {
"Ref": "NATGW3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW3"
}
}
},
"EIPNATGW2": {
"Description": "EIP for NATGW2",
"Value": {
"Ref": "EIPNATGW2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW2"
}
}
},
"NATGW2": {
"Description": "NATGW2",
"Value": {
"Ref": "NATGW2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW2"
}
}
},
"EIPNATGW1": {
"Description": "EIP for NATGW1",
"Value": {
"Ref": "EIPNATGW1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW1"
}
}
},
"NATGW1": {
"Description": "NATGW1",
"Value": {
"Ref": "NATGW1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW1"
}
}
}
}
}
}
actual = src.macro.handler(transform_call, "")
self.assertEquals(test_assert, actual)
def test_macro_all_objects_no_vpgw(self):
transform_call = {
"transformId": "801604450668::VPC",
"templateParameterValues": {},
"fragment": {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"VPC": {
"Type": "Versent::Network::VPC",
"Properties": {
"Subnets": {
"ReservedMgmt1": {
"CIDR": "172.16.0.0/26",
"AZ": 0,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT1",
"IPv6Iter": 0
},
"ReservedMgmt2": {
"CIDR": "172.16.1.0/26",
"AZ": 1,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT2",
"IPv6Iter": 1
},
"ReservedMgmt3": {
"CIDR": "172.16.2.0/26",
"AZ": 2,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT3",
"IPv6Iter": 2
},
"Internal1": {
"CIDR": "172.16.3.0/24",
"AZ": 0,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT1",
"IPv6Iter": 6
},
"Internal2": {
"CIDR": "172.16.4.0/24",
"AZ": 1,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT2",
"IPv6Iter": 7
},
"Internal3": {
"CIDR": "172.16.5.0/24",
"AZ": 2,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT3",
"IPv6Iter": 8
},
"ReservedNet3": {
"CIDR": "172.16.2.192/26",
"AZ": 2,
"NetACL": "RestrictedSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 9
},
"ReservedNet2": {
"CIDR": "172.16.1.192/26",
"AZ": 1,
"NetACL": "RestrictedSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 10
},
"ReservedNet1": {
"CIDR": "172.16.0.192/26",
"AZ": 0,
"NetACL": "RestrictedSubnetAcl",
"RouteTable": "PublicRT",
"IPv6Iter": 11
},
"PerimeterInternal1": {
"CIDR": "172.16.6.0/24",
"AZ": 0,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT1",
"IPv6Iter": 3
},
"PerimeterInternal2": {
"CIDR": "172.16.7.0/24",
"AZ": 1,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT2",
"IPv6Iter": 4
},
"PerimeterInternal3": {
"CIDR": "172.16.8.0/24",
"AZ": 2,
"NetACL": "InternalSubnetAcl",
"RouteTable": "InternalRT3",
"IPv6Iter": 5
}
},
"TransitGateways": {
"Test1": {
"Subnets": [
"Internal1",
"Internal2",
"Internal3"
],
"TransitGatewayId": "tgw-01234567890123456",
"Tags": {
"Name": "PRIVATE-EGRESS-VPC-TGW1",
"Purpose": "Gateway Attach 1"
}
},
"Test2": {
"Subnets": [
"Internal1",
"Internal2",
"Internal3"
],
"TransitGatewayId": "tgw-98765432109876543",
"Tags": {
"Name": "PRIVATE-EGRESS-VPC-TGW2",
"Purpose": "Gateway Attach 2"
},
"RouteTables": {
"InternalRT1": [
{
"RouteName": "Internal1",
"RouteCIDR": "10.0.0.0/8"
},
{
"RouteName": "Internal2",
"RouteCIDR": "192.168.0.0/16"
}
],
"InternalRT2": [
{
"RouteName": "Internal1",
"RouteCIDR": "10.0.0.0/8"
},
{
"RouteName": "Internal2",
"RouteCIDR": "192.168.0.0/16"
}
],
"InternalRT3": [
{
"RouteName": "Internal1",
"RouteCIDR": "10.0.0.0/8"
},
{
"RouteName": "Internal2",
"RouteCIDR": "192.168.0.0/16"
}
]
}
}
},
"Tags": {
"Name": "PRIVATE-EGRESS-VPC",
"Template": "VPC for private endpoints egress only"
},
"NATGateways": {
"NATGW3": {
"Subnet": "ReservedNet3",
"Routetable": "InternalRT3"
},
"NATGW2": {
"Subnet": "ReservedNet2",
"Routetable": "InternalRT2"
},
"NATGW1": {
"Subnet": "ReservedNet1",
"Routetable": "InternalRT1"
}
},
"NetworkACLs": {
"InternalSubnetAcl": {
"InternalSubnetAclEntryOutTCPUnreserved": "106,6,allow,true,172.16.0.0/16,1024,65535",
"InternalSubnetAclEntryOutUDPDNSIPv6": "113,17,allow,true,::/0,53,53",
"InternalSubnetAclEntryOutUDPUnreserved": "107,6,allow,true,172.16.0.0/16,1024,65535",
"InternalSubnetAclEntryOut": "100,-1,allow,true,172.16.0.0/16,1,65535",
"InternalSubnetAclEntryOutSSH": "150,6,allow,true,0.0.0.0/0,22,22",
"InternalSubnetAclEntryInUDPUnreservedIPv6": "105,17,allow,false,::/0,1024,65535",
"InternalSubnetAclEntryOutTCPDNSIPv6": "112,6,allow,true,::/0,53,53",
"InternalSubnetAclEntryOutTCPDNS": "110,6,allow,true,0.0.0.0/0,53,53",
"InternalSubnetAclEntryOutHTTPS": "103,6,allow,true,0.0.0.0/0,443,443",
"InternalSubnetAclEntryOutHTTP": "102,6,allow,true,0.0.0.0/0,80,80",
"InternalSubnetAclEntryOutHTTPIPv6": "104,6,allow,true,::/0,80,80",
"InternalSubnetAclEntryOutHTTPSIPv6": "105,6,allow,true,::/0,443,443",
"InternalSubnetAclEntryInTCPUnreservedIPv6": "104,6,allow,false,::/0,1024,65535",
"InternalSubnetAclEntryOutUDPDNS": "111,17,allow,true,0.0.0.0/0,53,53",
"InternalSubnetAclEntryIn": "100,-1,allow,false,172.16.0.0/16,1,65535",
"InternalSubnetAclEntryInTCPUnreserved": "102,6,allow,false,0.0.0.0/0,1024,65535",
"InternalSubnetAclEntryInUDPUnreserved": "103,17,allow,false,0.0.0.0/0,1024,65535"
},
"RestrictedSubnetAcl": {
"RestrictedSubnetAclEntryInUDPUnReserved": "91,17,allow,false,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryOutSSH": "103,6,allow,true,0.0.0.0/0,22,22",
"RestrictedSubnetAclEntryOutDNSTCPIPv6": "151,6,allow,true,::/0,53,53",
"RestrictedSubnetAclEntryOutHTTPSIPv6": "105,6,allow,true,::/0,443,443",
"RestrictedSubnetAclEntryInTCPUnReservedIPv6": "92,6,allow,false,::/0,1024,65535",
"RestrictedSubnetAclEntryNTP": "120,6,allow,true,0.0.0.0/0,123,123",
"RestrictedSubnetAclEntryOutPuppet": "94,6,allow,true,172.16.0.0/16,8140,8140",
"RestrictedSubnetAclEntryIn": "110,-1,allow,false,172.16.0.0/16,1,65535",
"RestrictedSubnetAclEntryOutHTTP": "101,6,allow,true,0.0.0.0/0,80,80",
"RestrictedSubnetAclEntryInHTTPSIPv6": "104,6,allow,false,::/0,443,443",
"RestrictedSubnetAclEntryInNetBios": "170,6,allow,false,172.16.0.0/16,389,389",
"RestrictedSubnetAclEntryOutDNSTCP": "150,6,allow,true,0.0.0.0/0,53,53",
"RestrictedSubnetAclEntryInUDPUnReservedIPv6": "93,17,allow,false,::/0,1024,65535",
"RestrictedSubnetAclEntryInHTTP": "101,6,allow,false,0.0.0.0/0,80,80",
"RestrictedSubnetAclEntryInHTTPIPv6": "103,6,allow,false,::/0,80,80",
"RestrictedSubnetAclEntryOutDNSUDP": "160,17,allow,true,0.0.0.0/0,53,53",
"RestrictedSubnetAclEntryInTCPUnReserved": "90,6,allow,false,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryOutTCPUnReserved": "90,6,allow,true,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryInDNSTCP": "150,6,allow,false,172.16.0.0/16,53,53",
"RestrictedSubnetAclEntryOutUDPUnReservedIPv6": "93,17,allow,true,::/0,1024,65535",
"RestrictedSubnetAclEntryOutNetBios1": "180,6,allow,true,172.16.0.0/16,137,139",
"RestrictedSubnetAclEntryOut": "110,-1,allow,true,172.16.0.0/16,1,65535",
"RestrictedSubnetAclEntryOutHTTPIPv6": "104,6,allow,true,::/0,80,80",
"RestrictedSubnetAclEntryOutHTTPS": "102,6,allow,true,0.0.0.0/0,443,443",
"RestrictedSubnetAclEntryOutNetBios": "170,6,allow,true,172.16.0.0/16,389,389",
"RestrictedSubnetAclEntryOutTCPUnReservedIPv6": "92,6,allow,true,::/0,1024,65535",
"RestrictedSubnetAclEntryOutUDPUnReserved": "91,17,allow,true,0.0.0.0/0,1024,65535",
"RestrictedSubnetAclEntryInNetBios1": "80,6,allow,false,172.16.0.0/16,137,139",
"RestrictedSubnetAclEntryOutSSHIPv6": "106,6,allow,true,::/0,22,22",
"RestrictedSubnetAclEntryInHTTPS": "102,6,allow,false,0.0.0.0/0,443,443",
"RestrictedSubnetAclEntryInDNSUDP": "160,17,allow,false,172.16.0.0/16,53,53",
"RestrictedSubnetAclEntryOutDNSUDPIPv6": "161,17,allow,true,::/0,53,53",
"RestrictedSubnetAclEntryInSquid2": "140,6,allow,false,172.16.0.0/16,3128,3128"
}
},
"SecurityGroups": {
"VPCEndpoint": {
"SecurityGroupIngress": [
[
"icmp",
-1,
-1,
"172.16.0.0/20",
"All ICMP Traffic"
],
[
"tcp",
0,
65535,
"172.16.0.0/20",
"All TCP Traffic"
],
[
"udp",
0,
65535,
"172.16.0.0/20",
"All UDP Traffic"
]
],
"Tags": {
"Name": "VPCEndpoint"
},
"GroupDescription": "VPC Endpoint Interface Firewall Rules",
"SecurityGroupEgress": [
[
"icmp",
-1,
-1,
"172.16.0.0/20",
"All ICMP Traffic"
],
[
"tcp",
0,
65535,
"172.16.0.0/20",
"All TCP Traffic"
],
[
"udp",
0,
65535,
"172.16.0.0/20",
"All UDP Traffic"
]
]
}
},
"Details": {
"VPCDesc": "Private Egress VPC",
"Region": "ap-southeast-2",
"VPCName": "PRIVATEEGRESSVPC",
"IPv6": "true"
},
"DHCP": {
"NTPServers": "169.254.169.123",
"NTBType": 2,
"Name": "DhcpOptions",
"DNSServers": "172.16.0.2"
},
"CIDR": "172.16.0.0/20",
"Endpoints": {
"kinesis-streams": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"cloudtrail": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"cloudformation": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"elasticloadbalancing": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ec2": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"logs": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"monitoring": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"s3": {
"RouteTableIds": [
"PublicRT",
"InternalRT1",
"InternalRT2",
"InternalRT3"
],
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"Type": "Gateway"
},
"dynamodb": {
"RouteTableIds": [
"PublicRT",
"InternalRT1",
"InternalRT2",
"InternalRT3"
],
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"Type": "Gateway"
},
"ec2messages": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"kms": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"config": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"events": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sagemaker.api": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ssm": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sns": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"sagemaker.runtime": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"codebuild": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"servicecatalog": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"execute-api": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"secretsmanager": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
},
"ssmmessages": {
"SubnetIds": [
"ReservedMgmt1",
"ReservedMgmt2",
"ReservedMgmt3"
],
"Type": "Interface",
"SecurityGroupIds": [
"VPCEndpoint"
]
}
},
"RouteTables": {
"InternalRT3": "",
"PublicRT": [
{
"RouteName": "PublicRoute",
"RouteCIDR": "0.0.0.0/0",
"RouteGW": "InternetGateway"
},
{
"RouteName": "PublicRouteIPv6",
"RouteCIDR": "::/0",
"RouteGW": "InternetGateway"
}
],
"InternalRT2": "",
"InternalRT1": ""
}
}
}
},
"Description": "Private VPC Template",
"Parameters": {
"VGW": {
"Default": "vgw-012345678",
"Type": "String",
"Description": "VPC Gateway"
}
},
"Mappings": {}
},
"region": "us-east-1",
"params": {},
"requestId": "508122ef-6442-46eb-b2fc-5fab1f4f7064",
"accountId": "012345678901"
}
test_assert = {
"requestId": "508122ef-6442-46eb-b2fc-5fab1f4f7064",
"status": "success",
"fragment": {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"PRIVATEEGRESSVPC": {
"Type": "AWS::EC2::VPC",
"Properties": {
"CidrBlock": "172.16.0.0/20",
"EnableDnsHostnames": True,
"EnableDnsSupport": True,
"InstanceTenancy": "default",
"Tags": [
{
"Key": "Name",
"Value": "PRIVATEEGRESSVPC"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"IPv6Block": {
"Type": "AWS::EC2::VPCCidrBlock",
"Properties": {
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AmazonProvidedIpv6CidrBlock": True
}
},
"EgressGateway": {
"Type": "AWS::EC2::EgressOnlyInternetGateway",
"Properties": {
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"DhcpOptions": {
"Type": "AWS::EC2::DHCPOptions",
"Properties": {
"DomainNameServers": [
"172.16.0.2"
],
"NtpServers": [
"169.254.169.123"
],
"NetbiosNodeType": 2,
"Tags": [
{
"Key": "Name",
"Value": "DhcpOptions"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"DhcpOptionsAssociation": {
"Type": "AWS::EC2::VPCDHCPOptionsAssociation",
"Properties": {
"DhcpOptionsId": {
"Ref": "DhcpOptions"
},
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"InternetGateway": {
"Type": "AWS::EC2::InternetGateway",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternetGateway"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"IGWVPCGatewayAttachment": {
"Type": "AWS::EC2::VPCGatewayAttachment",
"Properties": {
"InternetGatewayId": {
"Ref": "InternetGateway"
},
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"VPCFlowLogsRole": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"vpc-flow-logs.amazonaws.com"
]
},
"Action": [
"sts:AssumeRole"
]
}
]
},
"Path": "/",
"Policies": [
{
"PolicyName": "root",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
}
]
}
}
]
}
},
"VPCFlowLogs": {
"Type": "AWS::EC2::FlowLog",
"Properties": {
"DeliverLogsPermissionArn": {
"Fn::GetAtt": [
"VPCFlowLogsRole",
"Arn"
]
},
"LogGroupName": "FlowLogsGroup",
"ResourceId": {
"Ref": "PRIVATEEGRESSVPC"
},
"ResourceType": "VPC",
"TrafficType": "ALL"
}
},
"Test1TransitGWAttach": {
"Type": "AWS::EC2::TransitGatewayAttachment",
"Properties": {
"TransitGatewayId": "tgw-01234567890123456",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"SubnetIds": [
{
"Ref": "Internal1"
},
{
"Ref": "Internal2"
},
{
"Ref": "Internal3"
}
],
"Tags": [
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC-TGW1"
},
{
"Key": "Purpose",
"Value": "Gateway Attach 1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"Test2TransitGWAttach": {
"Type": "AWS::EC2::TransitGatewayAttachment",
"Properties": {
"TransitGatewayId": "tgw-98765432109876543",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"SubnetIds": [
{
"Ref": "Internal1"
},
{
"Ref": "Internal2"
},
{
"Ref": "Internal3"
}
],
"Tags": [
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC-TGW2"
},
{
"Key": "Purpose",
"Value": "Gateway Attach 2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"Test2InternalRT1Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT1"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT1Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT1"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT2Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT2"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT2Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT2"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT3Internal1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "10.0.0.0/8",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT3"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"Test2InternalRT3Internal2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "192.168.0.0/16",
"TransitGatewayId": "tgw-98765432109876543",
"RouteTableId": {
"Ref": "InternalRT3"
}
},
"DependsOn": [
"Test2TransitGWAttach"
]
},
"InternalRT3": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalRT3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"PublicRT": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "PublicRT"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"PublicRoute": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"GatewayId": {
"Ref": "InternetGateway"
},
"RouteTableId": {
"Ref": "PublicRT"
}
}
},
"PublicRouteIPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"GatewayId": {
"Ref": "InternetGateway"
},
"RouteTableId": {
"Ref": "PublicRT"
}
}
},
"InternalRT2": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalRT2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"InternalRT1": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalRT1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"ReservedMgmt1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.0.0/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedMgmt1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
0,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedMgmt1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT1"
},
"SubnetId": {
"Ref": "ReservedMgmt1"
}
}
},
"ReservedMgmt1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedMgmt1"
}
}
},
"ReservedMgmt2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.1.0/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedMgmt2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
1,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedMgmt2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT2"
},
"SubnetId": {
"Ref": "ReservedMgmt2"
}
}
},
"ReservedMgmt2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedMgmt2"
}
}
},
"ReservedMgmt3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.2.0/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedMgmt3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
2,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedMgmt3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT3"
},
"SubnetId": {
"Ref": "ReservedMgmt3"
}
}
},
"ReservedMgmt3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedMgmt3"
}
}
},
"Internal1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.3.0/24",
"Tags": [
{
"Key": "Name",
"Value": "Internal1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
6,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Internal1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT1"
},
"SubnetId": {
"Ref": "Internal1"
}
}
},
"Internal1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Internal1"
}
}
},
"Internal2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.4.0/24",
"Tags": [
{
"Key": "Name",
"Value": "Internal2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
7,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Internal2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT2"
},
"SubnetId": {
"Ref": "Internal2"
}
}
},
"Internal2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Internal2"
}
}
},
"Internal3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.5.0/24",
"Tags": [
{
"Key": "Name",
"Value": "Internal3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
8,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"Internal3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT3"
},
"SubnetId": {
"Ref": "Internal3"
}
}
},
"Internal3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "Internal3"
}
}
},
"ReservedNet3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.2.192/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedNet3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
9,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedNet3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "ReservedNet3"
}
}
},
"ReservedNet3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedNet3"
}
}
},
"ReservedNet2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.1.192/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedNet2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
10,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedNet2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "ReservedNet2"
}
}
},
"ReservedNet2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedNet2"
}
}
},
"ReservedNet1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.0.192/26",
"Tags": [
{
"Key": "Name",
"Value": "ReservedNet1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
11,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"ReservedNet1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "PublicRT"
},
"SubnetId": {
"Ref": "ReservedNet1"
}
}
},
"ReservedNet1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"SubnetId": {
"Ref": "ReservedNet1"
}
}
},
"PerimeterInternal1": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
0,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.6.0/24",
"Tags": [
{
"Key": "Name",
"Value": "PerimeterInternal1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
3,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"PerimeterInternal1SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT1"
},
"SubnetId": {
"Ref": "PerimeterInternal1"
}
}
},
"PerimeterInternal1SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "PerimeterInternal1"
}
}
},
"PerimeterInternal2": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
1,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.7.0/24",
"Tags": [
{
"Key": "Name",
"Value": "PerimeterInternal2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
4,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"PerimeterInternal2SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT2"
},
"SubnetId": {
"Ref": "PerimeterInternal2"
}
}
},
"PerimeterInternal2SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "PerimeterInternal2"
}
}
},
"PerimeterInternal3": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"AvailabilityZone": {
"Fn::Select": [
2,
{
"Fn::GetAZs": ""
}
]
},
"CidrBlock": "172.16.8.0/24",
"Tags": [
{
"Key": "Name",
"Value": "PerimeterInternal3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"AssignIpv6AddressOnCreation": True,
"Ipv6CidrBlock": {
"Fn::Select": [
5,
{
"Fn::Cidr": [
{
"Fn::Select": [
0,
{
"Fn::GetAtt": [
"PRIVATEEGRESSVPC",
"Ipv6CidrBlocks"
]
}
]
},
12,
64
]
}
]
}
},
"DependsOn": "IPv6Block"
},
"PerimeterInternal3SubnetRoutetableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"RouteTableId": {
"Ref": "InternalRT3"
},
"SubnetId": {
"Ref": "PerimeterInternal3"
}
}
},
"PerimeterInternal3SubnetNetworkACLAssociation": {
"Type": "AWS::EC2::SubnetNetworkAclAssociation",
"Properties": {
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"SubnetId": {
"Ref": "PerimeterInternal3"
}
}
},
"InternalSubnetAcl": {
"Type": "AWS::EC2::NetworkAcl",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "InternalSubnetAcl"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"InternalSubnetAclEntryOutTCPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 106
}
},
"InternalSubnetAclEntryOutUDPDNSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 113
}
},
"InternalSubnetAclEntryOutUDPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 107
}
},
"InternalSubnetAclEntryOut": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 100
}
},
"InternalSubnetAclEntryOutSSH": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"InternalSubnetAclEntryInUDPUnreservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"InternalSubnetAclEntryOutTCPDNSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 112
}
},
"InternalSubnetAclEntryOutTCPDNS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"InternalSubnetAclEntryOutHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"InternalSubnetAclEntryOutHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"InternalSubnetAclEntryOutHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"InternalSubnetAclEntryOutHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"InternalSubnetAclEntryInTCPUnreservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"InternalSubnetAclEntryOutUDPDNS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 111
}
},
"InternalSubnetAclEntryIn": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 100
}
},
"InternalSubnetAclEntryInTCPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"InternalSubnetAclEntryInUDPUnreserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "InternalSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"RestrictedSubnetAcl": {
"Type": "AWS::EC2::NetworkAcl",
"Properties": {
"Tags": [
{
"Key": "Name",
"Value": "RestrictedSubnetAcl"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
],
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
}
}
},
"RestrictedSubnetAclEntryInUDPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 91
}
},
"RestrictedSubnetAclEntryOutSSH": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"RestrictedSubnetAclEntryOutDNSTCPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 151
}
},
"RestrictedSubnetAclEntryOutHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 105
}
},
"RestrictedSubnetAclEntryInTCPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 92
}
},
"RestrictedSubnetAclEntryNTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 123,
"To": 123
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 120
}
},
"RestrictedSubnetAclEntryOutPuppet": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 8140,
"To": 8140
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 94
}
},
"RestrictedSubnetAclEntryIn": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"RestrictedSubnetAclEntryOutHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 101
}
},
"RestrictedSubnetAclEntryInHTTPSIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"RestrictedSubnetAclEntryInNetBios": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 389,
"To": 389
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 170
}
},
"RestrictedSubnetAclEntryOutDNSTCP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"RestrictedSubnetAclEntryInUDPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 93
}
},
"RestrictedSubnetAclEntryInHTTP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 101
}
},
"RestrictedSubnetAclEntryInHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 103
}
},
"RestrictedSubnetAclEntryOutDNSUDP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 160
}
},
"RestrictedSubnetAclEntryInTCPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 90
}
},
"RestrictedSubnetAclEntryOutTCPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 90
}
},
"RestrictedSubnetAclEntryInDNSTCP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 150
}
},
"RestrictedSubnetAclEntryOutUDPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 93
}
},
"RestrictedSubnetAclEntryOutNetBios1": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 137,
"To": 139
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 180
}
},
"RestrictedSubnetAclEntryOut": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1,
"To": 65535
},
"Protocol": -1,
"RuleAction": "allow",
"RuleNumber": 110
}
},
"RestrictedSubnetAclEntryOutHTTPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 80,
"To": 80
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 104
}
},
"RestrictedSubnetAclEntryOutHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"RestrictedSubnetAclEntryOutNetBios": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 389,
"To": 389
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 170
}
},
"RestrictedSubnetAclEntryOutTCPUnReservedIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 92
}
},
"RestrictedSubnetAclEntryOutUDPUnReserved": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 1024,
"To": 65535
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 91
}
},
"RestrictedSubnetAclEntryInNetBios1": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 137,
"To": 139
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 80
}
},
"RestrictedSubnetAclEntryOutSSHIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 22,
"To": 22
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 106
}
},
"RestrictedSubnetAclEntryInHTTPS": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "0.0.0.0/0",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 443,
"To": 443
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 102
}
},
"RestrictedSubnetAclEntryInDNSUDP": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 160
}
},
"RestrictedSubnetAclEntryOutDNSUDPIPv6": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"Ipv6CidrBlock": "::/0",
"Egress": True,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 53,
"To": 53
},
"Protocol": 17,
"RuleAction": "allow",
"RuleNumber": 161
}
},
"RestrictedSubnetAclEntryInSquid2": {
"Type": "AWS::EC2::NetworkAclEntry",
"Properties": {
"CidrBlock": "172.16.0.0/16",
"Egress": False,
"NetworkAclId": {
"Ref": "RestrictedSubnetAcl"
},
"PortRange": {
"From": 3128,
"To": 3128
},
"Protocol": 6,
"RuleAction": "allow",
"RuleNumber": 140
}
},
"EIPNATGW3": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW3": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW3",
"AllocationId"
]
},
"SubnetId": {
"Ref": "ReservedNet3"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW3"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"InternalRT3NATGW3": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW3"
},
"RouteTableId": {
"Ref": "InternalRT3"
}
}
},
"InternalRT3NATGW3IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "InternalRT3"
}
}
},
"EIPNATGW2": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW2": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW2",
"AllocationId"
]
},
"SubnetId": {
"Ref": "ReservedNet2"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW2"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"InternalRT2NATGW2": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW2"
},
"RouteTableId": {
"Ref": "InternalRT2"
}
}
},
"InternalRT2NATGW2IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "InternalRT2"
}
}
},
"EIPNATGW1": {
"Type": "AWS::EC2::EIP",
"Properties": {
"Domain": "vpc"
}
},
"NATGW1": {
"Type": "AWS::EC2::NatGateway",
"Properties": {
"AllocationId": {
"Fn::GetAtt": [
"EIPNATGW1",
"AllocationId"
]
},
"SubnetId": {
"Ref": "ReservedNet1"
},
"Tags": [
{
"Key": "Name",
"Value": "NATGW1"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"InternalRT1NATGW1": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationCidrBlock": "0.0.0.0/0",
"NatGatewayId": {
"Ref": "NATGW1"
},
"RouteTableId": {
"Ref": "InternalRT1"
}
}
},
"InternalRT1NATGW1IPv6": {
"Type": "AWS::EC2::Route",
"Properties": {
"DestinationIpv6CidrBlock": "::/0",
"EgressOnlyInternetGatewayId": {
"Ref": "EgressGateway"
},
"RouteTableId": {
"Ref": "InternalRT1"
}
}
},
"VPCEndpoint": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"GroupName": "VPCEndpoint",
"GroupDescription": "VPC Endpoint Interface Firewall Rules",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"SecurityGroupIngress": [
{
"IpProtocol": "icmp",
"FromPort": -1,
"ToPort": -1,
"CidrIp": "172.16.0.0/20",
"Description": "All ICMP Traffic"
},
{
"IpProtocol": "tcp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All TCP Traffic"
},
{
"IpProtocol": "udp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All UDP Traffic"
}
],
"SecurityGroupEgress": [
{
"IpProtocol": "icmp",
"FromPort": -1,
"ToPort": -1,
"CidrIp": "172.16.0.0/20",
"Description": "All ICMP Traffic"
},
{
"IpProtocol": "tcp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All TCP Traffic"
},
{
"IpProtocol": "udp",
"FromPort": 0,
"ToPort": 65535,
"CidrIp": "172.16.0.0/20",
"Description": "All UDP Traffic"
}
],
"Tags": [
{
"Key": "Name",
"Value": "VPCEndpoint"
},
{
"Key": "Name",
"Value": "PRIVATE-EGRESS-VPC"
},
{
"Key": "Template",
"Value": "VPC for private endpoints egress only"
}
]
}
},
"kinesisstreamsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".kinesis-streams"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"cloudtrailEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".cloudtrail"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"cloudformationEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".cloudformation"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"elasticloadbalancingEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".elasticloadbalancing"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ec2EndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ec2"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"logsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".logs"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"monitoringEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".monitoring"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"s3EndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".s3"
]
]
},
"VpcEndpointType": "Gateway",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"RouteTableIds": [
{
"Ref": "PublicRT"
},
{
"Ref": "InternalRT1"
},
{
"Ref": "InternalRT2"
},
{
"Ref": "InternalRT3"
}
]
}
},
"dynamodbEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".dynamodb"
]
]
},
"VpcEndpointType": "Gateway",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PolicyDocument": "{\n \"Version\":\"2012-10-17\",\n \"Statement\":[\n {\n \"Effect\":\"Allow\",\n \"Principal\": \"*\",\n \"Action\":[\"s3:*\"],\n \"Resource\":[\"*\"]\n }\n ]\n}\n",
"RouteTableIds": [
{
"Ref": "PublicRT"
},
{
"Ref": "InternalRT1"
},
{
"Ref": "InternalRT2"
},
{
"Ref": "InternalRT3"
}
]
}
},
"ec2messagesEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ec2messages"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"kmsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".kms"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"configEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".config"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"eventsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".events"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"sagemakerapiEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sagemaker.api"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ssmEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ssm"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"snsEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sns"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"sagemakerruntimeEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".sagemaker.runtime"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"codebuildEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".codebuild"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"servicecatalogEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".servicecatalog"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"executeapiEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".execute-api"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"secretsmanagerEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".secretsmanager"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
},
"ssmmessagesEndPoint": {
"Type": "AWS::EC2::VPCEndpoint",
"Properties": {
"ServiceName": {
"Fn::Join": [
"",
[
"com.amazonaws.",
{
"Ref": "AWS::Region"
},
".ssmmessages"
]
]
},
"VpcEndpointType": "Interface",
"VpcId": {
"Ref": "PRIVATEEGRESSVPC"
},
"PrivateDnsEnabled": True,
"SubnetIds": [
{
"Ref": "ReservedMgmt1"
},
{
"Ref": "ReservedMgmt2"
},
{
"Ref": "ReservedMgmt3"
}
],
"SecurityGroupIds": [
{
"Ref": "VPCEndpoint"
}
]
}
}
},
"Description": "Private VPC Template",
"Parameters": {
"VGW": {
"Default": "vgw-012345678",
"Type": "String",
"Description": "VPC Gateway"
}
},
"Mappings": {},
"Outputs": {
"PRIVATEEGRESSVPC": {
"Description": "PRIVATEEGRESSVPC",
"Value": {
"Ref": "PRIVATEEGRESSVPC"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-VPCid"
}
}
},
"InternalRT3": {
"Description": "InternalRT3",
"Value": {
"Ref": "InternalRT3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-InternalRT3"
}
}
},
"PublicRT": {
"Description": "PublicRT",
"Value": {
"Ref": "PublicRT"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-PublicRT"
}
}
},
"InternalRT2": {
"Description": "InternalRT2",
"Value": {
"Ref": "InternalRT2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-InternalRT2"
}
}
},
"InternalRT1": {
"Description": "InternalRT1",
"Value": {
"Ref": "InternalRT1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-RouteTable-InternalRT1"
}
}
},
"ReservedMgmt1": {
"Description": "ReservedMgmt1",
"Value": {
"Ref": "ReservedMgmt1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedMgmt1"
}
}
},
"ReservedMgmt2": {
"Description": "ReservedMgmt2",
"Value": {
"Ref": "ReservedMgmt2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedMgmt2"
}
}
},
"ReservedMgmt3": {
"Description": "ReservedMgmt3",
"Value": {
"Ref": "ReservedMgmt3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedMgmt3"
}
}
},
"Internal1": {
"Description": "Internal1",
"Value": {
"Ref": "Internal1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Internal1"
}
}
},
"Internal2": {
"Description": "Internal2",
"Value": {
"Ref": "Internal2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Internal2"
}
}
},
"Internal3": {
"Description": "Internal3",
"Value": {
"Ref": "Internal3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-Internal3"
}
}
},
"ReservedNet3": {
"Description": "ReservedNet3",
"Value": {
"Ref": "ReservedNet3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedNet3"
}
}
},
"ReservedNet2": {
"Description": "ReservedNet2",
"Value": {
"Ref": "ReservedNet2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedNet2"
}
}
},
"ReservedNet1": {
"Description": "ReservedNet1",
"Value": {
"Ref": "ReservedNet1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-ReservedNet1"
}
}
},
"PerimeterInternal1": {
"Description": "PerimeterInternal1",
"Value": {
"Ref": "PerimeterInternal1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-PerimeterInternal1"
}
}
},
"PerimeterInternal2": {
"Description": "PerimeterInternal2",
"Value": {
"Ref": "PerimeterInternal2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-PerimeterInternal2"
}
}
},
"PerimeterInternal3": {
"Description": "PerimeterInternal3",
"Value": {
"Ref": "PerimeterInternal3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-Subnet-PerimeterInternal3"
}
}
},
"InternalSubnetAcl": {
"Description": "InternalSubnetAcl",
"Value": {
"Ref": "InternalSubnetAcl"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NACL-InternalSubnetAcl"
}
}
},
"RestrictedSubnetAcl": {
"Description": "RestrictedSubnetAcl",
"Value": {
"Ref": "RestrictedSubnetAcl"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NACL-RestrictedSubnetAcl"
}
}
},
"EIPNATGW3": {
"Description": "EIP for NATGW3",
"Value": {
"Ref": "EIPNATGW3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW3"
}
}
},
"NATGW3": {
"Description": "NATGW3",
"Value": {
"Ref": "NATGW3"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW3"
}
}
},
"EIPNATGW2": {
"Description": "EIP for NATGW2",
"Value": {
"Ref": "EIPNATGW2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW2"
}
}
},
"NATGW2": {
"Description": "NATGW2",
"Value": {
"Ref": "NATGW2"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW2"
}
}
},
"EIPNATGW1": {
"Description": "EIP for NATGW1",
"Value": {
"Ref": "EIPNATGW1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-EIP-NATGW1"
}
}
},
"NATGW1": {
"Description": "NATGW1",
"Value": {
"Ref": "NATGW1"
},
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-NATGW-NATGW1"
}
}
}
}
}
}
actual = src.macro.handler(transform_call, "")
print("#####\n\n")
print(json.dumps(actual))
print("#####\n\n")
self.assertEquals(test_assert, actual)
| 45.411037 | 286 | 0.204921 | 17,535 | 580,126 | 6.778158 | 0.030168 | 0.012132 | 0.036263 | 0.008683 | 0.969526 | 0.963089 | 0.960086 | 0.949905 | 0.938673 | 0.934079 | 0 | 0.055788 | 0.70931 | 580,126 | 12,774 | 287 | 45.414592 | 0.649009 | 0 | 0 | 0.639868 | 0 | 0.007757 | 0.211994 | 0.057793 | 0 | 0 | 0 | 0 | 0.00047 | 1 | 0.000313 | false | 0 | 0.000392 | 0 | 0.00094 | 0.000313 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
53e4dc444d3b47a18c72bf5cf716039c82981532 | 8,600 | py | Python | Web_Crawler/update_insta_hashtags.py | joaopfonseca/social_media_crawler | 3abce6c850d203805f705e82612d98abf2eb4a47 | [
"MIT"
] | 1 | 2021-11-14T18:45:01.000Z | 2021-11-14T18:45:01.000Z | Web_Crawler/update_insta_hashtags.py | joaopfonseca/social_media_crawler | 3abce6c850d203805f705e82612d98abf2eb4a47 | [
"MIT"
] | 2 | 2021-06-08T19:27:42.000Z | 2021-06-08T19:32:43.000Z | Web_Crawler/update_insta_hashtags.py | joaopfonseca/social_media_crawler | 3abce6c850d203805f705e82612d98abf2eb4a47 | [
"MIT"
] | null | null | null | from InstagramAPI import InstagramAPI
import csv
import datetime
import pandas as pd
from instagram_access import ig_creds
def update_instagram_hashtags(keyword):
    """Crawl up to ~1000 recent Instagram posts tagged with *keyword*.

    Logs in with the first stored credential set, appends one CSV row per
    post to ``_data/<keyword>_instagram_posts.csv``, then rewrites that file
    sorted by post date with duplicate posts (same ``pk``) removed, keeping
    the most recently crawled copy of each.
    """
    username = ig_creds(1)['username']
    pwd = ig_creds(1)['pwd']
    api = InstagramAPI(username, pwd)
    api.login()

    # Top-level post fields extracted into CSV columns, in column order.
    jsono = ['can_viewer_save', 'caption','caption_is_edited','client_cache_key','code',
    'comment_count','comment_likes_enabled','comment_threading_enabled','device_timestamp',
    'filter_type','has_audio','has_liked','has_more_comments','id','image_versions2',
    'is_dash_eligible','like_count','max_num_visible_preview_comments','media_type',
    'number_of_qualities','organic_tracking_token','original_height','original_width',
    'photo_of_you','pk','taken_at','user','video_dash_manifest','video_duration',
    'video_versions','view_count','lat','lng','location']

    # Fields whose value is a nested dict; each subfield becomes its own column.
    fields_with_subfields = [
    'caption',
    'image_versions2',
    'user',
    'location']
    subfields = {'caption':['bit_flags','content_type','created_at','created_at_utc',
                            'did_report_as_spam','media_id','pk','status','text',
                            'type','user','user_id'],
                 'image_versions2':['candidates'],
                 'user':['friendship_status','full_name','has_anonymous_profile_picture',
                         'is_favorite','is_private','is_unpublished','pk','profile_pic_url',
                         'username'],
                 'location':['address','city','external_source','facebook_places_id',
                             'lat','lng','name','pk','short_name']
                }

    def update_tag_feed(word, writer):
        """Page through the hashtag feed for *word*, writing rows via *writer*."""
        next_max = 100000  # upper bound on feed pages loaded: 1 page is approx. 70/80 posts
        next_max_id = ''
        i = 0
        for n in range(next_max):
            if i > 1000:
                # Fixed: original passed i as a second print() argument, so the
                # '%s' placeholder was never substituted.
                print('Done! %s posts processed' % i)
                break
            api.getHashtagFeed(word, next_max_id)
            data = api.LastJson
            try:
                for post in data['items']:
                    data_row = []
                    for field in jsono:
                        if field in post.keys():
                            if field in fields_with_subfields:
                                for subfield in subfields[field]:
                                    try:
                                        data_row.append(post[field][subfield])
                                    except TypeError:
                                        # Field present but not a dict (e.g. None).
                                        data_row.append('')
                            else:
                                data_row.append(post[field])
                        else:
                            data_row.append('')
                        # Posts without a location still need the extra empty
                        # location columns so every row has the same width.
                        if field == 'location' and field not in post.keys():
                            for _ in range(8):
                                data_row.insert(53, '')
                    data_row.append(datetime.datetime.now())
                    writer.writerow(data_row)
                    i = i + 1
                    if i % 500 == 0:
                        print( "%s Statuses Processed: %s" % (i, datetime.datetime.now()) )
                next_max_id = data["next_max_id"]
            except Exception:
                # The response was malformed (no 'items') or a row failed to
                # serialize; try to advance the paging cursor, otherwise stop.
                try:
                    next_max_id = data["next_max_id"]
                except KeyError:
                    print("error next_max. Tag: ", next_max_id)
                    print('Done! %s posts processed' % i)
                    break

    # 'with' guarantees the CSV handle is flushed and closed (it was leaked
    # before, so the final rows could be lost before the dedup pass below).
    with open('_data/%s_instagram_posts.csv' % keyword, 'a') as f:
        update_tag_feed(keyword, csv.writer(f))

    # Sort by date and remove duplicates (most recent to oldest).
    df = pd.read_csv('_data/%s_instagram_posts.csv' % keyword)
    df.sort_values(['time_crawled'], ascending=False).drop_duplicates(
        ['pk'], keep='first'
    ).sort_values(['taken_at'], ascending=False).to_csv(
        '_data/%s_instagram_posts.csv' % keyword, index=False
    )
def update_instagram_hashtags_gui(keyword, kw_number):
    """Crawl up to ~1000 recent Instagram posts tagged with *keyword*.

    GUI variant of :func:`update_instagram_hashtags`: *kw_number* selects
    which stored credential set to log in with. Appends one CSV row per post
    to ``_data/<keyword>_instagram_posts.csv``, then rewrites that file
    sorted by post date with duplicate posts (same ``pk``) removed, keeping
    the most recently crawled copy of each.
    """
    username = ig_creds(kw_number)['username']
    pwd = ig_creds(kw_number)['pwd']
    api = InstagramAPI(username, pwd)
    api.login()

    # Top-level post fields extracted into CSV columns, in column order.
    jsono = ['can_viewer_save', 'caption','caption_is_edited','client_cache_key','code',
    'comment_count','comment_likes_enabled','comment_threading_enabled','device_timestamp',
    'filter_type','has_audio','has_liked','has_more_comments','id','image_versions2',
    'is_dash_eligible','like_count','max_num_visible_preview_comments','media_type',
    'number_of_qualities','organic_tracking_token','original_height','original_width',
    'photo_of_you','pk','taken_at','user','video_dash_manifest','video_duration',
    'video_versions','view_count','lat','lng','location']

    # Fields whose value is a nested dict; each subfield becomes its own column.
    fields_with_subfields = [
    'caption',
    'image_versions2',
    'user',
    'location']
    subfields = {'caption':['bit_flags','content_type','created_at','created_at_utc',
                            'did_report_as_spam','media_id','pk','status','text',
                            'type','user','user_id'],
                 'image_versions2':['candidates'],
                 'user':['friendship_status','full_name','has_anonymous_profile_picture',
                         'is_favorite','is_private','is_unpublished','pk','profile_pic_url',
                         'username'],
                 'location':['address','city','external_source','facebook_places_id',
                             'lat','lng','name','pk','short_name']
                }

    def update_tag_feed(word, writer):
        """Page through the hashtag feed for *word*, writing rows via *writer*."""
        next_max = 100000  # upper bound on feed pages loaded: 1 page is approx. 70/80 posts
        next_max_id = ''
        i = 0
        for n in range(next_max):
            if i > 1000:
                print('Done! %s posts processed' % i)
                break
            api.getHashtagFeed(word, next_max_id)
            data = api.LastJson
            try:
                for post in data['items']:
                    data_row = []
                    for field in jsono:
                        if field in post.keys():
                            if field in fields_with_subfields:
                                for subfield in subfields[field]:
                                    try:
                                        data_row.append(post[field][subfield])
                                    except TypeError:
                                        # Field present but not a dict (e.g. None).
                                        data_row.append('')
                            else:
                                data_row.append(post[field])
                        else:
                            data_row.append('')
                        # Posts without a location still need the extra empty
                        # location columns so every row has the same width.
                        if field == 'location' and field not in post.keys():
                            for _ in range(8):
                                data_row.insert(53, '')
                    data_row.append(datetime.datetime.now())
                    writer.writerow(data_row)
                    i = i + 1
                    if i % 500 == 0:
                        print( "%s Statuses Processed: %s" % (i, datetime.datetime.now()) )
                next_max_id = data["next_max_id"]
            except Exception:
                # The response was malformed (no 'items') or a row failed to
                # serialize; try to advance the paging cursor, otherwise stop.
                try:
                    next_max_id = data["next_max_id"]
                except KeyError:
                    print("error next_max. Tag: ", next_max_id)
                    print('Done! %s posts processed' % i)
                    break

    # 'with' guarantees the CSV handle is flushed and closed (it was leaked
    # before, so the final rows could be lost before the dedup pass below).
    with open('_data/%s_instagram_posts.csv' % keyword, 'a') as f:
        update_tag_feed(keyword, csv.writer(f))

    # Sort by date and remove duplicates (most recent to oldest).
    df = pd.read_csv('_data/%s_instagram_posts.csv' % keyword)
    df.sort_values(['time_crawled'], ascending=False).drop_duplicates(
        ['pk'], keep='first'
    ).sort_values(['taken_at'], ascending=False).to_csv(
        '_data/%s_instagram_posts.csv' % keyword, index=False
    )
| 43.654822 | 196 | 0.496744 | 883 | 8,600 | 4.545866 | 0.215176 | 0.052317 | 0.051819 | 0.059791 | 0.933732 | 0.933732 | 0.933732 | 0.933732 | 0.933732 | 0.933732 | 0 | 0.015457 | 0.38314 | 8,600 | 196 | 197 | 43.877551 | 0.741188 | 0.028837 | 0 | 0.91411 | 0 | 0 | 0.249401 | 0.05103 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02454 | false | 0 | 0.030675 | 0 | 0.055215 | 0.04908 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
53f9ef5ea1004b9458f7f4d57d77863f0c888ec9 | 9,680 | py | Python | mayan/apps/document_states/tests/test_workflow_template_state_api.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/document_states/tests/test_workflow_template_state_api.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/document_states/tests/test_workflow_template_state_api.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from rest_framework import status
from mayan.apps.documents.tests.mixins.document_mixins import DocumentTestMixin
from mayan.apps.rest_api.tests.base import BaseAPITestCase
from ..events import event_workflow_template_edited
from ..permissions import (
permission_workflow_template_edit, permission_workflow_template_view
)
from .literals import TEST_WORKFLOW_TEMPLATE_STATE_LABEL
from .mixins.workflow_template_mixins import WorkflowTemplateTestMixin
from .mixins.workflow_template_state_mixins import WorkflowTemplateStateAPIViewTestMixin
class WorkflowTemplateStatesAPIViewTestCase(
DocumentTestMixin, WorkflowTemplateStateAPIViewTestMixin,
WorkflowTemplateTestMixin, BaseAPITestCase
):
    # These tests exercise the workflow template state API only; no test
    # document needs to be uploaded by the DocumentTestMixin.
    auto_upload_test_document = False
    def setUp(self):
        """Create the workflow template every test in this case operates on."""
        super().setUp()
        self._create_test_workflow_template()
def test_workflow_state_create_api_view_no_permission(self):
self._clear_events()
response = self._request_test_workflow_template_state_create_api_view()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.test_workflow_template.refresh_from_db()
self.assertEqual(self.test_workflow_template.states.count(), 0)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_state_create_api_view_with_access(self):
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_test_workflow_template_state_create_api_view()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.test_workflow_template.refresh_from_db()
self.assertEqual(
self.test_workflow_template.states.first().label,
TEST_WORKFLOW_TEMPLATE_STATE_LABEL
)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(
events[0].action_object, self.test_workflow_template_state
)
self.assertEqual(events[0].target, self.test_workflow_template)
self.assertEqual(events[0].verb, event_workflow_template_edited.id)
def test_workflow_state_delete_api_view_no_permission(self):
self._create_test_workflow_template_state()
self._clear_events()
response = self._request_test_workflow_template_state_delete_api_view()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.test_workflow_template.refresh_from_db()
self.assertEqual(self.test_workflow_template.states.count(), 1)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_workflow_state_delete_api_view_with_access(self):
self._create_test_workflow_template_state()
self.grant_access(
obj=self.test_workflow_template,
permission=permission_workflow_template_edit
)
self._clear_events()
response = self._request_test_workflow_template_state_delete_api_view()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.test_workflow_template.refresh_from_db()
self.assertEqual(self.test_workflow_template.states.count(), 0)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].target, self.test_workflow_template)
self.assertEqual(events[0].verb, event_workflow_template_edited.id)
    def test_workflow_state_detail_api_view_no_permission(self):
        """Without access, the state detail API returns HTTP 404 and emits
        no events.
        """
        self._create_test_workflow_template_state()

        self._clear_events()

        response = self._request_test_workflow_template_state_detail_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_workflow_state_detail_api_view_with_access(self):
        """With view access on the template, the state detail API returns
        HTTP 200 with the state's primary key; read-only views emit no
        events.
        """
        self._create_test_workflow_template_state()
        self.grant_access(
            obj=self.test_workflow_template,
            permission=permission_workflow_template_view
        )

        self._clear_events()

        response = self._request_test_workflow_template_state_detail_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['id'], self.test_workflow_template_state.pk
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_workflow_state_list_api_view_no_permission(self):
        """Without access, the state list API returns HTTP 404 (the parent
        template itself is not visible) and emits no events.
        """
        self._create_test_workflow_template_state()

        self._clear_events()

        response = self._request_test_workflow_template_state_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_workflow_state_list_api_view_with_access(self):
        """With view access on the template, the state list API returns
        HTTP 200 and includes the created state in its paginated results;
        read-only views emit no events.
        """
        self._create_test_workflow_template_state()
        self.grant_access(
            obj=self.test_workflow_template,
            permission=permission_workflow_template_view
        )

        self._clear_events()

        response = self._request_test_workflow_template_state_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['results'][0]['id'],
            self.test_workflow_template_state.pk
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_workflow_state_edit_api_view_via_patch_no_permission(self):
        """Without access, a PATCH edit returns HTTP 404, the state label is
        unchanged, and no events are emitted.
        """
        self._create_test_workflow_template_state()

        # Snapshot the label before the request to prove it was not changed.
        test_workflow_template_state_label = self.test_workflow_template_state.label

        self._clear_events()

        response = self._request_test_workflow_template_state_edit_patch_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.test_workflow_template_state.refresh_from_db()
        self.assertEqual(
            self.test_workflow_template_state.label,
            test_workflow_template_state_label
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_workflow_state_edit_api_view_via_patch_with_access(self):
        """With edit access, a PATCH edit returns HTTP 200, changes the
        state label, and commits one `workflow template edited` event whose
        action object is the state and whose target is the template.
        """
        self._create_test_workflow_template_state()

        # Snapshot the label before the request to prove it was changed.
        test_workflow_template_state_label = self.test_workflow_template_state.label

        self.grant_access(
            obj=self.test_workflow_template,
            permission=permission_workflow_template_edit
        )

        self._clear_events()

        response = self._request_test_workflow_template_state_edit_patch_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.test_workflow_template_state.refresh_from_db()
        self.assertNotEqual(
            self.test_workflow_template_state.label,
            test_workflow_template_state_label
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(
            events[0].action_object, self.test_workflow_template_state
        )
        self.assertEqual(events[0].target, self.test_workflow_template)
        self.assertEqual(events[0].verb, event_workflow_template_edited.id)
    def test_workflow_state_edit_api_view_via_put_no_permission(self):
        """Without access, a PUT edit returns HTTP 404, the state label is
        unchanged, and no events are emitted.
        """
        self._create_test_workflow_template_state()

        # Snapshot the label before the request to prove it was not changed.
        test_workflow_template_state_label = self.test_workflow_template_state.label

        self._clear_events()

        response = self._request_test_workflow_template_state_edit_put_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.test_workflow_template_state.refresh_from_db()
        self.assertEqual(
            self.test_workflow_template_state.label,
            test_workflow_template_state_label
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_workflow_state_edit_api_view_via_put_with_access(self):
        """With edit access, a PUT edit returns HTTP 200, changes the state
        label, and commits one `workflow template edited` event whose action
        object is the state and whose target is the template.
        """
        self._create_test_workflow_template_state()

        # Snapshot the label before the request to prove it was changed.
        test_workflow_template_state_label = self.test_workflow_template_state.label

        self.grant_access(
            obj=self.test_workflow_template,
            permission=permission_workflow_template_edit
        )

        self._clear_events()

        response = self._request_test_workflow_template_state_edit_put_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.test_workflow_template_state.refresh_from_db()
        self.assertNotEqual(
            self.test_workflow_template_state.label,
            test_workflow_template_state_label
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(
            events[0].action_object, self.test_workflow_template_state
        )
        self.assertEqual(events[0].target, self.test_workflow_template)
        self.assertEqual(events[0].verb, event_workflow_template_edited.id)
| 36.666667 | 89 | 0.708264 | 1,120 | 9,680 | 5.617857 | 0.077679 | 0.211062 | 0.216147 | 0.194692 | 0.895264 | 0.885728 | 0.88096 | 0.869835 | 0.869835 | 0.866656 | 0 | 0.009005 | 0.219938 | 9,680 | 263 | 90 | 36.806084 | 0.824262 | 0 | 0 | 0.707447 | 0 | 0 | 0.001168 | 0 | 0 | 0 | 0 | 0 | 0.265957 | 1 | 0.069149 | false | 0 | 0.042553 | 0 | 0.12234 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
070e0acabb6aa6f4615405d41775d7f8ad41ac31 | 75 | py | Python | common/toontown/__init__.py | jaczerob/Toony | 3108ab161c0a15de2ada71d22aff25baca20eca4 | [
"Unlicense"
] | null | null | null | common/toontown/__init__.py | jaczerob/Toony | 3108ab161c0a15de2ada71d22aff25baca20eca4 | [
"Unlicense"
] | null | null | null | common/toontown/__init__.py | jaczerob/Toony | 3108ab161c0a15de2ada71d22aff25baca20eca4 | [
"Unlicense"
] | null | null | null | from common.toontown.accounts import *
from common.toontown.config import * | 37.5 | 38 | 0.826667 | 10 | 75 | 6.2 | 0.6 | 0.322581 | 0.580645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093333 | 75 | 2 | 39 | 37.5 | 0.911765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
0724f11aa0563b214c8fd1ade7aa5e0033449c24 | 113 | py | Python | 20_solo/lib/mmdet/version.py | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | 20_solo/lib/mmdet/version.py | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | 20_solo/lib/mmdet/version.py | Boltuzamaki/Monk_Object_Detection | baf113ef6db8b531d0ef6413538e49d422163a20 | [
"Apache-2.0"
] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z | # GENERATED VERSION FILE
# TIME: Fri Sep 25 06:29:09 2020
__version__ = '1.0.0+db93575'
short_version = '1.0.0'
| 18.833333 | 32 | 0.699115 | 21 | 113 | 3.52381 | 0.714286 | 0.216216 | 0.243243 | 0.27027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.242105 | 0.159292 | 113 | 5 | 33 | 22.6 | 0.536842 | 0.469027 | 0 | 0 | 1 | 0 | 0.315789 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4ada921ea248ea0f82709ffd5839cd85e5b786d7 | 30,219 | py | Python | sdk/python/pulumi_aws/sagemaker/workteam.py | rapzo/pulumi-aws | 390a098221315d98a54ba97d1559e750dc3053b7 | [
"ECL-2.0",
"Apache-2.0"
] | 260 | 2018-06-18T14:57:00.000Z | 2022-03-29T11:41:03.000Z | sdk/python/pulumi_aws/sagemaker/workteam.py | rapzo/pulumi-aws | 390a098221315d98a54ba97d1559e750dc3053b7 | [
"ECL-2.0",
"Apache-2.0"
] | 1,154 | 2018-06-19T20:38:20.000Z | 2022-03-31T19:48:16.000Z | sdk/python/pulumi_aws/sagemaker/workteam.py | rapzo/pulumi-aws | 390a098221315d98a54ba97d1559e750dc3053b7 | [
"ECL-2.0",
"Apache-2.0"
] | 115 | 2018-06-28T03:20:27.000Z | 2022-03-29T11:41:06.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['WorkteamArgs', 'Workteam']
@pulumi.input_type
class WorkteamArgs:
    """Typed input bundle for constructing a ``Workteam`` resource.

    NOTE(review): this class is tfgen-generated (see the file header);
    prefer regenerating over hand edits.
    """
    def __init__(__self__, *,
                 description: pulumi.Input[str],
                 member_definitions: pulumi.Input[Sequence[pulumi.Input['WorkteamMemberDefinitionArgs']]],
                 workforce_name: pulumi.Input[str],
                 workteam_name: pulumi.Input[str],
                 notification_configuration: Optional[pulumi.Input['WorkteamNotificationConfigurationArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Workteam resource.
        :param pulumi.Input[str] description: A description of the work team.
        :param pulumi.Input[Sequence[pulumi.Input['WorkteamMemberDefinitionArgs']]] member_definitions: A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see Member Definition details below.
        :param pulumi.Input[str] workforce_name: The name of the Workteam (must be unique).
        :param pulumi.Input[str] workteam_name: The name of the workforce.
        :param pulumi.Input['WorkteamNotificationConfigurationArgs'] notification_configuration: Configures notification of workers regarding available or expiring work items. see Notification Configuration details below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "member_definitions", member_definitions)
        pulumi.set(__self__, "workforce_name", workforce_name)
        pulumi.set(__self__, "workteam_name", workteam_name)
        # Optional arguments are only recorded when supplied so that unset
        # values stay absent from the input bag.
        if notification_configuration is not None:
            pulumi.set(__self__, "notification_configuration", notification_configuration)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def description(self) -> pulumi.Input[str]:
        """
        A description of the work team.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: pulumi.Input[str]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="memberDefinitions")
    def member_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['WorkteamMemberDefinitionArgs']]]:
        """
        A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see Member Definition details below.
        """
        return pulumi.get(self, "member_definitions")

    @member_definitions.setter
    def member_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['WorkteamMemberDefinitionArgs']]]):
        pulumi.set(self, "member_definitions", value)

    @property
    @pulumi.getter(name="workforceName")
    def workforce_name(self) -> pulumi.Input[str]:
        """
        The name of the Workteam (must be unique).
        """
        return pulumi.get(self, "workforce_name")

    @workforce_name.setter
    def workforce_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "workforce_name", value)

    @property
    @pulumi.getter(name="workteamName")
    def workteam_name(self) -> pulumi.Input[str]:
        """
        The name of the workforce.
        """
        return pulumi.get(self, "workteam_name")

    @workteam_name.setter
    def workteam_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "workteam_name", value)

    @property
    @pulumi.getter(name="notificationConfiguration")
    def notification_configuration(self) -> Optional[pulumi.Input['WorkteamNotificationConfigurationArgs']]:
        """
        Configures notification of workers regarding available or expiring work items. see Notification Configuration details below.
        """
        return pulumi.get(self, "notification_configuration")

    @notification_configuration.setter
    def notification_configuration(self, value: Optional[pulumi.Input['WorkteamNotificationConfigurationArgs']]):
        pulumi.set(self, "notification_configuration", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _WorkteamState:
    """Input properties used for looking up and filtering existing
    ``Workteam`` resources; every field is optional, unlike
    ``WorkteamArgs``.

    NOTE(review): this class is tfgen-generated (see the file header);
    prefer regenerating over hand edits.
    """
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 member_definitions: Optional[pulumi.Input[Sequence[pulumi.Input['WorkteamMemberDefinitionArgs']]]] = None,
                 notification_configuration: Optional[pulumi.Input['WorkteamNotificationConfigurationArgs']] = None,
                 subdomain: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workforce_name: Optional[pulumi.Input[str]] = None,
                 workteam_name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Workteam resources.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Workteam.
        :param pulumi.Input[str] description: A description of the work team.
        :param pulumi.Input[Sequence[pulumi.Input['WorkteamMemberDefinitionArgs']]] member_definitions: A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see Member Definition details below.
        :param pulumi.Input['WorkteamNotificationConfigurationArgs'] notification_configuration: Configures notification of workers regarding available or expiring work items. see Notification Configuration details below.
        :param pulumi.Input[str] subdomain: The subdomain for your OIDC Identity Provider.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        :param pulumi.Input[str] workforce_name: The name of the Workteam (must be unique).
        :param pulumi.Input[str] workteam_name: The name of the workforce.
        """
        # Only record properties that were actually supplied.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if member_definitions is not None:
            pulumi.set(__self__, "member_definitions", member_definitions)
        if notification_configuration is not None:
            pulumi.set(__self__, "notification_configuration", notification_configuration)
        if subdomain is not None:
            pulumi.set(__self__, "subdomain", subdomain)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
        if workforce_name is not None:
            pulumi.set(__self__, "workforce_name", workforce_name)
        if workteam_name is not None:
            pulumi.set(__self__, "workteam_name", workteam_name)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) assigned by AWS to this Workteam.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the work team.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="memberDefinitions")
    def member_definitions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['WorkteamMemberDefinitionArgs']]]]:
        """
        A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see Member Definition details below.
        """
        return pulumi.get(self, "member_definitions")

    @member_definitions.setter
    def member_definitions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['WorkteamMemberDefinitionArgs']]]]):
        pulumi.set(self, "member_definitions", value)

    @property
    @pulumi.getter(name="notificationConfiguration")
    def notification_configuration(self) -> Optional[pulumi.Input['WorkteamNotificationConfigurationArgs']]:
        """
        Configures notification of workers regarding available or expiring work items. see Notification Configuration details below.
        """
        return pulumi.get(self, "notification_configuration")

    @notification_configuration.setter
    def notification_configuration(self, value: Optional[pulumi.Input['WorkteamNotificationConfigurationArgs']]):
        pulumi.set(self, "notification_configuration", value)

    @property
    @pulumi.getter
    def subdomain(self) -> Optional[pulumi.Input[str]]:
        """
        The subdomain for your OIDC Identity Provider.
        """
        return pulumi.get(self, "subdomain")

    @subdomain.setter
    def subdomain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subdomain", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)

    @property
    @pulumi.getter(name="workforceName")
    def workforce_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Workteam (must be unique).
        """
        return pulumi.get(self, "workforce_name")

    @workforce_name.setter
    def workforce_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workforce_name", value)

    @property
    @pulumi.getter(name="workteamName")
    def workteam_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the workforce.
        """
        return pulumi.get(self, "workteam_name")

    @workteam_name.setter
    def workteam_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workteam_name", value)
class Workteam(pulumi.CustomResource):
    """Pulumi resource wrapping an AWS SageMaker Workteam.

    NOTE(review): this class is tfgen-generated (see the file header);
    prefer regenerating over hand edits.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 member_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkteamMemberDefinitionArgs']]]]] = None,
                 notification_configuration: Optional[pulumi.Input[pulumi.InputType['WorkteamNotificationConfigurationArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workforce_name: Optional[pulumi.Input[str]] = None,
                 workteam_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Sagemaker Workteam resource.

        ## Example Usage
        ### Cognito Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.sagemaker.Workteam("example",
            workteam_name="example",
            workforce_name=aws_sagemaker_workforce["example"]["id"],
            description="example",
            member_definitions=[aws.sagemaker.WorkteamMemberDefinitionArgs(
                cognito_member_definition=aws.sagemaker.WorkteamMemberDefinitionCognitoMemberDefinitionArgs(
                    client_id=aws_cognito_user_pool_client["example"]["id"],
                    user_pool=aws_cognito_user_pool_domain["example"]["user_pool_id"],
                    user_group=aws_cognito_user_group["example"]["id"],
                ),
            )])
        ```
        ### Oidc Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.sagemaker.Workteam("example",
            workteam_name="example",
            workforce_name=aws_sagemaker_workforce["example"]["id"],
            description="example",
            member_definitions=[aws.sagemaker.WorkteamMemberDefinitionArgs(
                oidc_member_definition=aws.sagemaker.WorkteamMemberDefinitionOidcMemberDefinitionArgs(
                    groups=["example"],
                ),
            )])
        ```

        ## Import

        Sagemaker Workteams can be imported using the `workteam_name`, e.g.

        ```sh
         $ pulumi import aws:sagemaker/workteam:Workteam example example
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A description of the work team.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkteamMemberDefinitionArgs']]]] member_definitions: A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see Member Definition details below.
        :param pulumi.Input[pulumi.InputType['WorkteamNotificationConfigurationArgs']] notification_configuration: Configures notification of workers regarding available or expiring work items. see Notification Configuration details below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[str] workforce_name: The name of the Workteam (must be unique).
        :param pulumi.Input[str] workteam_name: The name of the workforce.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: WorkteamArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Sagemaker Workteam resource.

        ## Example Usage
        ### Cognito Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.sagemaker.Workteam("example",
            workteam_name="example",
            workforce_name=aws_sagemaker_workforce["example"]["id"],
            description="example",
            member_definitions=[aws.sagemaker.WorkteamMemberDefinitionArgs(
                cognito_member_definition=aws.sagemaker.WorkteamMemberDefinitionCognitoMemberDefinitionArgs(
                    client_id=aws_cognito_user_pool_client["example"]["id"],
                    user_pool=aws_cognito_user_pool_domain["example"]["user_pool_id"],
                    user_group=aws_cognito_user_group["example"]["id"],
                ),
            )])
        ```
        ### Oidc Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.sagemaker.Workteam("example",
            workteam_name="example",
            workforce_name=aws_sagemaker_workforce["example"]["id"],
            description="example",
            member_definitions=[aws.sagemaker.WorkteamMemberDefinitionArgs(
                oidc_member_definition=aws.sagemaker.WorkteamMemberDefinitionOidcMemberDefinitionArgs(
                    groups=["example"],
                ),
            )])
        ```

        ## Import

        Sagemaker Workteams can be imported using the `workteam_name`, e.g.

        ```sh
         $ pulumi import aws:sagemaker/workteam:Workteam example example
        ```

        :param str resource_name: The name of the resource.
        :param WorkteamArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a WorkteamArgs
        # bundle or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(WorkteamArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 member_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkteamMemberDefinitionArgs']]]]] = None,
                 notification_configuration: Optional[pulumi.Input[pulumi.InputType['WorkteamNotificationConfigurationArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workforce_name: Optional[pulumi.Input[str]] = None,
                 workteam_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ is reserved for lookups of
            # existing resources (see get()).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WorkteamArgs.__new__(WorkteamArgs)

            # Required properties may only be omitted when adopting an
            # existing resource via its URN.
            if description is None and not opts.urn:
                raise TypeError("Missing required property 'description'")
            __props__.__dict__["description"] = description
            if member_definitions is None and not opts.urn:
                raise TypeError("Missing required property 'member_definitions'")
            __props__.__dict__["member_definitions"] = member_definitions
            __props__.__dict__["notification_configuration"] = notification_configuration
            __props__.__dict__["tags"] = tags
            if workforce_name is None and not opts.urn:
                raise TypeError("Missing required property 'workforce_name'")
            __props__.__dict__["workforce_name"] = workforce_name
            if workteam_name is None and not opts.urn:
                raise TypeError("Missing required property 'workteam_name'")
            __props__.__dict__["workteam_name"] = workteam_name
            # Output-only properties are initialized to None and populated
            # by the provider.
            __props__.__dict__["arn"] = None
            __props__.__dict__["subdomain"] = None
            __props__.__dict__["tags_all"] = None
        super(Workteam, __self__).__init__(
            'aws:sagemaker/workteam:Workteam',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            member_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkteamMemberDefinitionArgs']]]]] = None,
            notification_configuration: Optional[pulumi.Input[pulumi.InputType['WorkteamNotificationConfigurationArgs']]] = None,
            subdomain: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            workforce_name: Optional[pulumi.Input[str]] = None,
            workteam_name: Optional[pulumi.Input[str]] = None) -> 'Workteam':
        """
        Get an existing Workteam resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this Workteam.
        :param pulumi.Input[str] description: A description of the work team.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkteamMemberDefinitionArgs']]]] member_definitions: A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see Member Definition details below.
        :param pulumi.Input[pulumi.InputType['WorkteamNotificationConfigurationArgs']] notification_configuration: Configures notification of workers regarding available or expiring work items. see Notification Configuration details below.
        :param pulumi.Input[str] subdomain: The subdomain for your OIDC Identity Provider.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        :param pulumi.Input[str] workforce_name: The name of the Workteam (must be unique).
        :param pulumi.Input[str] workteam_name: The name of the workforce.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _WorkteamState.__new__(_WorkteamState)

        __props__.__dict__["arn"] = arn
        __props__.__dict__["description"] = description
        __props__.__dict__["member_definitions"] = member_definitions
        __props__.__dict__["notification_configuration"] = notification_configuration
        __props__.__dict__["subdomain"] = subdomain
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        __props__.__dict__["workforce_name"] = workforce_name
        __props__.__dict__["workteam_name"] = workteam_name
        return Workteam(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) assigned by AWS to this Workteam.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        A description of the work team.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="memberDefinitions")
    def member_definitions(self) -> pulumi.Output[Sequence['outputs.WorkteamMemberDefinition']]:
        """
        A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see Member Definition details below.
        """
        return pulumi.get(self, "member_definitions")

    @property
    @pulumi.getter(name="notificationConfiguration")
    def notification_configuration(self) -> pulumi.Output[Optional['outputs.WorkteamNotificationConfiguration']]:
        """
        Configures notification of workers regarding available or expiring work items. see Notification Configuration details below.
        """
        return pulumi.get(self, "notification_configuration")

    @property
    @pulumi.getter
    def subdomain(self) -> pulumi.Output[str]:
        """
        The subdomain for your OIDC Identity Provider.
        """
        return pulumi.get(self, "subdomain")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")

    @property
    @pulumi.getter(name="workforceName")
    def workforce_name(self) -> pulumi.Output[str]:
        """
        The name of the Workteam (must be unique).
        """
        return pulumi.get(self, "workforce_name")

    @property
    @pulumi.getter(name="workteamName")
    def workteam_name(self) -> pulumi.Output[str]:
        """
        The name of the workforce.
        """
        return pulumi.get(self, "workteam_name")
| 53.86631 | 605 | 0.68586 | 3,435 | 30,219 | 5.856186 | 0.065502 | 0.072181 | 0.050109 | 0.028435 | 0.894015 | 0.881239 | 0.855836 | 0.822778 | 0.816365 | 0.799911 | 0 | 0.000042 | 0.218273 | 30,219 | 560 | 606 | 53.9625 | 0.851501 | 0.449121 | 0 | 0.618056 | 1 | 0 | 0.142564 | 0.066145 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159722 | false | 0.003472 | 0.024306 | 0 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ab0bd377c6bb02d8865e1e272cde4126f49b6263 | 3,998 | py | Python | reddit_utils.py | fastent/fastent | ce49b250e7c5b2c475b981307f11a0595fc96a89 | [
"MIT"
] | 8 | 2018-04-16T09:12:59.000Z | 2020-12-08T12:35:56.000Z | reddit_utils.py | fastent/fastent | ce49b250e7c5b2c475b981307f11a0595fc96a89 | [
"MIT"
] | 10 | 2018-01-23T16:03:51.000Z | 2021-03-31T18:35:22.000Z | reddit_utils.py | fastent/fastent | ce49b250e7c5b2c475b981307f11a0595fc96a89 | [
"MIT"
] | 3 | 2018-06-26T18:19:38.000Z | 2021-08-09T14:16:45.000Z | import praw
import traceback
from .fast_utils import exact_word_match
# Module-level authenticated Reddit client shared by every helper below.
# NOTE(review): API credentials (client secret and account password) are
# hard-coded in source. They should be moved to environment variables or a
# config file, and the exposed secret/password rotated.
r = praw.Reddit(client_id='jaFBw59_YwZq2g',
client_secret='NX8gsx7NAppehlUJH3B3Db1yH7w',
user_agent='Anotator', username='fastent_reddit',
password='fastent2017')
def random_find_context_comments_depth(word, comment_limit=100, min_context_amount=2):
    """
    Collect contexts for a word from the full (flattened) comment trees of
    streamed r/AskReddit submissions.

    Args:
        word (str): The word that needs context
        comment_limit (int): Maximum number of comments to inspect overall
            (defaults to the previous hard-coded 100)
        min_context_amount (int): Stop once this many contexts are found
            (defaults to the previous hard-coded 2)

    Returns:
        (dict) : {word (str): context (list)} pair; the list may hold fewer
        than min_context_amount entries if the comment limit was reached or
        an API error occurred.
    """
    answer_list = []
    try:
        subreddit = r.subreddit('AskReddit')
        comments_seen = 0
        for submission in subreddit.stream.submissions():
            # Expand every "load more comments" stub, then flatten the tree
            # so nested replies are searched too (unlike the shallow variant).
            submission.comments.replace_more(limit=None)
            for comment in submission.comments.list():
                comments_seen += 1
                if comments_seen >= comment_limit:
                    return {word: answer_list}
                if exact_word_match(word, comment.body):
                    answer_list.append(comment.body)
                if len(answer_list) >= min_context_amount:
                    return {word: answer_list}
    except Exception as e:
        # Best-effort: report the API/network failure and return what we have.
        print(e)
    return {word: answer_list}
def radom_find_context_comments(word, comment_limit=100, min_context_amount=2):
    """
    Collect contexts for a word from the top-level comments of streamed
    r/AskReddit submissions.

    NOTE(review): the name keeps its original misspelling ("radom") so
    existing callers are not broken. Unlike
    random_find_context_comments_depth this only scans direct (top-level)
    comments, not nested replies.

    Args:
        word (str): The word that needs context
        comment_limit (int): Maximum number of comments to inspect overall
            (defaults to the previous hard-coded 100)
        min_context_amount (int): Stop once this many contexts are found
            (defaults to the previous hard-coded 2)

    Returns:
        (dict) : {word (str): context (list)} pair; possibly shorter than
        min_context_amount on limit or error.
    """
    answer_list = []
    try:
        subreddit = r.subreddit('AskReddit')
        comments_seen = 0
        for submission in subreddit.stream.submissions():
            submission.comments.replace_more(limit=None)
            for comment in submission.comments:
                comments_seen += 1
                if comments_seen >= comment_limit:
                    return {word: answer_list}
                if exact_word_match(word, comment.body):
                    answer_list.append(comment.body)
                if len(answer_list) >= min_context_amount:
                    return {word: answer_list}
    except Exception as e:
        # Best-effort: report the API/network failure and return what we have.
        print(e)
    return {word: answer_list}
def find_context_fast(word, min_context_amount=5):
    """
    Return contexts for a word after a fast traverse of submission titles.

    Args:
        word (str): The word that needs context
        min_context_amount (int): maximum number of contexts to collect

    Returns:
        (dict) : {word (str): context (list)} the resulting pair of
        word:contexts, or None when the search request itself failed.
    """
    answer_list = []
    try:
        # Note: only titles are matched here; comments are never fetched,
        # which is what makes this the "fast" variant.
        for submission in r.subreddit('all').search(word):
            if exact_word_match(word, submission.title):
                answer_list.append(submission.title)
            if len(answer_list) >= min_context_amount:
                return {word: answer_list}
    except Exception as e:
        print(traceback.format_exc())
        return None
    return {word: answer_list}
def find_context_long(word, min_context_amount=5, comment_depth=100):
    """
    Return contexts for a word after a long traverse of submission titles
    and their comments.

    Args:
        word (str): The word that needs context
        min_context_amount (int): maximum number of contexts to collect
        comment_depth (int): Maximum comments inspected per submission

    Returns:
        (dict) : {word (str): context (list)} the resulting pair of
        word:contexts; possibly shorter than min_context_amount on error.
    """
    answer_list = []
    try:
        for submission in r.subreddit('all').search(word):
            # The depth budget is per submission, so every result gets a
            # fair share of comment scanning.
            comments_seen = 0
            if len(answer_list) >= min_context_amount:
                return {word: answer_list}
            if exact_word_match(word, submission.title):
                answer_list.append(submission.title)
            # limit=0 drops the "load more comments" stubs instead of
            # fetching them, keeping the traversal bounded.
            submission.comments.replace_more(limit=0)
            for comment in submission.comments:
                comments_seen += 1
                if comments_seen >= comment_depth:
                    break
                if exact_word_match(word, comment.body):
                    answer_list.append(comment.body)
                if len(answer_list) >= min_context_amount:
                    return {word: answer_list}
    except Exception as e:
        print(traceback.format_exc())
    return {word: answer_list}
| 30.06015 | 82 | 0.598299 | 451 | 3,998 | 5.101996 | 0.21286 | 0.108648 | 0.079096 | 0.095611 | 0.802694 | 0.769665 | 0.762712 | 0.706215 | 0.706215 | 0.694481 | 0 | 0.012891 | 0.32091 | 3,998 | 132 | 83 | 30.287879 | 0.834622 | 0.158829 | 0 | 0.775 | 0 | 0 | 0.033455 | 0.008212 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.0125 | 0.0375 | 0 | 0.2375 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
db696f2dafc0fcdee36b5fe657f76e3a620b8ca3 | 32,550 | py | Python | src/Fonctions/Menu.py | ProgBiss/Sudoku | d583e2f389dc08af4a907d81ce7d6adfe2e12211 | [
"MIT"
] | null | null | null | src/Fonctions/Menu.py | ProgBiss/Sudoku | d583e2f389dc08af4a907d81ce7d6adfe2e12211 | [
"MIT"
] | null | null | null | src/Fonctions/Menu.py | ProgBiss/Sudoku | d583e2f389dc08af4a907d81ce7d6adfe2e12211 | [
"MIT"
] | null | null | null | '''
Module qui contient tous les menus du jeu.
'''
from Colorama.colorama import *
#Fore: BLACK, RED, YELLOW, GREEN, BLUE, MAGENTA, CYAN, WHITE, RESET.
#Back: BLACK, RED, YELLOW, GREEN, BLUE, MAGENTA, CYAN, WHITE, RESET.
#Style: DIM, NORMAL, BRIGHT, RESET_ALL
#print(Fore.RESET + Back.RESET + Style.RESET_ALL)
def AfficherMenuPrincipal(int_selection): # Function for the main menu
    """
    Draw the main-menu screen.

    int_selection highlights the active entry:
    0 = Jouer, 1 = Reglements, 2 = High Scores, 3 = Credits, 4 = Quitter.

    Each string is printed with end = "" and appears to be exactly one
    console row wide, so the layout relies on the console wrapping every
    string onto its own line -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        █", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    # Row 1 of the menu: Jouer / Reglements
    if int_selection == 0:
        print(Fore.GREEN+"█"+Fore.RESET+"                              > Jouer <                  Règlements            "+Fore.GREEN+"█", end = "")
    elif int_selection == 1:
        print(Fore.GREEN+"█"+Fore.RESET+"                                Jouer                  > Règlements <          "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                Jouer                    Règlements            "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    # Row 2 of the menu: High Scores / Credits
    if int_selection == 2:
        print(Fore.GREEN+"█"+Fore.RESET+"                           > High Scores <                 Crédits             "+Fore.GREEN+"█", end = "")
    elif int_selection == 3:
        print(Fore.GREEN+"█"+Fore.RESET+"                             High Scores                 > Crédits <           "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                             High Scores                   Crédits             "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    # Row 3 of the menu: Quitter
    if int_selection == 4:
        print(Fore.GREEN+"█"+Fore.RESET+"                                          > Quitter <                          "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                            Quitter                            "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    # Footer: keyboard controls reminder
    print(Fore.GREEN+"█"+Fore.RESET+"     Contrôles :    - Entrée/Espace : Sélectionner                             "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                    - Flèches Haut/Bas : Déplacements                          "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                    - Échap : Retour/Quitter                                   "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "")
def AfficherMenuJouer(int_selection): # Function for the game-selection menu
    """
    Draw the play-menu screen.

    int_selection highlights the active entry:
    0 = Nouvelle partie, 1 = Reprendre partie, 2 = Retour.

    Strings are printed with end = "" and appear to be one console row wide
    each (layout relies on console wrapping) -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 0:
        print(Fore.GREEN+"█"+Fore.RESET+"                                > Nouvelle partie <                            "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                  Nouvelle partie                              "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 1:
        print(Fore.GREEN+"█"+Fore.RESET+"                                > Reprendre partie <                           "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                  Reprendre partie                             "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 2:
        print(Fore.GREEN+"█"+Fore.RESET+"                                      > Retour <                               "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                        Retour                                 "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "")
def AfficherMenuCredits(): # Function for the menu that shows the credits
    """
    Draw the static credits screen (project, authors, Colorama attribution).

    Strings are printed with end = "" and appear to be one console row wide
    each (layout relies on console wrapping) -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                              Projet session                                   "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                              Jeu Sudoku                                       "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"              Librairie externe Colorama par Jonathan Hartley 2013             "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                    Par Jessee Lefebvre et Nicolas Bisson                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                                Automne 2014                                   "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                  Appuyez sur une touche pour continuer...                     "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "")
def AfficherMenuQuitter(): # Function that shows the farewell screen when quitting
    """
    Draw the static goodbye screen shown when the player quits the game.

    Strings are printed with end = "" and appear to be one console row wide
    each (layout relies on console wrapping) -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        █", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      █", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      █", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      █", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                             Merci d'avoir jouer!                              "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                          Au revoir et à la prochaine!                         "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                  Appuyez sur une touche pour continuer...                     "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "")
def AfficherMenuDifficulte(int_selection): # Function that shows the difficulty-selection menu
    """
    Draw the difficulty-selection screen.

    int_selection highlights the active entry:
    0 = Debutant, 1 = Intermediaire, 2 = Difficile, 3 = Expert, 4 = Retour.

    Strings are printed with end = "" and appear to be one console row wide
    each (layout relies on console wrapping) -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        █", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      █", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      █", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      █", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 0:
        print(Fore.GREEN+"█"+Fore.RESET+"                                  > Débutant <                                 "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                    Débutant                                   "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 1:
        print(Fore.GREEN+"█"+Fore.RESET+"                               > Intermédiaire <                               "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                 Intermédiaire                                 "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 2:
        print(Fore.GREEN+"█"+Fore.RESET+"                                 > Difficile <                                 "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                   Difficile                                   "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 3:
        print(Fore.GREEN+"█"+Fore.RESET+"                                   > Expert <                                  "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                     Expert                                    "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 4:
        print(Fore.GREEN+"█"+Fore.RESET+"                                   > Retour <                                  "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                     Retour                                    "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "")
def AfficherMenuReglements(): # Function that shows the rules screen
    """
    Draw the static rules screen (Sudoku rules: no duplicate digit in a
    column, row or square).

    Strings are printed with end = "" and appear to be one console row wide
    each (layout relies on console wrapping) -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        █", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      █", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      █", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      █", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                                  Règlements                                   "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                Pas 2 chiffres identiques dans la même colonne                 "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                 Pas 2 chiffres identiques dans la même rangée                 "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                 Pas 2 chiffres identiques dans le même carré                  "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                   Appuyez sur une touche pour continuer...                    "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "")
def AfficherMenuValidation(int_selection): # Confirmation menu shown when quitting a game in progress
    """
    Draw the save-before-quit confirmation screen.

    int_selection highlights the active entry:
    0 = Oui, 1 = Non, 2 = Annuler.

    Strings are printed with end = "" and appear to be one console row wide
    each (layout relies on console wrapping) -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                 Souhaitez-vous sauvegarder avant de quitter?                  "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 0:
        print(Fore.GREEN+"█"+Fore.RESET+"                                    > Oui <                                    "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                      Oui                                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 1:
        print(Fore.GREEN+"█"+Fore.RESET+"                                    > Non <                                    "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                      Non                                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 2:
        print(Fore.GREEN+"█"+Fore.RESET+"                                  > Annuler <                                  "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                    Annuler                                    "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "")
# Menu asking which save slot to write to or load from, depending on the
# selected action, when the player saves or loads a game.
def AfficherMenuPartie(int_selection, int_action): # save or load a game
    """
    Draw the save-slot selection screen.

    int_action: 0 shows the "save to which slot?" prompt, any other value
    shows the "load which slot?" prompt.
    int_selection highlights the active slot: 0-2 = Sauvegarde 1-3.

    Strings are printed with end = "" and appear to be one console row wide
    each (layout relies on console wrapping) -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_action == 0:
        print(Fore.GREEN+"█"+Fore.RESET+"               Dans quelle sauvegarde souhaitez-vous sauvegarder?              "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                   Quelle sauvegarde souhaitez-vous charger?                   "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 0:
        print(Fore.GREEN+"█"+Fore.RESET+"                                > Sauvegarde 1 <                               "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                  Sauvegarde 1                                 "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 1:
        print(Fore.GREEN+"█"+Fore.RESET+"                                > Sauvegarde 2 <                               "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                  Sauvegarde 2                                 "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    if int_selection == 2:
        print(Fore.GREEN+"█"+Fore.RESET+"                                > Sauvegarde 3 <                               "+Fore.GREEN+"█", end = "")
    else:
        print(Fore.GREEN+"█"+Fore.RESET+"                                  Sauvegarde 3                                 "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "")
def AfficherMenuPartieInexistante(): # Screen shown when loading a save slot that does not exist
    """
    Draw the static "no saved game in this slot" screen.

    Strings are printed with end = "" and appear to be one console row wide
    each (layout relies on console wrapping) -- do not alter string widths.
    """
    print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████ ", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                  ▄▄▄▄                                                        █", end = "")
    print(Fore.GREEN+"█                █      █   █  █▀▀▀▄  ▄▀▀▀▄  █  ▄▀  █   █                      █", end = "")
    print(Fore.GREEN+"█                 ▀▄▄▄  █   █  █    █ █   █  █▄▀    █   █                      █", end = "")
    print(Fore.GREEN+"█                     █ █   █  █    █ █   █  █▀▄    █   █                      █", end = "")
    print(Fore.GREEN+"█                ▄▄▄▄▀   ▀▄▄▄▀ █▄▄▄▀  ▀▄▄▄▀  █  ▀▄  ▀▄▄▄▀                      █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                            Il n'y a pas de partie                             "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                         Veuillez en charger une autre                         "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█"+Fore.RESET+"                  Appuyez sur une touche pour continuer...                     "+Fore.GREEN+"█", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
    print(Fore.GREEN+"█                                                                              █", end = "")
print(Fore.GREEN+" ██████████████████████████████████████████████████████████████████████████████", end = "") | 101.086957 | 155 | 0.241536 | 2,525 | 32,550 | 4.112871 | 0.067723 | 0.284256 | 0.298507 | 0.327877 | 0.899759 | 0.888974 | 0.881078 | 0.871738 | 0.871738 | 0.860664 | 0 | 0.002679 | 0.57576 | 32,550 | 322 | 156 | 101.086957 | 0.564632 | 0.027404 | 0 | 0.778157 | 0 | 0 | 0.619243 | 0.044378 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030717 | false | 0 | 0.003413 | 0 | 0.03413 | 0.836177 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 14 |
db6f460632f406b96f5899e159ccdb5f809acbc2 | 104,620 | py | Python | Project 1.py | SpeedyE1780/Battleship | 8dd44786c742e2a0021087ec5c5578865b0c79d1 | [
"MIT"
] | null | null | null | Project 1.py | SpeedyE1780/Battleship | 8dd44786c742e2a0021087ec5c5578865b0c79d1 | [
"MIT"
] | null | null | null | Project 1.py | SpeedyE1780/Battleship | 8dd44786c742e2a0021087ec5c5578865b0c79d1 | [
"MIT"
] | null | null | null | from tkinter import *
import tkinter.messagebox
from tkinter.ttk import Combobox
import random
import tkinter.simpledialog
from tkinter.filedialog import asksaveasfilename , askopenfilename
from winsound import *
import pygame.mixer
pygame.init()
class BattleShip:
    def __init__(Self):
        """
        Build the whole game: load sounds, create the Tk root window, the
        menu bar, the main-menu frame with its widgets, the (initially
        hidden) game frame, and every per-player state structure. Ends by
        entering the Tk main loop, so this constructor blocks until the
        window is closed.
        """
        pygame.mixer.music.load("Sounds/Background.wav")
        Self.hit_sound = pygame.mixer.Sound("Sounds/Hit.wav")
        Self.miss_sound = pygame.mixer.Sound("Sounds/Miss.wav")
        ##Initial Window
        Self.Window = Tk()
        Self.Window.resizable(False , False)
        Self.Window.title("BATTLESHIP!")
        ##Create Menu
        Menubar = Menu(Self.Window)
        Self.Window.config(menu = Menubar)
        FileMenu = Menu(Menubar , tearoff = 0)
        Menubar.add_cascade(label = "File" , menu =FileMenu)
        FileMenu.add_command(label = "Open" , command = Self.OpenSaveGame)
        FileMenu.add_command(label = "Save" , command = Self.SaveGame)
        FileMenu.add_separator()
        FileMenu.add_command(label = "Help" , command = Self.showHelp)
        FileMenu.add_separator()
        FileMenu.add_command(label = "Exit" , command = exit)
        ##Main Menu Frame
        Self.MainMenu = Frame(Self.Window)
        Self.MainMenu.pack()
        Self.MM = PhotoImage(file = "Images/BattleShip_Background.gif")
        Self.MainMenuBG = Label(Self.MainMenu , image = Self.MM)
        Self.MainMenuBG.pack()
        Self.StartGamebtn = Button(Self.MainMenu , text = "Start" ,
                                   command = Self.start)
        Self.StartGamebtn.pack()
        Self.nbPlayers = IntVar()
        Self.Player1RB = Radiobutton(Self.MainMenu , text = "1 Player " ,
                                     variable = Self.nbPlayers , value = 1)
        Self.Player1RB.pack()
        Self.Player2RB = Radiobutton(Self.MainMenu , text = "2 Player " ,
                                     variable = Self.nbPlayers , value = 2)
        Self.Player2RB.pack()
        # NOTE(review): Self.SFX is a plain bool, not a tkinter BooleanVar,
        # so the Checkbutton's variable option cannot update it -- confirm
        # that playSFX toggles Self.SFX manually.
        Self.SFX = False
        Self.SoundCHK = Checkbutton(Self.MainMenu , text = "Sound Effects" , variable = Self.SFX , command = Self.playSFX)
        Self.SoundCHK.pack()
        ##Game Frame
        Self.GameFrame = Frame(Self.Window)
        Self.Ships = ("Carrier" , "Battleship" , "Cruiser" , "Submarine" , "Destroyer")
        Self.Orient = ("Vertical" , "Horizontal")
        """
        Ships Position on the grid:
        Opponent Miss = 9
        Empty Space = 0
        Damaged Ship = 1
        Carrier = 2
        Battleship = 3
        Cruiser = 4
        Submarine = 5
        Destroyer = 6
        """
        # 10x10 ship grids (one per player and one for the computer) plus
        # the remaining hit points of each individual ship.
        Self.player1_ship = []
        Self.player1_Carrierlife = 5
        Self.player1_Battleshiplife = 4
        Self.player1_Cruiserlife = 3
        Self.player1_Submarinelife = 3
        Self.player1_Destroyerlife = 2
        Self.player2_ship = []
        Self.player2_Carrierlife = 5
        Self.player2_Battleshiplife = 4
        Self.player2_Cruiserlife = 3
        Self.player2_Submarinelife = 3
        Self.player2_Destroyerlife = 2
        Self.computer_ship = []
        Self.computer_Carrierlife = 5
        Self.computer_Battleshiplife = 4
        Self.computer_Cruiserlife = 3
        Self.computer_Submarinelife = 3
        Self.computer_Destroyerlife = 2
        for i in range(0 , 10):
            Self.player1_ship.append([0])
            Self.player2_ship.append([0])
            Self.computer_ship.append([0])
            for j in range(0 , 9):
                Self.player1_ship[i].append(0)
                Self.player2_ship[i].append(0)
                Self.computer_ship[i].append(0)
        """
        Hits Position on the grid:
        No Hit = 0
        Hit = 1
        Miss = 2
        """
        # 10x10 grids tracking each side's shots at the opponent.
        Self.player1_hits = []
        Self.player2_hits = []
        Self.computer_hits = []
        for i in range(0 , 10):
            Self.player1_hits.append([0])
            Self.player2_hits.append([0])
            Self.computer_hits.append([0])
            for j in range(0 , 9):
                Self.player1_hits[i].append(0)
                Self.player2_hits[i].append(0)
                Self.computer_hits[i].append(0)
        ##The total number of hits needed to destroy every ship: 5 + 4 + 3 + 3 + 2 = 17
        Self.player1life = 17
        Self.player2life = 17
        Self.computerlife = 17
        Self.P1name = ""
        Self.P1password = ""
        Self.P2name = ""
        Self.P2password = ""
        # Guards so a game can only be opened before it starts and only
        # saved once one is in progress.
        Self.canOpen = True
        Self.canSave = False
        Self.sMainMenu()
        Self.Window.mainloop()
def OpenSaveGame(Self):
    """Load a saved game chosen via a file dialog and rebuild the battle UI.

    Save layout ("/"-separated, written by SaveGame): [0] "Battleship" tag,
    [1] player count, [2] player1 ship grid (100 digits), [3] player1 hit
    grid (100 digits), [4]-[9] player1 lives, [10]/[11] opponent (computer
    or player2) ship/hit grids, [12]-[17] opponent lives, and for 2-player
    saves [18]-[21] the two names and passwords.

    Grid symbols (see the board legend near the grids' construction):
    "X" = intact ship (codes 2-6), "*" = damaged ship (1),
    "O" = opponent miss (9), blank = empty water (0).

    NOTE(review): indentation reconstructed from a flattened source;
    confirm the nesting against the original file.
    """
    if Self.canOpen:
        try:
            filepath = askopenfilename()
            ##NOTE(review): the file handle is opened but never closed here.
            file = open(filepath , "r")
            Content = file.read()
            Content = Content.split("/")
            ##Check that it's a savefile
            if Content[0] == "Battleship":
                Self.MainMenu.pack_forget()
                Self.GameFrame.pack()
                Self.canOpen = False
                ##NOTE(review): the saved player count is parsed here but
                ##Self.nbPlayers is not updated, while SaveGame reads
                ##Self.nbPlayers - verify a reloaded game re-saves in the
                ##right mode.
                if int(Content[1]) == 1:
                    ##Player 1 Data
                    Self.PlayerFrame = Frame(Self.GameFrame)
                    Self.PlayerFrame.pack()
                    ##Player Ships Frame
                    Self.PlayerShips = Frame(Self.PlayerFrame)
                    Self.PlayerShips.pack()
                    label = Label(Self.PlayerShips , text = "Player Ships")
                    label.grid(row = 0 , column = 0)
                    ##Place the column coordinates
                    for i in range(1 , 11):
                        label = Label(Self.PlayerShips , text = str(i))
                        label.grid(row = 1 , column = i + 1)
                    ##Place the row coordinates
                    lrow = 1
                    for i in range(ord("A") , (ord("A") + 10)):
                        label = Label(Self.PlayerShips , text = chr(i))
                        label.grid(row = lrow + 1 , column = 1)
                        lrow += 1
                    ##Player Hits Frame
                    Self.PlayerHitsFrame = Frame(Self.PlayerFrame)
                    Self.PlayerHitsFrame.pack()
                    label = Label(Self.PlayerHitsFrame , text = "Player Hits")
                    label.grid( row = 0 , column = 0)
                    ##Place the column coordinates
                    for i in range(1 , 11):
                        label = Label(Self.PlayerHitsFrame , text = str(i))
                        label.grid(row = 1 , column = i + 1)
                    ##Place the row coordinates
                    lrow = 1
                    for i in range(ord("A") , (ord("A") + 10)):
                        label = Label(Self.PlayerHitsFrame , text = chr(i))
                        label.grid(row = lrow + 1 , column = 1)
                        lrow += 1
                    ##Fill The Player's variable
                    ##counter walks the 100-digit grid strings in row-major order
                    counter = 0
                    for i in range(0 , len(Self.player1_hits)):
                        for j in range(0 , len(Self.player1_hits[0])):
                            Self.player1_ship[i][j] = int(Content[2][counter])
                            if Self.player1_ship[i][j] > 1 and Self.player1_ship[i][j] <9:
                                label = Label(Self.PlayerShips , text = "X")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player1_ship[i][j] == 1:
                                label = Label(Self.PlayerShips , text = "*")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player1_ship[i][j] == 9:
                                label = Label(Self.PlayerShips , text = "O")
                                label.grid(row = i + 2 , column = j + 2)
                            else:
                                label = Label(Self.PlayerShips , text = " ")
                                label.grid(row = i + 2 , column = j + 2)
                            Self.player1_hits[i][j] = int(Content[3][counter])
                            if Self.player1_hits[i][j] == 1:
                                label = Label(Self.PlayerHitsFrame , text = "X")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player1_hits[i][j] == 2:
                                label = Label(Self.PlayerHitsFrame , text = "O")
                                label.grid(row = i + 2 , column = j + 2)
                            else:
                                label = Label(Self.PlayerHitsFrame , text = " ")
                                label.grid(row = i + 2 , column = j + 2)
                            counter += 1
                    ##Enter Hit Coordinates
                    Self.rowcoordinate = Label(Self.PlayerFrame , text = "Row")
                    Self.rowcoordinate.pack()
                    Self.rowhit = StringVar()
                    Self.rowentry = Entry(Self.PlayerFrame , textvariable = Self.rowhit)
                    Self.rowentry.pack()
                    Self.columncoordinate = Label(Self.PlayerFrame , text = "Column")
                    Self.columncoordinate.pack()
                    Self.columnhit = StringVar()
                    Self.columnentry = Entry(Self.PlayerFrame , textvariable = Self.columnhit)
                    Self.columnentry.pack()
                    Self.hitbutton = Button(Self.PlayerFrame , text = "HIT!" , command = Self.playerhit)
                    Self.hitbutton.pack()
                    ##Player Life
                    Self.player1life = int(Content[4])
                    Self.player1_Carrierlife = int(Content[5])
                    Self.player1_Battleshiplife = int(Content[6])
                    Self.player1_Cruiserlife = int(Content[7])
                    Self.player1_Submarinelife = int(Content[8])
                    Self.player1_Destroyerlife = int(Content[9])
                    ##Computer Data
                    ##Computer Ships and Hits (no widgets: the computer's board is hidden)
                    counter = 0
                    for i in range(0 , len(Self.computer_ship)):
                        for j in range(0 , len(Self.computer_ship[0])):
                            Self.computer_ship[i][j] = int(Content[10][counter])
                            Self.computer_hits[i][j] = int(Content [11][counter])
                            counter += 1
                    ##Computer Life
                    Self.computerlife = int(Content [12])
                    Self.computer_Carrierlife = int(Content[13])
                    Self.computer_Battleshiplife = int(Content[14])
                    Self.computer_Cruiserlife = int(Content[15])
                    Self.computer_Submarinelife = int(Content[16])
                    Self.computer_Destroyerlife = int(Content[17])
                else:
                    ##Player 1 Data (frame is built but only packed after the password check)
                    Self.Player1Frame = Frame(Self.GameFrame)
                    ##Player 1 Info
                    Self.P1name = Content[18]
                    Self.P1password = Content[19]
                    ##Player1 Ships Frame
                    Self.Player1Ships = Frame(Self.Player1Frame)
                    Self.Player1Ships.pack()
                    label = Label(Self.Player1Ships , text = Self.P1name + "'s Ships: ")
                    label.grid(row = 0 , column = 0)
                    ##Place the column coordinates
                    for i in range(1 , 11):
                        label = Label(Self.Player1Ships , text = str(i))
                        label.grid(row = 1 , column = i + 1)
                    ##Place the row coordinates
                    lrow = 1
                    for i in range(ord("A") , (ord("A") + 10)):
                        label = Label(Self.Player1Ships , text = chr(i))
                        label.grid(row = lrow + 1 , column = 1)
                        lrow += 1
                    ##Player Hits Frame
                    Self.Player1HitsFrame = Frame(Self.Player1Frame)
                    Self.Player1HitsFrame.pack()
                    label = Label(Self.Player1HitsFrame , text = Self.P1name + "'s Hits:")
                    label.grid( row = 0 , column = 0)
                    ##Place the column coordinates
                    for i in range(1 , 11):
                        label = Label(Self.Player1HitsFrame , text = str(i))
                        label.grid(row = 1 , column = i + 1)
                    ##Place the row coordinates
                    lrow = 1
                    for i in range(ord("A") , (ord("A") + 10)):
                        label = Label(Self.Player1HitsFrame , text = chr(i))
                        label.grid(row = lrow + 1 , column = 1)
                        lrow += 1
                    ##Fill The Player's variable
                    counter = 0
                    for i in range(0 , len(Self.player1_hits)):
                        for j in range(0 , len(Self.player1_hits[0])):
                            Self.player1_ship[i][j] = int(Content[2][counter])
                            if Self.player1_ship[i][j] > 1 and Self.player1_ship[i][j] < 9:
                                label = Label(Self.Player1Ships , text = "X")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player1_ship[i][j] == 1:
                                label = Label(Self.Player1Ships , text = "*")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player1_ship[i][j] == 9:
                                label = Label(Self.Player1Ships , text = "O")
                                label.grid(row = i + 2 , column = j + 2)
                            else:
                                label = Label(Self.Player1Ships , text = " ")
                                label.grid(row = i + 2 , column = j + 2)
                            Self.player1_hits[i][j] = int(Content[3][counter])
                            if Self.player1_hits[i][j] == 1:
                                label = Label(Self.Player1HitsFrame , text = "X")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player1_hits[i][j] == 2:
                                label = Label(Self.Player1HitsFrame , text = "O")
                                label.grid(row = i + 2 , column = j + 2)
                            else:
                                label = Label(Self.Player1HitsFrame , text = " ")
                                label.grid(row = i + 2 , column = j + 2)
                            counter += 1
                    ##Enter Hit Coordinates
                    Self.rowcoordinate = Label(Self.Player1Frame , text = "Row")
                    Self.rowcoordinate.pack()
                    Self.P1rowhit = StringVar()
                    Self.rowentry = Entry(Self.Player1Frame , textvariable = Self.P1rowhit)
                    Self.rowentry.pack()
                    Self.columncoordinate = Label(Self.Player1Frame , text = "Column")
                    Self.columncoordinate.pack()
                    Self.P1columnhit = StringVar()
                    Self.columnentry = Entry(Self.Player1Frame , textvariable = Self.P1columnhit)
                    Self.columnentry.pack()
                    Self.hitbutton = Button(Self.Player1Frame , text = "HIT!" , command = Self.player1hit)
                    Self.hitbutton.pack()
                    ##Player Life
                    Self.player1life = int(Content[4])
                    Self.player1_Carrierlife = int(Content[5])
                    Self.player1_Battleshiplife = int(Content[6])
                    Self.player1_Cruiserlife = int(Content[7])
                    Self.player1_Submarinelife = int(Content[8])
                    Self.player1_Destroyerlife = int(Content[9])
                    ##Player 2 Data
                    Self.Player2Frame = Frame(Self.GameFrame)
                    ##Player 2 Info
                    Self.P2name = Content[20]
                    Self.P2password = Content[21]
                    ##Player2 Ships Frame
                    Self.Player2Ships = Frame(Self.Player2Frame)
                    Self.Player2Ships.pack()
                    label = Label(Self.Player2Ships , text = Self.P2name + "'s Ships:")
                    label.grid(row = 0 , column = 0)
                    ##Place the column coordinates
                    for i in range(1 , 11):
                        label = Label(Self.Player2Ships , text = str(i))
                        label.grid(row = 1 , column = i + 1)
                    ##Place the row coordinates
                    lrow = 1
                    for i in range(ord("A") , (ord("A") + 10)):
                        label = Label(Self.Player2Ships , text = chr(i))
                        label.grid(row = lrow + 1 , column = 1)
                        lrow += 1
                    ##Player Hits Frame
                    Self.Player2HitsFrame = Frame(Self.Player2Frame)
                    Self.Player2HitsFrame.pack()
                    label = Label(Self.Player2HitsFrame , text = Self.P2name + "'s Hits")
                    label.grid( row = 0 , column = 0)
                    ##Place the column coordinates
                    for i in range(1 , 11):
                        label = Label(Self.Player2HitsFrame , text = str(i))
                        label.grid(row = 1 , column = i + 1)
                    ##Place the row coordinates
                    lrow = 1
                    for i in range(ord("A") , (ord("A") + 10)):
                        label = Label(Self.Player2HitsFrame , text = chr(i))
                        label.grid(row = lrow + 1 , column = 1)
                        lrow += 1
                    ##Fill The Player's variable
                    counter = 0
                    for i in range(0 , len(Self.player2_hits)):
                        for j in range(0 , len(Self.player2_hits[0])):
                            Self.player2_ship[i][j] = int(Content[10][counter])
                            if Self.player2_ship[i][j] > 1 and Self.player2_ship[i][j] < 9:
                                label = Label(Self.Player2Ships , text = "X")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player2_ship[i][j] == 1:
                                label = Label(Self.Player2Ships , text = "*")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player2_ship[i][j] == 9:
                                label = Label(Self.Player2Ships , text = "O")
                                label.grid(row = i + 2 , column = j + 2)
                            else:
                                label = Label(Self.Player2Ships , text = " ")
                                label.grid(row = i + 2 , column = j + 2)
                            Self.player2_hits[i][j] = int(Content[11][counter])
                            if Self.player2_hits[i][j] == 1:
                                label = Label(Self.Player2HitsFrame , text = "X")
                                label.grid(row = i + 2 , column = j + 2)
                            elif Self.player2_hits[i][j] == 2:
                                label = Label(Self.Player2HitsFrame , text = "O")
                                label.grid(row = i + 2 , column = j + 2)
                            else:
                                label = Label(Self.Player2HitsFrame , text = " ")
                                label.grid(row = i + 2 , column = j + 2)
                            counter += 1
                    ##Enter Hit Coordinates
                    Self.rowcoordinate = Label(Self.Player2Frame , text = "Row")
                    Self.rowcoordinate.pack()
                    Self.P2rowhit = StringVar()
                    Self.rowentry = Entry(Self.Player2Frame , textvariable = Self.P2rowhit)
                    Self.rowentry.pack()
                    Self.columncoordinate = Label(Self.Player2Frame , text = "Column")
                    Self.columncoordinate.pack()
                    Self.P2columnhit = StringVar()
                    Self.columnentry = Entry(Self.Player2Frame , textvariable = Self.P2columnhit)
                    Self.columnentry.pack()
                    Self.hitbutton = Button(Self.Player2Frame , text = "HIT!" , command = Self.player2hit)
                    Self.hitbutton.pack()
                    ##Player Life
                    Self.player2life = int(Content[12])
                    Self.player2_Carrierlife = int(Content[13])
                    Self.player2_Battleshiplife = int(Content[14])
                    Self.player2_Cruiserlife = int(Content[15])
                    Self.player2_Submarinelife = int(Content[16])
                    Self.player2_Destroyerlife = int(Content[17])
                    ##Player 1 Enters his password to resume the game
                    Self.password = tkinter.simpledialog.askstring("Password" , Self.P1name + " Enter Your Password:")
                    while not Self.password == Self.P1password:
                        tkinter.messagebox.showerror("Wrong Password" , "Enter Your Password")
                        Self.password = tkinter.simpledialog.askstring("Password" , Self.P1name + " Enter Your Password:")
                    Self.Player1Frame.pack()
            else:
                tkinter.messagebox.showerror("Invalid Save File" , "Please Choose A Battleship Save File")
        except FileNotFoundError:
            ##askopenfilename() returns "" on cancel; open("") raises FileNotFoundError
            tkinter.messagebox.showerror("Can't Open File" , "No File Selected")
    else:
        tkinter.messagebox.showerror("Can't Open File" , "You Must be at main menu to open file")
def SaveGame(Self):
    """Serialize the current game to a "/"-separated text file.

    Layout: "Battleship" tag / player count / player1 ship grid (100 digits)
    / player1 hit grid (100 digits) / player1 lives (6 fields) / opponent
    ship grid / opponent hit grid / opponent lives (6 fields), plus the two
    names and passwords for 2-player games. OpenSaveGame parses this format.
    """
    if Self.canSave:
        filepath = asksaveasfilename()
        ##The dialog returns "" when the user cancels; open("") would raise
        ##an uncaught FileNotFoundError, so bail out with a message instead.
        if not filepath:
            tkinter.messagebox.showerror("Error Saving" , "No File Selected")
            return
        nb = Self.nbPlayers.get()
        ##"with" guarantees the handle is flushed and closed (the original
        ##never closed the file, so a partially-buffered save could be lost)
        with open(filepath , "w") as file:
            ##Tag to check later that it's a save file
            file.write("Battleship/")
            file.write(str(nb) + "/")
            ##Save The Player Ships & Hits
            for i in range(0 , len(Self.player1_ship)):
                for j in range(0 , len(Self.player1_ship[0])):
                    file.write(str(Self.player1_ship[i][j]))
            file.write("/")
            for i in range(0 , len(Self.player1_hits)):
                for j in range(0 , len(Self.player1_hits[0])):
                    file.write(str(Self.player1_hits[i][j]))
            file.write("/")
            ##Save The Player's Life
            file.write(str(Self.player1life) + "/")
            file.write(str(Self.player1_Carrierlife) + "/")
            file.write(str(Self.player1_Battleshiplife) + "/")
            file.write(str(Self.player1_Cruiserlife) + "/")
            file.write(str(Self.player1_Submarinelife) + "/")
            file.write(str(Self.player1_Destroyerlife) + "/")
            if nb == 1:
                ##Save The Computer Ships & Hits
                for i in range(0 , len(Self.computer_ship)):
                    for j in range(0 , len(Self.computer_ship[0])):
                        file.write(str(Self.computer_ship[i][j]))
                file.write("/")
                for i in range(0 , len(Self.computer_hits)):
                    for j in range(0 , len(Self.computer_hits[0])):
                        file.write(str(Self.computer_hits[i][j]))
                file.write("/")
                ##Save The Computer's Life
                file.write(str(Self.computerlife) + "/")
                file.write(str(Self.computer_Carrierlife) + "/")
                file.write(str(Self.computer_Battleshiplife) + "/")
                file.write(str(Self.computer_Cruiserlife) + "/")
                file.write(str(Self.computer_Submarinelife) + "/")
                file.write(str(Self.computer_Destroyerlife))
            else:
                ##Save The Player Ships & Hits
                for i in range(0 , len(Self.player2_ship)):
                    for j in range(0 , len(Self.player2_ship[0])):
                        file.write(str(Self.player2_ship[i][j]))
                file.write("/")
                for i in range(0 , len(Self.player2_hits)):
                    for j in range(0 , len(Self.player2_hits[0])):
                        file.write(str(Self.player2_hits[i][j]))
                file.write("/")
                ##Save The Player's Life
                file.write(str(Self.player2life) + "/")
                file.write(str(Self.player2_Carrierlife) + "/")
                file.write(str(Self.player2_Battleshiplife) + "/")
                file.write(str(Self.player2_Cruiserlife) + "/")
                file.write(str(Self.player2_Submarinelife) + "/")
                file.write(str(Self.player2_Destroyerlife) + "/")
                ##Save Player 1 name and password
                file.write(str(Self.P1name) + "/" + str(Self.P1password)+"/")
                ##Save Player 2 name and password
                file.write(str(Self.P2name) + "/" + str(Self.P2password))
    else:
        tkinter.messagebox.showerror("Error Saving" , "Cannot Save")
def showHelp(Self):
    """Show a context-sensitive help dialog for the current Self.Stage
    (1 = main menu, 2 = ship placement, anything else = battle).

    Fixes user-facing typos in the original text ("wether" -> "whether",
    "lifes" -> "lives").
    """
    if Self.Stage == 1:
        helptext = "This is the main menu choose whether to:\nStart a single player game\nStart a 2 player game\nContinue a saved game"
    elif Self.Stage == 2:
        helptext = "Place your ship the coordinate is for the first coordinate of the ship\nCarrier has 5 lives\nBattleship has 4 lives\nCruiser has 3 lives\nSubmarine has 3 lives\nDestroyer has 2 lives"
    else:
        helptext = "Choose a coordinate to hit\nYou can save the game by choosing File -> Save and save the file as a txt"
    tkinter.messagebox.showinfo("Help" , helptext)
def playSFX(Self):
    """Toggle sound effects on or off (bound to the Sound Effects checkbox).

    Flips Self.SFX, starts the background music looping forever when it
    becomes True, and stops it when it becomes False.
    """
    Self.SFX = not Self.SFX
    if Self.SFX:
        pygame.mixer.music.play(-1)
    else:
        pygame.mixer.music.stop()
##Go to main menu
def sMainMenu(Self):
    """Return to the main menu.

    Hides the game frame, shows the menu frame, and resets the flags so a
    save file may be opened again but the (now hidden) game cannot be saved.
    """
    Self.GameFrame.pack_forget()
    Self.MainMenu.pack()
    Self.canSave = False
    Self.canOpen = True
    Self.Stage = 1
##Start Game Mode
def start(Self):
    """Start a new game in the mode selected by the radio buttons.

    Resets every life counter and board grid for the chosen mode, then
    hands off to StartSinglePlayer or StartMultiPlayer. Shows an error and
    returns to stage 1 when no valid mode is selected.
    """
    Self.canOpen = False
    Self.Stage = 2
    ##Single Player
    if Self.nbPlayers.get() == 1:
        Self.MainMenu.pack_forget()
        Self.GameFrame = Frame(Self.Window)
        Self.GameFrame.pack()
        ##Reset the hits and ships and life
        Self.player1life = 17
        Self.player1_Carrierlife = 5
        Self.player1_Battleshiplife = 4
        Self.player1_Cruiserlife = 3
        Self.player1_Submarinelife = 3
        Self.player1_Destroyerlife = 2
        Self.computerlife = 17
        Self.computer_Carrierlife = 5
        Self.computer_Battleshiplife = 4
        Self.computer_Cruiserlife = 3
        Self.computer_Submarinelife = 3
        Self.computer_Destroyerlife = 2
        for i in range(0 , 10):
            for j in range(0 , 10):
                Self.player1_ship[i][j] = 0
                Self.computer_ship[i][j] = 0
                Self.player1_hits[i][j] = 0
                Self.computer_hits[i][j] = 0
        Self.StartSinglePlayer()
    ##Two Player
    elif Self.nbPlayers.get() == 2:
        Self.MainMenu.pack_forget()
        Self.GameFrame = Frame(Self.Window)
        Self.GameFrame.pack()
        ##Reset the hits and ships and life
        Self.player1life = 17
        Self.player1_Carrierlife = 5
        Self.player1_Battleshiplife = 4
        Self.player1_Cruiserlife = 3
        Self.player1_Submarinelife = 3
        Self.player1_Destroyerlife = 2
        Self.player2life = 17
        Self.player2_Carrierlife = 5
        Self.player2_Battleshiplife = 4
        Self.player2_Cruiserlife = 3
        Self.player2_Submarinelife = 3
        Self.player2_Destroyerlife = 2
        for i in range(0 , 10):
            for j in range(0 , 10):
                Self.player1_ship[i][j] = 0
                ##Bug fix: player2_ship was never cleared here (player2_hits
                ##was zeroed twice instead), leaking the previous game's board
                Self.player2_ship[i][j] = 0
                Self.player1_hits[i][j] = 0
                Self.player2_hits[i][j] = 0
        Self.P1name = ""
        Self.P1password = ""
        Self.P2name = ""
        Self.P2password = ""
        Self.StartMultiPlayer()
    ##Invalid Game Mode
    else:
        tkinter.messagebox.showerror("Invalid Game Mode" , "No Game Mode Selected")
        Self.canOpen = True
        Self.Stage = 1
##Start Single Player Mode
def StartSinglePlayer(Self):
    """Build the single-player ship-placement screen.

    Draws an empty labelled 10x10 board for the player's ships and shows
    the ship-selection combobox with all five ships still to place.
    """
    Self.PlayerFrame = Frame(Self.GameFrame)
    Self.PlayerFrame.pack()
    Self.PlayerShips = Frame(Self.PlayerFrame)
    Self.PlayerShips.pack()
    header = Label(Self.PlayerShips , text = "Player Ships")
    header.grid(row = 0 , column = 0)
    ##Column numbers 1-10 across the top
    for col in range(1 , 11):
        Label(Self.PlayerShips , text = str(col)).grid(row = 1 , column = col + 1)
    ##Row letters A-J down the side
    for offset in range(10):
        Label(Self.PlayerShips , text = chr(ord("A") + offset)).grid(row = offset + 2 , column = 1)
    ##Blank board cells
    for r in range(len(Self.player1_ship)):
        for c in range(len(Self.player1_ship[0])):
            Label(Self.PlayerShips , text = " ").grid(row = r + 2 , column = c + 2)
    Self.ShipsFrame = Frame(Self.PlayerFrame)
    Self.ShipsFrame.pack()
    ##Every ship is still unplaced at the start
    Self.PlacedShips = list(Self.Ships)
    Self.ShipSelection = Combobox(Self.ShipsFrame , values = Self.PlacedShips)
    Self.ShipSelection.set("Select Ship")
    Self.ShipSelection.grid(row = 1 , column = 1)
    Self.Selectbtn = Button(Self.ShipsFrame , text = "Select" , command = Self.getship)
    Self.Selectbtn.grid(row = 2 , column = 0)
##Get Ship
def getship(Self):
    """Read the chosen ship from the combobox and start coordinate entry.

    On a valid choice the ship is removed from the remaining-ships list,
    Self.CurrentShip is set to (name, size in cells), the coordinate
    widgets are shown, and the combobox is rebuilt without the taken ship.
    """
    ##Get Ship and remove it from the combobox
    Ship = Self.ShipSelection.get()
    ##Verify that a ship was selected
    if Ship == "Select Ship" or not Ship in Self.Ships:
        ##Bug fix: original message was garbled ("Please A Select Ship")
        tkinter.messagebox.showerror("No Ship Selected" , "Please Select A Ship")
    else:
        Self.PlacedShips.remove(Ship)
        Self.ShipsFrame.pack_forget()
        ##Ship lengths in board cells; anything not listed is the Destroyer (2)
        sizes = {"Carrier": 5 , "Battleship": 4 , "Cruiser": 3 , "Submarine": 3}
        Self.CurrentShip = (Ship , sizes.get(Ship , 2))
        Self.GetCoordinates()
        ##Create a new combobox without the selected ship
        Self.ShipSelection = Combobox(Self.ShipsFrame , values = Self.PlacedShips)
        Self.ShipSelection.set("Select Ship")
        Self.ShipSelection.grid(row = 1 , column = 1)
def GetCoordinates(Self):
    """Show the entry widgets for placing the current ship: row letter,
    column number, orientation combobox, and an Enter button that calls
    PlaceShip."""
    Self.positionFrame = Frame(Self.PlayerFrame)
    Self.positionFrame.pack()
    ##Row Entry
    Self.rowposition = StringVar()
    Self.rowLabel = Label(Self.positionFrame , text = "Row:")
    Self.rowEntry = Entry(Self.positionFrame , textvariable = Self.rowposition)
    ##Column Entry
    Self.columnposition = StringVar()
    Self.columnLabel = Label(Self.positionFrame , text = "Column:")
    Self.columnEntry = Entry(Self.positionFrame , textvariable = Self.columnposition)
    ##Get Button
    Self.getbtn = Button(Self.positionFrame , text = "Enter" , command = Self.PlaceShip)
    ##Orientation Combo Box
    Self.Orientation = Combobox(Self.positionFrame , values = Self.Orient)
    Self.Orientation.set("Choose Orientation")
    ##Lay the input widgets out left to right on one row, button underneath
    top_row = (Self.rowLabel , Self.rowEntry , Self.columnLabel , Self.columnEntry , Self.Orientation)
    for position , widget in enumerate(top_row):
        widget.grid(row = 0 , column = position)
    Self.getbtn.grid(row = 1 , column = 2)
def PlaceShip(Self):
    """Validate the entered row/column/orientation and place the current
    ship on player 1's board.

    Row is a letter A-J (either case), column a number 1-10; the span of
    CurrentShip[1] cells must fit on the 10x10 grid in the chosen
    orientation. On success the cells receive the ship's grid code, "X"
    labels are drawn, and either the next ship is offered or the battle
    starts when all five ships are placed.
    """
    ##Get the orientation
    Self.ShipOrientation = Self.Orientation.get()
    ValidOrientation = True
    if Self.ShipOrientation == "Choose Orientation" or not Self.ShipOrientation in Self.Orient:
        ValidOrientation = False
        tkinter.messagebox.showerror("No Orientation Selected" , "Please Select An Orientation")
    ##Get the row value
    ValidRow = True
    Self.row = Self.rowEntry.get()
    ##Verify only one letter is entered
    if not len(Self.row) == 1:
        if len(Self.row) > 1:
            tkinter.messagebox.showerror("Wrong Entry" , "Please Enter Only One Letter")
        else:
            tkinter.messagebox.showerror("No Entry" , "Please Enter A Letter")
        ValidRow = False
    else:
        ##Accept upper- or lower-case letters; map A-J -> 0-9
        if Self.row.isupper():
            Self.row = (ord(Self.row) - ord("A"))
        else:
            Self.row = (ord(Self.row) - ord("a"))
        ##A vertical ship occupies CurrentShip[1] rows starting at Self.row,
        ##so the whole span must stay on the board
        if Self.ShipOrientation == "Vertical":
            if Self.row < 0 or Self.row + Self.CurrentShip[1] > (len(Self.player1_ship)):
                tkinter.messagebox.showerror("Invalid Row" , "Enter Valid Row")
                ValidRow = False
        else:
            if Self.row < 0 or Self.row > (len(Self.player1_ship) - 1):
                tkinter.messagebox.showerror("Invalid Row" , "Enter Valid Row")
                ValidRow = False
    ##Get the column value
    ValidColumn = True
    Self.column = Self.columnEntry.get()
    ##Verify the entry
    if len(Self.column) == 0:
        tkinter.messagebox.showerror("No Entry" , "Please Enter A Number")
        ##Bug fix: an empty entry previously left ValidColumn True, so the
        ##code below went on to compare the "" string against ints and
        ##crashed with a TypeError (playerhit already handled this case)
        ValidColumn = False
    else:
        try:
            Self.column = (int(Self.column) - 1)
        except ValueError:
            tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
            ValidColumn = False
        if ValidColumn:
            ##Horizontal ships span CurrentShip[1] columns from Self.column
            if Self.ShipOrientation == "Horizontal":
                if Self.column < 0 or Self.column + Self.CurrentShip[1] > (len(Self.player1_ship)):
                    tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
                    ValidColumn = False
            else:
                if Self.column < 0 or Self.column > (len(Self.player1_ship) - 1):
                    tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
                    ValidColumn = False
    ##Places The Ship
    if ValidOrientation and ValidColumn and ValidRow:
        if not Self.ShipPositionCheck():
            ##Grid code per ship type (see the board legend); the fallback 6
            ##is the Destroyer, matching the original if/elif chain's else
            shipcodes = {"Carrier": 2 , "Battleship": 3 , "Cruiser": 4 , "Submarine": 5}
            code = shipcodes.get(Self.CurrentShip[0] , 6)
            ##Vertical Orientation
            if Self.ShipOrientation == "Vertical":
                for i in range(Self.row , Self.CurrentShip[1] + Self.row):
                    Self.player1_ship[i][Self.column] = code
                    label = Label(Self.PlayerShips , text = "X")
                    label.grid(row = i + 2 , column = Self.column + 2)
            ##Horizontal Orientation
            else:
                for i in range(Self.column , Self.CurrentShip[1] + Self.column):
                    Self.player1_ship[Self.row][i] = code
                    label = Label(Self.PlayerShips , text = "X")
                    label.grid(row = Self.row + 2 , column = i + 2)
            Self.positionFrame.pack_forget()
            ##Keep placing until all five ships are down, then start the battle
            if len(Self.PlacedShips) > 0:
                Self.ShipsFrame.pack()
            else:
                Self.StartSingleBattle()
##Check if the positions are free
def ShipPositionCheck(Self):
    """Return True (and show an error) when the current ship's intended
    span overlaps an already-occupied cell of player 1's board.

    Uses Self.row, Self.column, Self.CurrentShip and Self.ShipOrientation
    set by PlaceShip; a cell value other than 0 means it is taken.
    """
    size = Self.CurrentShip[1]
    if Self.ShipOrientation == "Vertical":
        span = (Self.player1_ship[r][Self.column] for r in range(Self.row , Self.row + size))
    else:
        span = (Self.player1_ship[Self.row][c] for c in range(Self.column , Self.column + size))
    occupied = False
    for cell in span:
        if cell != 0:
            occupied = True
    if occupied:
        tkinter.messagebox.showerror("Invalid Position" , "Ship Is Colliding With Another Ship")
    return occupied
def StartSingleBattle(Self):
    """Switch from placement to battle in single-player mode.

    Draws the empty labelled hit-tracking grid, places the computer's
    ships, enables saving (Stage 3), and shows the row/column entries with
    the HIT! button wired to playerhit.
    """
    Self.PlayerHitsFrame = Frame(Self.PlayerFrame)
    Self.PlayerHitsFrame.pack()
    header = Label(Self.PlayerHitsFrame , text = "Player Hits")
    header.grid( row = 0 , column = 0)
    ##Column numbers 1-10 across the top
    for col in range(1 , 11):
        Label(Self.PlayerHitsFrame , text = str(col)).grid(row = 1 , column = col + 1)
    ##Row letters A-J down the side
    for offset in range(10):
        Label(Self.PlayerHitsFrame , text = chr(ord("A") + offset)).grid(row = offset + 2 , column = 1)
    ##Blank hit cells
    for r in range(len(Self.player1_hits)):
        for c in range(len(Self.player1_hits[0])):
            Label(Self.PlayerHitsFrame , text = " ").grid(row = r + 2 , column = c + 2)
    Self.PlaceComputerShips()
    Self.canSave = True
    Self.Stage = 3
    ##Enter Hit Coordinates
    Self.rowcoordinate = Label(Self.PlayerFrame , text = "Row")
    Self.rowcoordinate.pack()
    Self.rowhit = StringVar()
    Self.rowentry = Entry(Self.PlayerFrame , textvariable = Self.rowhit)
    Self.rowentry.pack()
    Self.columncoordinate = Label(Self.PlayerFrame , text = "Column")
    Self.columncoordinate.pack()
    Self.columnhit = StringVar()
    Self.columnentry = Entry(Self.PlayerFrame , textvariable = Self.columnhit)
    Self.columnentry.pack()
    Self.hitbutton = Button(Self.PlayerFrame , text = "HIT!" , command = Self.playerhit)
    Self.hitbutton.pack()
##Hit Check
def playerhit(Self):
    """Resolve the player's shot at (Self.rowhit, Self.columnhit) against
    the computer's board, let the computer fire back on a miss, and check
    for end of game.

    Board codes (see the legend near the grids' construction): >1 = intact
    ship, 1 = damaged ship, 9 = opponent miss. Hit-grid codes: 0 = untried,
    1 = hit, 2 = miss.

    NOTE(review): indentation reconstructed from a flattened source; in
    particular the retaliation loop is read as "the computer keeps firing
    until it misses", mirroring the player getting another shot after a
    hit - confirm against the original file.
    """
    ##Get the row value
    ValidRow = True
    Self.row = Self.rowhit.get()
    ##Verify only one letter is entered
    if not len(Self.row) == 1:
        if len(Self.row) > 1:
            tkinter.messagebox.showerror("Wrong Entry" , "Please Enter Only One Letter")
            ValidRow = False
        else:
            tkinter.messagebox.showerror("No Entry" , "Please Enter A Letter")
            ValidRow = False
    else:
        ##Accept upper- or lower-case letters; map A-J -> 0-9
        if Self.row.isupper():
            Self.row = (ord(Self.row) - ord("A"))
        else:
            Self.row = (ord(Self.row) - ord("a"))
        ##Check that row ranges from A to J
        if Self.row <0 or Self.row > (len(Self.computer_ship) - 1):
            tkinter.messagebox.showerror("Invalid Row" , "Enter Valid Row")
            ValidRow = False
    ##Get the column value
    ValidColumn = True
    Self.column = Self.columnhit.get()
    ##Verify the entry
    if len(Self.column) == 0:
        tkinter.messagebox.showerror("No Entry" , "Please Enter A Number")
        ValidColumn = False
    else:
        try:
            ##Convert the 1-based entry to a 0-based index
            Self.column = (int(Self.column) - 1)
        except ValueError:
            tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
            ValidColumn = False
        ##Check Column Is Valid
        if ValidColumn:
            if Self.column <0 or Self.column > (len(Self.computer_ship) - 1):
                tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
                ValidColumn = False
    UnhitCoordinate = True
    Hit = True
    ComputerHit = True
    ##Check Hit
    if ValidColumn:
        if ValidRow:
            ##Refuse a square the player has already fired at
            if not Self.player1_hits[Self.row][Self.column] == 0:
                UnhitCoordinate = False
                tkinter.messagebox.showerror("Hit Coordinate" , "This Coordinate Has Already Been Hit")
            if UnhitCoordinate:
                ##Any code above 1 on the computer's board is an intact ship cell
                if Self.computer_ship[Self.row][Self.column] > 1:
                    Self.player1_hits[Self.row][Self.column] = 1
                    label = Label(Self.PlayerHitsFrame , text = "X")
                    label.grid(row = Self.row + 2 , column = Self.column + 2)
                    ##Play Sound Effects
                    if Self.SFX:
                        pygame.mixer.Sound.play(Self.hit_sound)
                    tkinter.messagebox.showinfo("Hit Result" , "Hit")
                    Self.computerlife -= 1
                    Hit = True
                    ##Check if ship is destroyed (cell code identifies the ship type)
                    if Self.computer_ship[Self.row][Self.column] == 2:
                        Self.computer_Carrierlife -= 1
                        if Self.computer_Carrierlife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship" , "Carrier Destroyed")
                    elif Self.computer_ship[Self.row][Self.column] == 3:
                        Self.computer_Battleshiplife -= 1
                        if Self.computer_Battleshiplife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship" , "Battleship Destroyed")
                    elif Self.computer_ship[Self.row][Self.column] == 4:
                        Self.computer_Cruiserlife -= 1
                        if Self.computer_Cruiserlife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship" , "Cruiser Destroyed")
                    elif Self.computer_ship[Self.row][Self.column] == 5:
                        Self.computer_Submarinelife -= 1
                        if Self.computer_Submarinelife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship" , "Submarine Destroyed")
                    else:
                        Self.computer_Destroyerlife -= 1
                        if Self.computer_Destroyerlife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship" , "Destroyer Destroyed")
                    ##Mark the cell as damaged so a repeat hit cannot double-count
                    Self.computer_ship[Self.row][Self.column] = 1
                else:
                    Self.player1_hits[Self.row][Self.column] = 2
                    label = Label(Self.PlayerHitsFrame , text = "O")
                    label.grid(row = Self.row + 2 , column = Self.column + 2)
                    ##Play Sound Effects
                    if Self.SFX:
                        pygame.mixer.Sound.play(Self.miss_sound)
                    tkinter.messagebox.showinfo("Hit Result" , "Miss")
                    Hit = False
                ##Computer Hit (the computer only fires after a player miss)
                if not Hit:
                    while ComputerHit:
                        ##Pick a random square the computer has not tried yet
                        column = random.randint(0 , 9)
                        row = random.randint(0 , 9)
                        while not Self.computer_hits[row][column] == 0:
                            column = random.randint(0 , 9)
                            row = random.randint(0 , 9)
                        if Self.player1_ship[row][column] > 1:
                            Self.computer_hits[row][column] = 1
                            ##Play Sound Effects
                            if Self.SFX:
                                pygame.mixer.Sound.play(Self.hit_sound)
                            tkinter.messagebox.showwarning("Computer Hit Result" , "Hit At: " + chr(ord("A") + row) + str(int(column + 1)))
                            label = Label(Self.PlayerShips , text = "*")
                            label.grid(row = row + 2 , column = column + 2)
                            Self.player1life -= 1
                            ##Check if a ship has been destroyed
                            if Self.player1_ship[row][column] == 2:
                                Self.player1_Carrierlife -= 1
                                if Self.player1_Carrierlife == 0:
                                    tkinter.messagebox.showwarning("Destroyed Ship" , "Carrier Destroyed")
                            elif Self.player1_ship[row][column] == 3:
                                Self.player1_Battleshiplife -= 1
                                if Self.player1_Battleshiplife == 0:
                                    tkinter.messagebox.showwarning("Destroyed Ship" , "Battleship Destroyed")
                            elif Self.player1_ship[row][column] == 4:
                                Self.player1_Cruiserlife -= 1
                                if Self.player1_Cruiserlife == 0:
                                    tkinter.messagebox.showwarning("Destroyed Ship" , "Cruiser Destroyed")
                            elif Self.player1_ship[row][column] == 5:
                                Self.player1_Submarinelife -= 1
                                if Self.player1_Submarinelife == 0:
                                    tkinter.messagebox.showwarning("Destroyed Ship" , "Submarine Destroyed")
                            else:
                                Self.player1_Destroyerlife -= 1
                                if Self.player1_Destroyerlife == 0:
                                    tkinter.messagebox.showwarning("Destroyed Ship" , "Destroyer Destroyed")
                            Self.player1_ship[row][column] = 1
                        else:
                            Self.computer_hits[row][column] = 2
                            ##9 marks the opponent's miss on the player's own board
                            Self.player1_ship[row][column] = 9
                            ##Play Sound Effects
                            if Self.SFX:
                                pygame.mixer.Sound.play(Self.miss_sound)
                            tkinter.messagebox.showwarning("Computer Hit Result" , "Miss At: " + chr(ord("A") + row) + str(int(column + 1)))
                            label = Label(Self.PlayerShips , text = "O")
                            label.grid(row = row + 2 , column = column + 2)
                            ##The computer's turn ends only on a miss
                            ComputerHit = False
                ##End of Game (17 total hits sinks a whole fleet)
                if Self.computerlife == 0:
                    tkinter.messagebox.showinfo("Game Over" , "You Won")
                    Self.sMainMenu()
                if Self.player1life == 0:
                    tkinter.messagebox.showinfo("Game Over" , "You Lost")
                    Self.sMainMenu()
##Computer Functions
##Place Computer Ships
def PlaceComputerShips(Self):
    """Randomly place the computer's five ships on its 10x10 board.

    Each ship is written into Self.computer_ship using its grid code
    (Carrier=2, Battleship=3, Cruiser=4, Submarine=5, Destroyer=6).
    For every ship a random orientation and position are redrawn until
    Self.ComputerCheckPosition() reports no collision.
    """
    ##(size , grid code) for every ship in the fleet, largest first
    fleet = [(5 , 2) , (4 , 3) , (3 , 4) , (3 , 5) , (2 , 6)]
    for shipsize , shipcode in fleet:
        Self.Computershipsize = shipsize
        ##Orientation: 1 = vertical (spans rows) , 2 = horizontal (spans columns)
        Self.ComputerShipOrientation = random.randint(1 , 2)
        Self.computerrow = 0
        Self.computercolumn = 0
        if Self.ComputerShipOrientation == 1:
            ##Vertical: the start row must leave room for the ship's length
            Self.computercolumn = random.randint(0 , 9)
            Self.computerrow = random.randint(0 , (10 - Self.Computershipsize))
            while Self.ComputerCheckPosition():
                Self.computercolumn = random.randint(0 , 9)
                Self.computerrow = random.randint(0 , (10 - Self.Computershipsize))
            for i in range(Self.computerrow , Self.Computershipsize + Self.computerrow):
                Self.computer_ship[i][Self.computercolumn] = shipcode
        else:
            ##Horizontal: the start column must leave room for the ship's length
            Self.computercolumn = random.randint(0 , (10 - Self.Computershipsize))
            Self.computerrow = random.randint(0 , 9)
            while Self.ComputerCheckPosition():
                Self.computercolumn = random.randint(0 , (10 - Self.Computershipsize))
                Self.computerrow = random.randint(0 , 9)
            for i in range(Self.computercolumn , Self.Computershipsize + Self.computercolumn):
                Self.computer_ship[Self.computerrow][i] = shipcode
def ComputerCheckPosition(Self):
    ##Report whether the candidate computer-ship placement overlaps an
    ##occupied cell: True = collision, False = the whole span is free.
    if Self.ComputerShipOrientation == 1:
        ##Vertical span: fixed column, rows computerrow .. computerrow + size - 1
        span = (Self.computer_ship[r][Self.computercolumn]
                for r in range(Self.computerrow , Self.computerrow + Self.Computershipsize))
    else:
        ##Horizontal span: fixed row, columns computercolumn .. computercolumn + size - 1
        span = (Self.computer_ship[Self.computerrow][c]
                for c in range(Self.computercolumn , Self.computercolumn + Self.Computershipsize))
    return any(cell != 0 for cell in span)
##Multiplayer
def StartMultiPlayer(Self):
    """Start a two-player game: register player 1's name and password,
    then build the ship-placement UI for player 1's board."""
    ##Ask Player1 Name And Password:
    InvalidName = True
    while InvalidName:
        try:
            Self.P1name = tkinter.simpledialog.askstring("P1 Name" , "Enter Your Name")
            while len(Self.P1name) < 1:
                Self.P1name = tkinter.simpledialog.askstring("P1 Name" , "Enter Your Name")
            InvalidName = False
        ##askstring returns None when the dialog is cancelled; len(None) raises TypeError
        except TypeError:
            tkinter.messagebox.showerror("Invalid Name" , "No Name Entered")
    Self.Checkpass = 1
    invalidpass = True
    while invalidpass:
        ##BUGFIX: the second rule used "\2-" (octal escape, byte 0x02) instead of "\n2-"
        Self.P1password = tkinter.simpledialog.askstring("Enter Password" , "Your password must follow these conditions:\n1- 1 Letter between [a-z].\n2- 1 Letter between [A-Z].\n3- 1 Number between [0-9].\n4- 1 Special character from [$#@?*].\n5- Minimum length 6 characters.\n6- Maximum length 16 characters.")
        try:
            while len(Self.P1password) < 6 or len(Self.P1password) > 16:
                if len(Self.P1password) < 6:
                    tkinter.messagebox.showerror("Invalid Password" , "Password is too short")
                    Self.P1password = tkinter.simpledialog.askstring("Enter Password" , "At least 6 characters long")
                else:
                    tkinter.messagebox.showerror("Invalid Password" , "Password is too long")
                    Self.P1password = tkinter.simpledialog.askstring("Enter Password" , "Maximum 16 characters long")
            invalidpass = Self.CheckPassword()
        except TypeError:
            tkinter.messagebox.showerror("Invalid Password" , "No Password Entered")
    ##Build player 1's board frame
    Self.Player1Frame = Frame(Self.GameFrame)
    Self.Player1Frame.pack()
    Self.Player1Ships = Frame(Self.Player1Frame)
    Self.Player1Ships.pack()
    label = Label(Self.Player1Ships , text = Self.P1name + "'s Ships:")
    label.grid(row = 0 , column = 0)
    ##Place the column coordinates
    for i in range(1 , 11):
        label = Label(Self.Player1Ships , text = str(i))
        label.grid(row = 1 , column = i + 1)
    ##Place the row coordinates
    lrow = 1
    for i in range(ord("A") , (ord("A") + 10)):
        label = Label(Self.Player1Ships , text = chr(i))
        label.grid(row = lrow + 1 , column = 1)
        lrow += 1
    ##Blank cells for the 10x10 ship grid
    for i in range(0 , len(Self.player1_ship)):
        for j in range(0 , len(Self.player1_ship[0])):
            label = Label(Self.Player1Ships , text = " ")
            label.grid(row = i + 2 , column = j + 2)
    ##Ship selection combobox; ships are removed from it as they are placed
    Self.P1ShipsFrame = Frame(Self.Player1Frame)
    Self.P1ShipsFrame.pack()
    Self.P1PlacedShips = list(Self.Ships)
    Self.P1ShipSelection = Combobox(Self.P1ShipsFrame , values = Self.P1PlacedShips)
    Self.P1ShipSelection.set("Select Ship")
    Self.P1ShipSelection.grid(row = 1 , column = 1)
    Self.P1Selectbtn = Button(Self.P1ShipsFrame , text = "Select" , command = Self.P1getship)
    Self.P1Selectbtn.grid(row = 2 , column = 0)
def P1getship(Self):
    """Read player 1's ship choice from the combobox, record it as
    Self.CurrentShip = (name , length) and open the coordinate entry."""
    ##Get Ship and remove it from the combobox
    Ship = Self.P1ShipSelection.get()
    ##Verify that a ship was selected
    if Ship == "Select Ship" or not Ship in Self.Ships:
        ##BUGFIX: error text read "Please A Select Ship"
        tkinter.messagebox.showerror("No Ship Selected" , "Please Select A Ship")
    else:
        Self.P1PlacedShips.remove(Ship)
        Self.P1ShipsFrame.pack_forget()
        ##Length of each ship; the 2-cell Destroyer is the default
        ShipSizes = {"Carrier" : 5 , "Battleship" : 4 , "Cruiser" : 3 , "Submarine" : 3}
        Self.CurrentShip = (Ship , ShipSizes.get(Ship , 2))
        Self.P1GetCoordinates()
        ##Create a new combobox without the selected ship
        Self.P1ShipSelection = Combobox(Self.P1ShipsFrame , values = Self.P1PlacedShips)
        Self.P1ShipSelection.set("Select Ship")
        Self.P1ShipSelection.grid(row = 1 , column = 1)
def P1GetCoordinates(Self):
    ##Build the position entry bar for player 1: row letter, column
    ##number, orientation picker and a confirm button.
    Self.P1positionFrame = Frame(Self.Player1Frame)
    Self.P1positionFrame.pack()
    ##Row entry (letter A-J)
    Self.rowposition = StringVar()
    Self.rowLabel = Label(Self.P1positionFrame , text = "Row:")
    Self.rowLabel.grid(row = 0 , column = 0)
    Self.rowEntry = Entry(Self.P1positionFrame , textvariable = Self.rowposition)
    Self.rowEntry.grid(row = 0 , column = 1)
    ##Column entry (number 1-10)
    Self.columnposition = StringVar()
    Self.columnLabel = Label(Self.P1positionFrame , text = "Column:")
    Self.columnLabel.grid(row = 0 , column = 2)
    Self.columnEntry = Entry(Self.P1positionFrame , textvariable = Self.columnposition)
    Self.columnEntry.grid(row = 0 , column = 3)
    ##Confirm button triggers the actual placement
    Self.getbtn = Button(Self.P1positionFrame , text = "Enter" , command = Self.P1PlaceShip)
    Self.getbtn.grid(row = 1 , column = 2)
    ##Orientation combo box (values come from Self.Orient)
    Self.Orientation = Combobox(Self.P1positionFrame , values = Self.Orient)
    Self.Orientation.set("Choose Orientation")
    Self.Orientation.grid(row = 0 , column = 4)
def P1PlaceShip(Self):
    """Validate player 1's orientation / row / column entries and, when
    everything is valid and collision free, write the current ship into
    Self.player1_ship and mark its cells with "X" labels on the board.
    When the last ship is placed, hand over to player 2 via P2Start().
    """
    ##Get the orientation
    Self.ShipOrientation = Self.Orientation.get()
    ValidOrientation = True
    if Self.ShipOrientation == "Choose Orientation" or not Self.ShipOrientation in Self.Orient:
        ValidOrientation = False
        tkinter.messagebox.showerror("No Orientation Selected" , "Please Select An Orientation")
    ##Get the row value
    ValidRow = True
    Self.row = Self.rowEntry.get()
    ##Verify only one letter is entered
    if not len(Self.row) == 1:
        if len(Self.row) > 1:
            tkinter.messagebox.showerror("Wrong Entry" , "Please Enter Only One Letter")
            ValidRow = False
        else:
            tkinter.messagebox.showerror("No Entry" , "Please Enter A Letter")
            ValidRow = False
    else:
        ##Convert the letter to a 0-based row index
        if Self.row.isupper():
            Self.row = (ord(Self.row) - ord("A"))
        else:
            Self.row = (ord(Self.row) - ord("a"))
        if Self.ShipOrientation == "Vertical":
            ##A vertical ship must fit below the chosen row
            if Self.row < 0 or Self.row + Self.CurrentShip[1] > (len(Self.player1_ship)):
                tkinter.messagebox.showerror("Invalid Row" , "Enter Valid Row")
                ValidRow = False
        else:
            if Self.row < 0 or Self.row > (len(Self.player1_ship) - 1):
                tkinter.messagebox.showerror("Invalid Row" , "Enter Valid Row")
                ValidRow = False
    ##Get the column value
    ValidColumn = True
    Self.column = Self.columnEntry.get()
    ##Verify the entry
    if len(Self.column) == 0:
        tkinter.messagebox.showerror("No Entry" , "Please Enter A Number")
        ##BUGFIX: the flag was never cleared here, so an empty entry fell
        ##through to the "Self.column < 0" comparison below and raised a
        ##TypeError (str vs int); player1hit already handled this case.
        ValidColumn = False
    else:
        try:
            Self.column = (int(Self.column) - 1)
        except ValueError:
            tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
            ValidColumn = False
    if ValidColumn:
        if Self.ShipOrientation == "Horizontal":
            ##A horizontal ship must fit to the right of the chosen column
            if Self.column < 0 or Self.column + Self.CurrentShip[1] > (len(Self.player1_ship)):
                tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
                ValidColumn = False
        else:
            if Self.column < 0 or Self.column > (len(Self.player1_ship) - 1):
                tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
                ValidColumn = False
    ##Places The Ship
    if ValidOrientation:
        if ValidColumn and ValidRow:
            if not Self.P1ShipPositionCheck():
                ##Grid code for each ship (the 2-cell Destroyer is the default)
                ShipCodes = {"Carrier" : 2 , "Battleship" : 3 , "Cruiser" : 4 , "Submarine" : 5}
                code = ShipCodes.get(Self.CurrentShip[0] , 6)
                if Self.ShipOrientation == "Vertical":
                    for i in range(Self.row , Self.CurrentShip[1] + Self.row):
                        Self.player1_ship[i][Self.column] = code
                        label = Label(Self.Player1Ships , text = "X")
                        label.grid(row = i + 2 , column = Self.column + 2)
                else:
                    for i in range(Self.column , Self.CurrentShip[1] + Self.column):
                        Self.player1_ship[Self.row][i] = code
                        label = Label(Self.Player1Ships , text = "X")
                        label.grid(row = Self.row + 2 , column = i + 2)
                Self.P1positionFrame.pack_forget()
                ##More ships left -> back to the selection combobox;
                ##otherwise player 1 is done and player 2 takes over
                if len(Self.P1PlacedShips) > 0:
                    Self.P1ShipsFrame.pack()
                else:
                    Self.Player1Frame.pack_forget()
                    Self.P2Start()
def P1ShipPositionCheck(Self):
    ##True when the pending ship would overlap a ship already on
    ##player 1's board; warns the user before returning in that case.
    if Self.ShipOrientation == "Vertical":
        cells = [Self.player1_ship[r][Self.column]
                 for r in range(Self.row , Self.row + Self.CurrentShip[1])]
    else:
        cells = [Self.player1_ship[Self.row][c]
                 for c in range(Self.column , Self.column + Self.CurrentShip[1])]
    status = any(cell != 0 for cell in cells)
    if status:
        tkinter.messagebox.showerror("Invalid Position" , "Ship Is Colliding With Another Ship")
    return status
def P2Start(Self):
    """Register player 2's name and password, then build the
    ship-placement UI for player 2's board."""
    ##Ask Player2 Name And Password:
    InvalidName = True
    while InvalidName:
        try:
            Self.P2name = tkinter.simpledialog.askstring("P2 Name" , "Enter Your Name")
            while len(Self.P2name) < 1:
                Self.P2name = tkinter.simpledialog.askstring("P2 Name" , "Enter Your Name")
            InvalidName = False
        ##askstring returns None when the dialog is cancelled; len(None) raises TypeError
        except TypeError:
            tkinter.messagebox.showerror("Invalid Name" , "No Name Entered")
    Self.Checkpass = 2
    invalidpass = True
    while invalidpass:
        ##BUGFIX: the second rule used "\2-" (octal escape, byte 0x02) instead of "\n2-"
        Self.P2password = tkinter.simpledialog.askstring("Enter Password" , "Your password must follow these conditions:\n1- 1 Letter between [a-z].\n2- 1 Letter between [A-Z].\n3- 1 Number between [0-9].\n4- 1 Special character from [$#@?*].\n5- Minimum length 6 characters.\n6- Maximum length 16 characters.")
        try:
            while len(Self.P2password) < 6 or len(Self.P2password) > 16:
                if len(Self.P2password) < 6:
                    tkinter.messagebox.showerror("Invalid Password" , "Password is too short")
                    Self.P2password = tkinter.simpledialog.askstring("Enter Password" , "At least 6 characters long")
                else:
                    tkinter.messagebox.showerror("Invalid Password" , "Password is too long")
                    Self.P2password = tkinter.simpledialog.askstring("Enter Password" , "Maximum 16 characters long")
            invalidpass = Self.CheckPassword()
        except TypeError:
            tkinter.messagebox.showerror("Invalid Password" , "No Password Entered")
    ##Build player 2's board frame
    Self.Player2Frame = Frame(Self.GameFrame)
    Self.Player2Frame.pack()
    Self.Player2Ships = Frame(Self.Player2Frame)
    Self.Player2Ships.pack()
    label = Label(Self.Player2Ships , text = Self.P2name + "'s Ships :")
    label.grid(row = 0 , column = 0)
    ##Place the column coordinates
    for i in range(1 , 11):
        label = Label(Self.Player2Ships , text = str(i))
        label.grid(row = 1 , column = i + 1)
    ##Place the row coordinates
    lrow = 1
    for i in range(ord("A") , (ord("A") + 10)):
        label = Label(Self.Player2Ships , text = chr(i))
        label.grid(row = lrow + 1 , column = 1)
        lrow += 1
    ##Blank cells for the 10x10 ship grid
    for i in range(0 , len(Self.player2_ship)):
        for j in range(0 , len(Self.player2_ship[0])):
            label = Label(Self.Player2Ships , text = " ")
            label.grid(row = i + 2 , column = j + 2)
    ##Ship selection combobox; ships are removed from it as they are placed
    Self.P2ShipsFrame = Frame(Self.Player2Frame)
    Self.P2ShipsFrame.pack()
    Self.P2PlacedShips = list(Self.Ships)
    Self.P2ShipSelection = Combobox(Self.P2ShipsFrame , values = Self.P2PlacedShips)
    Self.P2ShipSelection.set("Select Ship")
    Self.P2ShipSelection.grid(row = 1 , column = 1)
    Self.P2Selectbtn = Button(Self.P2ShipsFrame , text = "Select" , command = Self.P2getship)
    Self.P2Selectbtn.grid(row = 2 , column = 0)
def P2getship(Self):
    """Read player 2's ship choice from the combobox, record it as
    Self.CurrentShip = (name , length) and open the coordinate entry."""
    ##Get Ship and remove it from the combobox
    Ship = Self.P2ShipSelection.get()
    ##Verify that a ship was selected
    if Ship == "Select Ship" or not Ship in Self.Ships:
        ##BUGFIX: error text read "Please A Select Ship"
        tkinter.messagebox.showerror("No Ship Selected" , "Please Select A Ship")
    else:
        Self.P2PlacedShips.remove(Ship)
        Self.P2ShipsFrame.pack_forget()
        ##Length of each ship; the 2-cell Destroyer is the default
        ShipSizes = {"Carrier" : 5 , "Battleship" : 4 , "Cruiser" : 3 , "Submarine" : 3}
        Self.CurrentShip = (Ship , ShipSizes.get(Ship , 2))
        Self.P2GetCoordinates()
        ##Create a new combobox without the selected ship
        Self.P2ShipSelection = Combobox(Self.P2ShipsFrame , values = Self.P2PlacedShips)
        Self.P2ShipSelection.set("Select Ship")
        Self.P2ShipSelection.grid(row = 1 , column = 1)
def P2GetCoordinates(Self):
    ##Build the position entry bar for player 2: row letter, column
    ##number, orientation picker and a confirm button.
    Self.P2positionFrame = Frame(Self.Player2Frame)
    Self.P2positionFrame.pack()
    ##Row entry (letter A-J)
    Self.rowposition = StringVar()
    Self.rowLabel = Label(Self.P2positionFrame , text = "Row:")
    Self.rowLabel.grid(row = 0 , column = 0)
    Self.rowEntry = Entry(Self.P2positionFrame , textvariable = Self.rowposition)
    Self.rowEntry.grid(row = 0 , column = 1)
    ##Column entry (number 1-10)
    Self.columnposition = StringVar()
    Self.columnLabel = Label(Self.P2positionFrame , text = "Column:")
    Self.columnLabel.grid(row = 0 , column = 2)
    Self.columnEntry = Entry(Self.P2positionFrame , textvariable = Self.columnposition)
    Self.columnEntry.grid(row = 0 , column = 3)
    ##Confirm button triggers the actual placement
    Self.getbtn = Button(Self.P2positionFrame , text = "Enter" , command = Self.P2PlaceShip)
    Self.getbtn.grid(row = 1 , column = 2)
    ##Orientation combo box (values come from Self.Orient)
    Self.Orientation = Combobox(Self.P2positionFrame , values = Self.Orient)
    Self.Orientation.set("Choose Orientation")
    Self.Orientation.grid(row = 0 , column = 4)
def P2PlaceShip(Self):
    """Validate player 2's orientation / row / column entries and, when
    everything is valid and collision free, write the current ship into
    Self.player2_ship and mark its cells with "X" labels on the board.
    When the last ship is placed, the battle starts.
    """
    ##Get the orientation
    Self.ShipOrientation = Self.Orientation.get()
    ValidOrientation = True
    if Self.ShipOrientation == "Choose Orientation" or not Self.ShipOrientation in Self.Orient:
        ValidOrientation = False
        tkinter.messagebox.showerror("No Orientation Selected" , "Please Select An Orientation")
    ##Get the row value
    ValidRow = True
    Self.row = Self.rowEntry.get()
    ##Verify only one letter is entered
    if not len(Self.row) == 1:
        if len(Self.row) > 1:
            tkinter.messagebox.showerror("Wrong Entry" , "Please Enter Only One Letter")
            ValidRow = False
        else:
            tkinter.messagebox.showerror("No Entry" , "Please Enter A Letter")
            ValidRow = False
    else:
        ##Convert the letter to a 0-based row index
        if Self.row.isupper():
            Self.row = (ord(Self.row) - ord("A"))
        else:
            Self.row = (ord(Self.row) - ord("a"))
        if Self.ShipOrientation == "Vertical":
            ##A vertical ship must fit below the chosen row
            if Self.row < 0 or Self.row + Self.CurrentShip[1] > (len(Self.player2_ship)):
                tkinter.messagebox.showerror("Invalid Row" , "Enter Valid Row")
                ValidRow = False
        else:
            if Self.row < 0 or Self.row > (len(Self.player2_ship) - 1):
                tkinter.messagebox.showerror("Invalid Row" , "Enter Valid Row")
                ValidRow = False
    ##Get the column value
    ValidColumn = True
    Self.column = Self.columnEntry.get()
    ##Verify the entry
    if len(Self.column) == 0:
        tkinter.messagebox.showerror("No Entry" , "Please Enter A Number")
        ##BUGFIX: the flag was never cleared here, so an empty entry fell
        ##through to the "Self.column < 0" comparison below and raised a
        ##TypeError (str vs int); player1hit already handled this case.
        ValidColumn = False
    else:
        try:
            Self.column = (int(Self.column) - 1)
        except ValueError:
            tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
            ValidColumn = False
    if ValidColumn:
        if Self.ShipOrientation == "Horizontal":
            ##A horizontal ship must fit to the right of the chosen column
            if Self.column < 0 or Self.column + Self.CurrentShip[1] > (len(Self.player2_ship)):
                tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
                ValidColumn = False
        else:
            if Self.column < 0 or Self.column > (len(Self.player2_ship) - 1):
                tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
                ValidColumn = False
    ##Places The Ship
    if ValidOrientation:
        if ValidColumn and ValidRow:
            if not Self.P2ShipPositionCheck():
                ##Grid code for each ship (the 2-cell Destroyer is the default)
                ShipCodes = {"Carrier" : 2 , "Battleship" : 3 , "Cruiser" : 4 , "Submarine" : 5}
                code = ShipCodes.get(Self.CurrentShip[0] , 6)
                if Self.ShipOrientation == "Vertical":
                    for i in range(Self.row , Self.CurrentShip[1] + Self.row):
                        Self.player2_ship[i][Self.column] = code
                        label = Label(Self.Player2Ships , text = "X")
                        label.grid(row = i + 2 , column = Self.column + 2)
                else:
                    for i in range(Self.column , Self.CurrentShip[1] + Self.column):
                        Self.player2_ship[Self.row][i] = code
                        label = Label(Self.Player2Ships , text = "X")
                        label.grid(row = Self.row + 2 , column = i + 2)
                Self.P2positionFrame.pack_forget()
                ##More ships left -> back to the selection combobox;
                ##otherwise both fleets are placed and the battle starts
                if len(Self.P2PlacedShips) > 0:
                    Self.P2ShipsFrame.pack()
                else:
                    Self.Player2Frame.pack_forget()
                    Self.StartMultiPlayerBattle()
def P2ShipPositionCheck(Self):
    ##True when the pending ship would overlap a ship already on
    ##player 2's board; warns the user before returning in that case.
    if Self.ShipOrientation == "Vertical":
        cells = [Self.player2_ship[r][Self.column]
                 for r in range(Self.row , Self.row + Self.CurrentShip[1])]
    else:
        cells = [Self.player2_ship[Self.row][c]
                 for c in range(Self.column , Self.column + Self.CurrentShip[1])]
    status = any(cell != 0 for cell in cells)
    if status:
        tkinter.messagebox.showerror("Invalid Position" , "Ship Is Colliding With Another Ship")
    return status
def StartMultiPlayerBattle(Self):
    ##Build the battle-phase UI for both players: each player gets a
    ##hit-tracking grid plus row/column entries and a HIT! button.
    ##Player 1 goes first and must enter their password before their
    ##frame is shown.  NOTE: pack/grid call order determines the layout,
    ##so the statement order below is significant.
    ##Player 1
    Self.Player1HitsFrame = Frame(Self.Player1Frame)
    Self.Player1HitsFrame.pack()
    label = Label(Self.Player1HitsFrame , text = Self.P1name+ "'s: Hits")
    label.grid( row = 0 , column = 0)
    ##Place the column coordinates
    for i in range(1 , 11):
        label = Label(Self.Player1HitsFrame , text = str(i))
        label.grid(row = 1 , column = i + 1)
    ##Place the row coordinates
    lrow = 1
    for i in range(ord("A") , (ord("A") + 10)):
        label = Label(Self.Player1HitsFrame , text = chr(i))
        label.grid(row = lrow + 1 , column = 1)
        lrow += 1
    ##Blank cells for the 10x10 hit-tracking grid
    for i in range(0 , len(Self.player1_hits)):
        for j in range(0 , len(Self.player1_hits[0])):
            label = Label(Self.Player1HitsFrame , text = " ")
            label.grid(row = i + 2 , column = j + 2)
    ##Battle phase reached: saving is now allowed and the stage is recorded
    Self.canSave = True
    Self.Stage = 3
    ##Enter Hit Coordinates
    Self.rowcoordinate = Label(Self.Player1Frame , text = "Row")
    Self.rowcoordinate.pack()
    Self.P1rowhit = StringVar()
    Self.rowentry = Entry(Self.Player1Frame , textvariable = Self.P1rowhit)
    Self.rowentry.pack()
    Self.columncoordinate = Label(Self.Player1Frame , text = "Column")
    Self.columncoordinate.pack()
    Self.P1columnhit = StringVar()
    Self.columnentry = Entry(Self.Player1Frame , textvariable = Self.P1columnhit)
    Self.columnentry.pack()
    Self.hitbutton = Button(Self.Player1Frame , text = "HIT!" , command = Self.player1hit)
    Self.hitbutton.pack()
    ##Gate player 1's board behind their password before showing it
    Self.password = tkinter.simpledialog.askstring("Password" , Self.P1name + " Enter Your Password:")
    while not Self.password == Self.P1password:
        tkinter.messagebox.showerror("Wrong Password" , "Enter Your Password")
        Self.password = tkinter.simpledialog.askstring("Password" , Self.P1name + " Enter Your Password:")
    Self.Player1Frame.pack()
    ##Player 2 (frame built now but only packed when the turn passes)
    Self.Player2HitsFrame = Frame(Self.Player2Frame)
    Self.Player2HitsFrame.pack()
    label = Label(Self.Player2HitsFrame , text = Self.P2name+ "'s: Hits")
    label.grid( row = 0 , column = 0)
    ##Place the column coordinates
    for i in range(1 , 11):
        label = Label(Self.Player2HitsFrame , text = str(i))
        label.grid(row = 1 , column = i + 1)
    ##Place the row coordinates
    lrow = 1
    for i in range(ord("A") , (ord("A") + 10)):
        label = Label(Self.Player2HitsFrame , text = chr(i))
        label.grid(row = lrow + 1 , column = 1)
        lrow += 1
    ##Blank cells for the 10x10 hit-tracking grid
    for i in range(0 , len(Self.player2_hits)):
        for j in range(0 , len(Self.player2_hits[0])):
            label = Label(Self.Player2HitsFrame , text = " ")
            label.grid(row = i + 2 , column = j + 2)
    ##Enter Hit Coordinates
    Self.rowcoordinate = Label(Self.Player2Frame , text = "Row")
    Self.rowcoordinate.pack()
    Self.P2rowhit = StringVar()
    Self.rowentry = Entry(Self.Player2Frame , textvariable = Self.P2rowhit)
    Self.rowentry.pack()
    Self.columncoordinate = Label(Self.Player2Frame , text = "Column")
    Self.columncoordinate.pack()
    Self.P2columnhit = StringVar()
    Self.columnentry = Entry(Self.Player2Frame , textvariable = Self.P2columnhit)
    Self.columnentry.pack()
    Self.hitbutton = Button(Self.Player2Frame , text = "HIT!" , command = Self.player2hit)
    Self.hitbutton.pack()
def player1hit(Self):
    """Resolve player 1's shot against player 2's board.

    Validates the row letter and column number, rejects coordinates
    that were already targeted, updates both players' grids and the
    ship life counters, announces destroyed ships, ends the game when
    player 2 has no lives left, and hands the turn to player 2 (behind
    their password) after a miss.
    """
    ##Get the row value
    ValidRow = True
    Self.row = Self.P1rowhit.get()
    ##Verify only one letter is entered
    if not len(Self.row) == 1:
        if len(Self.row) > 1:
            tkinter.messagebox.showerror("Wrong Entry" , "Please Enter Only One Letter")
            ValidRow = False
        else:
            tkinter.messagebox.showerror("No Entry" , "Please Enter A Letter")
            ValidRow = False
    else:
        if Self.row.isupper():
            Self.row = (ord(Self.row) - ord("A"))
        else:
            Self.row = (ord(Self.row) - ord("a"))
        ##Check that row ranges from A to J
        if Self.row < 0 or Self.row > (len(Self.player2_ship) - 1):
            tkinter.messagebox.showerror("Invalid Row" , "Enter Valid Row")
            ValidRow = False
    ##Get the column value
    ValidColumn = True
    Self.column = Self.P1columnhit.get()
    ##Verify the entry
    if len(Self.column) == 0:
        tkinter.messagebox.showerror("No Entry" , "Please Enter A Number")
        ValidColumn = False
    else:
        try:
            Self.column = (int(Self.column) - 1)
        except ValueError:
            tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
            ValidColumn = False
    ##Check Column Is Valid (bound taken from the board's column count)
    if ValidColumn:
        if Self.column < 0 or Self.column > (len(Self.player2_ship[0]) - 1):
            tkinter.messagebox.showerror("Invalid Column" , "Enter Valid Column")
            ValidColumn = False
    UnhitCoordinate = True
    Hit = True
    ##Check Hit
    if ValidColumn and ValidRow:
        if not Self.player1_hits[Self.row][Self.column] == 0:
            UnhitCoordinate = False
            tkinter.messagebox.showerror("Hit Coordinate" , "This Coordinate Has Already Been Hit")
        if UnhitCoordinate:
            ##Cell codes > 1 are intact ship parts
            if Self.player2_ship[Self.row][Self.column] > 1:
                Self.player1_hits[Self.row][Self.column] = 1
                label = Label(Self.Player1HitsFrame , text = "X")
                label.grid(row = Self.row + 2 , column = Self.column + 2)
                ##Play Sound Effects
                if Self.SFX:
                    pygame.mixer.Sound.play(Self.hit_sound)
                tkinter.messagebox.showinfo("Hit Result" , "Hit")
                label = Label(Self.Player2Ships , text = "*")
                label.grid(row = Self.row + 2 , column = Self.column + 2)
                Self.player2life -= 1
                Hit = True
                ##Check if ship is destroyed: grid code -> (life attribute , ship name),
                ##with the Destroyer as the default (matches the original else branch)
                ShipInfo = {2 : ("player2_Carrierlife" , "Carrier") , 3 : ("player2_Battleshiplife" , "Battleship") , 4 : ("player2_Cruiserlife" , "Cruiser") , 5 : ("player2_Submarinelife" , "Submarine")}
                lifeattr , shipname = ShipInfo.get(Self.player2_ship[Self.row][Self.column] , ("player2_Destroyerlife" , "Destroyer"))
                setattr(Self , lifeattr , getattr(Self , lifeattr) - 1)
                if getattr(Self , lifeattr) == 0:
                    tkinter.messagebox.showinfo("Destroyed Ship" , shipname + " Destroyed")
                ##Mark the cell as a hit ship part
                Self.player2_ship[Self.row][Self.column] = 1
            else:
                ##Miss: record it on both boards (9 marks missed water)
                Self.player1_hits[Self.row][Self.column] = 2
                label = Label(Self.Player1HitsFrame , text = "O")
                label.grid(row = Self.row + 2 , column = Self.column + 2)
                ##Play Sound Effects
                if Self.SFX:
                    pygame.mixer.Sound.play(Self.miss_sound)
                tkinter.messagebox.showinfo("Hit Result" , "Miss")
                Hit = False
                Self.player2_ship[Self.row][Self.column] = 9
                label = Label(Self.Player2Ships , text = "O")
                label.grid(row = Self.row + 2 , column = Self.column + 2)
    ##End of game: player 2 has no ship cells left
    if Self.player2life == 0:
        tkinter.messagebox.showinfo("End Of Game" , Self.P1name + " Won")
        Self.sMainMenu()
    ##A miss passes the turn to player 2, gated behind their password
    if not Hit:
        Self.Player1Frame.pack_forget()
        Self.canSave = False
        Self.password = tkinter.simpledialog.askstring("Password" , Self.P2name + " Enter Your Password:")
        while not Self.password == Self.P2password:
            tkinter.messagebox.showerror("Wrong Password" , "Enter Your Password")
            Self.password = tkinter.simpledialog.askstring("Password" , Self.P2name + " Enter Your Password:")
        Self.Player2Frame.pack()
def player2hit(Self):
    """Resolve player 2's shot against player 1's board.

    Reads the row letter / column number entries, validates them, marks the
    shot on both players' grids, plays hit/miss feedback, tracks per-ship
    lives, and on a miss hands the turn to player 1 behind a password prompt.

    NOTE(review): the original dump lost all indentation; the nesting below
    was reconstructed from statement order and the mirrored player1hit tail
    visible in the same dump - confirm against the original source.
    """
    ##Get the row value
    ValidRow = True
    Self.row = Self.P2rowhit.get()
    ##Verify only one letter is entered
    if not len(Self.row) == 1:
        if len(Self.row) > 1:
            tkinter.messagebox.showerror("Wrong Entry", "Please Enter Only One Letter")
            ValidRow = False
        else:
            tkinter.messagebox.showerror("No Entry", "Please Enter A Letter")
            ValidRow = False
    else:
        # Convert the row letter (either case) to a 0-based index.
        if Self.row.isupper():
            Self.row = (ord(Self.row) - ord("A"))
        else:
            Self.row = (ord(Self.row) - ord("a"))
        ##Check that row ranges from A to J
        if Self.row < 0 or Self.row > (len(Self.player2_ship) - 1):
            tkinter.messagebox.showerror("Invalid Row", "Enter Valid Row")
            ValidRow = False
    ##Get the column value
    ValidColumn = True
    Self.column = Self.P2columnhit.get()
    ##Verify the entry
    if len(Self.column) == 0:
        tkinter.messagebox.showerror("No Entry", "Please Enter A Number")
        ValidColumn = False
    else:
        try:
            # User enters 1-based column; store 0-based.
            Self.column = (int(Self.column) - 1)
        except ValueError:
            tkinter.messagebox.showerror("Invalid Column", "Enter Valid Column")
            ValidColumn = False
    ##Check Column Is Valid
    if ValidColumn:
        if Self.column < 0 or Self.column > (len(Self.player2_ship) - 1):
            tkinter.messagebox.showerror("Invalid Column", "Enter Valid Column")
            ValidColumn = False
    UnhitCoordinate = True
    Hit = True
    ##Check Hit
    if ValidColumn:
        if ValidRow:
            if not Self.player2_hits[Self.row][Self.column] == 0:
                UnhitCoordinate = False
                tkinter.messagebox.showerror("Hit Coordinate", "This Coordinate Has Already Been Hit")
            if UnhitCoordinate:
                # Cell values > 1 mark a ship segment (2..6 encode the ship type).
                if Self.player1_ship[Self.row][Self.column] > 1:
                    Self.player2_hits[Self.row][Self.column] = 1
                    label = Label(Self.Player2HitsFrame, text="X")
                    label.grid(row=Self.row + 2, column=Self.column + 2)
                    label = Label(Self.Player1Ships, text="*")
                    label.grid(row=Self.row + 2, column=Self.column + 2)
                    ##Play Sound Effects
                    if Self.SFX:
                        pygame.mixer.Sound.play(Self.hit_sound)
                    tkinter.messagebox.showinfo("Hit Result", "Hit")
                    Self.player1life -= 1
                    Hit = True
                    ##Check if ship is destroyed
                    if Self.player1_ship[Self.row][Self.column] == 2:
                        Self.player1_Carrierlife -= 1
                        if Self.player1_Carrierlife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship", "Carrier Destroyed")
                    elif Self.player1_ship[Self.row][Self.column] == 3:
                        Self.player1_Battleshiplife -= 1
                        if Self.player1_Battleshiplife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship", "Battleship Destroyed")
                    elif Self.player1_ship[Self.row][Self.column] == 4:
                        Self.player1_Cruiserlife -= 1
                        if Self.player1_Cruiserlife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship", "Cruiser Destroyed")
                    elif Self.player1_ship[Self.row][Self.column] == 5:
                        Self.player1_Submarinelife -= 1
                        if Self.player1_Submarinelife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship", "Submarine Destroyed")
                    else:
                        Self.player1_Destroyerlife -= 1
                        if Self.player1_Destroyerlife == 0:
                            tkinter.messagebox.showinfo("Destroyed Ship", "Destroyer Destroyed")
                    # Mark the segment as hit in the ship grid.
                    Self.player1_ship[Self.row][Self.column] = 1
                else:
                    Self.player2_hits[Self.row][Self.column] = 2
                    label = Label(Self.Player2HitsFrame, text="O")
                    label.grid(row=Self.row + 2, column=Self.column + 2)
                    ##Play Sound Effects
                    if Self.SFX:
                        pygame.mixer.Sound.play(Self.miss_sound)
                    tkinter.messagebox.showinfo("Hit Result", "Miss")
                    Hit = False
                    # 9 marks a recorded miss on the ship grid.
                    Self.player1_ship[Self.row][Self.column] = 9
                    label = Label(Self.Player1Ships, text="O")
                    label.grid(row=Self.row + 2, column=Self.column + 2)
                if Self.player1life == 0:
                    tkinter.messagebox.showinfo("End Of Game", Self.P2name + " Won")
                    Self.sMainMenu()
                if not Hit:
                    # A miss ends the turn: swap frames and gate the hand-off
                    # behind the next player's password so boards stay private.
                    Self.canSave = True
                    Self.Player2Frame.pack_forget()
                    Self.password = tkinter.simpledialog.askstring("Password", Self.P1name + " Enter Your Password:")
                    while not Self.password == Self.P1password:
                        tkinter.messagebox.showerror("Wrong Password", "Enter Your Password")
                        Self.password = tkinter.simpledialog.askstring("Password", Self.P1name + " Enter Your Password:")
                    Self.Player1Frame.pack()
def CheckPassword(Self):
    """Validate the active player's password complexity.

    Checks the password selected by ``Self.Checkpass`` (1 -> player 1's
    ``P1password``, anything else -> player 2's ``P2password``) for at least
    one ASCII lower-case letter, one ASCII upper-case letter, one digit and
    one special character from ``$ # @ ? *``. Pops a tkinter error box for
    the overall failure plus one per unmet requirement.

    Returns:
        bool: True when the password is INVALID (the original
        ``invalidpass`` flag), False when all four requirements are met.
    """
    SpecialCharacters = ("$", "#", "@", "?", "*")

    def _scan(password):
        # found[i]: lower-case / upper-case / digit / special seen so far.
        found = [False, False, False, False]
        for ch in password:
            # ASCII-only range checks, matching the original ord() ranges.
            if not found[0] and "a" <= ch <= "z":
                found[0] = True
            if not found[1] and "A" <= ch <= "Z":
                found[1] = True
            if not found[2]:
                try:
                    if int(ch) in range(0, 10):
                        found[2] = True
                except ValueError:
                    pass  # non-numeric character - keep scanning
            if not found[3] and ch in SpecialCharacters:
                found[3] = True
        return found

    if Self.Checkpass == 1:
        Cond = _scan(Self.P1password)
    else:
        Cond = _scan(Self.P2password)

    invalidpass = not all(Cond)
    if invalidpass:
        tkinter.messagebox.showerror("Invalid Password", "Your password is invalid!")
        messages = (
            "Your password doesn't contain a lower case letter",
            "Your password doesn't contain an upper case letter",
            "Your password doesn't contain a digit",
            "Your password doesn't contain a special character",
        )
        # Report every unmet requirement, in the original order.
        for met, message in zip(Cond, messages):
            if not met:
                tkinter.messagebox.showerror("Invalid Password", message)
    return invalidpass
# Script entry point: instantiate the game class - presumably this kicks off
# the tkinter UI from its __init__ (TODO confirm; the class body is not
# visible in this chunk).
a = BattleShip()
| 40.269438 | 314 | 0.488539 | 10,142 | 104,620 | 5.005916 | 0.041116 | 0.023991 | 0.028954 | 0.020583 | 0.865157 | 0.828777 | 0.812192 | 0.791038 | 0.777467 | 0.758302 | 0 | 0.027628 | 0.412541 | 104,620 | 2,597 | 315 | 40.284944 | 0.798438 | 0.036991 | 0 | 0.761593 | 0 | 0.002973 | 0.062078 | 0.00053 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017836 | false | 0.041023 | 0.004756 | 0 | 0.026159 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
db8c2fbafc4c38d0ac6966d6be6e129a522cb713 | 141 | py | Python | handle_text/__init__.py | guoweikuang/weibo_project | 38cb2a6d72a16f2f8c1714e83564c833f8e4af0c | [
"Apache-2.0"
] | 4 | 2019-03-25T08:47:22.000Z | 2021-03-16T02:39:29.000Z | handle_text/__init__.py | guoweikuang/weibo_project | 38cb2a6d72a16f2f8c1714e83564c833f8e4af0c | [
"Apache-2.0"
] | 1 | 2020-01-06T03:37:46.000Z | 2020-01-06T03:37:46.000Z | handle_text/__init__.py | guoweikuang/weibo_project | 38cb2a6d72a16f2f8c1714e83564c833f8e4af0c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import jieba
from common.config import get_jieba_dict_path
jieba.load_userdict(get_jieba_dict_path("user_dict.txt")) | 28.2 | 57 | 0.787234 | 23 | 141 | 4.478261 | 0.652174 | 0.15534 | 0.23301 | 0.31068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007752 | 0.085106 | 141 | 5 | 57 | 28.2 | 0.790698 | 0.148936 | 0 | 0 | 0 | 0 | 0.109244 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
9163c346a2bedc3c6ce51f9ead00d06beb7930df | 73,759 | py | Python | keystone/tests/unit/test_limits.py | 10088/keystone | 1561da645b6512decdc0d307d2ec79a8a4c9cc87 | [
"Apache-2.0"
] | 615 | 2015-01-07T12:32:52.000Z | 2022-03-24T03:49:47.000Z | keystone/tests/unit/test_limits.py | 10088/keystone | 1561da645b6512decdc0d307d2ec79a8a4c9cc87 | [
"Apache-2.0"
] | 11 | 2015-04-13T18:52:40.000Z | 2021-08-21T06:13:05.000Z | keystone/tests/unit/test_limits.py | 10088/keystone | 1561da645b6512decdc0d307d2ec79a8a4c9cc87 | [
"Apache-2.0"
] | 696 | 2015-01-15T00:31:07.000Z | 2022-03-16T09:56:00.000Z | # Copyright 2018 Huawei
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import http.client
import uuid
from keystone.common import provider_api
from keystone.common.validation import validators
import keystone.conf
from keystone.tests import unit
from keystone.tests.unit import test_v3
# Module-level handles used throughout the tests: the global oslo.config
# object and keystone's lazy provider-API registry.
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class LimitModelTestCase(test_v3.RestfulTestCase):
    """Test the read-only /limits/model resource (enforcement model)."""

    def test_get_default_limit_model_response_schema(self):
        # The response must match this exact JSON schema - no extra keys.
        schema = {
            'type': 'object',
            'properties': {
                'model': {
                    'type': 'object',
                    'properties': {
                        'name': {'type': 'string'},
                        'description': {'type': 'string'}
                    },
                    'required': ['name', 'description'],
                    'additionalProperties': False,
                },
            },
            'required': ['model'],
            'additionalProperties': False,
        }
        validator = validators.SchemaValidator(schema)
        response = self.get('/limits/model')
        validator.validate(response.json_body)

    def test_head_limit_model(self):
        self.head('/limits/model', expected_status=http.client.OK)

    def test_get_limit_model_returns_default_model(self):
        response = self.get('/limits/model')
        model = response.result
        expected = {
            'model': {
                'name': 'flat',
                'description': (
                    'Limit enforcement and validation does not take project '
                    'hierarchy into consideration.'
                )
            }
        }
        self.assertDictEqual(expected, model)

    def test_get_limit_model_without_token_fails(self):
        self.get(
            '/limits/model', noauth=True,
            expected_status=http.client.UNAUTHORIZED
        )

    def test_head_limit_model_without_token_fails(self):
        self.head(
            '/limits/model', noauth=True,
            expected_status=http.client.UNAUTHORIZED
        )
class RegisteredLimitsTestCase(test_v3.RestfulTestCase):
    """Test registered_limits CRUD."""

    def setUp(self):
        super(RegisteredLimitsTestCase, self).setUp()

        # Most of these tests require system-scoped tokens. Let's have one on
        # hand so that we can use it in tests when we need it.
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.role_id
        )
        self.system_admin_token = self.get_system_scoped_token()

        # There is already a sample service and region created from
        # load_sample_data() but we're going to create another service and
        # region for specific testing purposes.
        response = self.post('/regions', body={'region': {}})
        self.region2 = response.json_body['region']
        self.region_id2 = self.region2['id']

        service_ref = {'service': {
            'name': uuid.uuid4().hex,
            'enabled': True,
            'type': 'type2'}}
        response = self.post('/services', body=service_ref)
        self.service2 = response.json_body['service']
        self.service_id2 = self.service2['id']

    def test_create_registered_limit(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        registered_limits = r.result['registered_limits']
        for key in ['service_id', 'region_id', 'resource_name',
                    'default_limit', 'description']:
            self.assertEqual(registered_limits[0][key], ref[key])

    def test_create_registered_limit_without_region(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        registered_limits = r.result['registered_limits']
        for key in ['service_id', 'resource_name', 'default_limit']:
            self.assertEqual(registered_limits[0][key], ref[key])
        self.assertIsNone(registered_limits[0].get('region_id'))

    def test_create_registered_without_description(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id)
        ref.pop('description')
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        registered_limits = r.result['registered_limits']
        for key in ['service_id', 'region_id', 'resource_name',
                    'default_limit']:
            self.assertEqual(registered_limits[0][key], ref[key])
        self.assertIsNone(registered_limits[0]['description'])

    def test_create_multi_registered_limit(self):
        ref1 = unit.new_registered_limit_ref(service_id=self.service_id,
                                             region_id=self.region_id,
                                             resource_name='volume')
        ref2 = unit.new_registered_limit_ref(service_id=self.service_id,
                                             resource_name='snapshot')
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref1, ref2]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        registered_limits = r.result['registered_limits']
        for key in ['service_id', 'resource_name', 'default_limit']:
            self.assertEqual(registered_limits[0][key], ref1[key])
            self.assertEqual(registered_limits[1][key], ref2[key])
        self.assertEqual(registered_limits[0]['region_id'], ref1['region_id'])
        self.assertIsNone(registered_limits[1].get('region_id'))

    def test_create_registered_limit_return_count(self):
        ref1 = unit.new_registered_limit_ref(service_id=self.service_id,
                                             region_id=self.region_id)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref1]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        registered_limits = r.result['registered_limits']
        self.assertEqual(1, len(registered_limits))

        ref2 = unit.new_registered_limit_ref(service_id=self.service_id2,
                                             region_id=self.region_id2)
        ref3 = unit.new_registered_limit_ref(service_id=self.service_id2)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref2, ref3]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        registered_limits = r.result['registered_limits']
        self.assertEqual(2, len(registered_limits))

    def test_create_registered_limit_with_invalid_input(self):
        ref1 = unit.new_registered_limit_ref()
        ref2 = unit.new_registered_limit_ref(default_limit='not_int')
        ref3 = unit.new_registered_limit_ref(resource_name=123)
        ref4 = unit.new_registered_limit_ref(region_id='fake_region')
        for input_limit in [ref1, ref2, ref3, ref4]:
            self.post(
                '/registered_limits',
                body={'registered_limits': [input_limit]},
                token=self.system_admin_token,
                expected_status=http.client.BAD_REQUEST)

    def test_create_registered_limit_duplicate(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id)
        self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CONFLICT)

    def test_update_registered_limit(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id,
                                            resource_name='volume',
                                            default_limit=10)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        update_ref = {
            'service_id': self.service_id2,
            'region_id': self.region_id2,
            'resource_name': 'snapshot',
            'default_limit': 5,
            'description': 'test description'
        }
        r = self.patch(
            '/registered_limits/%s' % r.result['registered_limits'][0]['id'],
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.OK)
        new_registered_limits = r.result['registered_limit']
        self.assertEqual(new_registered_limits['service_id'], self.service_id2)
        self.assertEqual(new_registered_limits['region_id'], self.region_id2)
        self.assertEqual(new_registered_limits['resource_name'], 'snapshot')
        self.assertEqual(new_registered_limits['default_limit'], 5)
        self.assertEqual(new_registered_limits['description'],
                         'test description')

    def test_update_registered_limit_region_failed(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            resource_name='volume',
                                            default_limit=10,
                                            description='test description')
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        update_ref = {
            'region_id': self.region_id,
        }
        registered_limit_id = r.result['registered_limits'][0]['id']
        r = self.patch(
            '/registered_limits/%s' % registered_limit_id,
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.OK)
        new_registered_limits = r.result['registered_limit']
        self.assertEqual(self.region_id, new_registered_limits['region_id'])

        # An empty-string region_id is rejected outright.
        update_ref['region_id'] = ''
        r = self.patch(
            '/registered_limits/%s' % registered_limit_id,
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.BAD_REQUEST)

    def test_update_registered_limit_description(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id,
                                            resource_name='volume',
                                            default_limit=10)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        update_ref = {
            'description': 'test description'
        }
        registered_limit_id = r.result['registered_limits'][0]['id']
        r = self.patch(
            '/registered_limits/%s' % registered_limit_id,
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.OK)
        new_registered_limits = r.result['registered_limit']
        self.assertEqual(new_registered_limits['description'],
                         'test description')

        # Unlike region_id, the description may be updated to empty.
        update_ref['description'] = ''
        r = self.patch(
            '/registered_limits/%s' % registered_limit_id,
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.OK)
        new_registered_limits = r.result['registered_limit']
        self.assertEqual(new_registered_limits['description'], '')

    def test_update_registered_limit_region_id_to_none(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id,
                                            resource_name='volume',
                                            default_limit=10)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        update_ref = {
            'region_id': None
        }
        registered_limit_id = r.result['registered_limits'][0]['id']
        r = self.patch(
            '/registered_limits/%s' % registered_limit_id,
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.OK)
        self.assertIsNone(r.result['registered_limit']['region_id'])

    def test_update_registered_limit_region_id_to_none_conflict(self):
        ref1 = unit.new_registered_limit_ref(service_id=self.service_id,
                                             resource_name='volume',
                                             default_limit=10)
        ref2 = unit.new_registered_limit_ref(service_id=self.service_id,
                                             region_id=self.region_id,
                                             resource_name='volume',
                                             default_limit=10)
        self.post(
            '/registered_limits',
            body={'registered_limits': [ref1]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref2]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        update_ref = {
            'region_id': None
        }
        registered_limit_id = r.result['registered_limits'][0]['id']
        # There is a registered limit with "service_id=self.service_id,
        # region_id=None" already. So update ref2's region_id to None will
        # raise 409 Conflict Error.
        self.patch(
            '/registered_limits/%s' % registered_limit_id,
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.CONFLICT)

    def test_update_registered_limit_not_found(self):
        update_ref = {
            'service_id': self.service_id,
            'region_id': self.region_id,
            'resource_name': 'snapshot',
            'default_limit': 5
        }
        self.patch(
            '/registered_limits/%s' % uuid.uuid4().hex,
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.NOT_FOUND)

    def test_update_registered_limit_with_invalid_input(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id,
                                            resource_name='volume',
                                            default_limit=10)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        reg_id = r.result['registered_limits'][0]['id']

        update_ref1 = unit.new_registered_limit_ref(service_id='fake_id')
        update_ref2 = unit.new_registered_limit_ref(default_limit='not_int')
        update_ref3 = unit.new_registered_limit_ref(resource_name=123)
        update_ref4 = unit.new_registered_limit_ref(region_id='fake_region')
        update_ref5 = unit.new_registered_limit_ref(description=123)
        for input_limit in [update_ref1, update_ref2, update_ref3,
                            update_ref4, update_ref5]:
            self.patch(
                '/registered_limits/%s' % reg_id,
                body={'registered_limit': input_limit},
                token=self.system_admin_token,
                expected_status=http.client.BAD_REQUEST)

    def test_update_registered_limit_with_referenced_limit(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id,
                                            resource_name='volume',
                                            default_limit=10)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        # Create a project limit that references the registered limit; the
        # registered limit then becomes immutable.
        ref = unit.new_limit_ref(project_id=self.project_id,
                                 service_id=self.service_id,
                                 region_id=self.region_id,
                                 resource_name='volume')
        self.post(
            '/limits',
            body={'limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)

        update_ref = {
            'service_id': self.service_id2,
            'region_id': self.region_id2,
            'resource_name': 'snapshot',
            'default_limit': 5
        }
        self.patch(
            '/registered_limits/%s' % r.result['registered_limits'][0]['id'],
            body={'registered_limit': update_ref},
            token=self.system_admin_token,
            expected_status=http.client.FORBIDDEN)

    def test_list_registered_limit(self):
        r = self.get(
            '/registered_limits',
            expected_status=http.client.OK)
        self.assertEqual([], r.result.get('registered_limits'))
        ref1 = unit.new_registered_limit_ref(service_id=self.service_id,
                                             resource_name='test_resource',
                                             region_id=self.region_id)
        ref2 = unit.new_registered_limit_ref(service_id=self.service_id2,
                                             resource_name='test_resource',
                                             region_id=self.region_id2)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref1, ref2]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        id1 = r.result['registered_limits'][0]['id']
        r = self.get(
            '/registered_limits',
            expected_status=http.client.OK)
        registered_limits = r.result['registered_limits']
        self.assertEqual(len(registered_limits), 2)
        # The list order is not guaranteed; match refs by id once.
        for key in ['service_id', 'region_id', 'resource_name',
                    'default_limit']:
            if registered_limits[0]['id'] == id1:
                self.assertEqual(registered_limits[0][key], ref1[key])
                self.assertEqual(registered_limits[1][key], ref2[key])
                break
            self.assertEqual(registered_limits[1][key], ref1[key])
            self.assertEqual(registered_limits[0][key], ref2[key])

        r = self.get(
            '/registered_limits?service_id=%s' % self.service_id,
            expected_status=http.client.OK)
        registered_limits = r.result['registered_limits']
        self.assertEqual(len(registered_limits), 1)
        for key in ['service_id', 'region_id', 'resource_name',
                    'default_limit']:
            self.assertEqual(registered_limits[0][key], ref1[key])

        r = self.get(
            '/registered_limits?region_id=%s' % self.region_id2,
            expected_status=http.client.OK)
        registered_limits = r.result['registered_limits']
        self.assertEqual(len(registered_limits), 1)
        for key in ['service_id', 'region_id', 'resource_name',
                    'default_limit']:
            self.assertEqual(registered_limits[0][key], ref2[key])

        r = self.get(
            '/registered_limits?resource_name=test_resource',
            expected_status=http.client.OK)
        registered_limits = r.result['registered_limits']
        self.assertEqual(len(registered_limits), 2)

    def test_show_registered_limit(self):
        ref1 = unit.new_registered_limit_ref(service_id=self.service_id,
                                             region_id=self.region_id)
        ref2 = unit.new_registered_limit_ref(service_id=self.service_id2,
                                             region_id=self.region_id2)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref1, ref2]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        id1 = r.result['registered_limits'][0]['id']
        self.get(
            '/registered_limits/fake_id',
            expected_status=http.client.NOT_FOUND)
        r = self.get(
            '/registered_limits/%s' % id1,
            expected_status=http.client.OK)
        registered_limit = r.result['registered_limit']
        for key in ['service_id', 'region_id', 'resource_name',
                    'default_limit', 'description']:
            self.assertEqual(registered_limit[key], ref1[key])

    def test_delete_registered_limit(self):
        ref1 = unit.new_registered_limit_ref(service_id=self.service_id,
                                             region_id=self.region_id)
        ref2 = unit.new_registered_limit_ref(service_id=self.service_id2,
                                             region_id=self.region_id2)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref1, ref2]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        id1 = r.result['registered_limits'][0]['id']
        self.delete('/registered_limits/%s' % id1,
                    token=self.system_admin_token,
                    expected_status=http.client.NO_CONTENT)
        self.delete('/registered_limits/fake_id',
                    token=self.system_admin_token,
                    expected_status=http.client.NOT_FOUND)
        r = self.get(
            '/registered_limits',
            expected_status=http.client.OK)
        registered_limits = r.result['registered_limits']
        self.assertEqual(len(registered_limits), 1)

    def test_delete_registered_limit_with_referenced_limit(self):
        ref = unit.new_registered_limit_ref(service_id=self.service_id,
                                            region_id=self.region_id,
                                            resource_name='volume',
                                            default_limit=10)
        r = self.post(
            '/registered_limits',
            body={'registered_limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)
        # A project limit referencing the registered limit blocks deletion.
        ref = unit.new_limit_ref(project_id=self.project_id,
                                 service_id=self.service_id,
                                 region_id=self.region_id,
                                 resource_name='volume')
        self.post(
            '/limits',
            body={'limits': [ref]},
            token=self.system_admin_token,
            expected_status=http.client.CREATED)

        id = r.result['registered_limits'][0]['id']
        self.delete('/registered_limits/%s' % id,
                    expected_status=http.client.FORBIDDEN)
class LimitsTestCase(test_v3.RestfulTestCase):
"""Test limits CRUD."""
def setUp(self):
super(LimitsTestCase, self).setUp()
# FIXME(lbragstad): Remove all this duplicated logic once we get all
# keystone tests using bootstrap consistently. This is something the
# bootstrap utility already does for us.
reader_role = {'id': uuid.uuid4().hex, 'name': 'reader'}
reader_role = PROVIDERS.role_api.create_role(
reader_role['id'], reader_role
)
member_role = {'id': uuid.uuid4().hex, 'name': 'member'}
member_role = PROVIDERS.role_api.create_role(
member_role['id'], member_role
)
PROVIDERS.role_api.create_implied_role(self.role_id, member_role['id'])
PROVIDERS.role_api.create_implied_role(
member_role['id'], reader_role['id']
)
# Most of these tests require system-scoped tokens. Let's have one on
# hand so that we can use it in tests when we need it.
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.role_id
)
self.system_admin_token = self.get_system_scoped_token()
# There is already a sample service and region created from
# load_sample_data() but we're going to create another service and
# region for specific testing purposes.
response = self.post('/regions', body={'region': {}})
self.region2 = response.json_body['region']
self.region_id2 = self.region2['id']
service_ref = {'service': {
'name': uuid.uuid4().hex,
'enabled': True,
'type': 'type2'}}
response = self.post('/services', body=service_ref)
self.service2 = response.json_body['service']
self.service_id2 = self.service2['id']
ref1 = unit.new_registered_limit_ref(service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
ref2 = unit.new_registered_limit_ref(service_id=self.service_id2,
resource_name='snapshot')
ref3 = unit.new_registered_limit_ref(service_id=self.service_id,
region_id=self.region_id,
resource_name='backup')
self.post(
'/registered_limits',
body={'registered_limits': [ref1, ref2, ref3]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
# Create more assignments, all are:
#
# self.user -- admin -- self.project
# self.user -- non-admin -- self.project_2
# self.user -- admin -- self.domain
# self.user -- non-admin -- self.domain_2
# self.user -- admin -- system
self.project_2 = unit.new_project_ref(domain_id=self.domain_id)
self.project_2_id = self.project_2['id']
PROVIDERS.resource_api.create_project(self.project_2_id,
self.project_2)
self.domain_2 = unit.new_domain_ref()
self.domain_2_id = self.domain_2['id']
PROVIDERS.resource_api.create_domain(self.domain_2_id, self.domain_2)
self.role_2 = unit.new_role_ref(name='non-admin')
self.role_2_id = self.role_2['id']
PROVIDERS.role_api.create_role(self.role_2_id, self.role_2)
PROVIDERS.assignment_api.create_grant(
self.role_2_id, user_id=self.user_id, project_id=self.project_2_id)
PROVIDERS.assignment_api.create_grant(
self.role_id, user_id=self.user_id, domain_id=self.domain_id)
PROVIDERS.assignment_api.create_grant(
self.role_2_id, user_id=self.user_id, domain_id=self.domain_2_id)
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.role_id)
def test_create_project_limit(self):
ref = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
r = self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
limits = r.result['limits']
self.assertIsNotNone(limits[0]['id'])
self.assertIsNone(limits[0]['domain_id'])
for key in ['service_id', 'region_id', 'resource_name',
'resource_limit', 'description', 'project_id']:
self.assertEqual(limits[0][key], ref[key])
def test_create_domain_limit(self):
ref = unit.new_limit_ref(domain_id=self.domain_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
r = self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
limits = r.result['limits']
self.assertIsNotNone(limits[0]['id'])
self.assertIsNone(limits[0]['project_id'])
for key in ['service_id', 'region_id', 'resource_name',
'resource_limit', 'description', 'domain_id']:
self.assertEqual(limits[0][key], ref[key])
def test_create_limit_without_region(self):
ref = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id2,
resource_name='snapshot')
r = self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
limits = r.result['limits']
self.assertIsNotNone(limits[0]['id'])
self.assertIsNotNone(limits[0]['project_id'])
for key in ['service_id', 'resource_name', 'resource_limit']:
self.assertEqual(limits[0][key], ref[key])
self.assertIsNone(limits[0].get('region_id'))
def test_create_limit_without_description(self):
ref = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
ref.pop('description')
r = self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
limits = r.result['limits']
self.assertIsNotNone(limits[0]['id'])
self.assertIsNotNone(limits[0]['project_id'])
for key in ['service_id', 'region_id', 'resource_name',
'resource_limit']:
self.assertEqual(limits[0][key], ref[key])
self.assertIsNone(limits[0]['description'])
def test_create_limit_with_domain_as_project(self):
ref = unit.new_limit_ref(project_id=self.domain_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
r = self.post('/limits', body={'limits': [ref]},
token=self.system_admin_token)
limits = r.result['limits']
self.assertIsNone(limits[0]['project_id'])
self.assertEqual(self.domain_id, limits[0]['domain_id'])
def test_create_multi_limit(self):
ref1 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
ref2 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id2,
resource_name='snapshot')
r = self.post(
'/limits',
body={'limits': [ref1, ref2]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
limits = r.result['limits']
for key in ['service_id', 'resource_name', 'resource_limit']:
self.assertEqual(limits[0][key], ref1[key])
self.assertEqual(limits[1][key], ref2[key])
self.assertEqual(limits[0]['region_id'], ref1['region_id'])
self.assertIsNone(limits[1].get('region_id'))
def test_create_limit_return_count(self):
ref1 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
r = self.post(
'/limits',
body={'limits': [ref1]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
limits = r.result['limits']
self.assertEqual(1, len(limits))
ref2 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id2,
resource_name='snapshot')
ref3 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='backup')
r = self.post(
'/limits',
body={'limits': [ref2, ref3]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
limits = r.result['limits']
self.assertEqual(2, len(limits))
def test_create_limit_with_invalid_input(self):
ref1 = unit.new_limit_ref(project_id=self.project_id,
resource_limit='not_int')
ref2 = unit.new_limit_ref(project_id=self.project_id,
resource_name=123)
ref3 = unit.new_limit_ref(project_id=self.project_id,
region_id='fake_region')
for input_limit in [ref1, ref2, ref3]:
self.post(
'/limits',
body={'limits': [input_limit]},
token=self.system_admin_token,
expected_status=http.client.BAD_REQUEST)
def test_create_limit_duplicate(self):
ref = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CONFLICT)
def test_create_limit_without_reference_registered_limit(self):
ref = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id2,
resource_name='volume')
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
def test_update_limit(self):
ref = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=10)
r = self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
update_ref = {
'resource_limit': 5,
'description': 'test description'
}
r = self.patch(
'/limits/%s' % r.result['limits'][0]['id'],
body={'limit': update_ref},
token=self.system_admin_token,
expected_status=http.client.OK)
new_limits = r.result['limit']
self.assertEqual(new_limits['resource_limit'], 5)
self.assertEqual(new_limits['description'], 'test description')
def test_update_limit_not_found(self):
update_ref = {
'resource_limit': 5
}
self.patch(
'/limits/%s' % uuid.uuid4().hex,
body={'limit': update_ref},
token=self.system_admin_token,
expected_status=http.client.NOT_FOUND)
def test_update_limit_with_invalid_input(self):
ref = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=10)
r = self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
limit_id = r.result['limits'][0]['id']
invalid_resource_limit_update = {
'resource_limit': 'not_int'
}
invalid_description_update = {
'description': 123
}
for input_limit in [invalid_resource_limit_update,
invalid_description_update]:
self.patch(
'/limits/%s' % limit_id,
body={'limit': input_limit},
token=self.system_admin_token,
expected_status=http.client.BAD_REQUEST)
def test_list_limit(self):
r = self.get(
'/limits',
token=self.system_admin_token,
expected_status=http.client.OK)
self.assertEqual([], r.result.get('limits'))
ref1 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
ref2 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id2,
resource_name='snapshot')
r = self.post(
'/limits',
body={'limits': [ref1, ref2]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
id1 = r.result['limits'][0]['id']
r = self.get(
'/limits',
expected_status=http.client.OK)
limits = r.result['limits']
self.assertEqual(len(limits), 2)
if limits[0]['id'] == id1:
self.assertEqual(limits[0]['region_id'], ref1['region_id'])
self.assertIsNone(limits[1].get('region_id'))
for key in ['service_id', 'resource_name', 'resource_limit']:
self.assertEqual(limits[0][key], ref1[key])
self.assertEqual(limits[1][key], ref2[key])
else:
self.assertEqual(limits[1]['region_id'], ref1['region_id'])
self.assertIsNone(limits[0].get('region_id'))
for key in ['service_id', 'resource_name', 'resource_limit']:
self.assertEqual(limits[1][key], ref1[key])
self.assertEqual(limits[0][key], ref2[key])
r = self.get(
'/limits?service_id=%s' % self.service_id2,
expected_status=http.client.OK)
limits = r.result['limits']
self.assertEqual(len(limits), 1)
for key in ['service_id', 'resource_name', 'resource_limit']:
self.assertEqual(limits[0][key], ref2[key])
r = self.get(
'/limits?region_id=%s' % self.region_id,
expected_status=http.client.OK)
limits = r.result['limits']
self.assertEqual(len(limits), 1)
for key in ['service_id', 'region_id', 'resource_name',
'resource_limit']:
self.assertEqual(limits[0][key], ref1[key])
r = self.get(
'/limits?resource_name=volume',
expected_status=http.client.OK)
limits = r.result['limits']
self.assertEqual(len(limits), 1)
for key in ['service_id', 'region_id', 'resource_name',
'resource_limit']:
self.assertEqual(limits[0][key], ref1[key])
def test_list_limit_with_project_id_filter(self):
# create two limit in different projects for test.
self.config_fixture.config(group='oslo_policy',
enforce_scope=True)
ref1 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
ref2 = unit.new_limit_ref(project_id=self.project_2_id,
service_id=self.service_id2,
resource_name='snapshot')
self.post(
'/limits',
body={'limits': [ref1, ref2]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
# non system scoped request will get the limits in its project.
r = self.get('/limits', expected_status=http.client.OK)
limits = r.result['limits']
self.assertEqual(1, len(limits))
self.assertEqual(self.project_id, limits[0]['project_id'])
r = self.get(
'/limits', expected_status=http.client.OK,
auth=self.build_authentication_request(
user_id=self.user['id'], password=self.user['password'],
project_id=self.project_2_id))
limits = r.result['limits']
self.assertEqual(1, len(limits))
self.assertEqual(self.project_2_id, limits[0]['project_id'])
# any project user can filter by their own project
r = self.get(
'/limits?project_id=%s' % self.project_id,
expected_status=http.client.OK)
limits = r.result['limits']
self.assertEqual(1, len(limits))
self.assertEqual(self.project_id, limits[0]['project_id'])
# a system scoped request can specify the project_id filter
r = self.get(
'/limits?project_id=%s' % self.project_id,
expected_status=http.client.OK,
token=self.system_admin_token
)
limits = r.result['limits']
self.assertEqual(1, len(limits))
self.assertEqual(self.project_id, limits[0]['project_id'])
def test_list_limit_with_domain_id_filter(self):
# create two limit in different domains for test.
ref1 = unit.new_limit_ref(domain_id=self.domain_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
ref2 = unit.new_limit_ref(domain_id=self.domain_2_id,
service_id=self.service_id2,
resource_name='snapshot')
self.post(
'/limits',
body={'limits': [ref1, ref2]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
# non system scoped request will get the limits in its domain.
r = self.get(
'/limits', expected_status=http.client.OK,
auth=self.build_authentication_request(
user_id=self.user['id'], password=self.user['password'],
domain_id=self.domain_id))
limits = r.result['limits']
self.assertEqual(1, len(limits))
self.assertEqual(self.domain_id, limits[0]['domain_id'])
r = self.get(
'/limits', expected_status=http.client.OK,
auth=self.build_authentication_request(
user_id=self.user['id'], password=self.user['password'],
domain_id=self.domain_2_id))
limits = r.result['limits']
self.assertEqual(1, len(limits))
self.assertEqual(self.domain_2_id, limits[0]['domain_id'])
# if non system scoped request contain domain_id filter, keystone
# will return an empty list.
r = self.get(
'/limits?domain_id=%s' % self.domain_id,
expected_status=http.client.OK)
limits = r.result['limits']
self.assertEqual(0, len(limits))
# a system scoped request can specify the domain_id filter
r = self.get(
'/limits?domain_id=%s' % self.domain_id,
expected_status=http.client.OK,
auth=self.build_authentication_request(
user_id=self.user['id'], password=self.user['password'],
system=True))
limits = r.result['limits']
self.assertEqual(1, len(limits))
self.assertEqual(self.domain_id, limits[0]['domain_id'])
def test_show_project_limit(self):
ref1 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
ref2 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id2,
resource_name='snapshot')
r = self.post(
'/limits',
body={'limits': [ref1, ref2]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
if r.result['limits'][0]['resource_name'] == 'volume':
id1 = r.result['limits'][0]['id']
else:
id1 = r.result['limits'][1]['id']
self.get('/limits/fake_id',
token=self.system_admin_token,
expected_status=http.client.NOT_FOUND)
r = self.get('/limits/%s' % id1,
expected_status=http.client.OK)
limit = r.result['limit']
self.assertIsNone(limit['domain_id'])
for key in ['service_id', 'region_id', 'resource_name',
'resource_limit', 'description', 'project_id']:
self.assertEqual(limit[key], ref1[key])
def test_show_domain_limit(self):
ref1 = unit.new_limit_ref(domain_id=self.domain_id,
service_id=self.service_id2,
resource_name='snapshot')
r = self.post(
'/limits',
body={'limits': [ref1]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
id1 = r.result['limits'][0]['id']
r = self.get('/limits/%s' % id1,
expected_status=http.client.OK,
auth=self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
domain_id=self.domain_id))
limit = r.result['limit']
self.assertIsNone(limit['project_id'])
self.assertIsNone(limit['region_id'])
for key in ['service_id', 'resource_name', 'resource_limit',
'description', 'domain_id']:
self.assertEqual(limit[key], ref1[key])
def test_delete_limit(self):
ref1 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume')
ref2 = unit.new_limit_ref(project_id=self.project_id,
service_id=self.service_id2,
resource_name='snapshot')
r = self.post(
'/limits',
body={'limits': [ref1, ref2]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
id1 = r.result['limits'][0]['id']
self.delete('/limits/%s' % id1,
token=self.system_admin_token,
expected_status=http.client.NO_CONTENT)
self.delete('/limits/fake_id',
token=self.system_admin_token,
expected_status=http.client.NOT_FOUND)
r = self.get(
'/limits',
token=self.system_admin_token,
expected_status=http.client.OK)
limits = r.result['limits']
self.assertEqual(len(limits), 1)
class StrictTwoLevelLimitsTestCase(LimitsTestCase):
def setUp(self):
super(StrictTwoLevelLimitsTestCase, self).setUp()
# Most of these tests require system-scoped tokens. Let's have one on
# hand so that we can use it in tests when we need it.
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.role_id
)
self.system_admin_token = self.get_system_scoped_token()
# create two hierarchical projects trees for test. The first level is
# domain.
# A D
# / \ / \
# B C E F
domain_ref = {'domain': {'name': 'A', 'enabled': True}}
response = self.post('/domains', body=domain_ref)
self.domain_A = response.json_body['domain']
project_ref = {'project': {'name': 'B', 'enabled': True,
'domain_id': self.domain_A['id']}}
response = self.post('/projects', body=project_ref)
self.project_B = response.json_body['project']
project_ref = {'project': {'name': 'C', 'enabled': True,
'domain_id': self.domain_A['id']}}
response = self.post('/projects', body=project_ref)
self.project_C = response.json_body['project']
domain_ref = {'domain': {'name': 'D', 'enabled': True}}
response = self.post('/domains', body=domain_ref)
self.domain_D = response.json_body['domain']
project_ref = {'project': {'name': 'E', 'enabled': True,
'domain_id': self.domain_D['id']}}
response = self.post('/projects', body=project_ref)
self.project_E = response.json_body['project']
project_ref = {'project': {'name': 'F', 'enabled': True,
'domain_id': self.domain_D['id']}}
response = self.post('/projects', body=project_ref)
self.project_F = response.json_body['project']
def config_overrides(self):
super(StrictTwoLevelLimitsTestCase, self).config_overrides()
self.config_fixture.config(group='unified_limit',
enforcement_model='strict_two_level')
def test_create_child_limit(self):
# when A is 20, success to create B to 15, C to 18.
# A,20 A,20
# / \ --> / \
# B C B,15 C,18
ref = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=20)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
ref = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=15)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
ref = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=18)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
def test_create_child_limit_break_hierarchical_tree(self):
# when A is 20, success to create B to 15, but fail to create C to 21.
# A,20 A,20
# / \ --> / \
# B C B,15 C
#
# A,20 A,20
# / \ -/-> / \
# B,15 C B,15 C,21
ref = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=20)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
ref = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=15)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
ref = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=21)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
def test_create_child_with_default_parent(self):
# If A is not set, the default value is 10 (from registered limit).
# success to create B to 5, but fail to create C to 11.
# A(10) A(10)
# / \ --> / \
# B C B,5 C
#
# A(10) A(10)
# / \ -/-> / \
# B,5 C B,5 C,11
ref = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=5)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
ref = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=11)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
def test_create_parent_limit(self):
# When B is 9 , success to set A to 12
# A A,12
# / \ --> / \
# B,9 C B,9 C
ref = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=9)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
ref = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=12)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
def test_create_parent_limit_break_hierarchical_tree(self):
# When B is 9 , fail to set A to 8
# A A,8
# / \ -/-> / \
# B,9 C B,9 C
ref = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=9)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
ref = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=8)
self.post(
'/limits',
body={'limits': [ref]},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
def test_create_multi_limits(self):
# success to create a tree in one request like:
# A,12 D,9
# / \ / \
# B,9 C,5 E,5 F,4
ref_A = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=12)
ref_B = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=9)
ref_C = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=5)
ref_D = unit.new_limit_ref(domain_id=self.domain_D['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=9)
ref_E = unit.new_limit_ref(project_id=self.project_E['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=5)
ref_F = unit.new_limit_ref(project_id=self.project_F['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=4)
self.post(
'/limits',
body={'limits': [ref_A, ref_B, ref_C, ref_D, ref_E, ref_F]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
def test_create_multi_limits_invalid_input(self):
# fail to create a tree in one request like:
# A,12 D,9
# / \ / \
# B,9 C,5 E,5 F,10
# because F will break the second limit tree.
ref_A = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=12)
ref_B = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=9)
ref_C = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=5)
ref_D = unit.new_limit_ref(domain_id=self.domain_D['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=9)
ref_E = unit.new_limit_ref(project_id=self.project_E['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=5)
ref_F = unit.new_limit_ref(project_id=self.project_F['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=10)
self.post(
'/limits',
body={'limits': [ref_A, ref_B, ref_C, ref_D, ref_E, ref_F]},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
def test_create_multi_limits_break_hierarchical_tree(self):
# when there is some hierarchical_trees already like:
# A,12 D
# / \ / \
# B,9 C E,5 F
# fail to set C to 5 and D to 4 in one request like:
# A,12 D,4
# / \ / \
# B,9 C,5 E,5 F
# because D will break the second limit tree.
ref_A = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=12)
ref_B = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=9)
ref_E = unit.new_limit_ref(project_id=self.project_E['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=5)
self.post(
'/limits',
body={'limits': [ref_A, ref_B, ref_E]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
ref_C = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=5)
ref_D = unit.new_limit_ref(domain_id=self.domain_D['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=4)
self.post(
'/limits',
body={'limits': [ref_C, ref_D]},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
def test_update_child_limit(self):
# Success to update C to 9
# A,10 A,10
# / \ --> / \
# B,6 C,7 B,6 C,9
ref_A = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=10)
ref_B = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=6)
ref_C = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=7)
self.post(
'/limits',
body={'limits': [ref_A, ref_B]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
r = self.post(
'/limits',
body={'limits': [ref_C]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
update_dict = {'resource_limit': 9}
self.patch(
'/limits/%s' % r.result['limits'][0]['id'],
body={'limit': update_dict},
token=self.system_admin_token,
expected_status=http.client.OK)
def test_update_child_limit_break_hierarchical_tree(self):
# Fail to update C to 11
# A,10 A,10
# / \ -/-> / \
# B,6 C,7 B,6 C,11
ref_A = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=10)
ref_B = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=6)
ref_C = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=7)
self.post(
'/limits',
body={'limits': [ref_A, ref_B]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
r = self.post(
'/limits',
body={'limits': [ref_C]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
update_dict = {'resource_limit': 11}
self.patch(
'/limits/%s' % r.result['limits'][0]['id'],
body={'limit': update_dict},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
def test_update_child_limit_with_default_parent(self):
# If A is not set, the default value is 10 (from registered limit).
# Success to update C to 9 but fail to update C to 11
# A,(10) A,(10)
# / \ --> / \
# B, C,7 B C,9
#
# A,(10) A,(10)
# / \ -/-> / \
# B, C,7 B C,11
ref_C = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=7)
r = self.post(
'/limits',
body={'limits': [ref_C]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
update_dict = {'resource_limit': 9}
self.patch(
'/limits/%s' % r.result['limits'][0]['id'],
body={'limit': update_dict},
token=self.system_admin_token,
expected_status=http.client.OK)
update_dict = {'resource_limit': 11}
self.patch(
'/limits/%s' % r.result['limits'][0]['id'],
body={'limit': update_dict},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
def test_update_parent_limit(self):
# Success to update A to 8
# A,10 A,8
# / \ --> / \
# B,6 C,7 B,6 C,7
ref_A = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=10)
ref_B = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=6)
ref_C = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=7)
r = self.post(
'/limits',
body={'limits': [ref_A]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
self.post(
'/limits',
body={'limits': [ref_B, ref_C]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
update_dict = {'resource_limit': 8}
self.patch(
'/limits/%s' % r.result['limits'][0]['id'],
body={'limit': update_dict},
token=self.system_admin_token,
expected_status=http.client.OK)
def test_update_parent_limit_break_hierarchical_tree(self):
# Fail to update A to 6
# A,10 A,6
# / \ -/-> / \
# B,6 C,7 B,6 C,7
ref_A = unit.new_limit_ref(domain_id=self.domain_A['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=10)
ref_B = unit.new_limit_ref(project_id=self.project_B['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=6)
ref_C = unit.new_limit_ref(project_id=self.project_C['id'],
service_id=self.service_id,
region_id=self.region_id,
resource_name='volume',
resource_limit=7)
r = self.post(
'/limits',
body={'limits': [ref_A]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
self.post(
'/limits',
body={'limits': [ref_B, ref_C]},
token=self.system_admin_token,
expected_status=http.client.CREATED)
update_dict = {'resource_limit': 6}
self.patch(
'/limits/%s' % r.result['limits'][0]['id'],
body={'limit': update_dict},
token=self.system_admin_token,
expected_status=http.client.FORBIDDEN)
| 44.594317 | 79 | 0.528966 | 7,866 | 73,759 | 4.67938 | 0.039664 | 0.051022 | 0.058683 | 0.078244 | 0.901462 | 0.876195 | 0.845088 | 0.823055 | 0.79893 | 0.7829 | 0 | 0.012357 | 0.366938 | 73,759 | 1,653 | 80 | 44.621295 | 0.775924 | 0.060915 | 0 | 0.798422 | 0 | 0 | 0.096236 | 0.007594 | 0 | 0 | 0 | 0.000605 | 0.068149 | 1 | 0.043042 | false | 0.003587 | 0.005022 | 0 | 0.050933 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
917903ab663f0bf5649de4109fb3726c457a0c32 | 281,759 | py | Python | bireme/thesaurus/views.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | bireme/thesaurus/views.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | bireme/thesaurus/views.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #! coding: utf-8
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import render, render_to_response, get_object_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.base import TemplateView
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView
from django.contrib.contenttypes.models import ContentType
from django.views import generic
from utils.views import LoginRequiredView, GenericUpdateWithOneFormset
from django.conf import settings
from models import *
from forms import *
from django.db.models import Prefetch
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _, get_language
from django.core.paginator import Paginator
from django.contrib import messages
import datetime
from utils.context_processors import additional_user_info
# Number of result rows shown per page in paginated listings.
ITEMS_PER_PAGE = 10

# Default state of the search/filter form parameters ("form actions").
# Each view copies this mapping and overlays the request's GET values,
# so every key has a neutral default ('' or the initial page/order).
ACTIONS = dict(
    term_string='',
    filter_language='',
    descriptor_ui='',
    abbreviation='',
    qualifier_ui='',
    filter_fields='',
    filter_status='',
    decs_code='',
    tree_number='',
    concept_ui='',
    orderby='id',
    order='',
    page=1,
    visited='',
    s='',
    exact='',
    form_language='',
    choiced_thesaurus='',
    choiced_thesaurus_name='',
    choiced_concept_identifier_id='',
    choiced_term_id='',
    choiced_term_string='',
    choiced_language_code='',
)
# Descriptors ------------------------------------------------------------------------
class DescUpdate(LoginRequiredView):
    """
    Handle creation and update of Descriptor (IdentifierDesc) records.

    Step 1 of the descriptor wizard: the main identifier form plus six
    inline formsets (descriptions, tree numbers, pharmacological actions,
    see-related, previous indexing, entry combinations).  On success the
    user is redirected to the concept/term creation step
    (``create_concept_termdesc``).
    """
    model = IdentifierDesc
    success_url = reverse_lazy('create_concept_termdesc')
    form_class = IdentifierDescForm
    template_name = "thesaurus/descriptor_form_step1.html"

    def form_valid(self, form):
        # Bind every inline formset against the POSTed data.
        formset_descriptor = DescriptionDescFormSet(self.request.POST, instance=self.object)
        formset_treenumber = TreeNumbersListDescFormSet(self.request.POST, instance=self.object)
        formset_pharmaco = PharmacologicalActionListDescFormSet(self.request.POST, instance=self.object)
        formset_related = SeeRelatedListDescFormSet(self.request.POST, instance=self.object)
        formset_previous = PreviousIndexingListDescFormSet(self.request.POST, instance=self.object)
        formset_entrycombination = EntryCombinationListDescFormSet(self.request.POST, instance=self.object)
        # run all validation before for display formset errors at form
        form_valid = form.is_valid()
        formset_descriptor_valid = formset_descriptor.is_valid()
        formset_treenumber_valid = formset_treenumber.is_valid()
        formset_pharmaco_valid = formset_pharmaco.is_valid()
        formset_related_valid = formset_related.is_valid()
        formset_previous_valid = formset_previous.is_valid()
        formset_entrycombination_valid = formset_entrycombination.is_valid()
        if (form_valid and
            formset_descriptor_valid and
            formset_treenumber_valid and
            formset_pharmaco_valid and
            formset_related_valid and
            formset_previous_valid and
            formset_entrycombination_valid
            ):
            # Check whether any value was submitted in formset_treenumber
            tree_number_existentes=''
            exist_tree_numbers=0
            for f in formset_treenumber:
                if f.cleaned_data is not None:
                    fields_t = f.cleaned_data
                    content_tree_number = fields_t.get('tree_number')
                    # DELETE comes from cleaned_data and tells whether the row
                    # was removed in the form (True means it was deleted).
                    status_preenchimento = fields_t.get('DELETE')
                    if status_preenchimento == False:
                        exist_tree_numbers = exist_tree_numbers + 1
                        # Check whether this tree_number is already registered
                        result_tree_number = TreeNumbersListDesc.objects.filter(tree_number=content_tree_number).values('identifier_id')
                        # NOTE(review): a QuerySet is never None, so this test is
                        # always true; harmless since the loop is simply empty
                        # when there are no matches.
                        if result_tree_number is not None:
                            for t in result_tree_number:
                                identifier_id_existent_tree_number = t.get('identifier_id')
                                # Checks if the record is for the thesaurus being worked on
                                res_existent_thesaurus_id = IdentifierDesc.objects.filter(id=identifier_id_existent_tree_number).values('thesaurus_id')
                                existent_thesaurus_id=res_existent_thesaurus_id[0].get('thesaurus_id')
                                # Brings id of thesaurus currently operating
                                environment_thesaurus_id = self.request.GET.get("ths")
                                # If tree_number exists in same thesaurus creates error
                                if int(environment_thesaurus_id) == int(existent_thesaurus_id):
                                    tree_number_existentes = tree_number_existentes + content_tree_number + ' '
            # Condition required to be allowed to create the record:
            # no duplicated tree numbers and at least one tree number kept.
            if not tree_number_existentes and exist_tree_numbers > 0:
                # Bring the choiced language_code from the first form
                registry_language = self.request.GET.get("language_code")
                # Get sequential number to write to decs_code
                self.object = form.save(commit=False)
                ths = self.request.GET.get("ths")
                try:
                    seq = code_controller.objects.get(thesaurus=self.request.GET.get("ths"))
                    nseq = str(int(seq.sequential_number) + 1)
                    seq.sequential_number = nseq
                    seq.save()
                except code_controller.DoesNotExist:
                    # First record of this thesaurus: start the counter at 1.
                    seq = code_controller(sequential_number=1,thesaurus=ths)
                    nseq = 1
                    seq.save()
                self.object.decs_code = nseq
                self.object = form.save(commit=True)
                # Get thesaurus_acronym to create new ID format to descriptor_ui field
                self.object = form.save(commit=False)
                try:
                    acronym = Thesaurus.objects.filter(id=self.request.GET.get("ths")).values('thesaurus_acronym')
                    # retrieve the acronym and upper-case it
                    acronym = str(acronym[0].get('thesaurus_acronym')).upper()
                    # use self.object.decs_code to compose descriptor_ui
                    zseq = str(self.object.decs_code).zfill(6) # left-pad with zeros
                    self.object.descriptor_ui = 'D' + acronym + zseq
                except Thesaurus.DoesNotExist:
                    id_thesaurus = str(self.object.id)
                    print 'Warning! - No thesaurus_acronym for id -->',id_thesaurus
                self.object = form.save(commit=True)
                # Point every formset at the saved object and persist them.
                formset_descriptor.instance = self.object
                formset_descriptor.save()
                formset_treenumber.instance = self.object
                formset_treenumber.save()
                formset_pharmaco.instance = self.object
                formset_pharmaco.save()
                formset_related.instance = self.object
                formset_related.save()
                formset_previous.instance = self.object
                formset_previous.save()
                formset_entrycombination.instance = self.object
                formset_entrycombination.save()
                form.save()
                # These variables concern creating a new record from an
                # existing term; when present they are forwarded to the
                # concept-creation phase.
                if self.request.GET.get("term_ui") and self.request.GET.get("term_id"):
                    term_ui_alter = self.request.GET.get("term_ui")
                    term_id_alter = self.request.GET.get("term_id")
                    return redirect(reverse('create_concept_termdesc') + '?ths=' + self.request.GET.get("ths") + '&' + 'registry_language=' + registry_language + '&term=' + self.request.GET.get("term") + '&term_ui_alter=' + term_ui_alter + '&term_id_alter=' + term_id_alter)
                else:
                    return redirect(reverse('create_concept_termdesc') + '?ths=' + self.request.GET.get("ths") + '&' + 'registry_language=' + registry_language + '&term=' + self.request.GET.get("term"))
            else:
                # Re-render the form with an error message.
                if exist_tree_numbers == 0:
                    msg_erro = _("Hierarchical level")
                else:
                    msg_erro = _("already exists!!!") + ' -----> ' + tree_number_existentes
                return self.render_to_response(
                    self.get_context_data(
                        form=form,
                        formset_descriptor=formset_descriptor,
                        formset_treenumber=formset_treenumber,
                        formset_pharmaco=formset_pharmaco,
                        formset_related=formset_related,
                        formset_previous=formset_previous,
                        formset_entrycombination=formset_entrycombination,
                        msg_erro=msg_erro,
                    )
                )
        else:
            # Validation failed somewhere: re-render with bound formsets so
            # their errors are visible.
            return self.render_to_response(
                self.get_context_data(
                    form=form,
                    formset_descriptor=formset_descriptor,
                    formset_treenumber=formset_treenumber,
                    formset_pharmaco=formset_pharmaco,
                    formset_related=formset_related,
                    formset_previous=formset_previous,
                    formset_entrycombination=formset_entrycombination,
                )
            )

    # Makes forms.py apply a pre-filter for abbreviation
    def get_form_kwargs(self):
        ths = self.request.GET.get("ths")
        kwargs = super(DescUpdate, self).get_form_kwargs()
        kwargs.update({'ths': ths})
        return kwargs

    def form_invalid(self, form):
        # force use of form_valid method to run all validations
        # (so formset errors are collected and rendered as well)
        return self.form_valid(form)

    def get_context_data(self, **kwargs):
        context = super(DescUpdate, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        context['language_system'] = get_language()
        if self.request.method == 'GET':
            # Unbound formsets for the initial render.
            context['formset_descriptor'] = DescriptionDescFormSet(instance=self.object)
            context['formset_treenumber'] = TreeNumbersListDescFormSet(instance=self.object)
            context['formset_pharmaco'] = PharmacologicalActionListDescFormSet(instance=self.object)
            context['formset_related'] = SeeRelatedListDescFormSet(instance=self.object)
            context['formset_previous'] = PreviousIndexingListDescFormSet(instance=self.object)
            context['formset_entrycombination'] = EntryCombinationListDescFormSet(instance=self.object)
        return context
class DescCreateView(DescUpdate, CreateView):
    """
    Class-based view used to create Descriptors.

    ``dispatch`` performs a per-thesaurus permission check: users outside of
    cooperating center 'BR1.1' may only act on thesauri whose
    ``thesaurus_scope`` matches one of the service roles assigned to them.
    """
    def dispatch(self, *args, **kwargs):
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings the thesaurus id from the environment ("thesaurus" wins
            # over "ths", as in the original nested ifs).
            # Bug fix: the original left environment_thesaurus_id unassigned
            # when neither GET parameter was present, raising an
            # UnboundLocalError (HTTP 500) in the comparison below; a missing
            # thesaurus id is now treated as "no access".
            environment_thesaurus_id = (self.request.GET.get("thesaurus") or
                                        self.request.GET.get("ths"))
            if not environment_thesaurus_id:
                return HttpResponseForbidden()
            access_status = False
            # Create array with all registered thesaurus
            ids_thesaurus = Thesaurus.objects.all().values('id', 'thesaurus_scope')
            # Run user_role array and compare the service registered to the user
            # with the service registered in thesaurus.  If the service exists,
            # compare its thesaurus id with the one in the environment; if no
            # pair matches, deny access.
            for user_service in user_role:
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status = True
            if not access_status:
                return HttpResponseForbidden()
        return super(DescCreateView, self).dispatch(*args, **kwargs)
class DescDeleteView(DescUpdate, DeleteView):
    """
    Class-based view that deletes a Descriptor record.

    After a successful delete the user is sent back to the descriptor list
    of the thesaurus currently being worked on.
    """
    model = IdentifierDesc
    template_name = 'thesaurus/descriptor_confirm_delete.html'

    def get_success_url(self):
        # Keep the current thesaurus ("ths") in the redirect query string.
        query_string = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/descriptors/%s' % query_string
class TermListDescChk(LoginRequiredView, ListView):
    """
    Check whether a term already exists before creating a new descriptor.

    Renders step 0 of the descriptor wizard; when the chosen term/language
    pair does not yet exist (as draft, published or historical) in the
    selected thesaurus, the user is redirected straight to the descriptor
    creation form.
    """
    template_name = "thesaurus/descriptor_form_step0.html"
    context_object_name = "registers"

    def dispatch(self, *args, **kwargs):
        # Per-thesaurus permission check: users outside of center 'BR1.1'
        # may only act on thesauri whose scope matches one of their roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            # NOTE(review): if neither "thesaurus" nor "ths" is present,
            # environment_thesaurus_id is never assigned and the comparison
            # below raises UnboundLocalError -- confirm callers always send
            # one of them.
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Run user_role array and compare the service registered to the user with the service registered in thesaurus.
            # If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(TermListDescChk, self).dispatch(*args, **kwargs)

    def get_queryset(self):
        # The page itself lists nothing; this only captures the request
        # parameters into self.actions for get_context_data below.
        object_list = []
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        return object_list

    def get_context_data(self, **kwargs):
        # Relies on self.actions having been populated by get_queryset(),
        # which ListView.get() runs before building the context.
        context = super(TermListDescChk, self).get_context_data(**kwargs)
        context['choiced_thesaurus_info'] = Thesaurus.objects.filter(id=self.request.GET.get("thesaurus"))
        context['term_choiced'] = self.actions['term_string']
        context['filter_language'] = self.actions['filter_language']
        return context

    def render_to_response(self, context):
        # NOTE(review): this override drops Django's **response_kwargs
        # parameter -- confirm nothing passes extra keyword arguments here.
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        if self.actions['term_string'] and self.actions['filter_language']:
            term_choiced = self.actions['term_string']
            # To be allowed to create a new term, it must not already exist
            # with one of these statuses:
            # -1 - Draft
            #  1 - Published
            #  5 - Historical
            # Search by draft record
            q_status_draft = Q(
                term_string__exact=self.actions['term_string'],
                language_code=self.actions['filter_language'],
                term_thesaurus=self.request.GET.get("thesaurus"),
                status=-1,
            )
            # Search by published record
            q_status_published = Q(
                term_string__exact=self.actions['term_string'],
                language_code=self.actions['filter_language'],
                term_thesaurus=self.request.GET.get("thesaurus"),
                status=1,
            )
            # Search by historical record
            q_status_historical = Q(
                term_string__exact=self.actions['term_string'],
                language_code=self.actions['filter_language'],
                term_thesaurus=self.request.GET.get("thesaurus"),
                status=5,
            )
            has_term = TermListDesc.objects.filter( q_status_draft | q_status_published | q_status_historical ).values('term_string')
            # Walk the results and compare byte-wise (UTF-8 encoded) to catch
            # exact matches only.
            has_equal=''
            for term in has_term:
                t=term.get('term_string').encode('utf-8')
                if t == term_choiced.encode('utf-8'):
                    has_equal=t
            # No exact match: go straight to the descriptor creation form.
            if not has_equal:
                return redirect('/thesaurus/descriptors/new/?ths=' + self.request.GET.get("thesaurus") + '&term=' + self.actions['term_string'] + '&language_code=' + self.actions['filter_language'])
        return super(TermListDescChk, self).render_to_response(context)
class DescRegisterUpdateView(LoginRequiredView, UpdateView):
    """
    Class-based view used to update descriptor (IdentifierDesc) information.

    Same main form and six inline formsets as the creation step; on success
    it redirects to the view page of the first term of the record's first
    concept.
    """
    model = IdentifierDesc
    template_name = 'thesaurus/descriptor_edit_register.html'
    form_class = IdentifierDescForm

    def dispatch(self, *args, **kwargs):
        # Per-thesaurus permission check: users outside of center 'BR1.1'
        # may only act on thesauri whose scope matches one of their roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            # NOTE(review): if neither "thesaurus" nor "ths" is present,
            # environment_thesaurus_id is never assigned and the comparison
            # below raises UnboundLocalError -- confirm callers always send
            # one of them.
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Run user_role array and compare the service registered to the user with the service registered in thesaurus.
            # If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(DescRegisterUpdateView, self).dispatch(*args, **kwargs)

    def get_success_url(self):
        id_register = self.object.id
        # Search ID of the first concept of the record to then search first term of the concept
        concepts_of_register = IdentifierConceptListDesc.objects.filter(identifier_id=id_register).values('id')
        id_concept = concepts_of_register[0].get('id')
        # Search ID of the first term of this concept to redirect
        terms_of_concept = TermListDesc.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/descriptors/view/%s%s' % ( id_term, ths )

    def form_valid(self, form):
        # Bind every inline formset against the POSTed data.
        formset_descriptor = DescriptionDescFormSet(self.request.POST, instance=self.object)
        formset_treenumber = TreeNumbersListDescFormSet(self.request.POST, instance=self.object)
        formset_pharmaco = PharmacologicalActionListDescFormSet(self.request.POST, instance=self.object)
        formset_related = SeeRelatedListDescFormSet(self.request.POST, instance=self.object)
        formset_previous = PreviousIndexingListDescFormSet(self.request.POST, instance=self.object)
        formset_entrycombination = EntryCombinationListDescFormSet(self.request.POST, instance=self.object)
        # run all validation before for display formset errors at form
        form_valid = form.is_valid()
        formset_descriptor_valid = formset_descriptor.is_valid()
        formset_treenumber_valid = formset_treenumber.is_valid()
        formset_pharmaco_valid = formset_pharmaco.is_valid()
        formset_related_valid = formset_related.is_valid()
        formset_previous_valid = formset_previous.is_valid()
        formset_entrycombination_valid = formset_entrycombination.is_valid()
        if (form_valid and
            formset_descriptor_valid and
            formset_treenumber_valid and
            formset_related_valid and
            formset_pharmaco_valid and
            formset_previous_valid and
            formset_entrycombination_valid
            ):
            # Check whether any value was submitted in formset_treenumber
            tree_number_existentes=''
            # Used to check whether the form is completely empty - which should not happen
            form_vazio=True
            exist_tree_numbers=0
            for f in formset_treenumber:
                if f.cleaned_data is not None:
                    fields_t = f.cleaned_data
                    content_tree_number = fields_t.get('tree_number')
                    identifier_id = fields_t.get('identifier')
                    # Where identifier_id = 'identifier': <IdentifierDesc: 34244>
                    # Example
                    # ---> {'identifier': <IdentifierDesc: 34244>, 'tree_number': u'SP4.026.307.808.100', u'id': <TreeNumbersListDesc: 68760>, u'DELETE': False}
                    # To use it, reference identifier_id.id
                    # DELETE comes from cleaned_data and tells whether the row
                    # was removed in the form (True means it was deleted).
                    status_preenchimento = fields_t.get('DELETE')
                    if status_preenchimento == False:
                        form_vazio=False
                        exist_tree_numbers = exist_tree_numbers + 1
                        # Check whether this tree_number is already registered,
                        # excluding the record currently being edited.
                        result_tree_number = TreeNumbersListDesc.objects.filter(tree_number=content_tree_number).exclude(identifier_id=identifier_id.id).values('identifier_id')
                        # NOTE(review): a QuerySet is never None, so this test is
                        # always true; harmless since the loop is simply empty
                        # when there are no matches.
                        if result_tree_number is not None:
                            for t in result_tree_number:
                                identifier_id_existent_tree_number = t.get('identifier_id')
                                # Checks if the record is for the thesaurus being worked on
                                res_existent_thesaurus_id = IdentifierDesc.objects.filter(id=identifier_id_existent_tree_number).values('thesaurus_id')
                                existent_thesaurus_id=res_existent_thesaurus_id[0].get('thesaurus_id')
                                # Brings id of thesaurus currently operating
                                environment_thesaurus_id = self.request.GET.get("ths")
                                # If tree_number exists in same thesaurus creates error
                                if int(environment_thesaurus_id) == int(existent_thesaurus_id):
                                    tree_number_existentes = tree_number_existentes + content_tree_number + ' '
            # Condition required to be allowed to save the record
            if not tree_number_existentes and exist_tree_numbers > 0 and form_vazio==False:
                # Bring the choiced language_code from the first form
                registry_language = formset_descriptor.cleaned_data[0].get('language_code')
                self.object = form.save()
                # Point every formset at the saved object and persist them.
                formset_descriptor.instance = self.object
                formset_descriptor.save()
                formset_treenumber.instance = self.object
                formset_treenumber.save()
                formset_pharmaco.instance = self.object
                formset_pharmaco.save()
                formset_related.instance = self.object
                formset_related.save()
                formset_previous.instance = self.object
                formset_previous.save()
                formset_entrycombination.instance = self.object
                formset_entrycombination.save()
                form.save()
                return HttpResponseRedirect(self.get_success_url())
            else:
                # Re-render the form with an error message (or bail out to the
                # list when the form came back completely empty).
                if form_vazio == True:
                    ths = self.request.GET.get("ths")
                    return redirect('/thesaurus/descriptors?ths=' + ths)
                elif exist_tree_numbers == 0:
                    msg_erro = _("Hierarchical level")
                else:
                    msg_erro = _("already exists!!!") + ' -----> ' + tree_number_existentes
                return self.render_to_response(
                    self.get_context_data(
                        form=form,
                        formset_descriptor=formset_descriptor,
                        formset_treenumber=formset_treenumber,
                        formset_pharmaco=formset_pharmaco,
                        formset_related=formset_related,
                        formset_previous=formset_previous,
                        formset_entrycombination=formset_entrycombination,
                        msg_erro=msg_erro,
                    )
                )
        else:
            # Validation failed somewhere: re-render with bound formsets so
            # their errors are visible.
            return self.render_to_response(
                self.get_context_data(
                    form=form,
                    formset_descriptor=formset_descriptor,
                    formset_treenumber=formset_treenumber,
                    formset_pharmaco=formset_pharmaco,
                    formset_related=formset_related,
                    formset_previous=formset_previous,
                    formset_entrycombination=formset_entrycombination,
                )
            )

    # Makes forms.py have a pre-filter for abbreviation
    def get_form_kwargs(self):
        ths = self.request.GET.get("ths")
        kwargs = super(DescRegisterUpdateView, self).get_form_kwargs()
        kwargs.update({'ths': ths})
        return kwargs

    def form_invalid(self, form):
        # force use of form_valid method to run all validations
        # (so formset errors are collected and rendered as well)
        return self.form_valid(form)

    def get_context_data(self, **kwargs):
        context = super(DescRegisterUpdateView, self).get_context_data(**kwargs)
        context['language_system'] = get_language()
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        if self.request.method == 'GET':
            # Unbound formsets for the initial render.
            context['formset_descriptor'] = DescriptionDescFormSet(instance=self.object)
            context['formset_treenumber'] = TreeNumbersListDescFormSet(instance=self.object)
            context['formset_pharmaco'] = PharmacologicalActionListDescFormSet(instance=self.object)
            context['formset_related'] = SeeRelatedListDescFormSet(instance=self.object)
            context['formset_previous'] = PreviousIndexingListDescFormSet(instance=self.object)
            context['formset_entrycombination'] = EntryCombinationListDescFormSet(instance=self.object)
        return context
class DescListView(LoginRequiredView, ListView):
    """
    List descriptor records.

    Searches TermListDesc rows of the chosen thesaurus, with optional
    filters by field (term_string / concept / descriptor_ui / decs_code /
    tree_number / concept_ui), status and language, paginated by
    ITEMS_PER_PAGE.
    """
    template_name = "thesaurus/thesaurus_home.html"
    context_object_name = "registers"
    paginate_by = ITEMS_PER_PAGE

    def dispatch(self, *args, **kwargs):
        # Per-thesaurus permission check: users outside of center 'BR1.1'
        # may only act on thesauri whose scope matches one of their roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment (GET, POST or the ACTIONS
            # defaults as a last resort).
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            elif self.request.GET.get("ths"):
                environment_thesaurus_id=self.request.GET.get("ths")
            elif self.request.POST.get("choiced_thesaurus"):
                environment_thesaurus_id=self.request.POST.get("choiced_thesaurus")
            else:
                self.actions = {}
                for key in ACTIONS.keys():
                    self.actions[key] = self.request.GET.get(key, ACTIONS[key])
                environment_thesaurus_id=self.actions['choiced_thesaurus']
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Run user_role array and compare the service registered to the user with the service registered in thesaurus.
            # If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(DescListView, self).dispatch(*args, **kwargs)

    def get_queryset(self):
        lang_code = get_language()
        object_list = []
        registers_indexed = []
        concepts_indexed = []
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        # icontains X exact -------------------------------------------------------------------------------------
        if self.actions['exact']:
            q_term_string = Q(term_string__exact=self.actions['s'].strip())
        else:
            q_term_string = Q(term_string__icontains=self.actions['s'].strip())
        # term_string
        if self.actions['filter_fields'] == 'term_string' and self.actions['exact']:
            q_term_string = Q(term_string=self.actions['s'].strip())
        else:
            if not self.actions['filter_fields'] and not self.actions['exact']:
                q_term_string = Q(term_string__icontains=self.actions['s'].strip())
        # concept_preferred_term='Y'
        q_concept_preferred_term = Q(concept_preferred_term='Y')
        # record_preferred_term='Y'
        q_record_preferred_term = Q(record_preferred_term='Y')
        # status
        # NOTE(review): q_filter_status is built here but never used below --
        # the status filters are applied directly on object_list instead.
        if self.actions['filter_status']:
            q_filter_status = Q(status=self.actions['filter_status'])
        # Term
        # AND performance for Term ------------------------------------------------------------------------
        # Do the initial search in term_string field
        if self.actions['s'] and not self.actions['filter_fields']:
            object_list = TermListDesc.objects.filter( q_term_string ).filter(term_thesaurus=self.actions['choiced_thesaurus']).exclude(status=-3).order_by('term_string')
        else:
            # bring all registers
            object_list = TermListDesc.objects.all().filter(term_thesaurus=self.actions['choiced_thesaurus']).exclude(status=-3).order_by('term_string')
        # term_string
        if self.actions['filter_fields'] == 'term_string' and self.actions['s']:
            object_list = TermListDesc.objects.filter( q_term_string ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])
        # Concept
        # AND performance for Concept ------------------------------------------------------------------------
        # when concept_preferred_term='Y' & record_preferred_term='Y'
        if self.actions['filter_fields'] == 'concept':
            object_list = TermListDesc.objects.filter( q_term_string & q_concept_preferred_term & q_record_preferred_term ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])
        # MESH Descriptor UI
        # AND performance for MESH Descriptor UI --------------------------------------------------------------
        if self.actions['filter_fields'] == 'descriptor_ui':
            id_register = IdentifierDesc.objects.filter(descriptor_ui=self.actions['s'].strip()).values('id')
            id_concept = IdentifierConceptListDesc.objects.filter(identifier_id=id_register,preferred_concept='Y').distinct().values('id')
            q_id_concept = Q(identifier_concept_id__in=id_concept)
            object_list = TermListDesc.objects.filter( q_concept_preferred_term & q_record_preferred_term & q_id_concept ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])
        # DeCS Descriptor UI
        # AND performance for DeCS Descriptor UI --------------------------------------------------------------
        if self.actions['filter_fields'] == 'decs_code':
            id_register = IdentifierDesc.objects.filter(decs_code=self.actions['s'].strip()).values('id')
            id_concept = IdentifierConceptListDesc.objects.filter(identifier_id=id_register,preferred_concept='Y').distinct().values('id')
            q_id_concept = Q(identifier_concept_id__in=id_concept)
            object_list = TermListDesc.objects.filter( q_concept_preferred_term & q_record_preferred_term & q_id_concept ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])
        # Tree Number
        # AND performance for Tree Number --------------------------------------------------------------
        # NOTE(review): unlike the other field filters, this branch does not
        # re-apply the status/language filters -- confirm that is intentional.
        if self.actions['filter_fields'] == 'tree_number':
            if self.actions['exact']:
                id_tree_number = TreeNumbersListDesc.objects.filter(tree_number=self.actions['s'].strip()).values('identifier_id')
            else:
                id_tree_number = TreeNumbersListDesc.objects.filter(tree_number__icontains=self.actions['s'].strip()).values('identifier_id')
            id_concept = IdentifierConceptListDesc.objects.filter(identifier_id__in=id_tree_number,preferred_concept='Y').distinct().values('id')
            q_id_concept = Q(identifier_concept_id__in=id_concept)
            object_list = TermListDesc.objects.filter( q_concept_preferred_term & q_record_preferred_term & q_id_concept ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
        # Concept UI
        # AND performance for Concept UI --------------------------------------------------------------
        if self.actions['filter_fields'] == 'concept_ui':
            concept_identifier_id = IdentifierConceptListDesc.objects.filter(concept_ui=self.actions['s'].strip()).values('identifier_id')
            id_register = IdentifierDesc.objects.filter(id__in=concept_identifier_id,thesaurus_id=self.actions['choiced_thesaurus']).values('id')
            concept_id = IdentifierConceptListDesc.objects.filter(identifier_id=id_register,concept_ui=self.actions['s'].strip()).values('id')
            object_list = TermListDesc.objects.filter(identifier_concept_id=concept_id).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])
        # order performance -------------------------------------------------------------------------------------
        # Ordering is only overridden when descending ("-") was requested.
        if self.actions['order'] == "-":
            object_list = object_list.order_by("%s%s" % (self.actions["order"], self.actions["orderby"]))
        # if self.actions['visited'] != 'ok':
        # if not self.actions['visited']:
        #     object_list = object_list.none()
        return object_list

    def get_context_data(self, **kwargs):
        context = super(DescListView, self).get_context_data(**kwargs)
        context['actions'] = self.actions
        # Ten most recently created / altered terms (oldest first thanks to
        # the final [::-1] reversal).
        context['last_created_objects_list'] = TermListDesc.objects.filter(term_thesaurus=self.request.GET.get("ths")).exclude(status=-3).exclude(status=3).exclude(status=5).exclude(date_created__isnull=True).order_by('-date_created','-id')[:10][::-1]
        context['last_altered_objects_list'] = TermListDesc.objects.filter(term_thesaurus=self.request.GET.get("ths")).exclude(status=-3).exclude(date_altered__isnull=True).order_by('-date_altered','-id')[:10][::-1]
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
# FORM 2
# Creates concept and term
class ConceptTermUpdate(LoginRequiredView):
    """
    Base view for creating a concept together with its terms (second step
    of the descriptor creation wizard).

    Subclasses (e.g. DescCreateView2) mix this with Django's CreateView.
    """
    model = IdentifierConceptListDesc
    form_class = IdentifierConceptListDescForm
    template_name = 'thesaurus/descriptor_form_step2.html'

    def form_valid(self, form):
        """Validate the form plus inline formsets and persist concept/terms.

        Two paths:
        * ``term_ui_alter``/``term_id_alter`` present in the query string:
          a new record is forcibly created, inherits the old term's
          ``term_ui``, and the old term is retired (status -3) with both
          historical annotations updated.
        * otherwise: create a brand-new record, unless an identical
          published term (status 1) already exists in this thesaurus.
        """
        formset_concept = ConceptListDescFormSet(self.request.POST, instance=self.object)
        formset_term = TermListDescFormSet(self.request.POST, instance=self.object)

        form_valid = form.is_valid()
        formset_concept_valid = formset_concept.is_valid()
        formset_term_valid = formset_term.is_valid()

        if (form_valid and formset_concept_valid and formset_term_valid):
            # Brings form variables to check if it already exists
            term_string = self.request.POST.get("termdesc-0-term_string")
            language_code = self.request.POST.get("termdesc-0-language_code")
            term_thesaurus = self.request.GET.get("ths")

            # If these query-string variables exist the existence check must be
            # skipped: a new record is forcibly created and the old record has
            # its status changed.
            if self.request.GET.get("term_ui_alter") and self.request.GET.get("term_id_alter"):
                self.object = form.save()
                # Get thesaurus_acronym to create new ID format to concept_ui field
                self.object = form.save(commit=False)
                zseq = str(self.object.id).zfill(8)  # left-pad with zeros
                self.object.concept_ui = 'FD' + zseq
                self.object = form.save(commit=True)
                formset_concept.instance = self.object
                formset_concept.save()
                formset_term.instance = self.object
                formset_term.save()
                # Bring the choiced language_code from the first form
                registry_language = formset_term.cleaned_data[0].get('language_code')

                # Update the created term_ui with a old content - term_ui_alter
                try:
                    created_id = int(TermListDesc.objects.latest('id').id)
                    update_field = TermListDesc.objects.get(id=created_id)
                    update_field.term_ui = self.request.GET.get("term_ui_alter")
                    # find the id of the concept the old term belongs to
                    identifier_concept_id = TermListDesc.objects.filter(id=self.request.GET.get("term_id_alter")).values('identifier_concept_id')
                    identifier_concept_id = identifier_concept_id[0].get('identifier_concept_id')
                    # find its concept_ui
                    concept_ui_origem = IdentifierConceptListDesc.objects.filter(id=identifier_concept_id).values('concept_ui')
                    concept_ui_origem = concept_ui_origem[0].get('concept_ui')
                    # collect the old term's historical annotation
                    historical_annotation_old=TermListDesc.objects.filter(id=self.request.GET.get("term_id_alter")).values('historical_annotation')
                    if len(historical_annotation_old) > 0:
                        historical_annotation_old=historical_annotation_old[0].get('historical_annotation')
                        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - received from ' + concept_ui_origem
                        historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
                    else:
                        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - received from ' + concept_ui_origem
                        historical_annotation_new=historical_annotation_now.encode('utf-8')
                    update_field.historical_annotation = historical_annotation_new
                    update_field.save()
                except TermListDesc.DoesNotExist:
                    print 'Warning! Does not exist id to this Term'

                # Update old term register, status and historical_annotation - term_id_alter
                try:
                    # look up the concept_ui of the newly created term
                    created_id = int(TermListDesc.objects.latest('id').id)
                    # find the id of the concept the new term belongs to
                    identifier_concept_id = TermListDesc.objects.filter(id=created_id).values('identifier_concept_id')
                    identifier_concept_id = identifier_concept_id[0].get('identifier_concept_id')
                    # find its concept_ui
                    concept_ui_destino = IdentifierConceptListDesc.objects.filter(id=identifier_concept_id).values('concept_ui')
                    concept_ui_destino = concept_ui_destino[0].get('concept_ui')
                    # collect the old term's historical annotation
                    historical_annotation_old=TermListDesc.objects.filter(id=self.request.GET.get("term_id_alter")).values('historical_annotation')
                    if len(historical_annotation_old) > 0:
                        historical_annotation_old=historical_annotation_old[0].get('historical_annotation')
                        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - sent to ' + concept_ui_destino
                        historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
                    else:
                        # NOTE(review): this branch uses concept_ui_origem (set in the
                        # previous try block) while the sibling branch above uses
                        # concept_ui_destino — looks like a copy/paste bug; confirm.
                        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - sent to ' + concept_ui_origem
                        historical_annotation_new=historical_annotation_now.encode('utf-8')
                    update_field = TermListDesc.objects.get(id=self.request.GET.get("term_id_alter"))
                    update_field.status = '-3'
                    update_field.historical_annotation = historical_annotation_new
                    update_field.save()
                except TermListDesc.DoesNotExist:
                    print 'Warning! Does not exist id to this Term'

                form.save()
                return HttpResponseRedirect(self.get_success_url())
            else:
                has_term = TermListDesc.objects.filter(
                    term_string__exact=term_string,
                    language_code=language_code,
                    term_thesaurus=term_thesaurus,
                    status=1,
                ).exists()

                if not has_term:
                    self.object = form.save()
                    # Get thesaurus_acronym to create new ID format to concept_ui field
                    self.object = form.save(commit=False)
                    zseq = str(self.object.id).zfill(8)  # left-pad with zeros
                    self.object.concept_ui = 'FD' + zseq
                    self.object = form.save(commit=True)
                    formset_concept.instance = self.object
                    formset_concept.save()
                    formset_term.instance = self.object
                    formset_term.save()
                    # Bring the choiced language_code from the first form
                    registry_language = formset_term.cleaned_data[0].get('language_code')

                    # Update term_ui with a new format
                    try:
                        ths = self.request.GET.get("ths")
                        try:
                            seq = code_controller_term.objects.get(thesaurus=self.request.GET.get("ths"))
                            nseq = str(int(seq.sequential_number) + 1)
                            seq.sequential_number = nseq
                            seq.save()
                        except code_controller_term.DoesNotExist:
                            seq = code_controller_term(sequential_number=1,thesaurus=ths)
                            nseq = 1
                            seq.save()
                        created_id = int(TermListDesc.objects.latest('id').id)
                        update_field = TermListDesc.objects.get(id=created_id)
                        # map the system language code to its 3-letter code
                        if registry_language == 'en':
                            language_3letters = 'eng'
                        if registry_language == 'es':
                            language_3letters = 'spa'
                        if registry_language == 'pt-br':
                            language_3letters = 'por'
                        if registry_language == 'fr':
                            language_3letters = 'fre'
                        if registry_language == 'es-es':
                            language_3letters = 'spa'
                        # left-pad with zeros
                        zseq = str(nseq).zfill(6)
                        update_field.term_ui = language_3letters + 'd' + zseq
                        update_field.save()
                    except TermListDesc.DoesNotExist:
                        print 'Warning! Does not exist id to this Term'

                    form.save()
                    return HttpResponseRedirect(self.get_success_url())
                else:
                    msg_erro = _("This Concept already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
                    return self.render_to_response(self.get_context_data(
                        form=form,
                        formset_concept=formset_concept,
                        formset_term=formset_term,
                        msg_erro=msg_erro,
                    ))
        else:
            # Form or formset invalid — redisplay with bound errors.
            return self.render_to_response(
                self.get_context_data(
                    form=form,
                    formset_concept=formset_concept,
                    formset_term=formset_term,
                )
            )

    def get_context_data(self, **kwargs):
        """Expose thesaurus name, UI language, next register id and formsets."""
        context = super(ConceptTermUpdate, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        context['language_system'] = get_language()
        if IdentifierDesc.objects.count() > 0:
            context['next_id'] = int(IdentifierDesc.objects.latest('id').id)
        else:
            context['next_id'] = 1
        if self.request.method == 'GET':
            context['formset_concept'] = ConceptListDescFormSet(instance=self.object)
            context['formset_term'] = TermListDescFormSet(instance=self.object)
        return context
class DescCreateView2(ConceptTermUpdate, CreateView):
    """
    Create-view entry point for descriptors (second step of the wizard).

    Access control: users from center 'BR1.1' may act on any thesaurus;
    other users must have a service role matching the ``thesaurus_scope``
    of the thesaurus selected via the ``thesaurus``/``ths`` query parameter.
    """

    def dispatch(self, *args, **kwargs):
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']

        if user_cc != 'BR1.1':
            # Brings "ths" (thesaurus id) from the environment.
            # Fix: previously the name was left unbound (NameError) when
            # neither query parameter was present; now missing id -> deny.
            environment_thesaurus_id = self.request.GET.get("thesaurus") or self.request.GET.get("ths")
            if not environment_thesaurus_id:
                return HttpResponseForbidden()

            # Compare each service role of the user with the thesaurus_scope
            # of every registered thesaurus; grant access only when a role
            # matches the thesaurus currently selected in the environment.
            access_status = False
            ids_thesaurus = Thesaurus.objects.all().values('id', 'thesaurus_scope')
            for user_service in user_role:
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status = True
            if not access_status:
                return HttpResponseForbidden()

        return super(DescCreateView2, self).dispatch(*args, **kwargs)

    def get_success_url(self):
        """Redirect to the view page of the first term of the new concept."""
        id_concept = self.object.id
        # Look up the id of the first term of this concept to redirect to.
        terms_of_concept = TermListDesc.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/descriptors/view/%s%s' % ( id_term, ths )
# Looks up the record ID so we can find out the ID of the destination concept
class ConceptListDescView(LoginRequiredView, ListView):
    """
    List descriptor records (used by relationship popup selection window)
    """
    template_name = "thesaurus/search_concept_desc.html"
    context_object_name = "registers"
    paginate_by = ITEMS_PER_PAGE

    def get_queryset(self):
        """Return at most one concept row matching the searched descriptor_ui."""
        lang_code = get_language()
        object_list = []

        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])

        if self.actions['choiced_concept_identifier_id']:
            concept_identifier_id = self.actions['choiced_concept_identifier_id']

        if self.actions['s']:
            try:
                id_registro = IdentifierDesc.objects.filter(descriptor_ui=self.actions['s'].strip(),thesaurus=self.request.GET.get("ths")).values('id')
                if len(id_registro)>0:
                    id_registro = id_registro[0].get('id')
                    # Force a single result
                    object_list = IdentifierConceptListDesc.objects.filter(identifier_id=id_registro).values('identifier_id','termdesc__term_string','termdesc__language_code','termdesc__id')[:1]
            except IdentifierDesc.DoesNotExist:
                # order performance -------------------------------------------------------------------------------------
                # NOTE(review): indentation reconstructed from a flattened source;
                # these statements appear to live inside the except handler —
                # confirm against VCS history.
                if self.actions['order'] == "-":
                    object_list = object_list.order_by("%s%s" % (self.actions["order"], self.actions["orderby"]))
                if self.actions['visited'] != 'ok':
                    object_list = object_list.none()
        return object_list

    def get_context_data(self, **kwargs):
        """Add register details for the searched descriptor_ui to the context."""
        context = super(ConceptListDescView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        context['actions'] = self.actions
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        if self.actions['s']:
            try:
                # Force a single result
                id_registro = IdentifierDesc.objects.filter(descriptor_ui=self.actions['s'].strip(),thesaurus=self.request.GET.get("ths")).values('id')[:1]
                # IdentifierDesc
                context['id_register_objects'] = IdentifierDesc.objects.filter(
                    id=id_registro,
                ).values(
                    # IdentifierDesc
                    'id',
                    'thesaurus',
                    'descriptor_class',
                    'descriptor_ui',
                    'decs_code',
                    'external_code',
                    'nlm_class_number',
                    'date_created',
                    'date_revised',
                    'date_established',
                    'abbreviation',
                )
                context['identifier_concept_id'] = self.actions['choiced_concept_identifier_id']
            except IdentifierDesc.DoesNotExist:
                context['identifier_concept_id'] = self.actions['choiced_concept_identifier_id']
        return context
def ConceptListDescModification(request,term_id, ths, concept_ori):
    """Move concept ``concept_ori`` into the record that owns term ``term_id``.

    The moved concept always arrives as a non-preferred ('NRW') concept; if
    it was the preferred concept of its old record, the first non-preferred
    sibling concept (if any) is promoted in its place. The move is recorded
    in the concept's historical annotation.
    """
    # Find out the concept id of the destination term
    id_concept_destino = TermListDesc.objects.filter(id=term_id).values('identifier_concept_id')
    id_concept_destino = id_concept_destino[0].get('identifier_concept_id')
    identifier_id_destino = IdentifierConceptListDesc.objects.filter(id=id_concept_destino).values('identifier_id')
    identifier_id_destino = identifier_id_destino[0].get('identifier_id')

    # Check whether the origin concept is the preferred one; if so, the next
    # non-preferred concept must take over the preference
    check_preferred_concept_origem = IdentifierConceptListDesc.objects.filter(id=concept_ori).values('preferred_concept')
    check_preferred_concept_origem = check_preferred_concept_origem[0].get('preferred_concept')

    # Since the TermListDesc record will be non-preferred at the destination,
    # record_preferred_term must be 'N': update every term of concept_ori
    TermListDesc.objects.filter(identifier_concept_id=concept_ori).update(record_preferred_term='N')

    if check_preferred_concept_origem == 'Y':
        # If the origin concept has siblings and was the preferred one, the
        # second concept takes over the preference
        check_concept_id_origem = IdentifierConceptListDesc.objects.filter(id=concept_ori).values('identifier_id')
        check_concept_id_origem = check_concept_id_origem[0].get('identifier_id')
        check_concept_id_origem = IdentifierConceptListDesc.objects.filter(identifier_id=check_concept_id_origem).values('identifier_id')
        if len(check_concept_id_origem) > 1:
            # Find the id of the first non-preferred concept
            check_concept_id_not_preferred = IdentifierConceptListDesc.objects.filter(identifier_id=check_concept_id_origem,preferred_concept='N').values('id')
            check_concept_id_not_preferred = check_concept_id_not_preferred[0].get('id')
            # Promote that concept to preferred_concept='Y'
            IdentifierConceptListDesc.objects.filter(id=check_concept_id_not_preferred).update(concept_relation_name='',preferred_concept='Y')
            # Terms preferred in that concept also become preferred in the record
            TermListDesc.objects.filter(identifier_concept_id=check_concept_id_not_preferred, concept_preferred_term='Y').update(record_preferred_term='Y')

    # Re-parent the old concept under identifier_id_destino and record in the
    # history which register it came from
    identifier_id_ori = IdentifierConceptListDesc.objects.filter(id=concept_ori).values('identifier_id')
    identifier_id_ori = identifier_id_ori[0].get('identifier_id')
    descriptor_ui_ori = IdentifierDesc.objects.filter(id=identifier_id_ori).values('descriptor_ui')
    descriptor_ui_ori = descriptor_ui_ori[0].get('descriptor_ui')

    # Check whether the history already has an annotation
    has_hist=IdentifierConceptListDesc.objects.filter(id=concept_ori).exclude(historical_annotation__isnull=True).exclude(historical_annotation='').values('id','historical_annotation')
    if len(has_hist)>0:
        historical_annotation_old=has_hist[0].get('historical_annotation')
        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', received from ' + str(descriptor_ui_ori)
        historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
    else:
        historical_annotation_new=datetime.datetime.now().strftime('%Y-%m-%d') + ', received from ' + str(descriptor_ui_ori)

    IdentifierConceptListDesc.objects.filter(id=concept_ori).update(identifier_id=identifier_id_destino,concept_relation_name='NRW',preferred_concept='N', historical_annotation=historical_annotation_new)

    url = '/thesaurus/descriptors/view/' + term_id + '?ths=' + ths
    return HttpResponseRedirect(url)
# Looks up a concept in order to bring the record ID for a new concept
# Not currently in use
class TermListDescView(LoginRequiredView, ListView):
    """
    List descriptor records (used by relationship popup selection window)
    """
    template_name = "thesaurus/search_term_desc.html"
    context_object_name = "registers"
    paginate_by = ITEMS_PER_PAGE

    def get_queryset(self):
        """Return at most one concept row matching the searched concept_ui."""
        lang_code = get_language()
        object_list = []

        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])

        if self.actions['choiced_concept_identifier_id']:
            concept_identifier_id = self.actions['choiced_concept_identifier_id']

        if self.actions['s']:
            try:
                # The same concept_ui may exist in more than one thesaurus, so we
                # must find which IdentifierDesc id belongs to the current one
                concepts = IdentifierConceptListDesc.objects.filter(concept_ui=self.actions['s'].strip()).values('identifier_id')
                for x in concepts:
                    id_identifier = x.get('identifier_id')
                    has_register = IdentifierDesc.objects.filter(id=id_identifier,thesaurus_id=self.request.GET.get("ths")).exists()
                    if has_register:
                        # Force a single result
                        object_list = IdentifierConceptListDesc.objects.filter(concept_ui=self.actions['s'].strip(),identifier_id=id_identifier).values('identifier_id','termdesc__term_string','termdesc__language_code','termdesc__id')[:1]
            except IdentifierConceptListDesc.DoesNotExist:
                # order performance -------------------------------------------------------------------------------------
                # NOTE(review): indentation reconstructed from a flattened source;
                # these statements appear to live inside the except handler —
                # confirm against VCS history.
                if self.actions['order'] == "-":
                    object_list = object_list.order_by("%s%s" % (self.actions["order"], self.actions["orderby"]))
                if self.actions['visited'] != 'ok':
                    object_list = object_list.none()
        return object_list

    def get_context_data(self, **kwargs):
        """Add register details for the searched concept_ui to the context."""
        context = super(TermListDescView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        context['actions'] = self.actions
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        if self.actions['s']:
            try:
                concepts = IdentifierConceptListDesc.objects.filter(concept_ui=self.actions['s'].strip()).values('identifier_id')
                for x in concepts:
                    id_identifier = x.get('identifier_id')
                    has_register = IdentifierDesc.objects.filter(id=id_identifier,thesaurus_id=self.request.GET.get("ths")).exists()
                    if has_register:
                        # print 'ID pertinente',id_identifier
                        # IdentifierDesc
                        context['id_register_objects'] = IdentifierConceptListDesc.objects.filter(
                            concept_ui=self.actions['s'].strip(),identifier_id=id_identifier
                        ).values(
                            # IdentifierConceptListDesc
                            'id',
                            'concept_ui',
                        )
                context['identifier_concept_id'] = self.actions['choiced_concept_identifier_id']
            except IdentifierDesc.DoesNotExist:
                context['identifier_concept_id'] = self.actions['choiced_concept_identifier_id']
        return context
def TermListDescModification(request,term_id, ths, term_ori):
    """Move term ``term_ori`` into the concept that owns term ``term_id``.

    The origin term is retired (migration status -3) and a copy is created
    (or an earlier migrated copy revived) inside the destination concept,
    with the move recorded in both historical annotations.
    """
    # Find out the identifier_concept_id of the destination term
    id_concept_destino = TermListDesc.objects.filter(id=term_id).values('identifier_concept_id')
    # Find out the identifier_id of that concept
    identifier_id_destino = IdentifierConceptListDesc.objects.filter(id=id_concept_destino).values('identifier_id')
    identifier_id_destino = identifier_id_destino[0].get('identifier_id')
    id_concept_destino = id_concept_destino[0].get('identifier_concept_id')

    # Find out the identifier_concept_id of the origin term
    term_origem_values = TermListDesc.objects.filter(id=term_ori).values('identifier_concept_id','term_ui')
    id_concept_origem = term_origem_values[0].get('identifier_concept_id')
    term_ui_origem = term_origem_values[0].get('term_ui')

    qtd_id_concept_origem = TermListDesc.objects.filter(identifier_concept_id=id_concept_origem)
    if len(qtd_id_concept_origem) == 1:
        # When the concept holds a single term it must be updated with the new
        # concept's information:
        # termlistdesc.identifier_concept_id -> identifier_concept_id of the destination term
        # identifierconceptlistdesc.identifier_id -> identifier_id of the destination concept
        # Re-parent the old concept under identifier_id_destino
        IdentifierConceptListDesc.objects.filter(id=id_concept_origem).update(identifier_id=identifier_id_destino,concept_relation_name='NRW',preferred_concept='N')

    # Update origin and destination term information
    # Prepare the origin history information
    concept_ui_origem = IdentifierConceptListDesc.objects.filter(id=id_concept_origem).values('concept_ui')
    concept_ui_origem = concept_ui_origem[0].get('concept_ui')
    historical_annotation_old=TermListDesc.objects.filter(id=term_ori).values('id','historical_annotation')
    historical_annotation_old=historical_annotation_old[0].get('historical_annotation')
    # Keep a copy for the destination history
    historical_annotation_old_origem=historical_annotation_old

    # Prepare the destination history information
    concept_ui_destino = IdentifierConceptListDesc.objects.filter(id=id_concept_destino).values('concept_ui')
    concept_ui_destino = concept_ui_destino[0].get('concept_ui')
    historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', sent to ' + concept_ui_destino
    historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')

    # Update the origin history (term retired with migration status -3)
    TermListDesc.objects.filter(id=term_ori).update(status=-3,historical_annotation=historical_annotation_new, date_altered=datetime.datetime.now().strftime('%Y-%m-%d'))

    # Check whether the destination already holds a record with migration
    # status -3: search for the origin term_ui with status=-3
    new_term=TermListDesc.objects.filter(id=term_ori).values('status','term_ui','language_code','term_string','concept_preferred_term','is_permuted_term','lexical_tag','record_preferred_term','entry_version','date_created','date_altered','historical_annotation','term_thesaurus','identifier_concept_id',)
    term_ui_ori=new_term[0].get('term_ui')
    term_string_ori=new_term[0].get('term_string').encode('utf-8')
    exist_term=TermListDesc.objects.filter(status=-3, term_ui=term_ui_ori, term_string=term_string_ori, identifier_concept_id=id_concept_destino).values('id','historical_annotation')
    if len(exist_term) > 0:
        term_id_exist=exist_term[0].get('id')
        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', received from ' + concept_ui_origem
        historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
        # Revive the existing destination record and update its history
        TermListDesc.objects.filter(id=term_id_exist).update(status='1',concept_preferred_term='N',is_permuted_term='N',record_preferred_term='N',historical_annotation=historical_annotation_new, date_altered=datetime.datetime.now().strftime('%Y-%m-%d'))
    else:
        # Create a new entry in the destination concept
        item = TermListDesc.objects.create(
            status='1',
            term_ui=new_term[0].get('term_ui'),
            language_code=new_term[0].get('language_code'),
            term_string=new_term[0].get('term_string'),
            concept_preferred_term='N',
            is_permuted_term='N',
            lexical_tag=new_term[0].get('lexical_tag'),
            record_preferred_term='N',
            entry_version=new_term[0].get('entry_version'),
            date_created=new_term[0].get('date_created'),
            date_altered=datetime.datetime.now().strftime('%Y-%m-%d'),
            historical_annotation=datetime.datetime.now().strftime('%Y-%m-%d') + ', received from ' + concept_ui_origem + ';' + historical_annotation_old_origem,
            term_thesaurus=new_term[0].get('term_thesaurus'),
            identifier_concept_id=id_concept_destino,
        )

    url = '/thesaurus/descriptors/view/' + term_ori + '?ths=' + ths
    return HttpResponseRedirect(url)
class TermCreateDescConfirm(LoginRequiredView, TemplateView):
    """Confirmation page shown before turning a term into a new descriptor."""
    template_name = 'thesaurus/confirm_create_desc.html'

    def get_context_data(self, **kwargs):
        """Add the name of the thesaurus selected via ``?ths=`` to the context."""
        context = super(TermCreateDescConfirm, self).get_context_data(**kwargs)
        ths_id = self.request.GET.get("ths")
        names = Thesaurus.objects.filter(id=ths_id).values('thesaurus_name')
        context['thesaurus_name'] = names[0].get('thesaurus_name')
        return context
def TermCreateDescDo(request, ths):
    """Redirect to the descriptor creation form pre-filled from an existing term.

    Fix: query-string values (term strings may contain spaces, '&' or
    accented characters) are now URL-encoded instead of concatenated raw,
    and missing parameters default to '' instead of raising TypeError on
    string concatenation.
    """
    import urllib

    # Parameter order and key names preserved from the original URL.
    params = [
        ('ths', ths),
        ('term', request.GET.get("term_string", "").encode('utf-8')),
        ('language_code', request.GET.get("language_code", "").encode('utf-8')),
        ('term_ui', request.GET.get("term_ui", "").encode('utf-8')),
        ('term_id', request.GET.get("term_id", "").encode('utf-8')),
    ]
    return redirect('/thesaurus/descriptors/new/?' + urllib.urlencode(params))
class ConceptCreateDescConfirm(LoginRequiredView, TemplateView):
    """Confirmation page shown before promoting a concept to a new record."""
    template_name = 'thesaurus/confirm_create_register_desc.html'

    def get_context_data(self, **kwargs):
        """Add the name of the thesaurus selected via ``?ths=`` to the context."""
        context = super(ConceptCreateDescConfirm, self).get_context_data(**kwargs)
        ths_id = self.request.GET.get("ths")
        names = Thesaurus.objects.filter(id=ths_id).values('thesaurus_name')
        context['thesaurus_name'] = names[0].get('thesaurus_name')
        return context
def ConceptCreateDescDo(request, ths):
    """Promote a concept to a brand-new descriptor record.

    Creates a new IdentifierDesc (fresh decs_code and descriptor_ui),
    re-parents the concept under it as the preferred concept, and updates
    the historical annotations and record-preferred terms accordingly.
    """
    term_string = request.GET.get("term_string")
    language_code = request.GET.get("language_code")
    concept_id = request.GET.get("concept_id")
    term_id = request.GET.get("term_id")
    created_by = request.GET.get("created_by")

    # Find out the descriptor_ui of the record of origin
    identifier_id_ori = IdentifierConceptListDesc.objects.filter(id=concept_id).values('identifier_id')
    identifier_id_ori = identifier_id_ori[0].get('identifier_id')
    descriptor_ui_ori = IdentifierDesc.objects.filter(id=identifier_id_ori).values('descriptor_ui')
    descriptor_ui_ori = descriptor_ui_ori[0].get('descriptor_ui')

    # Check whether the history already has an annotation
    has_hist=IdentifierConceptListDesc.objects.filter(id=concept_id).values('historical_annotation')
    if has_hist:
        historical_annotation_old=has_hist[0].get('historical_annotation')
        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - received from ' + str(descriptor_ui_ori)
        # NOTE(review): historical_annotation_old may be None here (no
        # exclude(isnull) filter above), which would break .encode; and if
        # has_hist were empty, historical_annotation_new would be unbound
        # where it is used further down. Confirm intended behaviour.
        historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')

    created_time=datetime.datetime.now().strftime('%Y-%m-%d')
    created_time = created_time.encode('utf-8')

    # Get sequential number to write to decs_code
    try:
        seq = code_controller.objects.get(thesaurus=ths)
        nseq = str(int(seq.sequential_number) + 1)
        seq.sequential_number = nseq
        seq.save()
    except code_controller.DoesNotExist:
        seq = code_controller(sequential_number=1,thesaurus=ths)
        nseq = 1
        seq.save()
    decs_code=nseq

    # Get thesaurus_acronym to create new ID format to descriptor_ui field
    try:
        acronym = Thesaurus.objects.filter(id=ths).values('thesaurus_acronym')
        # fetch the acronym and upper-case it
        acronym = str(acronym[0].get('thesaurus_acronym')).upper()
        # use the decs_code sequence to compose descriptor_ui
        zseq = str(nseq).zfill(6)  # left-pad with zeros
        descriptor_ui = 'D' + acronym + zseq
    except Thesaurus.DoesNotExist:
        # NOTE(review): ``self`` does not exist in this plain function — this
        # handler would raise NameError, and descriptor_ui stays unbound for
        # the IdentifierDesc creation below. Confirm and fix.
        id_thesaurus = str(self.object.id)
        print 'Warning! - No thesaurus_acronym for id -->',id_thesaurus

    add_reg = IdentifierDesc(descriptor_class='', descriptor_ui=descriptor_ui, decs_code=decs_code, external_code='', nlm_class_number='', date_created=created_time, created_by_id=created_by, thesaurus_id=ths)
    add_reg.save()

    # Find the last inserted ID
    last_id = IdentifierDesc.objects.filter(thesaurus_id=ths).order_by('id').last()
    # print '[last_id - ' ,last_id,' ]'

    # Point the concept at the new record, clear concept_relation_name, make
    # it the preferred concept and update the history
    update_field = IdentifierConceptListDesc.objects.get(id=concept_id)
    update_field.identifier_id = last_id
    update_field.concept_relation_name = ""
    update_field.preferred_concept = "Y"
    update_field.historical_annotation = historical_annotation_new
    update_field.save()

    # Update record_preferred_term of the terms elected preferred in the new record
    update_registers = TermListDesc.objects.filter(identifier_concept_id=concept_id, concept_preferred_term='Y', record_preferred_term='N')
    # print '[ DEBUG]', update_registers
    if update_registers:
        for upd in update_registers:
            # print '---> ',upd
            TermListDesc.objects.filter(id=str(upd)).update(record_preferred_term='Y')

    return redirect('/thesaurus/descriptors/view/' + term_id + '?ths=' + ths)
class ConceptListDescCreateView(LoginRequiredView, CreateView):
"""
Used as class view to create Concept and Term
"""
model = IdentifierConceptListDesc
template_name = 'thesaurus/descriptor_new_concept.html'
form_class = IdentifierConceptListDescForm
def dispatch(self, *args, **kwargs):
user_data = additional_user_info(self.request)
user_cc = user_data['user_cc']
user_role = user_data['service_role']
if user_cc != 'BR1.1':
# Brings "ths" from the environment
if self.request.GET.get("thesaurus"):
environment_thesaurus_id=self.request.GET.get("thesaurus")
else:
if self.request.GET.get("ths"):
environment_thesaurus_id=self.request.GET.get("ths")
access_status=False
# Create array with all registered thesaurus
ids_thesaurus = []
ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
# Run user_role array and compare the service registered to the user with the service registered in thesaurus.
# If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
for role in user_role:
user_service=role
for elem in ids_thesaurus:
id_thesaurus = elem.get('id')
thesaurus_scope = elem.get('thesaurus_scope')
if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
access_status=True
if access_status==False:
return HttpResponseForbidden()
return super(ConceptListDescCreateView, self).dispatch(*args, **kwargs)
def get_success_url(self):
id_concept = self.object.id
# Search ID of the first term of this concept to redirect
terms_of_concept = TermListDesc.objects.filter(identifier_concept_id=id_concept).values('id')
id_term = terms_of_concept[0].get('id')
ths = '?ths=' + self.request.GET.get("ths")
return '/thesaurus/descriptors/view/%s%s' % ( id_term, ths )
def form_valid(self, form):
formset_concept = ConceptListDescFormSet(self.request.POST, instance=self.object)
formset_term = TermListDescFormSet(self.request.POST, instance=self.object)
form_valid = form.is_valid()
formset_concept_valid = formset_concept.is_valid()
formset_term_valid = formset_term.is_valid()
if (form_valid and formset_concept_valid and formset_term_valid):
# Brings form variables to check if it already exists
term_string = self.request.POST.get("termdesc-0-term_string")
language_code = self.request.POST.get("termdesc-0-language_code")
term_thesaurus = self.request.GET.get("ths")
# Search by draft record
q_status_draft = Q(
term_string__exact=term_string,
language_code=language_code,
term_thesaurus=term_thesaurus,
status=-1,
)
# Search by published record
q_status_published = Q(
term_string__exact=term_string,
language_code=language_code,
term_thesaurus=term_thesaurus,
status=1,
)
# Search by historical record
q_status_historical = Q(
term_string__exact=term_string,
language_code=language_code,
term_thesaurus=term_thesaurus,
status=5,
)
has_term = TermListDesc.objects.filter( q_status_draft | q_status_published | q_status_historical ).values('term_string')
# Corre resultados e compara
has_equal=''
for term in has_term:
t=term.get('term_string').encode('utf-8')
if t == term_string.encode('utf-8'):
# print 'Igual-->',t
has_equal=t
if not has_equal:
self.object = form.save(commit=False)
self.object.identifier_id = int(self.request.POST.get("identifier_id"))
self.object = form.save(commit=True)
formset_concept.instance = self.object
formset_concept.save()
formset_term.instance = self.object
formset_term.save()
# Bring the choiced language_code from the first form
registry_language = formset_term.cleaned_data[0].get('language_code')
form.save()
# Update concept_ui with a new format
try:
created_concept_id = int(IdentifierConceptListDesc.objects.latest('id').id)
update_concept_field = IdentifierConceptListDesc.objects.get(id=created_concept_id)
# preenche zeros a esquerda
zseq = str(created_concept_id).zfill(8)
update_concept_field.concept_ui = 'FD' + zseq
update_concept_field.save()
except IdentifierConceptListDesc.DoesNotExist:
print 'Warning! Does not exist id to this Concept'
# Update term_ui with a new format
try:
ths = self.request.GET.get("ths")
try:
seq = code_controller_term.objects.get(thesaurus=self.request.GET.get("ths"))
nseq = str(int(seq.sequential_number) + 1)
seq.sequential_number = nseq
seq.save()
except code_controller_term.DoesNotExist:
seq = code_controller_term(sequential_number=1,thesaurus=ths)
nseq = 1
seq.save()
created_id = int(TermListDesc.objects.latest('id').id)
update_field = TermListDesc.objects.get(id=created_id)
# substitui idioma do sistema por sigla de 3 letras
if registry_language == 'en':
language_3letters = 'eng'
if registry_language == 'es':
language_3letters = 'spa'
if registry_language == 'pt-br':
language_3letters = 'por'
if registry_language == 'fr':
language_3letters = 'fre'
if registry_language == 'es-es':
language_3letters = 'spa'
# preenche zeros a esquerda
zseq = str(nseq).zfill(6)
update_field.term_ui = language_3letters + 'd' + zseq
update_field.save()
except TermListDesc.DoesNotExist:
print 'Warning! Does not exist id to this Term'
# Update created_date
try:
created_id = int(TermListDesc.objects.latest('id').id)
update_date_created = TermListDesc.objects.get(id=created_id)
update_date_created.date_created = datetime.datetime.now().strftime('%Y-%m-%d')
update_date_created.save()
except TermListDesc.DoesNotExist:
print 'Warning! Does not exist id to this Term'
return HttpResponseRedirect(self.get_success_url())
else:
msg_erro = _("This Concept already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
return self.render_to_response(self.get_context_data(
form=form,
formset_concept=formset_concept,
formset_term=formset_term,
msg_erro=msg_erro,
))
def get_context_data(self, **kwargs):
    """Add the active thesaurus name and, on GET, the concept/term formsets."""
    ctx = super(ConceptListDescCreateView, self).get_context_data(**kwargs)
    ths_id = self.request.GET.get("ths")
    ctx['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=ths_id)
    if self.request.method == 'GET':
        ctx.update(
            formset_concept=ConceptListDescFormSet(instance=self.object),
            formset_term=TermListDescFormSet(instance=self.object),
        )
    return ctx
class ConceptListDescUpdateView(LoginRequiredView, UpdateView):
    """
    Used as class view to update concept.
    """
    model = IdentifierConceptListDesc
    template_name = 'thesaurus/descriptor_edit_concept.html'
    form_class = IdentifierConceptListDescForm

    def dispatch(self, *args, **kwargs):
        """Deny access when the user's service role does not cover the current thesaurus."""
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings the thesaurus id ("thesaurus" or "ths") from the environment.
            # NOTE(review): when neither parameter is present the variable stays
            # unbound and the int() below raises — same as the original behavior.
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id = self.request.GET.get("thesaurus")
            elif self.request.GET.get("ths"):
                environment_thesaurus_id = self.request.GET.get("ths")
            access_status = False
            ids_thesaurus = Thesaurus.objects.all().values('id', 'thesaurus_scope')
            # Grant access only when one of the user's services matches the
            # scope of the thesaurus currently being worked on.
            for user_service in user_role:
                for elem in ids_thesaurus:
                    if (user_service == elem.get('thesaurus_scope')
                            and int(elem.get('id')) == int(environment_thesaurus_id)):
                        access_status = True
                        break
                if access_status:
                    break
            if not access_status:
                return HttpResponseForbidden()
        return super(ConceptListDescUpdateView, self).dispatch(*args, **kwargs)

    def get_success_url(self):
        """Redirect back to the descriptor view of the term given in the POST."""
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/descriptors/view/%s%s' % (int(self.request.POST.get("termdesc__id")), ths)

    def form_valid(self, form):
        """Save the concept and propagate its preferred flag to the concept's preferred terms."""
        formset_concept = ConceptListDescFormSet(self.request.POST, instance=self.object)
        form_valid = form.is_valid()
        formset_concept_valid = formset_concept.is_valid()
        if form_valid and formset_concept_valid:
            self.object = form.save(commit=False)
            self.object.identifier_id = int(self.request.POST.get("identifier_id"))
            formset_concept.instance = self.object
            formset_concept.save()
            form.save()
            # Terms preferred in this concept must follow the concept's
            # preferred status at record level as well.
            check_preferred_concept = self.request.POST.get("preferred_concept")
            record_flag = 'Y' if check_preferred_concept == 'Y' else 'N'
            TermListDesc.objects.filter(
                identifier_concept_id=self.object.id,
                concept_preferred_term='Y',
            ).update(record_preferred_term=record_flag)
            return HttpResponseRedirect(self.get_success_url())
        else:
            return self.render_to_response(self.get_context_data(
                form=form,
                formset_concept=formset_concept,
            ))

    def get_context_data(self, **kwargs):
        """Add the active thesaurus name and, on GET, the concept formset."""
        context = super(ConceptListDescUpdateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        if self.request.method == 'GET':
            context['formset_concept'] = ConceptListDescFormSet(instance=self.object)
        return context
class TermListDescCreateView(LoginRequiredView, CreateView):
    """
    Used as class view to create TermListDesc.

    Creation is refused when an identical term (same string, language and
    thesaurus) already exists as draft (-1), published (1) or historical (5),
    or when the concept already holds a preferred-term configuration for the
    same language.
    """
    # model = TermListDesc
    template_name = 'thesaurus/descriptor_new_term.html'
    form_class = TermListDescUniqueForm

    def dispatch(self, *args, **kwargs):
        """Deny access when the user's service role does not cover the current thesaurus."""
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings the thesaurus id ("thesaurus" or "ths") from the environment.
            # NOTE(review): when neither parameter is present the variable stays
            # unbound and the int() below raises — same as the original behavior.
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id = self.request.GET.get("thesaurus")
            elif self.request.GET.get("ths"):
                environment_thesaurus_id = self.request.GET.get("ths")
            access_status = False
            ids_thesaurus = Thesaurus.objects.all().values('id', 'thesaurus_scope')
            # Grant access only when one of the user's services matches the
            # scope of the thesaurus currently being worked on.
            for user_service in user_role:
                for elem in ids_thesaurus:
                    if (user_service == elem.get('thesaurus_scope')
                            and int(elem.get('id')) == int(environment_thesaurus_id)):
                        access_status = True
                        break
                if access_status:
                    break
            if not access_status:
                return HttpResponseForbidden()
        return super(TermListDescCreateView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        """Add the active thesaurus name and the occurrence formset (bound on POST)."""
        context = super(TermListDescCreateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        if self.request.POST:
            context['formset_toccurrence'] = TheraurusOccurrenceListDescFormSet(self.request.POST)
        else:
            context['formset_toccurrence'] = TheraurusOccurrenceListDescFormSet()
        return context

    def _render_duplicate_error(self, form, formset_toccurrence, message, term_string, language_code):
        # Re-render the form with a contextual error message.
        msg_erro = message + ' -----> ' + term_string + ' (' + language_code + ')'
        return self.render_to_response(self.get_context_data(
            form=form,
            formset_toccurrence=formset_toccurrence,
            msg_erro=msg_erro,
        ))

    def _term_already_exists(self, term_string, language_code, term_thesaurus):
        """Return True when an identical term exists as draft (-1), published (1) or historical (5)."""
        status_filter = Q()
        for status in (-1, 1, 5):
            status_filter |= Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=status,
            )
        candidates = TermListDesc.objects.filter(status_filter).values('term_string')
        # The DB collation may match case/accent-insensitively; compare the
        # encoded byte strings to keep only exact matches (as the original did).
        wanted = term_string.encode('utf-8')
        for term in candidates:
            if term.get('term_string').encode('utf-8') == wanted:
                return True
        return False

    def _next_sequential_number(self, ths):
        """Increment and persist the per-thesaurus term counter; create it at 1 when missing."""
        try:
            seq = code_controller_term.objects.get(thesaurus=ths)
            nseq = str(int(seq.sequential_number) + 1)
            seq.sequential_number = nseq
        except code_controller_term.DoesNotExist:
            seq = code_controller_term(sequential_number=1, thesaurus=ths)
            nseq = 1
        seq.save()
        return nseq

    def _update_term_ui(self, registry_language):
        """Stamp term_ui on the just-created term: <3-letter language> + 'd' + zero-padded sequential."""
        # Map the interface language code to the 3-letter code used in term_ui.
        # An unknown language raises (KeyError), mirroring the original's
        # uncaught NameError for the same case.
        language_3letters = {
            'en': 'eng',
            'es': 'spa',
            'pt-br': 'por',
            'fr': 'fre',
            'es-es': 'spa',
        }
        try:
            ths = self.request.GET.get("ths")
            nseq = self._next_sequential_number(ths)
            # BUG FIX: the original fetched objects.latest('id'), which is
            # race-prone under concurrent inserts; use the object just saved.
            update_field = TermListDesc.objects.get(id=self.object.id)
            zseq = str(nseq).zfill(6)  # left-pad with zeros
            update_field.term_ui = language_3letters[registry_language] + 'd' + zseq
            update_field.save()
        except TermListDesc.DoesNotExist:
            print('Warning! Does not exist id to this Term')

    def _save_new_term(self, form, formset_toccurrence):
        """Persist the new term and its occurrences, assign term_ui, then redirect."""
        self.object = form.save(commit=False)
        # Provide the current date when none was informed on the form.
        if not self.object.date_created:
            self.object.date_created = datetime.datetime.now().strftime('%Y-%m-%d')
        self.object.identifier_concept_id = self.request.POST.get("identifier_concept_id")
        self.object = form.save(commit=True)
        formset_toccurrence.instance = self.object
        formset_toccurrence.save()
        form.save()
        self._update_term_ui(self.request.POST.get("language_code"))
        return HttpResponseRedirect(self.get_success_url())

    def form_valid(self, form):
        """Validate uniqueness rules and create the term.

        The original duplicated the whole creation path verbatim in two
        branches; both now share _term_already_exists / _save_new_term.
        """
        formset_toccurrence = TheraurusOccurrenceListDescFormSet(self.request.POST, instance=self.object)
        form_valid = form.is_valid()
        formset_toccurrence_valid = formset_toccurrence.is_valid()
        if not (form_valid and formset_toccurrence_valid):
            # BUG FIX: the original fell off the end returning None here
            # (server error); re-display the form with its validation errors.
            return self.render_to_response(self.get_context_data(
                form=form,
                formset_toccurrence=formset_toccurrence,
            ))
        # Brings form variables to check if the term already exists.
        term_string = self.request.POST.get("term_string")
        language_code = self.request.POST.get("language_code")
        concept_preferred_term = self.request.POST.get("concept_preferred_term")
        record_preferred_term = self.request.POST.get("record_preferred_term")
        identifier_concept_id = self.request.POST.get("identifier_concept_id")
        term_thesaurus = self.request.GET.get("ths")
        if concept_preferred_term == 'Y' and record_preferred_term in ('Y', 'N'):
            # A concept may hold only one preferred-term configuration per
            # language (concept_preferred_term='Y' with the same
            # record_preferred_term flag).
            q_config = Q(
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                concept_preferred_term="Y",
                record_preferred_term=record_preferred_term,
                identifier_concept_id=identifier_concept_id,
            )
            if len(TermListDesc.objects.filter(q_config).values('id')) > 0:
                return self._render_duplicate_error(
                    form, formset_toccurrence,
                    _("This configuration already exists for this concept!"),
                    term_string, language_code,
                )
        if self._term_already_exists(term_string, language_code, term_thesaurus):
            return self._render_duplicate_error(
                form, formset_toccurrence,
                _("This Term already exist!"),
                term_string, language_code,
            )
        return self._save_new_term(form, formset_toccurrence)

    def get_success_url(self):
        """Redirect to the descriptor view of the newly created term."""
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/descriptors/view/%s%s' % (self.object.id, ths)
class TermListDescUpdateView(LoginRequiredView, UpdateView):
    """
    Used as class view to update Term
    """
    model = TermListDesc
    template_name = 'thesaurus/descriptor_edit_term.html'
    form_class = TermListDescUniqueForm

    def dispatch(self, *args, **kwargs):
        # Access control: outside the 'BR1.1' center a user may only act on
        # thesauri whose scope matches one of their service roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            # NOTE(review): if neither parameter is present,
            # environment_thesaurus_id stays unbound and the int() below raises.
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Run user_role array and compare the service registered to the user with the service registered in thesaurus.
            # If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(TermListDescUpdateView, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        """Validate preferred-term configuration and term uniqueness, then save
        the edited term, prepending a '^d<date>^h<old term>^u<user>^t<subfield>'
        entry to its historical annotation."""
        formset_toccurrence = TheraurusOccurrenceListDescFormSet(self.request.POST, instance=self.object)
        # Keep the current term_string (and flags) to populate the history
        term_string_current = TermListDesc.objects.filter(id=self.object.id).values('term_string','concept_preferred_term','record_preferred_term','historical_annotation')
        for y in term_string_current:
            term_string_old=y.get('term_string')
            concept_preferred_term_old=y.get('concept_preferred_term')
            record_preferred_term_old=y.get('record_preferred_term')
            historical_annotation_old=y.get('historical_annotation')
        # Brings form variables to check if it already exists
        term_string = self.request.POST.get("term_string")
        language_code = self.request.POST.get("language_code")
        concept_preferred_term = self.request.POST.get("concept_preferred_term")
        record_preferred_term = self.request.POST.get("record_preferred_term")
        identifier_concept_id = self.request.POST.get("identifier_concept_id")
        term_thesaurus = self.request.GET.get("ths")
        # Username — the loop runs at most once; it only reads the 'user_name' key
        user_data = additional_user_info(self.request)
        for user_name in user_data:
            username=user_data.get('user_name').encode('utf-8')
            break
        # History entry layout: ^d<date> ^h<previous term string> ^u<user> ^t<subfield code>
        v998='^d' + datetime.datetime.now().strftime('%Y-%m-%d') + '^h' + term_string_old + '^u' + username + '^t'
        # Record the previous configuration in the history.
        # Subfield codes: [01, 02, 03, 04, 16] when the term was previously the
        # concept-preferred one, [51, 52, 53, 54, 516] otherwise — one per language.
        if concept_preferred_term_old == 'Y':
            if language_code == 'en':
                sub_t='01'
            if language_code == 'es':
                sub_t='02'
            if language_code == 'pt-br':
                sub_t='03'
            if language_code == 'es-es':
                sub_t='04'
            if language_code == 'fr':
                sub_t='16'
        else:
            if language_code == 'en':
                sub_t='51'
            if language_code == 'es':
                sub_t='52'
            if language_code == 'pt-br':
                sub_t='53'
            if language_code == 'es-es':
                sub_t='54'
            if language_code == 'fr':
                sub_t='516'
        # NOTE(review): an unknown language_code leaves sub_t unbound and the
        # next line raises NameError.
        term_string_historical = v998 + sub_t
        if concept_preferred_term == 'Y' and record_preferred_term == 'Y':
            # Ensure no other term of this concept already has this
            # language_code with concept_preferred_term="Y" and record_preferred_term="Y"
            # Search by published record
            q_status_published = Q(
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                concept_preferred_term="Y",
                record_preferred_term="Y",
                identifier_concept_id=identifier_concept_id,
                # status=1,
            )
        if concept_preferred_term == 'Y' and record_preferred_term == 'N':
            # Ensure no other term of this concept already has this
            # language_code with concept_preferred_term="Y" and record_preferred_term="N"
            # Search by published record
            q_status_published = Q(
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                concept_preferred_term="Y",
                record_preferred_term="N",
                identifier_concept_id=identifier_concept_id,
                # status=1,
            )
        # Evaluate PREFERRED concept
        if ( concept_preferred_term == 'Y' and record_preferred_term == 'Y' ) or ( concept_preferred_term == 'Y' and record_preferred_term == 'N' ):
            has_term_config = TermListDesc.objects.filter( q_status_published ).values('id').exclude(id=self.object.id,)
            if len(has_term_config) > 0:
                msg_erro = _("This configuration already exists for this concept!") + ' -----> ' + term_string + ' (' + language_code + ')'
                return self.render_to_response(self.get_context_data(
                    form=form,
                    formset_toccurrence=formset_toccurrence,
                    msg_erro=msg_erro,
                ))
            else:
                # Search by draft record
                q_status_draft = Q(
                    term_string__exact=term_string,
                    language_code=language_code,
                    term_thesaurus=term_thesaurus,
                    status=-1,
                )
                # Search by published record
                q_status_published = Q(
                    term_string__exact=term_string,
                    language_code=language_code,
                    term_thesaurus=term_thesaurus,
                    status=1,
                )
                # Search by historical record
                q_status_historical = Q(
                    term_string__exact=term_string,
                    language_code=language_code,
                    term_thesaurus=term_thesaurus,
                    status=5,
                )
                has_term = TermListDesc.objects.filter( q_status_draft | q_status_published | q_status_historical ).exclude(id=self.object.id).values('term_string')
                # Walk the results and compare byte-for-byte (the DB match may
                # be case/accent-insensitive)
                has_equal=''
                for term in has_term:
                    t=term.get('term_string').encode('utf-8')
                    if t == term_string.encode('utf-8'):
                        has_equal=t
                if has_equal:
                    msg_erro = _("This Term already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
                    return self.render_to_response(self.get_context_data(
                        form=form,
                        formset_toccurrence=formset_toccurrence,
                        msg_erro=msg_erro,
                    ))
                else:
                    # No conflicting configuration and no equal term: proceed with the update.
                    form_valid = form.is_valid()
                    formset_toccurrence_valid = formset_toccurrence.is_valid()
                    if (form_valid and formset_toccurrence_valid):
                        self.object = form.save(commit=False)
                        self.object.identifier_concept_id = self.request.POST.get("identifier_concept_id")
                        self.object.date_altered = datetime.datetime.now().strftime('%Y-%m-%d')
                        # Prepend the new history entry to the existing annotation
                        if len(historical_annotation_old) > 0:
                            term_string_historical=term_string_historical + ';' + historical_annotation_old
                        self.object.historical_annotation = term_string_historical
                        self.object = form.save(commit=True)
                        formset_toccurrence.instance = self.object
                        formset_toccurrence.save()
                        form.save()
                        return HttpResponseRedirect(self.get_success_url())
                    else:
                        return self.render_to_response(self.get_context_data(
                            form=form,
                            formset_toccurrence=formset_toccurrence,
                        ))
        else:
            # Evaluate NOT PREFERRED concept
            # Search by draft record
            q_status_draft = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=-1,
            )
            # Search by published record
            q_status_published = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=1,
            )
            # Search by historical record
            q_status_historical = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=5,
            )
            has_term = TermListDesc.objects.filter( q_status_draft | q_status_published | q_status_historical ).exclude(id=self.object.id).values('term_string')
            # Walk the results and compare byte-for-byte
            has_equal=''
            for term in has_term:
                t=term.get('term_string').encode('utf-8')
                if t == term_string.encode('utf-8'):
                    has_equal=t
            if has_equal:
                msg_erro = _("This Term already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
                return self.render_to_response(self.get_context_data(
                    form=form,
                    formset_toccurrence=formset_toccurrence,
                    msg_erro=msg_erro,
                ))
            else:
                form_valid = form.is_valid()
                formset_toccurrence_valid = formset_toccurrence.is_valid()
                if (form_valid and formset_toccurrence_valid):
                    self.object = form.save(commit=False)
                    self.object.identifier_concept_id = self.request.POST.get("identifier_concept_id")
                    self.object.date_altered = datetime.datetime.now().strftime('%Y-%m-%d')
                    term_string_historical = v998 + sub_t
                    # Prepend the new history entry to the existing annotation
                    if len(historical_annotation_old) > 0:
                        term_string_historical=term_string_historical + ';' + historical_annotation_old
                    self.object.historical_annotation = term_string_historical
                    self.object = form.save(commit=True)
                    formset_toccurrence.instance = self.object
                    formset_toccurrence.save()
                    form.save()
                    return HttpResponseRedirect(self.get_success_url())
                else:
                    return self.render_to_response(self.get_context_data(
                        form=form,
                        formset_toccurrence=formset_toccurrence,
                    ))

    def get_success_url(self):
        # Redirect back to the descriptor view of the edited term
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/descriptors/view/%s%s' % ( self.object.id, ths )

    def get_context_data(self, **kwargs):
        # Add the active thesaurus name and the occurrence formset (bound on POST)
        context = super(TermListDescUpdateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        if self.request.POST:
            context['formset_toccurrence'] = TheraurusOccurrenceListDescFormSet(self.request.POST)
        else:
            context['formset_toccurrence'] = TheraurusOccurrenceListDescFormSet(instance=self.object)
        return context
class legacyInformationDescCreateView(LoginRequiredView, CreateView):
    """
    Used as class view to create legacy information.
    """
    model = legacyInformationDesc
    template_name = 'thesaurus/descriptor_new_legacy.html'
    form_class = legacyInformationDescForm

    def get_success_url(self):
        """Redirect to the first term of the record's first concept."""
        id_identifier = self.request.GET.get("identifier_id")
        # Search ID of the first concept of this record
        concepts_of_registry = IdentifierConceptListDesc.objects.filter(identifier_id=id_identifier).values('id')
        id_concept = concepts_of_registry[0].get('id')
        # Search ID of the first term of this concept to redirect
        terms_of_concept = TermListDesc.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/descriptors/view/%s%s' % (id_term, ths)

    def form_valid(self, form):
        """Attach the owning identifier and save the legacy information."""
        if form.is_valid():
            self.object = form.save(commit=False)
            self.object.identifier_id = self.request.POST.get("identifier_id")
            # BUG FIX: the original called form.save() a second time right
            # after this one, writing the same row to the database twice.
            self.object = form.save()
            return HttpResponseRedirect(self.get_success_url())
        else:
            return self.render_to_response(self.get_context_data(form=form))

    def get_context_data(self, **kwargs):
        """Add the active thesaurus name to the template context."""
        context = super(legacyInformationDescCreateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
class legacyInformationDescUpdateView(LoginRequiredView, UpdateView):
    """
    Used as class view to update a legacy information.
    """
    model = legacyInformationDesc
    template_name = 'thesaurus/descriptor_edit_legacy.html'
    form_class = legacyInformationDescForm

    def get_success_url(self):
        """Redirect to the first term of the record's first concept."""
        id_identifier = self.request.GET.get("identifier_id")
        # Search ID of the first concept of this record
        concepts_of_registry = IdentifierConceptListDesc.objects.filter(identifier_id=id_identifier).values('id')
        id_concept = concepts_of_registry[0].get('id')
        # Search ID of the first term of this concept to redirect
        terms_of_concept = TermListDesc.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/descriptors/view/%s%s' % (id_term, ths)

    def form_valid(self, form):
        """Save the edited legacy information."""
        if form.is_valid():
            # BUG FIX: the original called form.save() a second time right
            # after this one, writing the same row to the database twice.
            self.object = form.save()
            return HttpResponseRedirect(self.get_success_url())
        else:
            return self.render_to_response(self.get_context_data(form=form))

    def get_context_data(self, **kwargs):
        """Add the active thesaurus name to the template context."""
        context = super(legacyInformationDescUpdateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
class PageViewDesc(LoginRequiredView, DetailView):
    """
    Used as class DetailView to list the result
    """
    model = TermListDesc
    template_name = 'thesaurus/page_view_desc.html'

    def dispatch(self, *args, **kwargs):
        # Access control: outside the 'BR1.1' center a user may only view
        # thesauri whose scope matches one of their service roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            # NOTE(review): if neither parameter is present,
            # environment_thesaurus_id stays unbound and the int() below raises.
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Run user_role array and compare the service registered to the user with the service registered in thesaurus.
            # If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(PageViewDesc, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        """Assemble every queryset the descriptor detail page renders: record
        data, descriptions, previous indexing, pharmacological actions, tree
        numbers, related descriptors, preferred and non-preferred
        concepts/terms, legacy information and entry combinations."""
        lang_code = get_language()  # NOTE(review): apparently unused below
        context = super(PageViewDesc, self).get_context_data(**kwargs)
        if self.object:
            # IdentifierConceptListDesc - recover pk from concept
            id_concept = IdentifierConceptListDesc.objects.filter(
                id=self.object.identifier_concept_id,
            ).values('identifier_id').distinct()
            # Used when creating a new concept
            for concept in id_concept:
                context['id_concept_new'] = concept
            # IdentifierConceptListDesc - retrieves pk's that share the same identifier_id - can bring more than 1
            # NOTE(review): 'ids' is apparently unused below
            ids = IdentifierConceptListDesc.objects.filter(
                identifier_id=id_concept,
            ).values('id')
            # IdentifierDesc
            # Brings information to Active Descriptor Record
            context['identifierdesc_objects'] = IdentifierDesc.objects.filter(
                id=id_concept,
            )
            # IdentifierDesc
            context['id_register_objects'] = IdentifierDesc.objects.filter(
                id=id_concept,
            ).values(
                # IdentifierDesc
                'id',
                'thesaurus',
                'descriptor_class',
                'descriptor_ui',
                'decs_code',
                'external_code',
                'nlm_class_number',
                'date_created',
                'date_revised',
                'date_established',
                'abbreviation',
            )
            context['description_objects'] = IdentifierDesc.objects.filter(
                id=id_concept,
            ).values(
                # DescriptionDesc
                'descriptiondesc__identifier_id',
                'descriptiondesc__language_code',
                'descriptiondesc__annotation',
                'descriptiondesc__history_note',
                'descriptiondesc__online_note',
                'descriptiondesc__public_mesh_note',
                'descriptiondesc__consider_also',
            )
            # Used to build the Previous Indexing list
            context['previous_objects'] = IdentifierDesc.objects.filter(
                id=id_concept,
            ).values(
                # PreviousIndexingListDesc
                'previousdesc__identifier_id',
                'previousdesc__previous_indexing',
                'previousdesc__language_code',
            ).distinct().order_by('previousdesc__previous_indexing')
            # Used to build the Pharmacological Action list
            context['pharmaco_objects'] = IdentifierDesc.objects.filter(
                id=id_concept,
            ).values(
                # PharmacologicalActionList
                'pharmacodesc__identifier_id',
                'pharmacodesc__term_string',
                'pharmacodesc__descriptor_ui',
                'pharmacodesc__language_code',
            ).distinct().order_by('pharmacodesc__term_string')
            # Used to build the tree number list
            context['tree_numbers_objects'] = IdentifierDesc.objects.filter(
                id=id_concept,
            ).values(
                # TreeNumbersListDesc
                'dtreenumbers__identifier_id',
                'dtreenumbers__tree_number',
            ).distinct().order_by('dtreenumbers__tree_number')
            # Used to build the See Also (related) list
            context['related_objects'] = IdentifierDesc.objects.filter(
                id=id_concept,
            ).values(
                # RelatedListDesc
                'relateddesc__term_string',
                'relateddesc__descriptor_ui',
            ).distinct().order_by('relateddesc__term_string')
            context['term_string_info_preferred_objects'] = IdentifierConceptListDesc.objects.filter(
                identifier=id_concept,termdesc__concept_preferred_term='Y',termdesc__record_preferred_term='Y',
            ).order_by('identifier_id',
                'termdesc__identifier_concept_id',
                '-preferred_concept',
                '-termdesc__concept_preferred_term',
                'termdesc__language_code',
                'termdesc__term_string',
            ).values(
                'id',
                'termdesc__status',
                'termdesc__term_string',
                'termdesc__language_code',
                'identifier_id',
            )
            context['entry_terms_objects'] = IdentifierConceptListDesc.objects.filter(
                identifier=id_concept,termdesc__status=1,termdesc__record_preferred_term='N',
            ).order_by('identifier_id',
                'termdesc__language_code',
                'termdesc__term_string',
            ).values(
                'id',
                'termdesc__id',
                'termdesc__term_string',
                'termdesc__language_code',
            )
            context['scope_note_objects'] = IdentifierConceptListDesc.objects.filter(
                identifier=id_concept,preferred_concept='Y',
            ).order_by('identifier_id',
            ).values(
                'conceptdesc__language_code',
                'conceptdesc__scope_note',
            ).distinct()
            context['legacy_objects'] = legacyInformationDesc.objects.filter(
                identifier=id_concept,
            ).values(
                'id',
                'pre_codificado',
                'desastre',
                'reforma_saude',
                'geografico',
                'mesh',
                'pt_lilacs',
                'nao_indexavel',
                'homeopatia',
                'repidisca',
                'saude_publica',
                'exploded',
                'geog_decs',
                'identifier_id',
            )
            context['entrycombination_objects'] = EntryCombinationListDesc.objects.filter(
                identifier=id_concept,
            ).values(
                'id',
                'ecin_qualif',
                'ecin_id',
                'ecout_desc',
                'ecout_desc_id',
                'ecout_qualif',
                'ecout_qualif_id',
                'identifier_id',
            )
            # Used to display information about preferred concepts and terms
            context['identifierconceptlist_objects_preferred'] = IdentifierConceptListDesc.objects.filter(
                identifier=id_concept,preferred_concept='Y',
            ).order_by(
                '-preferred_concept',
                '-termdesc__concept_preferred_term',
                'termdesc__language_code',
                'termdesc__term_string',
            ).values(
                'id',
                'identifier_id',
                'concept_ui',
                'concept_relation_name',
                'preferred_concept',
                'casn1_name',
                'registry_number',
                'conceptdesc__identifier_concept_id',
                'conceptdesc__language_code',
                'conceptdesc__scope_note',
                'termdesc__id',
                'termdesc__identifier_concept_id',
                'termdesc__status',
                'termdesc__term_ui',
                'termdesc__language_code',
                'termdesc__term_string',
                'termdesc__concept_preferred_term',
                'termdesc__is_permuted_term',
                'termdesc__lexical_tag',
                'termdesc__record_preferred_term',
                'termdesc__entry_version',
                'termdesc__date_created',
                'termdesc__date_altered',
                'termdesc__historical_annotation',
            ).distinct()
            # Used to display preferred concepts and terms on the Concepts tab
            context['identifierconceptlist_objects_preferred_for_concepts'] = IdentifierConceptListDesc.objects.filter(
                identifier=id_concept,preferred_concept='Y',
            ).order_by(
                '-preferred_concept',
                '-termdesc__concept_preferred_term',
                'termdesc__language_code',
                'termdesc__term_string',
            ).values(
                'identifier_id',
                'id',
                'concept_ui',
                'concept_relation_name',
                'preferred_concept',
                'termdesc__id',
                'termdesc__identifier_concept_id',
                'termdesc__status',
                'termdesc__term_ui',
                'termdesc__language_code',
                'termdesc__term_string',
                'termdesc__concept_preferred_term',
                'termdesc__is_permuted_term',
                'termdesc__lexical_tag',
                'termdesc__record_preferred_term',
                'termdesc__entry_version',
                'termdesc__date_created',
                'termdesc__date_altered',
                'termdesc__historical_annotation',
            ).distinct()
            # Used to display information about non-preferred concepts and terms
            context['identifierconceptlist_objects'] = IdentifierConceptListDesc.objects.filter(
                identifier=id_concept,preferred_concept='N',
            ).order_by('identifier_id',
                'termdesc__identifier_concept_id',
                '-preferred_concept',
                '-termdesc__concept_preferred_term',
                'termdesc__language_code',
                'termdesc__term_string',
            ).values(
                'id',
                'identifier_id',
                'concept_ui',
                'concept_relation_name',
                'preferred_concept',
                'casn1_name',
                'registry_number',
                'conceptdesc__identifier_concept_id',
                'conceptdesc__language_code',
                'conceptdesc__scope_note',
                'termdesc__id',
                'termdesc__identifier_concept_id',
                'termdesc__status',
                'termdesc__term_ui',
                'termdesc__language_code',
                'termdesc__term_string',
                'termdesc__concept_preferred_term',
                'termdesc__is_permuted_term',
                'termdesc__lexical_tag',
                'termdesc__record_preferred_term',
                'termdesc__entry_version',
                'termdesc__date_created',
                'termdesc__date_altered',
                'termdesc__historical_annotation',
            ).distinct()
            # Used to display non-preferred concepts and terms on the Concepts tab
            context['identifierconceptlist_objects_for_concepts'] = IdentifierConceptListDesc.objects.filter(
                identifier=id_concept,preferred_concept='N',
            ).order_by('identifier_id',
                'termdesc__identifier_concept_id',
                '-preferred_concept',
                '-termdesc__concept_preferred_term',
                'termdesc__language_code',
                'termdesc__term_string',
            ).values(
                'identifier_id',
                'id',
                'concept_ui',
                'concept_relation_name',
                'preferred_concept',
                'termdesc__id',
                'termdesc__identifier_concept_id',
                'termdesc__status',
                'termdesc__term_ui',
                'termdesc__language_code',
                'termdesc__term_string',
                'termdesc__concept_preferred_term',
                'termdesc__is_permuted_term',
                'termdesc__lexical_tag',
                'termdesc__record_preferred_term',
                'termdesc__entry_version',
                'termdesc__date_created',
                'termdesc__date_altered',
                'termdesc__historical_annotation',
            ).distinct()
            # Brings abbreviation and term_string for the current interface language
            id_abbrev = IdentifierDesc.objects.filter(id=id_concept).values('abbreviation')
            translation = IdentifierQualif.objects.filter(id__in=id_abbrev).order_by('abbreviation') # __in used because there may be more than one result
            context['allowable_qualifiers_objects'] = translation
            # Information for the audit log
            # Record
            # ID of the model
            id_ctype_identifierdesc = ContentType.objects.filter(model='identifierdesc').values('id')
            context['id_ctype_identifierdesc'] = id_ctype_identifierdesc[0].get('id')
            # ID of the record
            id_identifierdesc = IdentifierDesc.objects.filter(id=id_concept).values('id')
            context['id_identifierdesc'] = id_identifierdesc[0].get('id')
            context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
# Qualifiers -------------------------------------------------------------------------
class QualifUpdate(LoginRequiredView):
    """
    Handle creation and update of Qualifiers objects (form step 1).

    Validates the main IdentifierQualif form together with its description
    and tree-number inline formsets, rejects tree numbers already used in
    the same thesaurus, assigns decs_code from a per-thesaurus sequence,
    builds qualifier_ui as 'Q' + <THESAURUS_ACRONYM> + zero-padded code,
    then redirects to the concept/term creation step.
    """
    model = IdentifierQualif
    # success_url = reverse_lazy('list_descriptor')
    success_url = reverse_lazy('create_concept_termqualif')
    form_class = IdentifierQualifForm
    template_name = "thesaurus/qualifier_form_step1.html"

    def form_valid(self, form):
        formset_descriptor = DescriptionQualifFormSet(self.request.POST, instance=self.object)
        formset_treenumber = TreeNumbersListQualifFormSet(self.request.POST, instance=self.object)

        # run all validation before for display formset errors at form
        form_valid = form.is_valid()
        formset_descriptor_valid = formset_descriptor.is_valid()
        formset_treenumber_valid = formset_treenumber.is_valid()

        if (form_valid and
            formset_descriptor_valid and
            formset_treenumber_valid
            ):
            # self.object = form.save()

            # Check whether any value was submitted in formset_treenumber
            tree_number_existentes=''
            exist_tree_numbers=0
            for f in formset_treenumber:
                if f.cleaned_data is not None:
                    fields_t = f.cleaned_data
                    content_tree_number = fields_t.get('tree_number')
                    # DELETE comes from cleaned_data and tells whether the row
                    # was removed on the form (True means it was deleted)
                    status_preenchimento = fields_t.get('DELETE')
                    if status_preenchimento == False:
                        exist_tree_numbers = exist_tree_numbers + 1
                        # Check whether this tree_number is already registered for this thesaurus.
                        # NOTE(review): a QuerySet is never None, so the guard below is
                        # always True; the for-loop already handles the empty result.
                        result_tree_number = TreeNumbersListQualif.objects.filter(tree_number=content_tree_number).values('identifier_id')
                        if result_tree_number is not None:
                            for t in result_tree_number:
                                identifier_id_existent_tree_number = t.get('identifier_id')
                                # Checks if the record is for the thesaurus being worked on
                                res_existent_thesaurus_id = IdentifierQualif.objects.filter(id=identifier_id_existent_tree_number).values('thesaurus_id')
                                existent_thesaurus_id=res_existent_thesaurus_id[0].get('thesaurus_id')
                                # Brings id of thesaurus currently operating
                                environment_thesaurus_id = self.request.GET.get("ths")
                                # If tree_number exists in same thesaurus creates error
                                if int(environment_thesaurus_id) == int(existent_thesaurus_id):
                                    tree_number_existentes = tree_number_existentes + content_tree_number + ' '

            # Condition required to create the record: at least one tree number
            # kept on the form and none of them duplicated in this thesaurus.
            if not tree_number_existentes and exist_tree_numbers > 0:
                # Bring the choiced language_code from the first form
                # registry_language = formset_descriptor.cleaned_data[0].get('language_code')
                registry_language = self.request.GET.get("language_code")

                # Get sequential number to write to decs_code
                self.object = form.save(commit=False)
                ths = self.request.GET.get("ths")
                try:
                    seq = code_controller.objects.get(thesaurus=self.request.GET.get("ths"))
                    nseq = str(int(seq.sequential_number) + 1)
                    seq.sequential_number = nseq
                    seq.save()
                except code_controller.DoesNotExist:
                    # First qualifier of this thesaurus: start the sequence at 1
                    seq = code_controller(sequential_number=1,thesaurus=ths)
                    nseq = 1
                    seq.save()
                self.object.decs_code = nseq
                self.object = form.save(commit=True)

                # Get thesaurus_acronym to create the new ID format for the qualifier_ui field
                self.object = form.save(commit=False)
                try:
                    acronym = Thesaurus.objects.filter(id=self.request.GET.get("ths")).values('thesaurus_acronym')
                    # retrieve the acronym and turn it into upper case
                    acronym = str(acronym[0].get('thesaurus_acronym')).upper()
                    # use self.object.decs_code to compose qualifier_ui
                    zseq = str(self.object.decs_code).zfill(6) # left-pad with zeros
                    self.object.qualifier_ui = 'Q' + acronym + zseq
                except Thesaurus.DoesNotExist:
                    id_thesaurus = str(self.object.id)
                    print 'Warning! - No thesaurus_acronym for id -->',id_thesaurus
                self.object = form.save(commit=True)

                formset_descriptor.instance = self.object
                formset_descriptor.save()

                formset_treenumber.instance = self.object
                formset_treenumber.save()

                form.save()

                # These variables concern creating a new record from an existing
                # term; when present they are forwarded to the concept-creation phase.
                if self.request.GET.get("term_ui") and self.request.GET.get("term_id"):
                    term_ui_alter = self.request.GET.get("term_ui")
                    term_id_alter = self.request.GET.get("term_id")
                    return redirect(reverse('create_concept_termqualif') + '?ths=' + self.request.GET.get("ths") + '&' + 'registry_language=' + registry_language + '&term=' + self.request.GET.get("term") + '&term_ui_alter=' + term_ui_alter + '&term_id_alter=' + term_id_alter)
                else:
                    return redirect(reverse('create_concept_termqualif') + '?ths=' + self.request.GET.get("ths") + '&' + 'registry_language=' + registry_language + '&term=' + self.request.GET.get("term"))
            else:
                if exist_tree_numbers == 0:
                    msg_erro = _("Hierarchical level")
                else:
                    msg_erro = _("already exists!!!") + ' -----> ' + tree_number_existentes

                return self.render_to_response(
                    self.get_context_data(
                        form=form,
                        formset_descriptor=formset_descriptor,
                        formset_treenumber=formset_treenumber,
                        msg_erro=msg_erro,
                    )
                )
        else:
            return self.render_to_response(
                self.get_context_data(
                    form=form,
                    formset_descriptor=formset_descriptor,
                    formset_treenumber=formset_treenumber,
                )
            )

    def form_invalid(self, form):
        # force use of form_valid method to run all validations
        return self.form_valid(form)

    def get_context_data(self, **kwargs):
        context = super(QualifUpdate, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        context['language_system'] = get_language()

        # Unbound formsets only for the initial GET render; on POST the bound
        # formsets are passed in explicitly by form_valid.
        if self.request.method == 'GET':
            context['formset_descriptor'] = DescriptionQualifFormSet(instance=self.object)
            context['formset_treenumber'] = TreeNumbersListQualifFormSet(instance=self.object)
        return context
class QualifCreateView(QualifUpdate, CreateView):
    """
    Used as class view to create Qualifiers.

    dispatch() enforces per-thesaurus access control: users whose
    cooperative center is not 'BR1.1' may only operate on a thesaurus whose
    thesaurus_scope matches one of their registered service roles.
    """
    def dispatch(self, *args, **kwargs):
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']

        if user_cc != 'BR1.1':
            # Brings the thesaurus id from the environment
            # ("thesaurus" takes precedence over "ths")
            environment_thesaurus_id = self.request.GET.get("thesaurus") or self.request.GET.get("ths")
            # Bug fix: previously, when neither parameter was present the
            # variable stayed unbound and int(environment_thesaurus_id)
            # below raised NameError (HTTP 500); deny access explicitly.
            if not environment_thesaurus_id:
                return HttpResponseForbidden()

            # Compare each service role of the user with the scope registered
            # in every thesaurus; grant access only when the matching
            # thesaurus is the one currently being operated on.
            access_status = False
            ids_thesaurus = Thesaurus.objects.all().values('id', 'thesaurus_scope')
            for user_service in user_role:
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status = True

            if not access_status:
                return HttpResponseForbidden()

        return super(QualifCreateView, self).dispatch(*args, **kwargs)
class QualifDeleteView(QualifUpdate, DeleteView):
    """
    Used as class view to delete Qualifier
    """
    model = IdentifierQualif
    template_name = 'thesaurus/qualifier_confirm_delete.html'

    def get_success_url(self):
        # After deletion, return to the qualifier list of the same thesaurus.
        return '/thesaurus/qualifiers/?ths=%s' % self.request.GET.get("ths")
class QualifListDescChk(LoginRequiredView, ListView):
    """
    Pre-creation check (form step 0): verifies whether a qualifier term
    already exists before allowing a new one to be created.

    If no draft, published or historical term with the same string and
    language exists in the chosen thesaurus, the user is redirected
    straight to the creation form.
    """
    template_name = "thesaurus/qualifier_form_step0.html"
    context_object_name = "registers"

    def dispatch(self, *args, **kwargs):
        # Access control: users outside BR1.1 may only act on thesauri whose
        # scope matches one of their registered service roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Run user_role array and compare the service registered to the user with the service registered in thesaurus.
            # If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(QualifListDescChk, self).dispatch(*args, **kwargs)

    def get_queryset(self):
        # No listing here: this view only collects the action parameters;
        # the real check happens in render_to_response.
        object_list = []
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        return object_list

    def get_context_data(self, **kwargs):
        context = super(QualifListDescChk, self).get_context_data(**kwargs)
        context['choiced_thesaurus_info'] = Thesaurus.objects.filter(id=self.request.GET.get("thesaurus"))
        context['term_choiced'] = self.actions['term_string']
        context['filter_language'] = self.actions['filter_language']
        return context

    def render_to_response(self, context):
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])

        if self.actions['term_string'] and self.actions['filter_language']:
            term_choiced = self.actions['term_string']
            # To allow creating a new term it must not already exist with status:
            # -1 - Draft
            #  1 - Published
            #  5 - Historical
            # Search by draft record
            q_status_draft = Q(
                term_string__exact=self.actions['term_string'],
                language_code=self.actions['filter_language'],
                term_thesaurus=self.request.GET.get("thesaurus"),
                status=-1,
            )
            # Search by published record
            q_status_published = Q(
                term_string__exact=self.actions['term_string'],
                language_code=self.actions['filter_language'],
                term_thesaurus=self.request.GET.get("thesaurus"),
                status=1,
            )
            # Search by historical record
            q_status_historical = Q(
                term_string__exact=self.actions['term_string'],
                language_code=self.actions['filter_language'],
                term_thesaurus=self.request.GET.get("thesaurus"),
                status=5,
            )
            has_term = TermListQualif.objects.filter( q_status_draft | q_status_published | q_status_historical ).values('term_string')

            # Iterate over the results and compare byte-wise (Python 2 str);
            # the __exact lookup above may be case-insensitive depending on
            # the database collation, hence this exact comparison.
            has_equal=''
            for term in has_term:
                t=term.get('term_string').encode('utf-8')
                if t == term_choiced.encode('utf-8'):
                    # print 'Igual-->',t,' - ',term_choiced.encode('utf-8')
                    has_equal=t

            # No exact match found: go straight to the creation form
            if not has_equal:
                return redirect('/thesaurus/qualifiers/new/?ths=' + self.request.GET.get("thesaurus") + '&term=' + self.actions['term_string'] + '&language_code=' + self.actions['filter_language'])

        return super(QualifListDescChk, self).render_to_response(context)
class QualifRegisterUpdateView(LoginRequiredView, UpdateView):
    """
    Used as class view to update a register of qualifier.

    Validates the main form plus description and tree-number formsets,
    rejects tree numbers already used by OTHER records of the same
    thesaurus, and redirects to the view page of the record's first term.
    """
    model = IdentifierQualif
    template_name = 'thesaurus/qualifier_edit_register.html'
    form_class = IdentifierQualifForm

    def dispatch(self, *args, **kwargs):
        # Access control: users outside BR1.1 may only act on thesauri whose
        # scope matches one of their registered service roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Run user_role array and compare the service registered to the user with the service registered in thesaurus.
            # If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(QualifRegisterUpdateView, self).dispatch(*args, **kwargs)

    def get_success_url(self):
        id_register = self.object.id
        # Search ID of the first concept of the record to later search the first term of the concept
        concepts_of_register = IdentifierConceptListQualif.objects.filter(identifier_id=id_register).values('id')
        id_concept = concepts_of_register[0].get('id')
        # Search ID of the first term of this concept to redirect
        terms_of_concept = TermListQualif.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/qualifiers/view/%s%s' % ( id_term, ths )

    def form_valid(self, form):
        formset_descriptor = DescriptionQualifFormSet(self.request.POST, instance=self.object)
        formset_treenumber = TreeNumbersListQualifFormSet(self.request.POST, instance=self.object)

        # run all validation before for display formset errors at form
        form_valid = form.is_valid()
        formset_descriptor_valid = formset_descriptor.is_valid()
        formset_treenumber_valid = formset_treenumber.is_valid()

        if (form_valid and
            formset_descriptor_valid and
            formset_treenumber_valid
            ):
            # Check whether any value was submitted in formset_treenumber
            tree_number_existentes=''
            # Used to check if the form is completely empty - which should not occur
            form_vazio=True
            exist_tree_numbers=0
            for f in formset_treenumber:
                if f.cleaned_data is not None:
                    fields_t = f.cleaned_data
                    content_tree_number = fields_t.get('tree_number')
                    identifier_id = fields_t.get('identifier')
                    # Where identifier_id = 'identifier': <IdentifierDesc: 34244>
                    # Example
                    # ---> {'identifier': <IdentifierDesc: 34244>, 'tree_number': u'SP4.026.307.808.100', u'id': <TreeNumbersListDesc: 68760>, u'DELETE': False}
                    # To use it, reference identifier_id.id
                    # DELETE comes from cleaned_data and tells whether the row
                    # was removed on the form (True means it was deleted)
                    status_preenchimento = fields_t.get('DELETE')
                    if status_preenchimento == False:
                        form_vazio=False
                        exist_tree_numbers = exist_tree_numbers + 1
                        # Check whether this tree_number is already registered for this
                        # thesaurus, excluding the record currently being edited.
                        # NOTE(review): a QuerySet is never None, so the guard below
                        # is always True; the for-loop handles the empty result.
                        result_tree_number = TreeNumbersListQualif.objects.filter(tree_number=content_tree_number).exclude(identifier_id=identifier_id.id).values('identifier_id')
                        if result_tree_number is not None:
                            for t in result_tree_number:
                                identifier_id_existent_tree_number = t.get('identifier_id')
                                # Checks if the record is for the thesaurus being worked on
                                res_existent_thesaurus_id = IdentifierQualif.objects.filter(id=identifier_id_existent_tree_number).values('thesaurus_id')
                                existent_thesaurus_id=res_existent_thesaurus_id[0].get('thesaurus_id')
                                # Brings id of thesaurus currently operating
                                environment_thesaurus_id = self.request.GET.get("ths")
                                # If tree_number exists in same thesaurus creates error
                                if int(environment_thesaurus_id) == int(existent_thesaurus_id):
                                    tree_number_existentes = tree_number_existentes + content_tree_number + ' '

            # Condition required to save the record
            if not tree_number_existentes and exist_tree_numbers > 0 and form_vazio==False:
                # Bring the choiced language_code from the first form
                # NOTE(review): registry_language is assigned but not used below
                registry_language = formset_descriptor.cleaned_data[0].get('language_code')

                self.object = form.save()

                formset_descriptor.instance = self.object
                formset_descriptor.save()

                formset_treenumber.instance = self.object
                formset_treenumber.save()

                form.save()

                return HttpResponseRedirect(self.get_success_url())
            else:
                # Completely empty form: silently go back to the list
                if form_vazio == True:
                    ths = self.request.GET.get("ths")
                    return redirect('/thesaurus/qualifiers?ths=' + ths)
                elif exist_tree_numbers == 0:
                    msg_erro = _("Hierarchical level")
                else:
                    msg_erro = _("already exists!!!") + ' -----> ' + tree_number_existentes

                return self.render_to_response(
                    self.get_context_data(
                        form=form,
                        formset_descriptor=formset_descriptor,
                        formset_treenumber=formset_treenumber,
                        msg_erro=msg_erro,
                    )
                )
        else:
            return self.render_to_response(
                self.get_context_data(
                    form=form,
                    formset_descriptor=formset_descriptor,
                    formset_treenumber=formset_treenumber,
                )
            )

    def form_invalid(self, form):
        # force use of form_valid method to run all validations
        return self.form_valid(form)

    def get_context_data(self, **kwargs):
        context = super(QualifRegisterUpdateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        context['language_system'] = get_language()

        # Unbound formsets only for the initial GET render; on POST the bound
        # formsets are passed in explicitly by form_valid.
        if self.request.method == 'GET':
            context['formset_descriptor'] = DescriptionQualifFormSet(instance=self.object)
            context['formset_treenumber'] = TreeNumbersListQualifFormSet(instance=self.object)
        return context
class QualifListView(LoginRequiredView, ListView):
    """
    List qualifier records.

    Builds the queryset from the ACTIONS GET parameters: free search on
    term_string plus dedicated filters (concept, abbreviation, qualifier_ui,
    decs_code, tree_number, concept_ui), each optionally narrowed by status
    and language, with optional descending ordering.
    """
    template_name = "thesaurus/qualifier_list.html"
    context_object_name = "registers"
    paginate_by = ITEMS_PER_PAGE

    def dispatch(self, *args, **kwargs):
        # Access control: users outside BR1.1 may only act on thesauri whose
        # scope matches one of their registered service roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment, trying each known parameter
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            elif self.request.GET.get("ths"):
                environment_thesaurus_id=self.request.GET.get("ths")
            elif self.request.POST.get("choiced_thesaurus"):
                environment_thesaurus_id=self.request.POST.get("choiced_thesaurus")
            else:
                # Fall back to the action parameters (GET with defaults)
                self.actions = {}
                for key in ACTIONS.keys():
                    self.actions[key] = self.request.GET.get(key, ACTIONS[key])
                environment_thesaurus_id=self.actions['choiced_thesaurus']
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Run user_role array and compare the service registered to the user with the service registered in thesaurus.
            # If the service exists bring the id of that service and compare with the id that is in the environment at the moment, if not exist generates the deny page
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(QualifListView, self).dispatch(*args, **kwargs)

    def get_queryset(self):
        lang_code = get_language()
        object_list = []
        registers_indexed = []
        concepts_indexed = []

        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])

        # icontains X exact -------------------------------------------------------------------------------------
        if self.actions['exact']:
            q_term_string = Q(term_string=self.actions['s'].strip())
        else:
            q_term_string = Q(term_string__icontains=self.actions['s'].strip())

        # term_string
        if self.actions['filter_fields'] == 'term_string' and self.actions['exact']:
            q_term_string = Q(term_string=self.actions['s'].strip())
        else:
            if self.actions['filter_fields'] == 'term_string' and not self.actions['exact']:
                q_term_string = Q(term_string__icontains=self.actions['s'].strip())

        # concept_preferred_term='Y'
        q_concept_preferred_term = Q(concept_preferred_term='Y')

        # record_preferred_term='Y'
        q_record_preferred_term = Q(record_preferred_term='Y')

        # status
        # NOTE(review): q_filter_status is built here but the code below
        # applies the status filter directly with .filter(status=...)
        if self.actions['filter_status']:
            q_filter_status = Q(status=self.actions['filter_status'])

        # Term
        # AND performance for Term ------------------------------------------------------------------------
        # Do the initial search in term_string field
        if self.actions['s'] and not self.actions['filter_fields']:
            object_list = TermListQualif.objects.filter( q_term_string ).filter(term_thesaurus=self.actions['choiced_thesaurus']).exclude(status=-3).order_by('term_string')
        else:
            # bring all registers
            object_list = TermListQualif.objects.all().filter(term_thesaurus=self.actions['choiced_thesaurus']).exclude(status=-3).order_by('term_string')

        # term_string
        if self.actions['filter_fields'] == 'term_string' and self.actions['s']:
            object_list = TermListQualif.objects.filter( q_term_string ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')

        # status
        if self.actions['filter_status']:
            object_list = object_list.filter(status=self.actions['filter_status'])

        # language
        if self.actions['filter_language']:
            object_list = object_list.filter(language_code=self.actions['filter_language'])

        # Concept
        # AND performance for Concept ------------------------------------------------------------------------
        # when concept_preferred_term='Y' & record_preferred_term='Y'
        if self.actions['filter_fields'] == 'concept':
            object_list = TermListQualif.objects.filter( q_term_string & q_concept_preferred_term & q_record_preferred_term ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])

        # Abbreviation
        # AND performance for Abbreviation --------------------------------------------------------------
        if self.actions['filter_fields'] == 'abbreviation':
            id_register = IdentifierQualif.objects.filter(abbreviation=self.actions['s'].strip()).values('id')
            id_concept = IdentifierConceptListQualif.objects.filter(identifier_id=id_register,preferred_concept='Y').distinct().values('id')
            q_id_concept = Q(identifier_concept_id__in=id_concept)
            object_list = TermListQualif.objects.filter( q_concept_preferred_term & q_record_preferred_term & q_id_concept ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])

        # MESH Qualifier UI
        # AND performance for MESH Qualifier UI --------------------------------------------------------------
        if self.actions['filter_fields'] == 'qualifier_ui':
            id_register = IdentifierQualif.objects.filter(qualifier_ui=self.actions['s'].strip()).values('id')
            id_concept = IdentifierConceptListQualif.objects.filter(identifier_id=id_register,preferred_concept='Y').distinct().values('id')
            q_id_concept = Q(identifier_concept_id__in=id_concept)
            object_list = TermListQualif.objects.filter( q_concept_preferred_term & q_record_preferred_term & q_id_concept ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])

        # DeCS Qualifier UI
        # AND performance for DeCS Qualifier UI --------------------------------------------------------------
        if self.actions['filter_fields'] == 'decs_code':
            id_register = IdentifierQualif.objects.filter(decs_code=self.actions['s'].strip()).values('id')
            id_concept = IdentifierConceptListQualif.objects.filter(identifier_id=id_register,preferred_concept='Y').distinct().values('id')
            q_id_concept = Q(identifier_concept_id__in=id_concept)
            object_list = TermListQualif.objects.filter( q_concept_preferred_term & q_record_preferred_term & q_id_concept ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])

        # Tree Number
        # AND performance for Tree Number --------------------------------------------------------------
        if self.actions['filter_fields'] == 'tree_number':
            if self.actions['exact']:
                id_tree_number = TreeNumbersListQualif.objects.filter(tree_number=self.actions['s'].strip()).values('identifier_id')
            else:
                id_tree_number = TreeNumbersListQualif.objects.filter(tree_number__icontains=self.actions['s'].strip()).values('identifier_id')
            id_concept = IdentifierConceptListQualif.objects.filter(identifier_id__in=id_tree_number,preferred_concept='Y').distinct().values('id')
            q_id_concept = Q(identifier_concept_id__in=id_concept)
            object_list = TermListQualif.objects.filter( q_concept_preferred_term & q_record_preferred_term & q_id_concept ).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')

        # Concept UI
        # AND performance for Concept UI --------------------------------------------------------------
        if self.actions['filter_fields'] == 'concept_ui':
            concept_identifier_id = IdentifierConceptListQualif.objects.filter(concept_ui=self.actions['s'].strip()).values('identifier_id')
            id_register = IdentifierQualif.objects.filter(id__in=concept_identifier_id,thesaurus_id=self.actions['choiced_thesaurus']).values('id')
            concept_id = IdentifierConceptListQualif.objects.filter(identifier_id=id_register,concept_ui=self.actions['s'].strip()).values('id')
            object_list = TermListQualif.objects.filter(identifier_concept_id=concept_id).filter(term_thesaurus=self.actions['choiced_thesaurus']).order_by('term_string')
            # status
            if self.actions['filter_status']:
                object_list = object_list.filter(status=self.actions['filter_status'])
            # language
            if self.actions['filter_language']:
                object_list = object_list.filter(language_code=self.actions['filter_language'])

        # order performance -------------------------------------------------------------------------------------
        # NOTE(review): ordering is only re-applied when order == "-"
        # (descending); ascending relies on the order_by('term_string') above
        if self.actions['order'] == "-":
            object_list = object_list.order_by("%s%s" % (self.actions["order"], self.actions["orderby"]))

        # if self.actions['visited'] != 'ok':
        #     object_list = object_list.none()

        return object_list

    def get_context_data(self, **kwargs):
        context = super(QualifListView, self).get_context_data(**kwargs)
        context['actions'] = self.actions
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        # Last 10 created / altered terms of this thesaurus, oldest first
        context['last_created_objects_list'] = TermListQualif.objects.filter(term_thesaurus=self.request.GET.get("ths")).exclude(status=-3).exclude(status=3).exclude(status=5).exclude(date_created__isnull=True).order_by('-date_created','-id')[:10][::-1]
        context['last_altered_objects_list'] = TermListQualif.objects.filter(term_thesaurus=self.request.GET.get("ths")).exclude(status=-3).exclude(date_altered__isnull=True).order_by('-date_altered','-id')[:10][::-1]
        return context
# FORM 2
# Cria conceito e termo
class QualifConceptTermUpdate(LoginRequiredView):
"""
Used as class view to create ConceptTermUpdate
"""
model = IdentifierConceptListQualif
form_class = IdentifierConceptListQualifForm
template_name = 'thesaurus/qualifier_form_step2.html'
def form_valid(self, form):
formset_concept = ConceptListQualifFormSet(self.request.POST, instance=self.object)
formset_term = TermListQualifFormSet(self.request.POST, instance=self.object)
form_valid = form.is_valid()
formset_concept_valid = formset_concept.is_valid()
formset_term_valid = formset_term.is_valid()
if (form_valid and formset_concept_valid and formset_term_valid):
# Brings form variables to check if it already exists
term_string = self.request.POST.get("termqualif-0-term_string")
language_code = self.request.POST.get("termqualif-0-language_code")
term_thesaurus = self.request.GET.get("ths")
# Se existirem essas variaveis não deverá ser realizado a verificação de existência pois nesse caso
# será forçado a criação de novo registro, e o registro antigo será alterado seu status
if self.request.GET.get("term_ui_alter") and self.request.GET.get("term_id_alter"):
self.object = form.save()
# Get thesaurus_acronym to create new ID format to concept_ui field
self.object = form.save(commit=False)
zseq = str(self.object.id).zfill(8) # preenche zeros a esquerda
self.object.concept_ui = 'FQ' + zseq
self.object = form.save(commit=True)
formset_concept.instance = self.object
formset_concept.save()
formset_term.instance = self.object
formset_term.save()
# Bring the choiced language_code from the first form
registry_language = formset_term.cleaned_data[0].get('language_code')
# Update the created term_ui with a old content - term_ui_alter
try:
created_id = int(TermListQualif.objects.latest('id').id)
update_field = TermListQualif.objects.get(id=created_id)
update_field.term_ui = self.request.GET.get("term_ui_alter")
# descobre id do conceito que o termo antigo pertence
identifier_concept_id = TermListQualif.objects.filter(id=self.request.GET.get("term_id_alter")).values('identifier_concept_id')
identifier_concept_id = identifier_concept_id[0].get('identifier_concept_id')
# descobre concept_ui
concept_ui_origem = IdentifierConceptListQualif.objects.filter(id=identifier_concept_id).values('concept_ui')
concept_ui_origem = concept_ui_origem[0].get('concept_ui')
# coleta informação do histórico do term antigo
historical_annotation_old=TermListQualif.objects.filter(id=self.request.GET.get("term_id_alter")).values('historical_annotation')
if len(historical_annotation_old) > 0:
historical_annotation_old=historical_annotation_old[0].get('historical_annotation')
historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - received from ' + concept_ui_origem
historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
else:
historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - received from ' + concept_ui_origem
historical_annotation_new=historical_annotation_now.encode('utf-8')
update_field.historical_annotation = historical_annotation_new
update_field.save()
except TermListQualif.DoesNotExist:
print 'Warning! Does not exist id to this Term'
# Update old term register, status and historical_annotation - term_id_alter
try:
# Busca informação do concept_ui do novo termo
created_id = int(TermListQualif.objects.latest('id').id)
# descobre id do conceito que o termo antigo pertence
identifier_concept_id = TermListQualif.objects.filter(id=created_id).values('identifier_concept_id')
identifier_concept_id = identifier_concept_id[0].get('identifier_concept_id')
# descobre concept_ui
concept_ui_destino = IdentifierConceptListQualif.objects.filter(id=identifier_concept_id).values('concept_ui')
concept_ui_destino = concept_ui_destino[0].get('concept_ui')
# coleta informação do histórico do term antigo
historical_annotation_old=TermListQualif.objects.filter(id=self.request.GET.get("term_id_alter")).values('historical_annotation')
if len(historical_annotation_old) > 0:
historical_annotation_old=historical_annotation_old[0].get('historical_annotation')
historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - sent to ' + concept_ui_destino
historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
else:
historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - sent to ' + concept_ui_origem
historical_annotation_new=historical_annotation_now.encode('utf-8')
update_field = TermListQualif.objects.get(id=self.request.GET.get("term_id_alter"))
update_field.status = '-3'
update_field.historical_annotation = historical_annotation_new
update_field.save()
except TermListQualif.DoesNotExist:
print 'Warning! Does not exist id to this Term'
form.save()
return HttpResponseRedirect(self.get_success_url())
else:
has_term = TermListQualif.objects.filter(
term_string__exact=term_string,
language_code=language_code,
term_thesaurus=term_thesaurus,
status=1,
).exists()
if not has_term:
self.object = form.save()
# Get thesaurus_acronym to create new ID format to concept_ui field
self.object = form.save(commit=False)
zseq = str(self.object.id).zfill(8) # preenche zeros a esquerda
self.object.concept_ui = 'FQ' + zseq
self.object = form.save(commit=True)
formset_concept.instance = self.object
formset_concept.save()
formset_term.instance = self.object
formset_term.save()
# Bring the choiced language_code from the first form
registry_language = formset_term.cleaned_data[0].get('language_code')
# Get thesaurus_acronym to create new ID format to term_ui field
try:
acronym = Thesaurus.objects.filter(id=self.request.GET.get("ths")).values('thesaurus_acronym')
acronym = acronym[0].get('thesaurus_acronym')
except Thesaurus.DoesNotExist:
id_thesaurus = str(self.object.id)
print 'Warning! - No thesaurus_acronym for id -->',id_thesaurus
acronym = ''
# Update term_ui with a new format
try:
ths = self.request.GET.get("ths")
try:
seq = code_controller_term.objects.get(thesaurus=self.request.GET.get("ths"))
nseq = str(int(seq.sequential_number) + 1)
seq.sequential_number = nseq
seq.save()
except code_controller_term.DoesNotExist:
seq = code_controller_term(sequential_number=1,thesaurus=ths)
nseq = 1
seq.save()
created_id = int(TermListQualif.objects.latest('id').id)
update_field = TermListQualif.objects.get(id=created_id)
# substitui idioma do sistema por sigla de 3 letras
if registry_language == 'en':
language_3letters = 'eng'
if registry_language == 'es':
language_3letters = 'spa'
if registry_language == 'pt-br':
language_3letters = 'por'
if registry_language == 'fr':
language_3letters = 'fre'
if registry_language == 'es-es':
language_3letters = 'spa'
# preenche zeros a esquerda
zseq = str(nseq).zfill(6)
update_field.term_ui = language_3letters + 'q' + zseq
update_field.save()
except TermListQualif.DoesNotExist:
print 'Warning! Does not exist id to this Term'
form.save()
return HttpResponseRedirect(self.get_success_url())
else:
msg_erro = _("This Concept already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
return self.render_to_response(self.get_context_data(
form=form,
formset_concept=formset_concept,
formset_term=formset_term,
msg_erro=msg_erro,
))
else:
return self.render_to_response(
self.get_context_data(
form=form,
formset_concept=formset_concept,
formset_term=formset_term,
)
)
def get_context_data(self, **kwargs):
    """Add thesaurus name, UI language, latest register id and (on GET) the formsets."""
    ctx = super(QualifConceptTermUpdate, self).get_context_data(**kwargs)
    ths_id = self.request.GET.get("ths")
    ctx['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=ths_id)
    ctx['language_system'] = get_language()
    # Id of the most recent IdentifierQualif register, or 1 when none exist
    # (presumably shown by the template as the next id -- TODO confirm).
    if IdentifierQualif.objects.count() > 0:
        ctx['next_id'] = int(IdentifierQualif.objects.latest('id').id)
    else:
        ctx['next_id'] = 1
    # Unbound formsets are only needed when rendering the edit form (GET);
    # on POST the bound formsets are built in form_valid.
    if self.request.method == 'GET':
        ctx['formset_concept'] = ConceptListQualifFormSet(instance=self.object)
        ctx['formset_term'] = TermListQualifFormSet(instance=self.object)
    return ctx
class QualifCreateView2(QualifConceptTermUpdate, CreateView):
    """
    Used as class view to create qualifier
    """

    def dispatch(self, *args, **kwargs):
        """
        Permission gate: users outside center 'BR1.1' may only act on a
        thesaurus whose registered scope matches one of their service roles.
        Returns HttpResponseForbidden when access is denied.
        """
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Bring the thesaurus id from the environment; "thesaurus" takes
            # precedence over "ths" (same truthiness test as the GET checks
            # the original code performed).
            environment_thesaurus_id = self.request.GET.get("thesaurus") or self.request.GET.get("ths")
            access_status = False
            # Fix: previously a missing "thesaurus"/"ths" parameter left
            # environment_thesaurus_id unbound and the comparison below raised
            # NameError (HTTP 500). Now the request is simply denied (403).
            if environment_thesaurus_id is not None:
                # Compare each service role of the user with the scope of every
                # registered thesaurus; grant access only when a role matches
                # the thesaurus currently selected in the environment.
                ids_thesaurus = Thesaurus.objects.all().values('id', 'thesaurus_scope')
                for user_service in user_role:
                    for elem in ids_thesaurus:
                        id_thesaurus = elem.get('id')
                        thesaurus_scope = elem.get('thesaurus_scope')
                        if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                            access_status = True
            if not access_status:
                return HttpResponseForbidden()
        return super(QualifCreateView2, self).dispatch(*args, **kwargs)

    def get_success_url(self):
        """Redirect to the view page of the first term of the created concept."""
        id_concept = self.object.id
        # Search ID of the first term of this concept to redirect
        terms_of_concept = TermListQualif.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/qualifiers/view/%s%s' % ( id_term, ths )
# Looks up the register ID in order to discover the ID of the destination concept
class ConceptListQualifView(LoginRequiredView, ListView):
    """
    List descriptor records (used by relationship popup selection window)
    """
    template_name = "thesaurus/search_concept_qualif.html"
    context_object_name = "registers"
    paginate_by = ITEMS_PER_PAGE

    def get_queryset(self):
        """Resolve the searched qualifier_ui ('s' action) to at most one concept row."""
        lang_code = get_language()
        # NOTE(review): object_list starts as a plain list; the .order_by()/.none()
        # calls below assume a QuerySet -- confirm 's' is always present when they run.
        object_list = []
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        if self.actions['choiced_concept_identifier_id']:
            concept_identifier_id = self.actions['choiced_concept_identifier_id']
        if self.actions['s']:
            try:
                id_registro = IdentifierQualif.objects.filter(qualifier_ui=self.actions['s'].strip(),thesaurus=self.request.GET.get("ths")).values('id')
                if len(id_registro)>0:
                    id_registro = id_registro[0].get('id')
                    # Force only 1 result
                    object_list = IdentifierConceptListQualif.objects.filter(identifier_id=id_registro).values('identifier_id','termqualif__term_string','termqualif__language_code','termqualif__id')[:1]
            except IdentifierQualif.DoesNotExist:
                # order performance -------------------------------------------------------------------------------------
                if self.actions['order'] == "-":
                    object_list = object_list.order_by("%s%s" % (self.actions["order"], self.actions["orderby"]))
        if self.actions['visited'] != 'ok':
            object_list = object_list.none()
        return object_list

    def get_context_data(self, **kwargs):
        """Expose the matched IdentifierQualif register and the concept id chosen by the caller."""
        context = super(ConceptListQualifView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        context['actions'] = self.actions
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        if self.actions['s']:
            try:
                # Force only 1 result
                id_registro = IdentifierQualif.objects.filter(qualifier_ui=self.actions['s'].strip()).values('id')[:1]
                if len(id_registro)>0:
                    id_registro = id_registro[0].get('id')
                    # IdentifierQualif
                    context['id_register_objects'] = IdentifierQualif.objects.filter(
                        id=id_registro,
                    ).values(
                        # IdentifierQualif
                        'id',
                        'thesaurus',
                        'qualifier_ui',
                        'decs_code',
                        'external_code',
                        'date_created',
                        'date_revised',
                        'date_established',
                        'abbreviation',
                    )
                context['identifier_concept_id'] = self.actions['choiced_concept_identifier_id']
            except IdentifierQualif.DoesNotExist:
                context['identifier_concept_id'] = self.actions['choiced_concept_identifier_id']
        return context
def ConceptListQualifModification(request,term_id, ths, concept_ori):
    """
    Move concept `concept_ori` into the qualifier register that owns the term
    `term_id`, then redirect back to the term view.

    The moved concept becomes a non-preferred ('NRW') concept of the destination
    register; if it was the preferred concept of its origin register, the first
    non-preferred sibling concept (if any) is promoted in its place.
    """
    # Find out the concept id of the destination term
    id_concept_destino = TermListQualif.objects.filter(id=term_id).values('identifier_concept_id')
    id_concept_destino = id_concept_destino[0].get('identifier_concept_id')
    identifier_id_destino = IdentifierConceptListQualif.objects.filter(id=id_concept_destino).values('identifier_id')
    identifier_id_destino = identifier_id_destino[0].get('identifier_id')
    # Check whether the origin concept is preferred; if so, the next
    # non-preferred concept must take over the preference
    check_preferred_concept_origem = IdentifierConceptListQualif.objects.filter(id=concept_ori).values('preferred_concept')
    check_preferred_concept_origem = check_preferred_concept_origem[0].get('preferred_concept')
    # Since the moved concept will be non-preferred at the destination,
    # record_preferred_term must be forced to 'N' on all of its terms
    TermListQualif.objects.filter(identifier_concept_id=concept_ori).update(record_preferred_term='N')
    if check_preferred_concept_origem == 'Y':
        # If the origin concept has siblings and was the preferred one,
        # the second concept takes over the preference
        check_concept_id_origem = IdentifierConceptListQualif.objects.filter(id=concept_ori).values('identifier_id')
        check_concept_id_origem = check_concept_id_origem[0].get('identifier_id')
        check_concept_id_origem = IdentifierConceptListQualif.objects.filter(identifier_id=check_concept_id_origem).values('identifier_id')
        if len(check_concept_id_origem) > 1:
            # Find the id of the first non-preferred record
            # NOTE(review): check_concept_id_origem is a values() queryset here,
            # so Django resolves it as a subquery -- confirm intended.
            check_concept_id_not_preferred = IdentifierConceptListQualif.objects.filter(identifier_id=check_concept_id_origem,preferred_concept='N').values('id')
            check_concept_id_not_preferred = check_concept_id_not_preferred[0].get('id')
            # Promote it to preferred, clearing the relation name
            IdentifierConceptListQualif.objects.filter(id=check_concept_id_not_preferred).update(concept_relation_name='',preferred_concept='Y')
            # Terms preferred in the concept must also become preferred in the record
            TermListQualif.objects.filter(identifier_concept_id=check_concept_id_not_preferred, concept_preferred_term='Y').update(record_preferred_term='Y')
    # Update the history field recording which register the concept came from
    identifier_id_ori = IdentifierConceptListQualif.objects.filter(id=concept_ori).values('identifier_id')
    identifier_id_ori = identifier_id_ori[0].get('identifier_id')
    qualifier_ui_ori = IdentifierQualif.objects.filter(id=identifier_id_ori).values('qualifier_ui')
    qualifier_ui_ori = qualifier_ui_ori[0].get('qualifier_ui')
    # Check whether a historical annotation already exists
    has_hist=IdentifierConceptListQualif.objects.filter(id=concept_ori).exclude(historical_annotation__isnull=True).exclude(historical_annotation='').values('id','historical_annotation')
    if len(has_hist)>0:
        historical_annotation_old=has_hist[0].get('historical_annotation')
        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', received from ' + str(qualifier_ui_ori)
        historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
    else:
        historical_annotation_new=datetime.datetime.now().strftime('%Y-%m-%d') + ', received from ' + str(qualifier_ui_ori)
    # Reattach the concept to the destination register as a non-preferred 'NRW'
    # concept, recording the move in the historical annotation
    IdentifierConceptListQualif.objects.filter(id=concept_ori).update(identifier_id=identifier_id_destino,concept_relation_name='NRW',preferred_concept='N', historical_annotation=historical_annotation_new)
    url = '/thesaurus/qualifiers/view/' + term_id + '?ths=' + ths
    return HttpResponseRedirect(url)
# Searches a concept in order to bring the register ID for a new concept
# Not being used for now
class TermListQualifView(LoginRequiredView, ListView):
    """
    List descriptor records (used by relationship popup selection window)
    """
    template_name = "thesaurus/search_term_qualif.html"
    context_object_name = "registers"
    paginate_by = ITEMS_PER_PAGE

    def get_queryset(self):
        """Resolve the searched concept_ui ('s' action) to at most one concept row of the current thesaurus."""
        lang_code = get_language()
        # NOTE(review): object_list starts as a plain list; the .order_by()/.none()
        # calls below assume a QuerySet -- confirm 's' is always present when they run.
        object_list = []
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        if self.actions['choiced_concept_identifier_id']:
            concept_identifier_id = self.actions['choiced_concept_identifier_id']
        if self.actions['s']:
            try:
                # The same concept_ui may exist in more than one thesaurus, so find
                # which IdentifierQualif id belongs to the thesaurus in question
                concepts = IdentifierConceptListQualif.objects.filter(concept_ui=self.actions['s'].strip()).values('identifier_id')
                for x in concepts:
                    id_identifier = x.get('identifier_id')
                    has_register = IdentifierQualif.objects.filter(id=id_identifier,thesaurus_id=self.request.GET.get("ths")).exists()
                    if has_register:
                        # Force only 1 result
                        object_list = IdentifierConceptListQualif.objects.filter(concept_ui=self.actions['s'].strip(),identifier_id=id_identifier).values('identifier_id','termqualif__term_string','termqualif__language_code','termqualif__id')[:1]
            except IdentifierConceptListQualif.DoesNotExist:
                # order performance -------------------------------------------------------------------------------------
                if self.actions['order'] == "-":
                    object_list = object_list.order_by("%s%s" % (self.actions["order"], self.actions["orderby"]))
        if self.actions['visited'] != 'ok':
            object_list = object_list.none()
        return object_list

    def get_context_data(self, **kwargs):
        """Expose the matched concept register and the concept id chosen by the caller."""
        context = super(TermListQualifView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        context['actions'] = self.actions
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        if self.actions['s']:
            try:
                concepts = IdentifierConceptListQualif.objects.filter(concept_ui=self.actions['s'].strip()).values('identifier_id')
                for x in concepts:
                    id_identifier = x.get('identifier_id')
                    # Keep only the identifier that belongs to the current thesaurus
                    has_register = IdentifierQualif.objects.filter(id=id_identifier,thesaurus_id=self.request.GET.get("ths")).exists()
                    if has_register:
                        # IdentifierQualif
                        context['id_register_objects'] = IdentifierConceptListQualif.objects.filter(
                            concept_ui=self.actions['s'].strip(),identifier_id=id_identifier
                        ).values(
                            # IdentifierConceptListDesc
                            'id',
                            'concept_ui',
                        )
                context['identifier_concept_id'] = self.actions['choiced_concept_identifier_id']
            except IdentifierQualif.DoesNotExist:
                context['identifier_concept_id'] = self.actions['choiced_concept_identifier_id']
        return context
def TermListQualifModification(request,term_id, ths, term_ori):
    """
    Move term `term_ori` into the concept that owns term `term_id`, keeping a
    migration trail in the historical annotations of both sides, then redirect
    back to the origin term view.
    """
    # Find out the identifier_concept_id of the destination term
    id_concept_destino = TermListQualif.objects.filter(id=term_id).values('identifier_concept_id')
    # Find out the identifier_id of the destination concept
    # NOTE(review): id_concept_destino is still a values() queryset at this
    # point (resolved by Django as a subquery); it is unpacked just below --
    # confirm intended.
    identifier_id_destino = IdentifierConceptListQualif.objects.filter(id=id_concept_destino).values('identifier_id')
    identifier_id_destino = identifier_id_destino[0].get('identifier_id')
    id_concept_destino = id_concept_destino[0].get('identifier_concept_id')
    # Find out the identifier_concept_id of the origin term
    term_origem_values = TermListQualif.objects.filter(id=term_ori).values('identifier_concept_id','term_ui')
    id_concept_origem = term_origem_values[0].get('identifier_concept_id')
    term_ui_origem = term_origem_values[0].get('term_ui')
    qtd_id_concept_origem = TermListQualif.objects.filter(identifier_concept_id=id_concept_origem)
    if len(qtd_id_concept_origem) == 1:
        # When the concept has only this term, the whole concept is repointed:
        # IdentifierConceptListQualif.identifier_id receives the identifier_id
        # of the destination register, as a non-preferred 'NRW' concept
        IdentifierConceptListQualif.objects.filter(id=id_concept_origem).update(identifier_id=identifier_id_destino,concept_relation_name='NRW',preferred_concept='N')
    # Update origin and destination term information
    # Prepare origin history information
    concept_ui_origem = IdentifierConceptListQualif.objects.filter(id=id_concept_origem).values('concept_ui')
    concept_ui_origem = concept_ui_origem[0].get('concept_ui')
    historical_annotation_old=TermListQualif.objects.filter(id=term_ori).values('id','historical_annotation')
    historical_annotation_old=historical_annotation_old[0].get('historical_annotation')
    # Keep a copy for the destination history
    historical_annotation_old_origem=historical_annotation_old
    # Prepare destination history information
    concept_ui_destino = IdentifierConceptListQualif.objects.filter(id=id_concept_destino).values('concept_ui')
    concept_ui_destino = concept_ui_destino[0].get('concept_ui')
    historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', sent to ' + concept_ui_destino
    historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
    # Update origin history; status -3 marks the term as migrated away
    TermListQualif.objects.filter(id=term_ori).update(status=-3,historical_annotation=historical_annotation_new, date_altered=datetime.datetime.now().strftime('%Y-%m-%d'))
    # Look for a previously-migrated copy of this term at the destination
    # (same origin term_ui, same string, status -3)
    new_term=TermListQualif.objects.filter(id=term_ori).values('status','term_ui','language_code','term_string','concept_preferred_term','is_permuted_term','lexical_tag','record_preferred_term','entry_version','date_created','date_altered','historical_annotation','term_thesaurus','identifier_concept_id',)
    term_ui_ori=new_term[0].get('term_ui')
    term_string_ori=new_term[0].get('term_string').encode('utf-8')
    exist_term=TermListQualif.objects.filter(status=-3, term_ui=term_ui_ori, term_string=term_string_ori, identifier_concept_id=id_concept_destino).values('id','historical_annotation')
    if len(exist_term) > 0:
        term_id_exist=exist_term[0].get('id')
        historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', received from ' + concept_ui_origem
        historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
        # Reactivate the existing destination term and record its provenance
        TermListQualif.objects.filter(id=term_id_exist).update(status='1',concept_preferred_term='N',is_permuted_term='N',record_preferred_term='N',historical_annotation=historical_annotation_new, date_altered=datetime.datetime.now().strftime('%Y-%m-%d'))
    else:
        # Create a new, non-preferred term entry under the destination concept
        item = TermListQualif.objects.create(
            status='1',
            term_ui=new_term[0].get('term_ui'),
            language_code=new_term[0].get('language_code'),
            term_string=new_term[0].get('term_string'),
            concept_preferred_term='N',
            is_permuted_term='N',
            lexical_tag=new_term[0].get('lexical_tag'),
            record_preferred_term='N',
            entry_version=new_term[0].get('entry_version'),
            date_created=new_term[0].get('date_created'),
            date_altered=datetime.datetime.now().strftime('%Y-%m-%d'),
            historical_annotation=datetime.datetime.now().strftime('%Y-%m-%d') + ', received from ' + concept_ui_origem + ';' + historical_annotation_old_origem,
            term_thesaurus=new_term[0].get('term_thesaurus'),
            identifier_concept_id=id_concept_destino,
        )
    url = '/thesaurus/qualifiers/view/' + term_ori + '?ths=' + ths
    return HttpResponseRedirect(url)
class TermCreateQualifConfirm(LoginRequiredView, TemplateView):
    """Confirmation page shown before a qualifier term is created."""
    template_name = 'thesaurus/confirm_create_qualif.html'

    def get_context_data(self, **kwargs):
        """Expose the name of the thesaurus selected via the ``ths`` GET parameter."""
        ctx = super(TermCreateQualifConfirm, self).get_context_data(**kwargs)
        ths_id = self.request.GET.get("ths")
        names = Thesaurus.objects.filter(id=ths_id).values('thesaurus_name')
        ctx['thesaurus_name'] = names[0].get('thesaurus_name')
        return ctx
def TermCreateQualifDo(request, ths):
    """
    Redirect to the qualifier creation form, forwarding the term data
    (term_string, language_code, term_ui, term_id) received via GET.
    """
    try:
        from urllib import urlencode          # Python 2
    except ImportError:
        from urllib.parse import urlencode    # Python 3
    # URL-encode the forwarded values: a term containing '&', '=', '%' or
    # spaces would otherwise corrupt the query string of the redirect target,
    # and a missing parameter (None) would raise on string concatenation.
    params = urlencode([
        ('ths', ths),
        ('term', request.GET.get("term_string") or ''),
        ('language_code', request.GET.get("language_code") or ''),
        ('term_ui', request.GET.get("term_ui") or ''),
        ('term_id', request.GET.get("term_id") or ''),
    ])
    return redirect('/thesaurus/qualifiers/new/?' + params)
class ConceptCreateQualifConfirm(LoginRequiredView, ListView):
    """
    Used to verify if already exist the term
    """
    template_name = "thesaurus/confirm_create_register_qualif.html"
    context_object_name = "registers"

    def get_queryset(self):
        """No real queryset: the view only collects the action parameters."""
        object_list = []
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        return object_list

    def get_context_data(self, **kwargs):
        context = super(ConceptCreateQualifConfirm, self).get_context_data(**kwargs)
        context['choiced_thesaurus'] = self.request.GET.get("choiced_thesaurus")
        context['choiced_abbreviation'] = self.request.GET.get("abbreviation")
        return context

    def render_to_response(self, context):
        """
        When term_string/filter_language are present, turn the given concept
        into a brand-new qualifier register (new qualifier_ui, decs_code and
        abbreviation) and redirect; otherwise render the confirmation page.
        """
        # getting action parameter
        self.actions = {}
        for key in ACTIONS.keys():
            self.actions[key] = self.request.GET.get(key, ACTIONS[key])
        if self.actions['term_string'] and self.actions['filter_language']:
            ths = self.actions['choiced_thesaurus']
            term_string = self.actions['term_string']
            language_code = self.actions['filter_language']
            concept_id = self.request.GET.get("concept_id")
            term_id = self.request.GET.get("term_id")
            created_by = self.request.GET.get("created_by")
            abbreviation = self.request.GET.get("abbreviation").upper()
            thesaurus_name = self.request.GET.get("choiced_thesaurus_name")
            # The abbreviation+thesaurus combination must be unique; if it
            # already exists the creation is refused
            has_abbreviation = IdentifierQualif.objects.filter(abbreviation=abbreviation,thesaurus=ths).exists()
            if not has_abbreviation:
                # Find out the qualifier_ui of the origin register
                identifier_id_ori = IdentifierConceptListQualif.objects.filter(id=concept_id).values('identifier_id')
                identifier_id_ori = identifier_id_ori[0].get('identifier_id')
                qualifier_ui_ori = IdentifierQualif.objects.filter(id=identifier_id_ori).values('qualifier_ui')
                qualifier_ui_ori = qualifier_ui_ori[0].get('qualifier_ui')
                # Check whether a historical annotation already exists
                has_hist=IdentifierConceptListQualif.objects.filter(id=concept_id).values('historical_annotation')
                if has_hist:
                    historical_annotation_old=has_hist[0].get('historical_annotation')
                    historical_annotation_now=datetime.datetime.now().strftime('%Y-%m-%d') + ', turned into record - received from ' + str(qualifier_ui_ori)
                    historical_annotation_new=historical_annotation_now.encode('utf-8') + ';' + historical_annotation_old.encode('utf-8')
                # NOTE(review): if has_hist were empty, historical_annotation_new
                # would be unbound when used below -- confirm has_hist can never
                # be empty for an existing concept_id.
                created_time=datetime.datetime.now().strftime('%Y-%m-%d')
                created_time = created_time.encode('utf-8')
                # Get sequential number to write to decs_code
                try:
                    seq = code_controller.objects.get(thesaurus=ths)
                    nseq = str(int(seq.sequential_number) + 1)
                    seq.sequential_number = nseq
                    seq.save()
                except code_controller.DoesNotExist:
                    # First register of this thesaurus: start the counter at 1
                    seq = code_controller(sequential_number=1,thesaurus=ths)
                    nseq = 1
                    seq.save()
                decs_code=nseq
                # Get thesaurus_acronym to create new ID format to descriptor_ui field
                try:
                    acronym = Thesaurus.objects.filter(id=ths).values('thesaurus_acronym')
                    # retrieve the acronym in upper case
                    acronym = str(acronym[0].get('thesaurus_acronym')).upper()
                    zseq = str(nseq).zfill(6) # left-pad with zeros
                    qualifier_ui = 'Q' + acronym + zseq
                except Thesaurus.DoesNotExist:
                    # NOTE(review): qualifier_ui stays unbound on this path and
                    # self.object is not set on a ListView -- verify this branch
                    # is reachable at all.
                    id_thesaurus = str(self.object.id)
                    print 'Warning! - No thesaurus_acronym for id -->',id_thesaurus
                add_reg = IdentifierQualif(qualifier_ui=qualifier_ui, decs_code=decs_code, abbreviation=abbreviation, date_created=created_time, created_by_id=created_by, thesaurus_id=ths)
                add_reg.save()
                # Find the last register inserted for this thesaurus
                last_id = IdentifierQualif.objects.filter(thesaurus_id=ths).order_by('id').last()
                # Repoint the concept to the new register, clear the relation
                # name, flag it as preferred and update the history
                # NOTE(review): last_id is a model instance assigned to the
                # identifier_id attribute -- verify Django coerces this as intended.
                update_field = IdentifierConceptListQualif.objects.get(id=concept_id)
                update_field.identifier_id = last_id
                update_field.concept_relation_name = ""
                update_field.preferred_concept = "Y"
                update_field.historical_annotation = historical_annotation_new
                update_field.save()
                # Terms elected preferred in the concept become preferred terms
                # of the new register
                update_registers = TermListQualif.objects.filter(identifier_concept_id=concept_id, concept_preferred_term='Y', record_preferred_term='N')
                if update_registers:
                    for upd in update_registers:
                        TermListQualif.objects.filter(id=str(upd)).update(record_preferred_term='Y')
                return redirect('/thesaurus/qualifiers/view/' + term_id + '?ths=' + ths)
            else:
                # Abbreviation already taken: bounce back to the confirmation form
                return redirect('/thesaurus/qualifiers/create/concept/confirm/?ths=' + ths + '&term_string=' + term_string + '&language_code=' + language_code + '&concept_id=' + concept_id + '&term_id=' + term_id + '&created_by=' + created_by + '&thesaurus_name=' + thesaurus_name + '&has_abbreviation=' + abbreviation )
        return super(ConceptCreateQualifConfirm, self).render_to_response(context)
class ConceptListQualifCreateView(LoginRequiredView, CreateView):
    """
    Used as class view to create concept and term of qualifier
    """
    model = IdentifierConceptListQualif
    template_name = 'thesaurus/qualifier_new_concept.html'
    form_class = IdentifierConceptListQualifForm

    def dispatch(self, *args, **kwargs):
        """Deny access unless one of the user's service roles matches the scope of the current thesaurus."""
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            # NOTE(review): environment_thesaurus_id stays unbound when neither
            # parameter is present -- the comparison below would raise NameError.
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Compare each service role of the user with the scope of every
            # registered thesaurus; grant access only when the matching
            # thesaurus is the one currently selected in the environment.
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(ConceptListQualifCreateView, self).dispatch(*args, **kwargs)

    def get_success_url(self):
        """Redirect to the view page of the first term of the new concept."""
        id_concept = self.object.id
        # Search ID of the first term of this concept to redirect
        terms_of_concept = TermListQualif.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/qualifiers/view/%s%s' % ( id_term, ths )

    def form_valid(self, form):
        """
        Save the new concept and its terms after checking for duplicates,
        then assign generated concept_ui / term_ui identifiers.
        NOTE(review): there is no else for the validity check below, so an
        invalid form falls through returning None -- confirm intended.
        """
        formset_concept = ConceptListQualifFormSet(self.request.POST, instance=self.object)
        formset_term = TermListQualifFormSet(self.request.POST, instance=self.object)
        form_valid = form.is_valid()
        formset_concept_valid = formset_concept.is_valid()
        formset_term_valid = formset_term.is_valid()
        if (form_valid and formset_concept_valid and formset_term_valid):
            # Brings form variables to check if it already exists
            term_string = self.request.POST.get("termqualif-0-term_string")
            language_code = self.request.POST.get("termqualif-0-language_code")
            term_thesaurus = self.request.GET.get("ths")
            # Search by draft record
            q_status_draft = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=-1,
            )
            # Search by published record
            q_status_published = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=1,
            )
            # Search by historical record
            q_status_historical = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=5,
            )
            has_term = TermListQualif.objects.filter( q_status_draft | q_status_published | q_status_historical ).values('term_string')
            # Compare the candidates byte by byte (the DB __exact match may be
            # case/accent-insensitive depending on collation)
            has_equal=''
            for term in has_term:
                t=term.get('term_string').encode('utf-8')
                if t == term_string.encode('utf-8'):
                    has_equal=t
            if not has_equal:
                self.object = form.save(commit=False)
                self.object.identifier_id = int(self.request.POST.get("identifier_id"))
                self.object = form.save(commit=True)
                formset_concept.instance = self.object
                formset_concept.save()
                formset_term.instance = self.object
                formset_term.save()
                # Bring the choiced language_code from the first form
                registry_language = formset_term.cleaned_data[0].get('language_code')
                form.save()
                # Update concept_ui with a new format
                try:
                    created_concept_id = int(IdentifierConceptListQualif.objects.latest('id').id)
                    update_concept_field = IdentifierConceptListQualif.objects.get(id=created_concept_id)
                    # left-pad with zeros
                    zseq = str(created_concept_id).zfill(8)
                    update_concept_field.concept_ui = 'FQ' + zseq
                    update_concept_field.save()
                except IdentifierConceptListQualif.DoesNotExist:
                    print 'Warning! Does not exist id to this Concept'
                # Update term_ui with a new format
                try:
                    ths = self.request.GET.get("ths")
                    try:
                        seq = code_controller_term.objects.get(thesaurus=self.request.GET.get("ths"))
                        nseq = str(int(seq.sequential_number) + 1)
                        seq.sequential_number = nseq
                        seq.save()
                    except code_controller_term.DoesNotExist:
                        # First term of this thesaurus: start the counter at 1
                        seq = code_controller_term(sequential_number=1,thesaurus=ths)
                        nseq = 1
                        seq.save()
                    created_id = int(TermListQualif.objects.latest('id').id)
                    update_field = TermListQualif.objects.get(id=created_id)
                    # replace the system language code by its 3-letter code
                    if registry_language == 'en':
                        language_3letters = 'eng'
                    if registry_language == 'es':
                        language_3letters = 'spa'
                    if registry_language == 'pt-br':
                        language_3letters = 'por'
                    if registry_language == 'fr':
                        language_3letters = 'fre'
                    if registry_language == 'es-es':
                        language_3letters = 'spa'
                    # left-pad with zeros
                    zseq = str(nseq).zfill(6)
                    update_field.term_ui = language_3letters + 'q' + zseq
                    update_field.save()
                except TermListQualif.DoesNotExist:
                    print 'Warning! Does not exist id to this Term'
                # Update created_date
                try:
                    created_id = int(TermListQualif.objects.latest('id').id)
                    update_date_created = TermListQualif.objects.get(id=created_id)
                    update_date_created.date_created = datetime.datetime.now().strftime('%Y-%m-%d')
                    update_date_created.save()
                except TermListQualif.DoesNotExist:
                    print 'Warning! Does not exist id to this Term'
                return HttpResponseRedirect(self.get_success_url())
            else:
                # Duplicate found: re-render the form with an error message
                msg_erro = _("This Concept already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
                return self.render_to_response(self.get_context_data(
                    form=form,
                    formset_concept=formset_concept,
                    formset_term=formset_term,
                    msg_erro=msg_erro,
                ))

    def get_context_data(self, **kwargs):
        context = super(ConceptListQualifCreateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        # Unbound formsets are only needed when rendering the form (GET)
        if self.request.method == 'GET':
            context['formset_concept'] = ConceptListQualifFormSet(instance=self.object)
            context['formset_term'] = TermListQualifFormSet(instance=self.object)
        return context
class ConceptListQualifUpdateView(LoginRequiredView, UpdateView):
    """
    Used as class view to update a concept of qualifier
    """
    model = IdentifierConceptListQualif
    template_name = 'thesaurus/qualifier_edit_concept.html'
    form_class = IdentifierConceptListQualifForm

    def dispatch(self, *args, **kwargs):
        """Deny access unless one of the user's service roles matches the scope of the current thesaurus."""
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            # NOTE(review): environment_thesaurus_id stays unbound when neither
            # parameter is present -- the comparison below would raise NameError.
            access_status=False
            # Create array with all registered thesaurus
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Compare each service role of the user with the scope of every
            # registered thesaurus; grant access only when the matching
            # thesaurus is the one currently selected in the environment.
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(ConceptListQualifUpdateView, self).dispatch(*args, **kwargs)

    def get_success_url(self):
        """Redirect to the term view page identified by the posted termqualif__id."""
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/qualifiers/view/%s%s' % ( int(self.request.POST.get("termqualif__id")), ths )

    def form_valid(self, form):
        """Save the concept and propagate its preference flag to its preferred terms."""
        formset_concept = ConceptListQualifFormSet(self.request.POST, instance=self.object)
        form_valid = form.is_valid()
        formset_concept_valid = formset_concept.is_valid()
        if (form_valid and formset_concept_valid):
            self.object = form.save(commit=False)
            self.object.identifier_id = int(self.request.POST.get("identifier_id"))
            formset_concept.instance = self.object
            formset_concept.save()
            form.save()
            # Terms elected preferred in the concept must follow the concept's
            # own preference flag at the record level
            check_preferred_concept = self.request.POST.get("preferred_concept")
            if check_preferred_concept == 'Y':
                TermListQualif.objects.filter(identifier_concept_id=self.object.id, concept_preferred_term='Y').update(record_preferred_term='Y')
            else:
                TermListQualif.objects.filter(identifier_concept_id=self.object.id, concept_preferred_term='Y').update(record_preferred_term='N')
            return HttpResponseRedirect(self.get_success_url())
        else:
            return self.render_to_response(self.get_context_data(
                form=form,
                formset_concept=formset_concept,
                )
            )

    def get_context_data(self, **kwargs):
        context = super(ConceptListQualifUpdateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        # The unbound formset is only needed when rendering the form (GET)
        if self.request.method == 'GET':
            context['formset_concept'] = ConceptListQualifFormSet(instance=self.object)
        return context
class TermListQualifCreateView(LoginRequiredView, CreateView):
    """
    Used as class view to create a term of a qualifier concept.

    The term fields arrive in POST; the thesaurus id travels in the ``ths``
    GET parameter.  The previous implementation duplicated the whole
    "check duplicates / save / assign term_ui" sequence verbatim in two
    branches of ``form_valid``; that common logic now lives in the private
    helpers ``_save_new_term``, ``_term_already_exists`` and
    ``_assign_term_ui``.
    """
    model = TermListQualif
    template_name = 'thesaurus/qualifier_new_term.html'
    form_class = TermListQualifUniqueForm

    def dispatch(self, *args, **kwargs):
        """Deny access unless one of the user's service roles covers the current thesaurus."""
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        # Users of the central unit 'BR1.1' bypass the scope check entirely.
        if user_cc != 'BR1.1':
            # Bring the thesaurus id from the environment ("thesaurus" wins over "ths").
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id = self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id = self.request.GET.get("ths")
            # NOTE(review): when neither parameter is present the name above is
            # never bound and the comparison below raises NameError - confirm
            # the URLs always carry one of the two parameters.
            access_status = False
            # All registered thesauri with their service scope.
            ids_thesaurus = Thesaurus.objects.all().values('id', 'thesaurus_scope')
            # Grant access when any of the user's service roles matches the
            # scope of the thesaurus currently selected in the environment.
            for role in user_role:
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if role == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status = True
            if access_status == False:
                return HttpResponseForbidden()
        return super(TermListQualifCreateView, self).dispatch(*args, **kwargs)

    def get_success_url(self):
        """Redirect to the view page of the newly created term, keeping ``ths``."""
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/qualifiers/view/%s%s' % (self.object.id, ths)

    def form_valid(self, form):
        """Reject conflicting preferred-term configurations, then create the term."""
        if form.is_valid():
            # Form variables used by the duplicate checks below.
            term_string = self.request.POST.get("term_string")
            language_code = self.request.POST.get("language_code")
            concept_preferred_term = self.request.POST.get("concept_preferred_term")
            record_preferred_term = self.request.POST.get("record_preferred_term")
            identifier_concept_id = self.request.POST.get("identifier_concept_id")
            term_thesaurus = self.request.GET.get("ths")
            # A concept may hold only one concept-preferred term per language
            # with the same record-preferred flag, so reject a second
            # configuration with identical flags.
            if concept_preferred_term == 'Y' and record_preferred_term in ('Y', 'N'):
                q_config = Q(
                    language_code=language_code,
                    term_thesaurus=term_thesaurus,
                    concept_preferred_term="Y",
                    record_preferred_term=record_preferred_term,
                    identifier_concept_id=identifier_concept_id,
                )
                if TermListQualif.objects.filter(q_config).values('id'):
                    msg_erro = _("This configuration already exists for this concept!") + ' -----> ' + term_string + ' (' + language_code + ')'
                    return self.render_to_response(self.get_context_data(
                        form=form,
                        msg_erro=msg_erro,
                    ))
            return self._save_new_term(form, term_string, language_code, term_thesaurus)

    def _term_already_exists(self, term_string, language_code, term_thesaurus):
        """Return True when an identical term string already exists.

        Status codes considered: -1 draft, 1 published, 5 historical.
        """
        candidates = TermListQualif.objects.filter(
            term_string__exact=term_string,
            language_code=language_code,
            term_thesaurus=term_thesaurus,
            status__in=[-1, 1, 5],
        ).values('term_string')
        # The DB collation may match case/accent-insensitively, so confirm
        # each hit with an exact byte comparison.
        target = term_string.encode('utf-8')
        for term in candidates:
            if term.get('term_string').encode('utf-8') == target:
                return True
        return False

    def _save_new_term(self, form, term_string, language_code, term_thesaurus):
        """Persist the new term (unless a duplicate exists) and assign its term_ui."""
        if self._term_already_exists(term_string, language_code, term_thesaurus):
            msg_erro = _("This Term already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
            return self.render_to_response(self.get_context_data(form=form, msg_erro=msg_erro))
        self.object = form.save(commit=False)
        # Default the creation date to today when the form left it blank.
        if not self.object.date_created:
            self.object.date_created = datetime.datetime.now().strftime('%Y-%m-%d')
        self.object.identifier_concept_id = self.request.POST.get("identifier_concept_id")
        form.save()
        self._assign_term_ui(self.request.POST.get("language_code"))
        return HttpResponseRedirect(self.get_success_url())

    def _assign_term_ui(self, registry_language):
        """Build the term_ui ('<lang3>q<seq zero-padded to 6>') from the per-thesaurus counter."""
        try:
            ths = self.request.GET.get("ths")
            try:
                # Bump the per-thesaurus sequential counter.
                seq = code_controller_term.objects.get(thesaurus=ths)
                nseq = str(int(seq.sequential_number) + 1)
                seq.sequential_number = nseq
                seq.save()
            except code_controller_term.DoesNotExist:
                # First term of this thesaurus: start the counter at 1.
                seq = code_controller_term(sequential_number=1, thesaurus=ths)
                nseq = 1
                seq.save()
            # NOTE(review): picking the latest() id assumes no concurrent
            # creation between form.save() and this lookup - confirm.
            created_id = int(TermListQualif.objects.latest('id').id)
            update_field = TermListQualif.objects.get(id=created_id)
            # Map the interface language code to the 3-letter code used in term_ui.
            language_map = {'en': 'eng', 'es': 'spa', 'pt-br': 'por', 'fr': 'fre', 'es-es': 'spa'}
            language_3letters = language_map[registry_language]
            zseq = str(nseq).zfill(6)
            update_field.term_ui = language_3letters + 'q' + zseq
            update_field.save()
        except TermListQualif.DoesNotExist:
            print('Warning! Does not exist id to this Term')

    def get_context_data(self, **kwargs):
        """Add the selected thesaurus name to the template context."""
        context = super(TermListQualifCreateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
class TermListQualifUpdateView(LoginRequiredView, UpdateView):
    """
    Used as class view to update a term.

    Besides saving the edited term, the previous term string and its
    preferred-term configuration are recorded in the term's
    ``historical_annotation`` field (ISIS-style subfields).
    """
    model = TermListQualif
    template_name = 'thesaurus/qualifier_edit_term.html'
    form_class = TermListQualifUniqueForm

    def get_success_url(self):
        # Redirect back to the term view page, keeping the "ths" parameter.
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/qualifiers/view/%s%s' % ( self.object.id, ths )

    def dispatch(self, *args, **kwargs):
        # Access control: users outside the central unit 'BR1.1' may only act
        # on thesauri whose scope matches one of their service roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment ("thesaurus" takes precedence).
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            # NOTE(review): if neither parameter is present the name above is
            # never bound and the comparison below raises NameError - confirm
            # the URLs always carry one of them.
            access_status=False
            # Create array with all registered thesauri and their scopes.
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Compare each service registered for the user with the service
            # registered in each thesaurus; access is granted only when a
            # matching scope also matches the thesaurus id in the environment.
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(TermListQualifUpdateView, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        """Validate uniqueness/configuration constraints, then save the term
        while appending the previous state to its historical annotation."""
        # Keep the currently stored values so the change can be recorded in
        # the term's history before it is overwritten.
        term_string_current = TermListQualif.objects.filter(id=self.object.id).values('term_string','concept_preferred_term','record_preferred_term','historical_annotation')
        for y in term_string_current:
            term_string_old=y.get('term_string')
            concept_preferred_term_old=y.get('concept_preferred_term')
            record_preferred_term_old=y.get('record_preferred_term')
            historical_annotation_old=y.get('historical_annotation')
        # Brings form variables to check if the new configuration already exists.
        term_string = self.request.POST.get("term_string")
        language_code = self.request.POST.get("language_code")
        concept_preferred_term = self.request.POST.get("concept_preferred_term")
        record_preferred_term = self.request.POST.get("record_preferred_term")
        identifier_concept_id = self.request.POST.get("identifier_concept_id")
        term_thesaurus = self.request.GET.get("ths")
        # Username of the editor, recorded in the history entry.
        user_data = additional_user_info(self.request)
        for user_name in user_data:
            username=user_data.get('user_name').encode('utf-8')
            break
        # ISIS-style history entry: ^d date, ^h previous string, ^u user, ^t tag.
        v998='^d' + datetime.datetime.now().strftime('%Y-%m-%d') + '^h' + term_string_old + '^u' + username + '^t'
        # Choose the ^t subfield tag from the PREVIOUS preferred flag and the
        # language: [01, 02, 03, 04, 16] for previously concept-preferred
        # terms, [51, 52, 53, 54, 516] otherwise.
        if concept_preferred_term_old == 'Y':
            if language_code == 'en':
                sub_t='01'
            if language_code == 'es':
                sub_t='02'
            if language_code == 'pt-br':
                sub_t='03'
            if language_code == 'es-es':
                sub_t='04'
            if language_code == 'fr':
                sub_t='16'
        else:
            if language_code == 'en':
                sub_t='51'
            if language_code == 'es':
                sub_t='52'
            if language_code == 'pt-br':
                sub_t='53'
            if language_code == 'es-es':
                sub_t='54'
            if language_code == 'fr':
                sub_t='516'
        # NOTE(review): for any other language_code, sub_t stays unbound and
        # the next line raises NameError - confirm language_code is always
        # one of the five values above.
        term_string_historical = v998 + sub_t
        if concept_preferred_term == 'Y' and record_preferred_term == 'Y':
            # Check whether this concept already has a concept-preferred +
            # record-preferred term in the same language.
            q_status_published = Q(
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                concept_preferred_term="Y",
                record_preferred_term="Y",
                identifier_concept_id=identifier_concept_id,
                # status=1,
            )
        if concept_preferred_term == 'Y' and record_preferred_term == 'N':
            # Same check for the concept-preferred / not record-preferred case.
            q_status_published = Q(
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                concept_preferred_term="Y",
                record_preferred_term="N",
                identifier_concept_id=identifier_concept_id,
                # status=1,
            )
        if ( concept_preferred_term == 'Y' and record_preferred_term == 'Y' ) or ( concept_preferred_term == 'Y' and record_preferred_term == 'N' ):
            # Another term (not this one) with the same configuration blocks the update.
            has_term_config = TermListQualif.objects.filter( q_status_published ).values('id').exclude(id=self.object.id,)
            if len(has_term_config) > 0:
                msg_erro = _("This configuration already exists for this concept!") + ' -----> ' + term_string + ' (' + language_code + ')'
                return self.render_to_response(self.get_context_data(
                    form=form,
                    msg_erro=msg_erro,
                ))
            else:
                # No configuration conflict: still make sure the edited string
                # does not collide with another existing term.
                # Status codes: -1 draft, 1 published, 5 historical.
                # Search by draft record
                q_status_draft = Q(
                    term_string__exact=term_string,
                    language_code=language_code,
                    term_thesaurus=term_thesaurus,
                    status=-1,
                )
                # Search by published record
                q_status_published = Q(
                    term_string__exact=term_string,
                    language_code=language_code,
                    term_thesaurus=term_thesaurus,
                    status=1,
                )
                # Search by historical record
                q_status_historical = Q(
                    term_string__exact=term_string,
                    language_code=language_code,
                    term_thesaurus=term_thesaurus,
                    status=5,
                )
                has_term = TermListQualif.objects.filter( q_status_draft | q_status_published | q_status_historical ).exclude(id=self.object.id).values('term_string')
                # Walk the hits and confirm equality byte-wise (the DB match
                # may be case/accent-insensitive).
                has_equal=''
                for term in has_term:
                    t=term.get('term_string').encode('utf-8')
                    if t == term_string.encode('utf-8'):
                        has_equal=t
                if has_equal:
                    msg_erro = _("This Term already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
                    return self.render_to_response(self.get_context_data(
                        form=form,
                        msg_erro=msg_erro,
                    ))
                else:
                    form_valid = form.is_valid()
                    if form_valid:
                        self.object = form.save(commit=False)
                        self.object.identifier_concept_id = self.request.POST.get("identifier_concept_id")
                        self.object.date_altered = datetime.datetime.now().strftime('%Y-%m-%d')
                        # Prepend the new history entry to any previous annotations.
                        term_string_historical = v998 + sub_t
                        if len(historical_annotation_old) > 0:
                            term_string_historical=term_string_historical + ';' + historical_annotation_old
                        self.object.historical_annotation = term_string_historical
                        self.object = form.save(commit=True)
                        form.save()
                        return HttpResponseRedirect(self.get_success_url())
                    else:
                        return self.render_to_response(self.get_context_data(
                            form=form,
                        ))
        else:
            # Not a concept-preferred configuration: only the duplicate-string
            # check applies (same sequence as above).
            # Search by draft record
            q_status_draft = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=-1,
            )
            # Search by published record
            q_status_published = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=1,
            )
            # Search by historical record
            q_status_historical = Q(
                term_string__exact=term_string,
                language_code=language_code,
                term_thesaurus=term_thesaurus,
                status=5,
            )
            has_term = TermListQualif.objects.filter( q_status_draft | q_status_published | q_status_historical ).exclude(id=self.object.id).values('term_string')
            # Walk the hits and confirm equality byte-wise.
            has_equal=''
            for term in has_term:
                t=term.get('term_string').encode('utf-8')
                if t == term_string.encode('utf-8'):
                    has_equal=t
            if has_equal:
                msg_erro = _("This Term already exist!") + ' -----> ' + term_string + ' (' + language_code + ')'
                return self.render_to_response(self.get_context_data(
                    form=form,
                    msg_erro=msg_erro,
                ))
            else:
                form_valid = form.is_valid()
                if form_valid:
                    self.object = form.save(commit=False)
                    self.object.identifier_concept_id = self.request.POST.get("identifier_concept_id")
                    self.object.date_altered = datetime.datetime.now().strftime('%Y-%m-%d')
                    # Prepend the new history entry to any previous annotations.
                    term_string_historical = v998 + sub_t
                    if len(historical_annotation_old) > 0:
                        term_string_historical=term_string_historical + ';' + historical_annotation_old
                    self.object.historical_annotation = term_string_historical
                    self.object = form.save(commit=True)
                    form.save()
                    return HttpResponseRedirect(self.get_success_url())
                else:
                    return self.render_to_response(self.get_context_data(
                        form=form,
                    ))

    def get_context_data(self, **kwargs):
        # Add the selected thesaurus name to the template context.
        context = super(TermListQualifUpdateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
class legacyInformationQualifCreateView(LoginRequiredView, CreateView):
    """
    Used as class view to create legacy information for a qualifier record.
    """
    model = legacyInformationQualif
    template_name = 'thesaurus/qualifier_new_legacy.html'
    form_class = legacyInformationQualifForm

    def get_success_url(self):
        """Redirect to the first term of the record's first concept.

        Bug fix: the original format string
        ``'/thesaurus/qualifiers/view/%s%s%s'`` had three placeholders for
        only two values, raising ``TypeError`` on every successful save.
        """
        id_identifier = self.request.GET.get("identifier_id")
        # ID of the first concept of this record.
        concepts_of_registry = IdentifierConceptListQualif.objects.filter(identifier_id=id_identifier).values('id')
        id_concept = concepts_of_registry[0].get('id')
        # ID of the first term of that concept, used as the redirect target.
        terms_of_concept = TermListQualif.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/qualifiers/view/%s%s' % (id_term, ths)

    def form_valid(self, form):
        """Attach the parent record id and persist the legacy information."""
        if form.is_valid():
            self.object = form.save(commit=False)
            self.object.identifier_id = self.request.POST.get("identifier_id")
            # A single save is enough; the original called form.save() twice.
            self.object = form.save()
            return HttpResponseRedirect(self.get_success_url())
        else:
            return self.render_to_response(self.get_context_data(form=form))

    def get_context_data(self, **kwargs):
        """Add the selected thesaurus name to the template context."""
        context = super(legacyInformationQualifCreateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
class legacyInformationQualifUpdateView(LoginRequiredView, UpdateView):
    """
    Used as class view to update legacy information of a qualifier record.
    """
    model = legacyInformationQualif
    template_name = 'thesaurus/qualifier_edit_legacy.html'
    form_class = legacyInformationQualifForm

    def get_success_url(self):
        """Redirect to the first term of the record's first concept.

        Bug fix: the original format string
        ``'/thesaurus/qualifiers/view/%s%s%s'`` had three placeholders for
        only two values, raising ``TypeError`` on every successful save.
        """
        id_identifier = self.request.GET.get("identifier_id")
        # ID of the first concept of this record.
        concepts_of_registry = IdentifierConceptListQualif.objects.filter(identifier_id=id_identifier).values('id')
        id_concept = concepts_of_registry[0].get('id')
        # ID of the first term of that concept, used as the redirect target.
        terms_of_concept = TermListQualif.objects.filter(identifier_concept_id=id_concept).values('id')
        id_term = terms_of_concept[0].get('id')
        ths = '?ths=' + self.request.GET.get("ths")
        return '/thesaurus/qualifiers/view/%s%s' % (id_term, ths)

    def form_valid(self, form):
        """Persist the edited legacy information."""
        if form.is_valid():
            # A single save is enough; the original called form.save() twice.
            self.object = form.save()
            return HttpResponseRedirect(self.get_success_url())
        else:
            return self.render_to_response(self.get_context_data(form=form))

    def get_context_data(self, **kwargs):
        """Add the selected thesaurus name to the template context."""
        context = super(legacyInformationQualifUpdateView, self).get_context_data(**kwargs)
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
class PageViewQualif(LoginRequiredView, DetailView):
    """
    Used as class view to show the full qualifier record page.

    The DetailView object is a TermListQualif; from it the parent record id
    is resolved and every related dataset (identifier, descriptions, tree
    numbers, terms, scope notes, legacy info) is loaded into the context.
    """
    model = TermListQualif
    template_name = 'thesaurus/page_view_qualif.html'

    def dispatch(self, *args, **kwargs):
        # Access control: users outside the central unit 'BR1.1' may only act
        # on thesauri whose scope matches one of their service roles.
        user_data = additional_user_info(self.request)
        user_cc = user_data['user_cc']
        user_role = user_data['service_role']
        if user_cc != 'BR1.1':
            # Brings "ths" from the environment ("thesaurus" takes precedence).
            if self.request.GET.get("thesaurus"):
                environment_thesaurus_id=self.request.GET.get("thesaurus")
            else:
                if self.request.GET.get("ths"):
                    environment_thesaurus_id=self.request.GET.get("ths")
            # NOTE(review): if neither parameter is present the name above is
            # never bound and the comparison below raises NameError.
            access_status=False
            # Create array with all registered thesauri and their scopes.
            ids_thesaurus = []
            ids_thesaurus = Thesaurus.objects.all().values('id','thesaurus_scope')
            # Compare each service registered for the user with the service
            # registered in each thesaurus; access is granted only when a
            # matching scope also matches the thesaurus id in the environment.
            for role in user_role:
                user_service=role
                for elem in ids_thesaurus:
                    id_thesaurus = elem.get('id')
                    thesaurus_scope = elem.get('thesaurus_scope')
                    if user_service == thesaurus_scope and int(id_thesaurus) == int(environment_thesaurus_id):
                        access_status=True
            if access_status==False:
                return HttpResponseForbidden()
        return super(PageViewQualif, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(PageViewQualif, self).get_context_data(**kwargs)
        if self.object:
            # IdentifierConceptListQualif - recover the record id (identifier)
            # of the concept that owns the viewed term.
            id_concept = IdentifierConceptListQualif.objects.filter(
                id=self.object.identifier_concept_id,
            ).values('identifier_id').distinct()
            # Used by the template to create a new concept for this record.
            for concept in id_concept:
                context['id_concept_new'] = concept
            # All concept pks that share the same identifier_id - can bring
            # more than one row.
            ids = IdentifierConceptListQualif.objects.filter(
                identifier_id=id_concept,
            ).values('id')
            # IdentifierQualif - information of the active qualifier record.
            context['identifierqualif_objects'] = IdentifierQualif.objects.filter(
                id=id_concept,
            )
            context['id_register_objects'] = IdentifierQualif.objects.filter(
                id=id_concept,
            ).values(
                # IdentifierQualif
                'id',
                'thesaurus',
                'qualifier_ui',
                'decs_code',
                'external_code',
                'abbreviation',
                'date_created',
                'date_revised',
                'date_established',
            )
            context['description_objects'] = IdentifierQualif.objects.filter(
                id=id_concept,
            ).values(
                # DescriptionQualif
                'descriptionqualif__identifier_id',
                'descriptionqualif__language_code',
                'descriptionqualif__annotation',
                'descriptionqualif__history_note',
                'descriptionqualif__online_note',
            )
            # Used to create the tree number list.
            context['tree_numbers_objects'] = IdentifierQualif.objects.filter(
                id=id_concept,
            ).values(
                # TreeNumbersListQualif
                'qtreenumbers__identifier_id',
                'qtreenumbers__tree_number',
            ).distinct().order_by('qtreenumbers__tree_number')
            # Preferred term strings (concept- and record-preferred).
            context['term_string_info_preferred_objects'] = IdentifierConceptListQualif.objects.filter(
                identifier=id_concept,termqualif__concept_preferred_term='Y',termqualif__record_preferred_term='Y',
            ).order_by('identifier_id',
                'termqualif__identifier_concept_id',
                '-preferred_concept',
                '-termqualif__concept_preferred_term',
                'termqualif__language_code',
                'termqualif__term_string',
            ).values(
                'id',
                'termqualif__status',
                'termqualif__term_string',
                'termqualif__language_code',
                'identifier_id',
            )
            # Published entry terms (not record-preferred).
            context['entry_terms_objects'] = IdentifierConceptListQualif.objects.filter(
                identifier=id_concept,termqualif__status=1,termqualif__record_preferred_term='N',
            ).order_by('identifier_id',
                'termqualif__language_code',
                'termqualif__term_string',
            ).values(
                'id',
                'termqualif__id',
                'termqualif__term_string',
                'termqualif__language_code',
            )
            # Scope notes of the preferred concept, per language.
            context['scope_note_objects'] = IdentifierConceptListQualif.objects.filter(
                identifier=id_concept,preferred_concept='Y',
            ).order_by('identifier_id',
            ).values(
                'conceptqualif__language_code',
                'conceptqualif__scope_note',
            ).distinct()
            # Legacy (pre-DeCS) flags of the record.
            context['legacy_objects'] = legacyInformationQualif.objects.filter(
                identifier=id_concept,
            ).values(
                'id',
                'pre_codificado',
                'desastre',
                'reforma_saude',
                'geografico',
                'mesh',
                'pt_lilacs',
                'nao_indexavel',
                'homeopatia',
                'repidisca',
                'saude_publica',
                'exploded',
                'geog_decs',
                'identifier_id',
            )
            # Used to show information of Preferred concepts and terms.
            context['identifierconceptlist_objects_preferred'] = IdentifierConceptListQualif.objects.filter(
                identifier=id_concept,preferred_concept='Y',
            ).order_by(
                '-preferred_concept',
                '-termqualif__concept_preferred_term',
                'termqualif__language_code',
                'termqualif__term_string',
            ).values(
                'id',
                'identifier_id',
                'concept_ui',
                'concept_relation_name',
                'preferred_concept',
                'casn1_name',
                'registry_number',
                'conceptqualif__identifier_concept_id',
                'conceptqualif__language_code',
                'conceptqualif__scope_note',
                'termqualif__id',
                'termqualif__identifier_concept_id',
                'termqualif__status',
                'termqualif__term_ui',
                'termqualif__language_code',
                'termqualif__term_string',
                'termqualif__concept_preferred_term',
                'termqualif__is_permuted_term',
                'termqualif__lexical_tag',
                'termqualif__record_preferred_term',
                'termqualif__entry_version',
                'termqualif__date_created',
                'termqualif__date_altered',
                'termqualif__historical_annotation',
            ).distinct()
            # Used to show Preferred concepts and terms in the Concepts tab.
            context['identifierconceptlist_objects_preferred_for_concepts'] = IdentifierConceptListQualif.objects.filter(
                identifier=id_concept,preferred_concept='Y',
            ).order_by(
                '-preferred_concept',
                '-termqualif__concept_preferred_term',
                'termqualif__language_code',
                'termqualif__term_string',
            ).values(
                'identifier_id',
                'id',
                'concept_ui',
                'concept_relation_name',
                'preferred_concept',
                'termqualif__id',
                'termqualif__identifier_concept_id',
                'termqualif__status',
                'termqualif__term_ui',
                'termqualif__language_code',
                'termqualif__term_string',
                'termqualif__concept_preferred_term',
                'termqualif__is_permuted_term',
                'termqualif__lexical_tag',
                'termqualif__record_preferred_term',
                'termqualif__entry_version',
                'termqualif__date_created',
                'termqualif__date_altered',
                'termqualif__historical_annotation',
            ).distinct()
            # Used to show information of Non-Preferred concepts and terms.
            context['identifierconceptlist_objects'] = IdentifierConceptListQualif.objects.filter(
                identifier=id_concept,preferred_concept='N',
            ).order_by('identifier_id',
                'termqualif__identifier_concept_id',
                '-preferred_concept',
                '-termqualif__concept_preferred_term',
                'termqualif__language_code',
                'termqualif__term_string',
            ).values(
                'id',
                'identifier_id',
                'concept_ui',
                'concept_relation_name',
                'preferred_concept',
                'casn1_name',
                'registry_number',
                'conceptqualif__identifier_concept_id',
                'conceptqualif__language_code',
                'conceptqualif__scope_note',
                'termqualif__id',
                'termqualif__identifier_concept_id',
                'termqualif__status',
                'termqualif__term_ui',
                'termqualif__language_code',
                'termqualif__term_string',
                'termqualif__concept_preferred_term',
                'termqualif__is_permuted_term',
                'termqualif__lexical_tag',
                'termqualif__record_preferred_term',
                'termqualif__entry_version',
                'termqualif__date_created',
                'termqualif__date_altered',
                'termqualif__historical_annotation',
            ).distinct()
            # Used to show Non-Preferred concepts and terms in the Concepts tab.
            context['identifierconceptlist_objects_for_concepts'] = IdentifierConceptListQualif.objects.filter(
                identifier=id_concept,preferred_concept='N',
            ).order_by('identifier_id',
                'termqualif__identifier_concept_id',
                '-preferred_concept',
                '-termqualif__concept_preferred_term',
                'termqualif__language_code',
                'termqualif__term_string',
            ).values(
                'identifier_id',
                'id',
                'concept_ui',
                'concept_relation_name',
                'preferred_concept',
                'termqualif__id',
                'termqualif__identifier_concept_id',
                'termqualif__status',
                'termqualif__term_ui',
                'termqualif__language_code',
                'termqualif__term_string',
                'termqualif__concept_preferred_term',
                'termqualif__is_permuted_term',
                'termqualif__lexical_tag',
                'termqualif__record_preferred_term',
                'termqualif__entry_version',
                'termqualif__date_created',
                'termqualif__date_altered',
                'termqualif__historical_annotation',
            ).distinct()
            # Information for the audit log:
            # ContentType id of the model being viewed.
            id_ctype_identifierqualif = ContentType.objects.filter(model='identifierqualif').values('id')
            context['id_ctype_identifierqualif'] = id_ctype_identifierqualif[0].get('id')
            # id of the record itself.
            id_identifierqualif = IdentifierQualif.objects.filter(id=id_concept).values('id')
            context['id_identifierqualif'] = id_identifierqualif[0].get('id')
        # Name of the thesaurus currently selected in the environment.
        context['choiced_thesaurus_name'] = Thesaurus.objects.filter(id=self.request.GET.get("ths"))
        return context
| 49.1726 | 321 | 0.545576 | 26,862 | 281,759 | 5.454694 | 0.02658 | 0.027627 | 0.023157 | 0.028773 | 0.946118 | 0.934216 | 0.923391 | 0.911816 | 0.894468 | 0.88419 | 0 | 0.003783 | 0.367601 | 281,759 | 5,729 | 322 | 49.181183 | 0.818536 | 0.117111 | 0 | 0.852263 | 0 | 0 | 0.108962 | 0.046804 | 0.001084 | 0 | 0 | 0.000349 | 0 | 0 | null | null | 0 | 0.005693 | null | null | 0.005693 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
530a9182ddc9d8013c40373926d47146d279f9cf | 3,427 | py | Python | tests/integration/test_md_lammps_cython_calculator.py | costrouc/dftfit | a00354f8f0d611bf57c6925f920c749d8628cf98 | [
"MIT"
] | 27 | 2016-08-10T18:55:40.000Z | 2021-08-03T08:43:21.000Z | tests/integration/test_md_lammps_cython_calculator.py | costrouc/dftfit | a00354f8f0d611bf57c6925f920c749d8628cf98 | [
"MIT"
] | null | null | null | tests/integration/test_md_lammps_cython_calculator.py | costrouc/dftfit | a00354f8f0d611bf57c6925f920c749d8628cf98 | [
"MIT"
] | 9 | 2018-08-26T14:40:14.000Z | 2021-09-19T06:12:56.000Z | import pytest
from dftfit.predict import Predict
from dftfit.potential import Potential
@pytest.mark.lammps_cython
@pytest.mark.calculator
@pytest.mark.benchmark(group='predict', min_rounds=1)
def test_lammps_cython_md_calculator_static(benchmark, structure, potential):
    """Benchmark a static (single-point) MgO evaluation with the lammps_cython calculator."""
    mgo_structure = structure('test_files/structure/MgO.cif')
    buck_potential = potential('test_files/potential/MgO-charge-buck.yaml')
    predictor = Predict('lammps_cython')

    @benchmark
    def run_static():
        predictor.static(mgo_structure, buck_potential)
@pytest.mark.lammps_cython
@pytest.mark.calculator
@pytest.mark.benchmark(group='predict', min_rounds=1)
def test_lammps_cython_md_calculator_lattice_constant(benchmark, structure, potential):
    """Benchmark the MgO lattice-constant prediction with the lammps_cython calculator."""
    mgo_structure = structure('test_files/structure/MgO.cif')
    buck_potential = potential('test_files/potential/MgO-charge-buck.yaml')
    predictor = Predict('lammps_cython')

    @benchmark
    def run_lattice_constant():
        predictor.lattice_constant(mgo_structure, buck_potential)
@pytest.mark.lammps_cython
@pytest.mark.calculator
@pytest.mark.benchmark(group='predict', min_rounds=1)
def test_lammps_cython_md_calculator_elastic_constant(benchmark, structure, potential):
    """Benchmark the MgO elastic-constant prediction with the lammps_cython calculator."""
    mgo_structure = structure('test_files/structure/MgO.cif')
    buck_potential = potential('test_files/potential/MgO-charge-buck.yaml')
    predictor = Predict('lammps_cython')

    @benchmark
    def run_elastic_constant():
        predictor.elastic_constant(mgo_structure, buck_potential)
@pytest.mark.long
@pytest.mark.lammps_cython
@pytest.mark.calculator
@pytest.mark.benchmark(group='predict', min_rounds=1)
def test_lammps_cython_md_calculator_point_defects(benchmark, structure, potential, training):
    """Benchmark point-defect formation energies on a relaxed 2x2x2 MgO supercell."""
    buck_potential = potential('test_files/potential/MgO-charge-buck.yaml')
    mgo_structure = structure('test_files/structure/MgO.cif')
    training_set = training('test_files/training/training-mattoolkit-mgo-properties.yaml', cache_filename="test_files/mattoolkit/cache/cache.db")
    point_defects_schema = training_set.schema['spec'][7]['data']
    predictor = Predict('lammps_cython')
    # Relax the cell to the predicted equilibrium lattice before the benchmark.
    _, relaxed_lattice = predictor.lattice_constant(mgo_structure, buck_potential)
    mgo_structure.modify_lattice(relaxed_lattice)

    @benchmark
    def run_point_defects():
        predictor.point_defects(mgo_structure, buck_potential, point_defects_schema, supercell=(2, 2, 2))
@pytest.mark.long
@pytest.mark.lammps_cython
@pytest.mark.calculator
@pytest.mark.benchmark(group='predict', min_rounds=1)
def test_lammps_cython_md_calculator_displacement_energies(benchmark, structure, potential, training):
    """Benchmark threshold displacement energies on a relaxed 2x2x2 MgO supercell."""
    buck_potential = potential('test_files/potential/MgO-charge-buck.yaml')
    mgo_structure = structure('test_files/structure/MgO.cif')
    training_set = training('test_files/training/training-mattoolkit-mgo-properties.yaml', cache_filename="test_files/mattoolkit/cache/cache.db")
    displacement_energy_schema = training_set.schema['spec'][8]['data']
    predictor = Predict('lammps_cython')
    # Relax the cell to the predicted equilibrium lattice before the benchmark.
    _, relaxed_lattice = predictor.lattice_constant(mgo_structure, buck_potential)
    mgo_structure.modify_lattice(relaxed_lattice)

    @benchmark
    def run_displacement_energies():
        predictor.displacement_energies(
            mgo_structure, buck_potential, displacement_energy_schema,
            supercell=(2, 2, 2),
            max_displacement_energy=50,
            site_radius=0.5,
            num_steps=2000, resolution=10, timestep=0.001)
| 38.943182 | 141 | 0.738255 | 402 | 3,427 | 6.079602 | 0.161692 | 0.069558 | 0.032733 | 0.045008 | 0.844108 | 0.812602 | 0.812602 | 0.812602 | 0.812602 | 0.812602 | 0 | 0.009285 | 0.151444 | 3,427 | 87 | 142 | 39.390805 | 0.831155 | 0.012547 | 0 | 0.716418 | 0 | 0 | 0.192547 | 0.158237 | 0 | 0 | 0 | 0 | 0 | 1 | 0.149254 | false | 0 | 0.044776 | 0 | 0.19403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
53187a434c000aadd77e1d887e5d9ae2aec8566f | 13,289 | py | Python | python/proyecciones.py | Locottus/clima | 0568f8e3ce840b649e5107e92a2a782ddbd8296c | [
"MIT"
] | 1 | 2020-10-12T03:06:21.000Z | 2020-10-12T03:06:21.000Z | python/proyecciones.py | Locottus/clima | 0568f8e3ce840b649e5107e92a2a782ddbd8296c | [
"MIT"
] | null | null | null | python/proyecciones.py | Locottus/clima | 0568f8e3ce840b649e5107e92a2a782ddbd8296c | [
"MIT"
] | null | null | null | import psycopg2, json, datetime, sys, requests, time
# Database connection strings.
# dev:
#conn_string = "host='localhost' dbname='clima' user='postgres' password='Guatemala1'"
# production:
# SECURITY(review): production credentials are hard-coded in source and were
# committed to version control — move them to environment variables or a
# secrets store, and rotate the password.
conn_string = "host='172.17.250.12' dbname='clima' user='postgres' password='postgres2020!Incyt'"
prediccion = 5  # number of years to project into the future
def getProcessDate():
    """Return yesterday's date (the day whose data is being processed).

    Returns None (implicitly) if anything goes wrong, matching the module's
    print-and-continue error style.
    """
    try:
        from datetime import date, timedelta
        return date.today() - timedelta(days=1)
    except:
        print("error en getProcessDate")
# Translation table mapping Spanish accented characters to their ASCII base.
_TABLA_SIN_ACENTOS = str.maketrans("áéíóúñÁÉÍÓÚÑ", "aeiounAEIOUN")


def convUTF8(cadena):
    """Return str(cadena) with Spanish accented characters replaced by their
    unaccented ASCII equivalents (á->a, ñ->n, Á->A, ...).

    Bug fix: the original replace chain mapped "Ñ" to "Ñ" (a no-op), so
    upper-case Ñ was never converted to N. The chain of 12 .replace() calls
    is also collapsed into a single str.translate() pass.

    :param cadena: any object; it is stringified first.
    :return: the transliterated string, or ``cadena`` unchanged on error.
    """
    try:
        return str(cadena).translate(_TABLA_SIN_ACENTOS)
    except:
        return cadena
def getLocation():
    """Return the distinct station names as a JSON string.

    Uses RealDictCursor so each row serializes as {"estacion": <name>}.
    Returns None (implicitly) on error, matching the module's error style.
    """
    try:
        from psycopg2.extras import RealDictCursor
        conexion = psycopg2.connect(conn_string)
        cur = conexion.cursor(cursor_factory=RealDictCursor)
        cur.execute("select distinct estacion from historico_estaciones ")
        resultado = json.dumps(cur.fetchall(), indent=2)
        conexion.close()
        return resultado
    except:
        print("error en getLocation")
def ejecutaComandoPsql(query):
    """Execute a single SQL statement, commit it, and close the connection.

    NOTE(review): this function is defined three times in this module with
    identical bodies; each later definition shadows this one. The duplicates
    should eventually be removed.

    :param query: a complete SQL statement. The caller is responsible for any
        escaping — this helper does not support parameterized queries.
    """
    try:
        conn = psycopg2.connect(conn_string)
        cursor = conn.cursor()
        cursor.execute(query)
        conn.commit()
        conn.close()
    except:
        # print-and-continue error style used throughout the module
        print("error en ejecutar comando psql")
def cargaUltimoAnio(estacion):
m = 0
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
cursor.execute("select max(year) from historico_estaciones where estacion = '"+estacion+"'")
rows = cursor.fetchall()
for row in rows:
#m.append([row[0],row[1].upper()])
m = row[0]
#print(row[0],row[1])
conn.close()
#print(m)
return m
def cargaEstaciones():
m = []
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
cursor.execute('select distinct estacion from historico_estaciones')
rows = cursor.fetchall()
for row in rows:
#m.append([row[0],row[1].upper()])
m.append(row[0])
#print(row[0],row[1])
conn.close()
#print(m)
return m
def getAVGLluvia(estacion,mes):
m = []
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
q = "select estacion,year,mes,round(avg(lluvia),5) as \"avgLluvia\" from historico_estaciones where lluvia > 0 and mes = " + str(mes) + "and estacion = '" + estacion + "' group by estacion,year,mes order by mes,year "
cursor.execute(q)
rows = cursor.fetchall()
for row in rows:
#m.append([row[0],row[1].upper()])
m.append([row[0],row[1],row[2],row[3],0])
#print(([row[0],row[1],row[2],row[3],row[4]]))
conn.close()
#print(m)
return m
def getAVGTemperatura(estacion,mes):
m = []
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
q = " select estacion,year,mes,round(avg(TMAX),5) as \"tMax\",round(avg(Tmin),5) as \"tMin\", round( (avg(tmax) + avg(tmin)) / 2,1) as \"tPromedio\" from historico_estaciones where tmax >=-10 and tmin >=-10 and mes = " + str(mes) + " and estacion = '" + str(estacion) + "' group by estacion,year,mes order by mes,year "
cursor.execute(q)
rows = cursor.fetchall()
for row in rows:
#m.append([row[0],row[1].upper()])
m.append([row[0],row[1],row[2],row[3],row[4],row[5],0,0,0])
#print(([row[0],row[1],row[2],row[3],row[4]]))
conn.close()
#print(m)
return m
def ejecutaComandoPsql(query):
    """Execute a single SQL statement, commit it, and close the connection.

    NOTE(review): duplicate definition — identical copies of this function
    appear three times in this module; this one shadows the first.

    :param query: a complete SQL statement (caller must do any escaping).
    """
    try:
        conn = psycopg2.connect(conn_string)
        cursor = conn.cursor()
        cursor.execute(query)
        conn.commit()
        conn.close()
    except:
        print("error en ejecutar comando psql")
def cargaLLuviaPorDia():
print('inicia la carga en un arreglo')
arr = []
arr.append([1,2,3])
arr.append([4,5,6])
np_arr = np.array(arr)
#print(arr)
#print(np_arr)
def calculaProyeccionAbsoluto(ultimoAnio, datosLluvia, estacion):
print(estacion)#estacion,mes,dia,avg,count
p = []
for x in range(1, 5):
anio = ultimoAnio + x
print(anio)
for d in datosLluvia:
print(d)
p.append([estacion,anio,mes,dia])
def proyeccionAbsolutaLluvia(lista, ultimoAnio):
    """Project average monthly rainfall ``prediccion`` years past *ultimoAnio*
    using the mean absolute year-over-year change.

    :param lista: rows ``[estacion, year, mes, avg, delta]`` sorted by year
        (as produced by getAVGLluvia); column 4 is filled in-place here.
    :param ultimoAnio: last year with observed data.
    :return: list of ``[estacion, year, mes, projected_value]`` rows.

    NOTE(review): the delta loop stops at index len(lista)-2, so the most
    recent row's delta is never accumulated, and the sum is divided by
    len(lista) rather than by the number of deltas actually added. Possibly
    intentional smoothing — confirm before "fixing".
    """
    i = 1
    promedio = 0
    # First pass: store year-over-year deltas in column 4 and accumulate them.
    while (i < len(lista)-1):
        lista[i][4] = lista[i][3] - lista[i - 1][3]
        promedio += lista[i][4]
        i+=1
    promedio = promedio / len(lista)
    # After the loop i == len(lista)-1, i.e. the most recent observed row.
    estacion = lista[i][0]
    mes = lista[i][2]
    ultimoValor = lista[i][3]
    i = 1
    futuro = []
    # Extrapolate: add the mean delta once per projected year.
    while ( i <= prediccion):
        futuro.append([estacion ,ultimoAnio + i, mes, ultimoValor + promedio])
        ultimoValor = ultimoValor + promedio
        i += 1
    return futuro
def proyeccionPorcentual(lista, ultimoAnio):
    """Project rainfall using the mean *relative* year-over-year change.

    :param lista: rows ``[estacion, year, mes, avg, delta]`` sorted by year;
        column 4 is filled in-place with the fractional change.
    :return: list of ``[estacion, year, mes, projected_value]`` rows.

    NOTE(review): two things look suspicious here — (1) the fractional change
    is divided by the *current* value ``lista[i][3]`` rather than the previous
    one, and (2) the mean fraction is ADDED to the last absolute value instead
    of applied multiplicatively (``ultimoValor * (1 + promedio)``). The call
    sites in ``__main__`` are currently disabled, so this has never produced
    stored results — confirm the math before enabling it.
    """
    i = 1
    promedio = 0
    while (i < len(lista)-1):
        # Guard against division by zero when the current value is 0.
        if (lista[i][3] != 0):
            lista[i][4] = (lista[i][3] - lista[i - 1][3]) / lista[i][3]
        promedio += lista[i][4]
        i+=1
    promedio = promedio / len(lista)
    # i now points at the most recent observed row.
    estacion = lista[i][0]
    mes = lista[i][2]
    ultimoValor = lista[i][3]
    i = 1
    futuro = []
    while ( i <= prediccion):
        futuro.append([estacion ,ultimoAnio + i, mes, ultimoValor + promedio])
        ultimoValor = ultimoValor + promedio
        i += 1
    return futuro
def ejecutaComandoPsql(query):
    """Execute a single SQL statement, commit it, and close the connection.

    NOTE(review): third identical definition of this function in the module
    (this is the one that ends up bound at import time). Deduplicate.

    :param query: a complete SQL statement (caller must do any escaping).
    """
    try:
        conn = psycopg2.connect(conn_string)
        cursor = conn.cursor()
        cursor.execute(query)
        conn.commit()
        conn.close()
    except:
        print("error en ejecutar comando psql")
def proyeccionAbsolutaTemperatura(lista, ultimoAnio):
    """Project monthly max/min/avg temperature ``prediccion`` years ahead
    using the mean absolute year-over-year change of each series.

    :param lista: rows ``[estacion, year, mes, tMax, tMin, tPromedio,
        dMax, dMin, dAvg]`` sorted by year (from getAVGTemperatura);
        columns 6-8 are filled in-place with the deltas.
    :return: rows ``[estacion, year, mes, projMax, projMin, projAvg]``.

    NOTE(review): as in the rainfall variant, the last row's delta is skipped
    (loop stops at len-2) and sums are divided by len(lista) instead of the
    delta count — confirm whether intentional.
    """
    i = 1
    promedioMin = 0
    promedioMax = 0
    promedioAvg = 0
    # First pass: per-series year-over-year deltas, accumulated.
    while (i < len(lista)-1):
        lista[i][6] = lista[i][3] - lista[i - 1][3]
        lista[i][7] = lista[i][4] - lista[i - 1][4]
        lista[i][8] = lista[i][5] - lista[i - 1][5]
        promedioMax += lista[i][6]
        promedioMin += lista[i][7]
        promedioAvg += lista[i][8]
        i+=1
    promedioMin = promedioMin / len(lista)
    promedioMax = promedioMax / len(lista)
    promedioAvg = promedioAvg / len(lista)
    # i now points at the most recent observed row.
    estacion = lista[i][0]
    mes = lista[i][2]
    ultimoValorMax = lista[i][3]
    ultimoValorMin = lista[i][4]
    ultimoValorAvg = lista[i][5]
    i = 1
    futuro = []
    # Extrapolate each of the three series by its own mean delta.
    while ( i <= prediccion):
        futuro.append([estacion ,ultimoAnio + i, mes, ultimoValorMax + promedioMax, ultimoValorMin + promedioMin, ultimoValorAvg + promedioAvg])
        ultimoValorMin = ultimoValorMin + promedioMin
        ultimoValorMax = ultimoValorMax + promedioMax
        ultimoValorAvg = ultimoValorAvg + promedioAvg
        i += 1
    return futuro
def proyeccionPorcentualTemperatura(lista, ultimoAnio):
    """Project monthly max/min/avg temperature using mean *relative*
    year-over-year changes.

    :param lista: rows ``[estacion, year, mes, tMax, tMin, tPromedio,
        dMax, dMin, dAvg]`` sorted by year; columns 6-8 are filled in-place
        with fractional changes.
    :return: rows ``[estacion, year, mes, projMax, projMin, projAvg]``.

    NOTE(review): unlike proyeccionPorcentual (rainfall), this divides by the
    *previous* value — the two "porcentual" functions are inconsistent. Also
    the mean fraction is ADDED to absolute temperatures rather than applied
    multiplicatively; the call sites in ``__main__`` are disabled — confirm
    the math before enabling.
    """
    i = 1
    promedioMin = 0
    promedioMax = 0
    promedioAvg = 0
    while (i < len(lista)-1):
        # Guards avoid division by zero on each series independently.
        if (lista[i - 1][3] != 0):
            lista[i][6] = (lista[i][3] - lista[i - 1][3])/lista[i - 1][3]
        if (lista[i - 1][4] != 0):
            lista[i][7] = (lista[i][4] - lista[i - 1][4])/lista[i - 1][4]
        if (lista[i - 1][5] != 0):
            lista[i][8] = (lista[i][5] - lista[i - 1][5])/lista[i - 1][5]
        promedioMax += lista[i][6]
        promedioMin += lista[i][7]
        promedioAvg += lista[i][8]
        i+=1
    promedioMin = promedioMin / len(lista)
    promedioMax = promedioMax / len(lista)
    promedioAvg = promedioAvg / len(lista)
    # i now points at the most recent observed row.
    estacion = lista[i][0]
    mes = lista[i][2]
    ultimoValorMax = lista[i][3]
    ultimoValorMin = lista[i][4]
    ultimoValorAvg = lista[i][5]
    i = 1
    futuro = []
    while ( i <= prediccion):
        futuro.append([estacion ,ultimoAnio + i, mes, ultimoValorMax + promedioMax, ultimoValorMin + promedioMin, ultimoValorAvg + promedioAvg])
        ultimoValorMin = ultimoValorMin + promedioMin
        ultimoValorMax = ultimoValorMax + promedioMax
        ultimoValorAvg = ultimoValorAvg + promedioAvg
        i += 1
    return futuro
def resetAll():
print('borrando tablas')
q = "delete from proyeccion_absoluta_lluvia"
ejecutaComandoPsql(q)
q = "delete from proyeccion_porcentual_lluvia"
ejecutaComandoPsql(q)
q = "delete from proyeccion_absoluta_Temperatura"
ejecutaComandoPsql(q)
q = "delete from proyeccion_porcentual_Temperatura"
ejecutaComandoPsql(q)
if __name__ == "__main__":
    # Entry point: wipe previous projections, then recompute the absolute
    # temperature and rainfall projections for every station and every month.
    resetAll()
    print('inicia proceso de proyecciones')
    estaciones = cargaEstaciones()
    # temperature projections
    for e in estaciones:
        print(e)
        for x in range(1, 13):  # one projection per calendar month
            print('mes temperatura',x)
            lista = getAVGTemperatura(e,x)
            ultimoAnio = cargaUltimoAnio(e)
            futuroAbsoluto = proyeccionAbsolutaTemperatura(lista,ultimoAnio)
            for f in futuroAbsoluto:
                # NOTE(review): values come from our own computation, but this
                # insert is still built by string concatenation — prefer a
                # parameterized insert.
                q = 'insert into proyeccion_absoluta_Temperatura (estacion,anio,mes,proyeccion_max,proyeccion_min,proyeccion_avg) values (\'' + str(f[0]) + '\','+ str(f[1]) + ',' + str(f[2]) + ',' + str(f[3]) + ',' + str(f[4])+ ',' + str(f[5]) + ' )'
                ejecutaComandoPsql(q)
            # Percentual variant deliberately disabled (kept as a no-op string):
            '''
            futuroAbsoluto = proyeccionPorcentualTemperatura(lista,ultimoAnio)
            for f in futuroAbsoluto:
            q = 'insert into proyeccion_porcentual_Temperatura (estacion,anio,mes,proyeccion_max,proyeccion_min,proyeccion_avg) values (\'' + str(f[0]) + '\','+ str(f[1]) + ',' + str(f[2]) + ',' + str(f[3]) + ',' + str(f[4])+ ',' + str(f[5]) + ' )'
            ejecutaComandoPsql(q)
            '''
    # rainfall projections
    for e in estaciones:
        print(e)
        for x in range(1, 13):
            print('mes lluvia',x)
            lista = getAVGLluvia(e,x)
            ultimoAnio = cargaUltimoAnio(e)
            futuroAbsoluto = proyeccionAbsolutaLluvia(lista,ultimoAnio)
            for f in futuroAbsoluto:
                q = 'insert into proyeccion_absoluta_lluvia (estacion,anio,mes,proyeccion) values (\'' + str(f[0]) + '\','+ str(f[1]) + ',' + str(f[2]) + ',' + str(f[3]) + ' )'
                ejecutaComandoPsql(q)
            # Percentual variant deliberately disabled (kept as a no-op string):
            '''
            futuroPorcentual = proyeccionPorcentual(lista,ultimoAnio)
            for f in futuroPorcentual:
            q = 'insert into proyeccion_porcentual_lluvia (estacion,anio,mes,proyeccion) values (\'' + str(f[0]) + '\','+ str(f[1]) + ',' + str(f[2]) + ',' + str(f[3]) + ' )'
            ejecutaComandoPsql(q)
            '''
    print('fin de proyeccion')
'''
select estacion,year,mes,round(avg(lluvia),5) as "avgLluvia"
from historico_estaciones
where lluvia >=-10
and mes = " + str(mes) + "
and estacion = '" + estacion + "'
group by estacion,year,mes order by mes,year
select estacion,year,mes,round(avg(TMAX),5) as "tMax",round(avg(Tmin),5) as "tMin",
round( (avg(tmax) + avg(tmin)) / 2,1) as "tPromedio"
from historico_estaciones where tmax >=-10 and tmin >=-10 and mes = 12
and estacion = 'INSIVUMEH' group by estacion,year,mes order by mes,year
create table proyeccion_absoluta_lluvia(
id SERIAL,
estacion text not null,
anio numeric not null,
mes numeric not null,
proyeccion numeric not null,
unique (estacion,anio,mes, proyeccion)
)
create table proyeccion_porcentual_lluvia(
id SERIAL,
estacion text not null,
anio numeric not null,
mes numeric not null,
proyeccion numeric not null,
unique (estacion,anio,mes, proyeccion)
)
create table proyeccion_absoluta_temperatura(
id SERIAL,
estacion text not null,
anio numeric not null,
mes numeric not null,
proyeccion_min numeric not null,
proyeccion_max numeric not null,
proyeccion_avg numeric not null,
unique (estacion,anio,mes, proyeccion_min,proyeccion_max,proyeccion_avg)
)
create table proyeccion_porcentual_temperatura(
id SERIAL,
estacion text not null,
anio numeric not null,
mes numeric not null,
proyeccion_min numeric not null,
proyeccion_max numeric not null,
proyeccion_avg numeric not null,
unique (estacion,anio,mes, proyeccion_min,proyeccion_max,proyeccion_avg)
)
'''
| 29.078775 | 324 | 0.607645 | 1,642 | 13,289 | 4.873934 | 0.124848 | 0.044983 | 0.02799 | 0.009996 | 0.80045 | 0.778333 | 0.767837 | 0.74647 | 0.73435 | 0.713482 | 0 | 0.028725 | 0.242908 | 13,289 | 456 | 325 | 29.142544 | 0.766723 | 0.109339 | 0 | 0.666667 | 0 | 0.004065 | 0.141627 | 0.039055 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.004065 | 0.012195 | null | null | 0.065041 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
533e1dedc82b7251f637cbfa837b64a5d5172876 | 246 | py | Python | nlpatl/models/classification/__init__.py | dumpmemory/nlpatl | 59209242d1ac26714b11b86261070ac50cc90432 | [
"MIT"
] | 18 | 2021-11-29T06:43:46.000Z | 2022-03-29T09:58:32.000Z | nlpatl/models/classification/__init__.py | dumpmemory/nlpatl | 59209242d1ac26714b11b86261070ac50cc90432 | [
"MIT"
] | null | null | null | nlpatl/models/classification/__init__.py | dumpmemory/nlpatl | 59209242d1ac26714b11b86261070ac50cc90432 | [
"MIT"
] | 1 | 2021-11-29T06:43:47.000Z | 2021-11-29T06:43:47.000Z | from nlpatl.models.classification.classification import Classification
from nlpatl.models.classification.sklearn_classification import SkLearnClassification
from nlpatl.models.classification.xgboost_classification import XGBoostClassification
| 61.5 | 86 | 0.902439 | 23 | 246 | 9.565217 | 0.391304 | 0.136364 | 0.218182 | 0.409091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060976 | 246 | 3 | 87 | 82 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
7265c3c273b5913dacff5c98280a72042406f0d9 | 10,266 | py | Python | tests/test_services/test_set_1073.py | ucloud/ucloud-sdk-python2 | 90fb43198df73a78d64bbd98675dc7b302856057 | [
"Apache-2.0"
] | 19 | 2019-05-15T13:41:58.000Z | 2019-11-13T09:09:37.000Z | tests/test_services/test_set_1073.py | ucloud/ucloud-sdk-python2 | 90fb43198df73a78d64bbd98675dc7b302856057 | [
"Apache-2.0"
] | 9 | 2019-07-24T08:31:33.000Z | 2020-09-22T04:01:46.000Z | tests/test_services/test_set_1073.py | ucloud/ucloud-sdk-python2 | 90fb43198df73a78d64bbd98675dc7b302856057 | [
"Apache-2.0"
] | 3 | 2019-06-18T00:22:07.000Z | 2020-04-24T02:28:06.000Z | # -*- coding: utf-8 -*-
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
import pytest
import logging
from ucloud.core import exc
from ucloud.testing import env, funcs, op, utest
logger = logging.getLogger(__name__)
scenario = utest.Scenario(1073)
@pytest.mark.skipif(env.is_ut(), reason=env.get_skip_reason())
def test_set_1073(client, variables):
    """Run scenario 1073: a full PHost lifecycle (image lookup, price, create,
    reboot, stop, reinstall, power-off, terminate) against the UCloud API.

    This file is generated by ucloud-model (see module header) — do not hand
    edit behavior.
    """
    scenario.initial(variables)

    # NOTE(review): the password looks base64-encoded — presumably the API
    # expects it pre-encoded; confirm against the CreatePHost docs.
    scenario.variables["Password"] = "dXFhQHVjbG91ZA=="
    scenario.variables["Type"] = "SSD-2"
    scenario.variables["Name"] = "test123123"
    scenario.variables["Remark"] = "test123123"
    scenario.variables["ChargeType"] = "Month"
    scenario.variables["ProjectId"] = "ohrg-xbbyex"

    scenario.run(client)
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "DescribePHostImageResponse"),
    ],
    action="DescribePHostImage",
)
def describe_phost_image_00(client, variables):
    """Step 0: list base PHost images and stash two image ids for later steps."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "ImageType": "Base",
    }
    try:
        resp = client.uphost().describe_phost_image(d)
    except exc.RetCodeException as e:
        # Keep the error payload so the step validators can inspect RetCode.
        resp = e.json()
    # ImageID1 is used by create_phost_03, ImageID2 by reinstall_phost_09.
    variables["ImageID1"] = utest.value_at_path(resp, "ImageSet.0.ImageId")
    variables["ImageID2"] = utest.value_at_path(resp, "ImageSet.2.ImageId")
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "GetPHostPriceResponse"),
    ],
    action="GetPHostPrice",
)
def get_phost_price_01(client, variables):
    """Step 1: query the monthly price of a single PHost."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "Quantity": 1,
        "Count": 1,
        "ChargeType": "Month",
    }
    try:
        resp = client.uphost().get_phost_price(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "DescribePHostTagsResponse"),
    ],
    action="DescribePHostTags",
)
def describe_phost_tags_02(client, variables):
    """Step 2: list PHost tags in the target zone/region."""
    d = {"Zone": variables.get("Zone"), "Region": variables.get("Region")}
    try:
        resp = client.uphost().describe_phost_tags(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "CreatePHostResponse"),
    ],
    action="CreatePHost",
)
def create_phost_03(client, variables):
    """Step 3: create the PHost used by the rest of the scenario and record
    its id in ``variables["PHost"]``."""
    d = {
        "Zone": variables.get("Zone"),
        "Remark": variables.get("Remark"),
        "Region": variables.get("Region"),
        "Password": variables.get("Password"),
        "Name": variables.get("Name"),
        "ImageId": variables.get("ImageID1"),
        "ChargeType": variables.get("ChargeType"),
    }
    try:
        resp = client.uphost().create_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    # CreatePHost returns a list of ids; the scenario uses the first one.
    variables["PHost"] = utest.value_at_path(resp, "PHostId.0")
    return resp
@scenario.step(
    max_retries=60,
    retry_interval=60,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "DescribePHostResponse"),
        ("str_eq", "PHostSet.0.PHostType", variables.get("Type")),
        ("str_eq", "PHostSet.0.Name", variables.get("Name")),
        ("str_eq", "PHostSet.0.PHostId", variables.get("PHost")),
        ("str_eq", "PHostSet.0.Remark", variables.get("Remark")),
        ("str_eq", "PHostSet.0.Zone", variables.get("Zone")),
        ("str_eq", "PHostSet.0.PMStatus", "Running"),
    ],
    action="DescribePHost",
)
def describe_phost_04(client, variables):
    """Step 4: poll (up to 60 x 60s) until the new PHost reports Running and
    its attributes match what was requested."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "PHostId": [variables.get("PHost")],
    }
    try:
        resp = client.uphost().describe_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=600,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "RebootPHostResponse"),
    ],
    action="RebootPHost",
)
def reboot_phost_05(client, variables):
    """Step 5: reboot the PHost (startup_delay gives it 600s to settle first)."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "ProjectId": variables.get("ProjectId"),
        "PHostId": variables.get("PHost"),
    }
    try:
        resp = client.uphost().reboot_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=120,
    retry_interval=60,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "PHostSet.0.PMStatus", "Running"),
    ],
    action="DescribePHost",
)
def describe_phost_06(client, variables):
    """Step 6: poll until the PHost is Running again after the reboot."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "PHostId": [variables.get("PHost")],
    }
    try:
        resp = client.uphost().describe_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=10,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "StopPHostResponse"),
    ],
    action="StopPHost",
)
def stop_phost_07(client, variables):
    """Step 7: stop the PHost gracefully."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "ProjectId": variables.get("ProjectId"),
        "PHostId": variables.get("PHost"),
    }
    try:
        # NOTE(review): uses the raw client.invoke rather than a typed wrapper
        # like the other steps — presumably no uphost().stop_phost() method
        # existed when this was generated; confirm against the SDK version.
        resp = client.invoke("StopPHost", d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=30,
    retry_interval=60,
    startup_delay=10,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "PHostSet.0.PMStatus", "Stopped"),
    ],
    action="DescribePHost",
)
def describe_phost_08(client, variables):
    """Step 8: poll until the PHost reports Stopped after the stop request."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "PHostId": [variables.get("PHost")],
    }
    try:
        resp = client.uphost().describe_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=10,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "ReinstallPHostResponse"),
    ],
    action="ReinstallPHost",
)
def reinstall_phost_09(client, variables):
    """Step 9: reinstall the PHost with the second image captured in step 0."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "Password": variables.get("Password"),
        "PHostId": variables.get("PHost"),
        "ImageId": variables.get("ImageID2"),
    }
    try:
        resp = client.uphost().reinstall_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=120,
    retry_interval=60,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "DescribePHostResponse"),
        ("str_eq", "PHostSet.0.PHostType", variables.get("Type")),
        ("str_eq", "PHostSet.0.Name", variables.get("Name")),
        ("str_eq", "PHostSet.0.PHostId", variables.get("PHost")),
        ("str_eq", "PHostSet.0.Remark", variables.get("Remark")),
        ("str_eq", "PHostSet.0.Zone", variables.get("Zone")),
        ("str_eq", "PHostSet.0.PMStatus", "Running"),
    ],
    action="DescribePHost",
)
def describe_phost_10(client, variables):
    """Step 10: poll until the reinstalled PHost is Running with unchanged
    attributes."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "PHostId": [variables.get("PHost")],
    }
    try:
        resp = client.uphost().describe_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "PoweroffPHostResponse"),
    ],
    action="PoweroffPHost",
)
def poweroff_phost_11(client, variables):
    """Step 11: force power-off of the PHost."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "ProjectId": variables.get("ProjectId"),
        "PHostId": variables.get("PHost"),
    }
    try:
        resp = client.uphost().poweroff_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=30,
    retry_interval=60,
    startup_delay=0,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "PHostSet.0.PMStatus", "Stopped"),
    ],
    action="DescribePHost",
)
def describe_phost_12(client, variables):
    """Step 12: poll until the PHost reports Stopped after the power-off."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "PHostId": [variables.get("PHost")],
    }
    try:
        resp = client.uphost().describe_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=10,
    fast_fail=True,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "TerminatePHostResponse"),
    ],
    action="TerminatePHost",
)
def terminate_phost_13(client, variables):
    """Step 13: delete the PHost, cleaning up the scenario's resources."""
    d = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "ProjectId": variables.get("ProjectId"),
        "PHostId": variables.get("PHost"),
    }
    try:
        resp = client.uphost().terminate_phost(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
| 26.874346 | 75 | 0.600331 | 1,137 | 10,266 | 5.281442 | 0.133685 | 0.117902 | 0.042631 | 0.053289 | 0.762531 | 0.759201 | 0.733888 | 0.727893 | 0.714571 | 0.714571 | 0 | 0.01914 | 0.236606 | 10,266 | 381 | 76 | 26.944882 | 0.747097 | 0.007208 | 0 | 0.714697 | 1 | 0 | 0.192145 | 0.017575 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043228 | false | 0.008646 | 0.011527 | 0 | 0.095101 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
726c590ceb6774fabbdef9aaffede9a11b9fd305 | 2,964 | py | Python | src/currencies_data/eur_list_1H.py | Serrat96/Financial_Correlations | 0d894e5986440d23edc272e2ac86885efdf9fc15 | [
"MIT"
] | null | null | null | src/currencies_data/eur_list_1H.py | Serrat96/Financial_Correlations | 0d894e5986440d23edc272e2ac86885efdf9fc15 | [
"MIT"
] | null | null | null | src/currencies_data/eur_list_1H.py | Serrat96/Financial_Correlations | 0d894e5986440d23edc272e2ac86885efdf9fc15 | [
"MIT"
] | null | null | null | import pandas as pd
# All candlestick CSVs live in one repository folder; build each URL from a
# single base instead of repeating the long prefix sixteen times.
_BASE_URL = (
    "https://raw.githubusercontent.com/Serrat96/DukascopyData/master/"
    "DUKASCOPY_EUR_1H_BID/"
)


def _read_candles(csv_name):
    """Download one Dukascopy EUR 1H BID candlestick CSV as a DataFrame."""
    return pd.read_csv(_BASE_URL + csv_name)


eur_aud_1h = _read_candles("EURAUD_Candlestick_1_h_BID_07.10.2005-20.02.2021.csv")
eur_cad_1h = _read_candles("EURCAD_Candlestick_1_h_BID_25.10.2004-20.02.2021.csv")
eur_chf_1h = _read_candles("EURCHF_Candlestick_1_h_BID_03.08.2003-20.02.2021.csv")
eur_dkk_1h = _read_candles("EURDKK_Candlestick_1_h_BID_25.10.2004-20.02.2021.csv")
eur_gbp_1h = _read_candles("EURGBP_Candlestick_1_h_BID_03.08.2003-20.02.2021.csv")
eur_hkd_1h = _read_candles("EURHKD_Candlestick_1_h_BID_13.03.2007-20.02.2021.csv")
eur_huf_1h = _read_candles("EURHUF_Candlestick_1_h_BID_14.03.2007-20.02.2021.csv")
eur_jpy_1h = _read_candles("EURJPY_Candlestick_1_h_BID_03.08.2003-20.02.2021.csv")
eur_nok_1h = _read_candles("EURNOK_Candlestick_1_h_BID_25.10.2004-20.02.2021.csv")
eur_nzd_1h = _read_candles("EURNZD_Candlestick_1_h_BID_02.01.2006-20.02.2021.csv")
eur_pln_1h = _read_candles("EURPLN_Candlestick_1_h_BID_14.03.2007-20.02.2021.csv")
eur_rub_1h = _read_candles("EURRUB_Candlestick_1_h_BID_13.03.2007-20.02.2021.csv")
eur_sek_1h = _read_candles("EURSEK_Candlestick_1_h_BID_27.10.2004-20.02.2021.csv")
eur_sgd_1h = _read_candles("EURSGD_Candlestick_1_h_BID_13.03.2007-20.02.2021.csv")
eur_try_1h = _read_candles("EURTRY_Candlestick_1_h_BID_13.03.2007-20.02.2021.csv")
eur_usd_1h = _read_candles("EURUSD_Candlestick_1_h_BID_04.05.2003-20.02.2021.csv")

# Module-level list of all frames, for consumers that iterate every pair.
currencies_list_1h = [
    eur_aud_1h, eur_cad_1h, eur_chf_1h, eur_dkk_1h, eur_gbp_1h, eur_hkd_1h,
    eur_huf_1h, eur_jpy_1h, eur_nok_1h, eur_nzd_1h, eur_pln_1h, eur_rub_1h,
    eur_sek_1h, eur_sgd_1h, eur_try_1h, eur_usd_1h,
]
726fa7ae2c16b748bb2a8d85479231b1a8c79919 | 90 | py | Python | fmath.py | daros10/Sintax_Python | e22cba899c3f9045d10109c34288692eba8c7923 | [
"MIT"
] | null | null | null | fmath.py | daros10/Sintax_Python | e22cba899c3f9045d10109c34288692eba8c7923 | [
"MIT"
] | null | null | null | fmath.py | daros10/Sintax_Python | e22cba899c3f9045d10109c34288692eba8c7923 | [
"MIT"
] | null | null | null | #My own module
def add(n1, n2):
    """Print the sum of n1 and n2 to stdout (returns None)."""
    total = n1 + n2
    print(total)
def sub(n1, n2):
    """Print the difference n1 - n2 to stdout (returns None)."""
    diferencia = n1 - n2
    print(diferencia)
| 11.25 | 16 | 0.544444 | 17 | 90 | 2.882353 | 0.529412 | 0.326531 | 0.367347 | 0.44898 | 0.530612 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 0.288889 | 90 | 8 | 17 | 11.25 | 0.640625 | 0.144444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0 | 0.5 | 0.5 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
72a56b86c6d42aa2a54de381e5eb327041a5af30 | 19,018 | py | Python | mayan/apps/permissions/tests/test_api.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/permissions/tests/test_api.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/permissions/tests/test_api.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from rest_framework import status
from mayan.apps.rest_api.tests.base import BaseAPITestCase
from mayan.apps.user_management.tests.mixins import GroupTestMixin
from mayan.apps.user_management.permissions import (
permission_group_edit, permission_group_view
)
from ..classes import Permission
from ..events import event_role_created, event_role_edited
from ..models import Role
from ..permissions import (
permission_role_create, permission_role_delete,
permission_role_edit, permission_role_view
)
from .mixins import (
PermissionAPIViewTestMixin, RoleAPIViewTestMixin,
RoleGroupAPIViewTestMixin, RolePermissionAPIViewTestMixin, RoleTestMixin
)
class PermissionAPIViewTestCase(PermissionAPIViewTestMixin, BaseAPITestCase):
    """API tests for the permissions list endpoint."""

    def setUp(self):
        super().setUp()
        # Permissions are cached per process; invalidate so this test run
        # sees a freshly populated registry.
        Permission.invalidate_cache()

    def test_permissions_list_api_view(self):
        """Listing permissions needs no specific permission and emits no events."""
        self._clear_events()

        response = self._request_permissions_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
class RoleAPIViewTestCase(
GroupTestMixin, RoleAPIViewTestMixin, RoleTestMixin, BaseAPITestCase
):
    def test_role_create_api_view_no_permission(self):
        """Without the role-create permission the API must return 403 and
        create no role or events."""
        role_count = Role.objects.count()

        self._clear_events()

        response = self._request_test_role_create_api_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

        self.assertEqual(Role.objects.count(), role_count)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_role_create_api_view_with_permission(self):
        """With the role-create permission the API creates the role and emits
        a single role-created event attributed to the acting user."""
        self.grant_permission(permission=permission_role_create)

        role_count = Role.objects.count()

        self._clear_events()

        response = self._request_test_role_create_api_view()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        self.assertEqual(Role.objects.count(), role_count + 1)

        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(events[0].action_object, None)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_role)
        self.assertEqual(events[0].verb, event_role_created.id)
    def test_role_delete_api_view_no_permission(self):
        """Without access the role must be hidden (404, not 403) and remain
        undeleted; no events are emitted."""
        self._create_test_role()

        role_count = Role.objects.count()

        self._clear_events()

        response = self._request_test_role_delete_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.assertEqual(Role.objects.count(), role_count)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
def test_role_delete_api_view_with_access(self):
self._create_test_role()
self.grant_access(obj=self.test_role, permission=permission_role_delete)
role_count = Role.objects.count()
self._clear_events()
response = self._request_test_role_delete_api_view()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(Role.objects.count(), role_count - 1)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_role_edit_api_view_via_patch_no_permission(self):
self._create_test_role()
role_label = self.test_role.label
self._clear_events()
response = self._request_test_role_edit_api_view(request_type='patch')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.test_role.refresh_from_db()
self.assertEqual(self.test_role.label, role_label)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_role_edit_api_view_via_patch_with_access(self):
self._create_test_role()
self.grant_access(obj=self.test_role, permission=permission_role_edit)
role_label = self.test_role.label
self._clear_events()
response = self._request_test_role_edit_api_view(request_type='patch')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.test_role.refresh_from_db()
self.assertNotEqual(self.test_role.label, role_label)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_role)
self.assertEqual(events[0].verb, event_role_edited.id)
def test_role_edit_api_view_via_put_no_permission(self):
self._create_test_role()
self._clear_events()
response = self._request_test_role_edit_api_view(request_type='put')
role_label = self.test_role.label
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.test_role.refresh_from_db()
self.assertEqual(self.test_role.label, role_label)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_role_edit_api_view_via_put_with_access(self):
self._create_test_role()
self.grant_access(obj=self.test_role, permission=permission_role_edit)
role_label = self.test_role.label
self._clear_events()
response = self._request_test_role_edit_api_view(request_type='put')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.test_role.refresh_from_db()
self.assertNotEqual(self.test_role.label, role_label)
events = self._get_test_events()
self.assertEqual(events.count(), 1)
self.assertEqual(events[0].action_object, None)
self.assertEqual(events[0].actor, self._test_case_user)
self.assertEqual(events[0].target, self.test_role)
self.assertEqual(events[0].verb, event_role_edited.id)
def test_roles_list_api_view_no_permission(self):
self._create_test_role()
self._clear_events()
response = self._request_test_role_list_api_view()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
def test_roles_list_api_view_with_access(self):
self._create_test_role()
self.grant_access(
obj=self.test_role, permission=permission_role_view
)
self._clear_events()
response = self._request_test_role_list_api_view()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
self.assertEqual(
response.data['results'][0]['label'], self.test_role.label
)
events = self._get_test_events()
self.assertEqual(events.count(), 0)
class RoleGroupAPIViewTestCase(
    GroupTestMixin, RoleTestMixin, RoleGroupAPIViewTestMixin,
    BaseAPITestCase
):
    """API tests for adding, listing, and removing groups of a role.

    Membership assertions use assertIn/assertNotIn instead of
    assertTrue/assertFalse on an `in` expression for clearer failure
    messages.
    """
    auto_create_role_test_object = True

    def setUp(self):
        super().setUp()
        self._create_test_role()
        self._create_test_group()

    def test_role_group_add_api_view_no_group(self):
        self._clear_events()

        response = self._request_test_role_group_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.assertNotIn(self.test_group, self.test_role.groups.all())

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_add_api_view_with_group_access(self):
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )

        self._clear_events()

        response = self._request_test_role_group_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.assertNotIn(self.test_group, self.test_role.groups.all())

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_add_api_view_with_role_access(self):
        self.grant_access(
            obj=self.test_role, permission=permission_role_edit
        )

        self._clear_events()

        response = self._request_test_role_group_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.assertNotIn(self.test_group, self.test_role.groups.all())

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_add_api_view_with_full_access(self):
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self.grant_access(
            obj=self.test_role, permission=permission_role_edit
        )

        self._clear_events()

        response = self._request_test_role_group_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.assertIn(self.test_group, self.test_role.groups.all())

        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(events[0].action_object, self.test_group)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_role)
        self.assertEqual(events[0].verb, event_role_edited.id)

    def test_role_group_list_api_view_no_group(self):
        self.test_role.groups.add(self.test_group)

        self._clear_events()

        response = self._request_test_role_group_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_list_api_view_with_group_access(self):
        self.test_role.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_group, permission=permission_group_view
        )

        self._clear_events()

        response = self._request_test_role_group_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_list_api_view_with_role_access(self):
        self.test_role.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_role, permission=permission_role_view
        )

        self._clear_events()

        response = self._request_test_role_group_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 0)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_list_api_view_with_full_access(self):
        self.test_role.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_group, permission=permission_group_view
        )
        self.grant_access(
            obj=self.test_role, permission=permission_role_view
        )

        self._clear_events()

        response = self._request_test_role_group_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['results'][0]['id'],
            self.test_group.pk
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_remove_api_view_no_group(self):
        self.test_role.groups.add(self.test_group)

        role_group_count = self.test_role.groups.count()

        self._clear_events()

        response = self._request_test_role_group_remove_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.assertEqual(
            self.test_role.groups.count(), role_group_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_remove_api_view_with_group_access(self):
        self.test_role.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )

        role_group_count = self.test_role.groups.count()

        self._clear_events()

        response = self._request_test_role_group_remove_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.assertEqual(
            self.test_role.groups.count(), role_group_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_remove_api_view_with_role_access(self):
        self.test_role.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_role, permission=permission_role_edit
        )

        role_group_count = self.test_role.groups.count()

        self._clear_events()

        response = self._request_test_role_group_remove_api_view()
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.assertEqual(
            self.test_role.groups.count(), role_group_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_group_remove_api_view_with_full_access(self):
        self.test_role.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self.grant_access(
            obj=self.test_role, permission=permission_role_edit
        )

        role_group_count = self.test_role.groups.count()

        self._clear_events()

        response = self._request_test_role_group_remove_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.assertEqual(
            self.test_role.groups.count(), role_group_count - 1
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(events[0].action_object, self.test_group)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_role)
        self.assertEqual(events[0].verb, event_role_edited.id)
class RolePermissionAPIViewTestCase(
    RoleTestMixin, RolePermissionAPIViewTestMixin, BaseAPITestCase
):
    """API tests for adding, listing, and removing permissions of a role.

    Membership assertions use assertIn/assertNotIn instead of
    assertTrue/assertFalse on an `in` expression for clearer failure
    messages.
    """
    auto_create_role_test_object = True

    def setUp(self):
        super().setUp()
        self._create_test_role()
        self._create_test_permission()

    def test_role_permission_add_api_view_no_permission(self):
        self._clear_events()

        response = self._request_test_role_permission_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.assertNotIn(
            self.test_permission.stored_permission,
            self.test_role.permissions.all()
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_permission_add_api_view_with_access(self):
        self.grant_access(
            obj=self.test_role, permission=permission_role_edit
        )

        self._clear_events()

        response = self._request_test_role_permission_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.assertIn(
            self.test_permission.stored_permission,
            self.test_role.permissions.all()
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(
            events[0].action_object, self.test_permission.stored_permission
        )
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_role)
        self.assertEqual(events[0].verb, event_role_edited.id)

    def test_role_permission_list_api_view_no_permission(self):
        self.test_role.permissions.add(self.test_permission.stored_permission)

        self._clear_events()

        response = self._request_test_role_permission_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_permission_list_api_view_with_access(self):
        self.test_role.permissions.add(self.test_permission.stored_permission)
        self.grant_access(
            obj=self.test_role, permission=permission_role_view
        )

        self._clear_events()

        response = self._request_test_role_permission_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['results'][0]['pk'],
            self.test_permission.pk
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_permission_remove_api_view_no_permission(self):
        self.test_role.permissions.add(
            self.test_permission.stored_permission
        )

        role_permission_count = self.test_role.permissions.count()

        self._clear_events()

        response = self._request_test_role_permission_remove_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.assertEqual(
            self.test_role.permissions.count(), role_permission_count
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_role_permission_remove_api_view_with_access(self):
        self.test_role.permissions.add(
            self.test_permission.stored_permission
        )
        self.grant_access(
            obj=self.test_role, permission=permission_role_edit
        )

        role_permission_count = self.test_role.permissions.count()

        self._clear_events()

        response = self._request_test_role_permission_remove_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.assertEqual(
            self.test_role.permissions.count(), role_permission_count - 1
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(
            events[0].action_object, self.test_permission.stored_permission
        )
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_role)
        self.assertEqual(events[0].verb, event_role_edited.id)
| 33.482394 | 87 | 0.678042 | 2,307 | 19,018 | 5.171218 | 0.045947 | 0.085163 | 0.06337 | 0.055909 | 0.917016 | 0.905448 | 0.894719 | 0.879715 | 0.875189 | 0.870495 | 0 | 0.010563 | 0.23341 | 19,018 | 567 | 88 | 33.541446 | 0.807737 | 0 | 0 | 0.721228 | 0 | 0 | 0.003306 | 0 | 0 | 0 | 0 | 0 | 0.286445 | 1 | 0.081841 | false | 0 | 0.023018 | 0 | 0.120205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
72cfc984aae4d5fe8985de1de1dd314b97699ab6 | 21,573 | py | Python | tests/wrappers/test_lambda_wrapper.py | clericeon/epsagon-python | 387b785708d5b6ac0e8a9f8562c52f56d0825cdf | [
"MIT"
] | null | null | null | tests/wrappers/test_lambda_wrapper.py | clericeon/epsagon-python | 387b785708d5b6ac0e8a9f8562c52f56d0825cdf | [
"MIT"
] | null | null | null | tests/wrappers/test_lambda_wrapper.py | clericeon/epsagon-python | 387b785708d5b6ac0e8a9f8562c52f56d0825cdf | [
"MIT"
] | null | null | null | import json
import mock
import pytest
import warnings
import epsagon.wrappers.aws_lambda
from epsagon import trace_factory
from epsagon.trace import FAILED_TO_SERIALIZE_MESSAGE
from epsagon.runners.aws_lambda import LambdaRunner, StepLambdaRunner
import epsagon.constants
from .common import get_tracer_patch_kwargs
trace_mock = mock.MagicMock()
def setup_function(func):
    # Pytest per-test hook: reset tracing to single-trace mode, restore the
    # shared trace mock's configured attributes, and mark the next wrapped
    # invocation as a cold start.
    trace_factory.use_single_trace = True
    trace_mock.configure_mock(**get_tracer_patch_kwargs())
    epsagon.constants.COLD_START = True
def _get_runner_event(trace_mock, runner_type=LambdaRunner):
    """Return the first *runner_type* instance passed to trace_mock.set_runner.

    Fails the calling test if no matching runner was recorded.
    """
    for call_args, _ in trace_mock.set_runner.call_args_list:
        candidate = call_args[0]
        if isinstance(candidate, runner_type):
            return candidate
    assert False, "No runner found"
class Context(object):
    """Stand-in for the AWS Lambda context object handed to handlers."""
    aws_request_id = 'test_request_id'
    function_name = 'TestFunction'
    log_stream_name = 'test_stream'
    log_group_name = 'test_group'
    function_version = 'test_version'
    memory_limit_in_mb = '1024'
    invoked_function_arn = (
        'arn:aws:lambda:us-east-1:123456789012:function:TestFunction'
    )


# Tests access the class itself (attributes are class-level), same as the
# original `type('Context', (object,), {...})` construction.
CONTEXT_STUB = Context
# aws_lambda tests
@mock.patch.object(LambdaRunner, 'set_exception')
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
def test_lambda_wrapper_sanity(
    trigger_factory_mock,
    _,
    set_exception_mock
):
    """Happy path: wrapper returns the handler result and sends a trace."""
    retval = 'success'

    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        return 'success'

    assert wrapped_lambda('a', CONTEXT_STUB) == 'success'
    trace_mock.prepare.assert_called()
    runner = _get_runner_event(trace_mock)
    trigger_factory_mock.assert_called()
    set_exception_mock.assert_not_called()
    trace_mock.set_timeout_handler.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    # The first invocation consumes the cold start flag.
    assert not epsagon.constants.COLD_START
    assert runner.resource['metadata']['return_value'] == retval
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
def test_lambda_wrapper_lambda_exception(trigger_factory_mock, _):
    """A handler exception is recorded on the runner and re-raised."""
    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        raise TypeError('test')

    lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
    with mock.patch(
        'epsagon.runners.aws_lambda.LambdaRunner',
        side_effect=[lambda_runner_mock]
    ):
        with pytest.raises(TypeError):
            wrapped_lambda('a', CONTEXT_STUB)

    trigger_factory_mock.assert_called()
    lambda_runner_mock.set_exception.assert_called()
    trace_mock.prepare.assert_called()
    trace_mock.add_event.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    assert not epsagon.constants.COLD_START
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
def test_lambda_wrapper_lambda_exception_args(_):
    """
    Tests that when user invoking Lambda's handler manually with kwargs,
    trace won't be sent, and return value is ok.
    """
    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        return 'success'

    lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
    with mock.patch(
        'epsagon.runners.aws_lambda.LambdaRunner',
        side_effect=[lambda_runner_mock]
    ):
        # Keyword-argument invocation bypasses instrumentation entirely.
        assert wrapped_lambda(event='a', context='b') == 'success'

    trace_mock.prepare.assert_called()
    trace_mock.add_event.assert_not_called()
    trace_mock.send_traces.assert_not_called()
    trace_mock.add_exception.assert_not_called()
    # No instrumentation ran, so the cold start flag is untouched.
    assert epsagon.constants.COLD_START
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=TypeError()
)
def test_lambda_wrapper_trigger_exception(trigger_factory_mock, _):
    """Trigger factory failure is swallowed and recorded on the trace."""
    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        pass

    lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
    with mock.patch(
        'epsagon.runners.aws_lambda.LambdaRunner',
        side_effect=[lambda_runner_mock]
    ):
        wrapped_lambda('a', CONTEXT_STUB)

    trigger_factory_mock.assert_called()
    lambda_runner_mock.set_exception.assert_not_called()
    trace_mock.prepare.assert_called()
    trace_mock.set_runner.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_called()
    assert not epsagon.constants.COLD_START
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.wrappers.python_function.wrap_python_function',
    side_effect=['success']
)
def test_lambda_wrapper_none_context(wrap_python_function_wrapper, _):
    """None context: warn once and fall back to wrap_python_function."""
    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        # Doesn't matter, we are mocking wrap_python_function
        return 'something'

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
        with mock.patch(
            'epsagon.runners.aws_lambda.LambdaRunner',
            side_effect=TypeError()
        ):
            assert wrapped_lambda('a', None) == 'success'
            assert len(w) == 1

    trace_mock.prepare.assert_called()
    wrap_python_function_wrapper.assert_called()
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.wrappers.python_function.wrap_python_function',
    side_effect=['success']
)
def test_lambda_wrapper_lambda_runner_factory_failed(
        wrap_python_function_wrapper, _):
    """Runner construction failure: warn and fall back to wrap_python_function."""
    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        # Doesn't matter, we are mocking wrap_python_function
        return 'something'

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        with mock.patch(
            'epsagon.runners.aws_lambda.LambdaRunner',
            side_effect=TypeError()
        ):
            assert wrapped_lambda('a', CONTEXT_STUB) == 'success'
            assert len(w) == 1

    trace_mock.prepare.assert_called()
    wrap_python_function_wrapper.assert_called()
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
def test_lambda_wrapper_result_status_code(_):
    """A dict result with 'statusCode' is copied into the runner metadata."""
    result = {'statusCode': 200}

    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_function(event, context):
        return result

    assert wrapped_function('a', CONTEXT_STUB) == result
    trace_mock.prepare.assert_called_once()
    runner = _get_runner_event(trace_mock)
    trace_mock.send_traces.assert_called_once()
    trace_mock.add_exception.assert_not_called()
    assert (
        runner.resource['metadata']['status_code'] == result['statusCode']
    )
# step_lambda_wrapper tests
@mock.patch.object(StepLambdaRunner, 'set_exception')
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
def test_step_lambda_wrapper_sanity_first_step(
    trigger_factory_mock,
    _,
    set_exception_mock
):
    """First step: an Epsagon dict with step_num 0 and an id is injected."""
    retval = {'result': 'success'}

    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_lambda(event, context):
        return retval

    result = wrapped_lambda('a', CONTEXT_STUB)
    assert ('result', 'success',) in result.items()
    assert 'Epsagon' in result
    assert ('step_num', 0,) in result['Epsagon'].items()
    assert 'id' in result['Epsagon']
    trigger_factory_mock.assert_called()
    set_exception_mock.assert_not_called()
    trace_mock.prepare.assert_called()
    trace_mock.add_event.assert_called()
    trace_mock.set_runner.assert_called()
    trace_mock.set_timeout_handler.assert_called()
    runner = _get_runner_event(trace_mock, runner_type=StepLambdaRunner)
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    assert not epsagon.constants.COLD_START
    assert runner.resource['metadata']['return_value'] == retval
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
def test_step_lambda_wrapper_sanity_not_first_step(trigger_factory_mock,
                                                   _):
    """Subsequent step: step_num from the incoming event is incremented."""
    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_lambda(event, context):
        return {'result': 'success'}

    lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
    with mock.patch(
        'epsagon.runners.aws_lambda.StepLambdaRunner',
        side_effect=[lambda_runner_mock]
    ):
        result = wrapped_lambda(
            {'a': 'a', 'Epsagon': {'step_num': 1, 'id': 1}},
            CONTEXT_STUB
        )
        assert ('result', 'success',) in result.items()
        assert 'Epsagon' in result
        # Incoming step_num 1 becomes 2 in the result.
        assert ('step_num', 2,) in result['Epsagon'].items()
        assert 'id' in result['Epsagon']

    trigger_factory_mock.assert_called()
    lambda_runner_mock.set_exception.assert_not_called()
    trace_mock.prepare.assert_called()
    trace_mock.add_event.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    assert not epsagon.constants.COLD_START
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
# NOTE(review): 'unqiue' typo kept in the name — renaming would change the
# collected test id.
def test_step_lambda_wrapper_sanity_not_first_step_unqiue_path(trigger_factory_mock,
                                                               _):
    """Epsagon metadata nested under a custom result key is also updated."""
    unique_path = 'data'

    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_lambda(event, context):
        return {'result': 'success', unique_path: {'test': ' test'}}

    lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
    with mock.patch(
        'epsagon.runners.aws_lambda.StepLambdaRunner',
        side_effect=[lambda_runner_mock]
    ):
        result = wrapped_lambda(
            {'a': 'a', unique_path: {'Epsagon': {'step_num': 1, 'id': 1}}},
            CONTEXT_STUB
        )
        assert ('result', 'success',) in result.items()
        assert 'Epsagon' in result[unique_path]
        assert ('step_num', 2,) in result[unique_path]['Epsagon'].items()
        assert 'id' in result[unique_path]['Epsagon']

    trigger_factory_mock.assert_called()
    lambda_runner_mock.set_exception.assert_not_called()
    trace_mock.prepare.assert_called()
    trace_mock.add_event.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    assert not epsagon.constants.COLD_START
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
def test_step_lambda_wrapper_wrapped_function_doesnt_return_object(
        trigger_factory_mock, _):
    """Non-dict results pass through unchanged (no Epsagon metadata added)."""
    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_lambda(event, context):
        return 'success'

    lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
    with mock.patch(
        'epsagon.runners.aws_lambda.StepLambdaRunner',
        side_effect=[lambda_runner_mock]
    ):
        assert wrapped_lambda(
            {'a': 'a', 'Epsagon': {'step_num': 1, 'id': 1}},
            CONTEXT_STUB
        ) == 'success'

    trigger_factory_mock.assert_called()
    lambda_runner_mock.set_exception.assert_not_called()
    trace_mock.prepare.assert_called()
    trace_mock.add_event.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    assert not epsagon.constants.COLD_START
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
def test_step_lambda_wrapper_lambda_exception(trigger_factory_mock, _):
    """A handler exception is recorded on the step runner and re-raised."""
    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_lambda(event, context):
        raise TypeError('test')

    lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
    with mock.patch(
        'epsagon.runners.aws_lambda.StepLambdaRunner',
        side_effect=[lambda_runner_mock]
    ):
        with pytest.raises(TypeError):
            wrapped_lambda('a', CONTEXT_STUB)

    trigger_factory_mock.assert_called()
    lambda_runner_mock.set_exception.assert_called()
    trace_mock.prepare.assert_called()
    trace_mock.add_event.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    assert not epsagon.constants.COLD_START
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=TypeError()
)
def test_step_lambda_wrapper_trigger_exception(trigger_factory_mock,
                                               _):
    """Trigger factory failure is swallowed and recorded on the trace."""
    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_lambda(event, context):
        pass

    lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
    with mock.patch(
        'epsagon.runners.aws_lambda.StepLambdaRunner',
        side_effect=[lambda_runner_mock]
    ):
        wrapped_lambda('a', CONTEXT_STUB)

    trigger_factory_mock.assert_called()
    lambda_runner_mock.set_exception.assert_not_called()
    trace_mock.prepare.assert_called()
    trace_mock.set_runner.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_called()
    assert not epsagon.constants.COLD_START
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.wrappers.python_function.wrap_python_function',
    side_effect=['success']
)
def test_step_lambda_wrapper_none_context(wrap_python_function_wrapper,
                                          _):
    """None context: warn once and fall back to wrap_python_function."""
    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_lambda(event, context):
        # Doesn't matter, we are mocking wrap_python_function
        return 'something'

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        lambda_runner_mock = mock.MagicMock(set_exception=mock.MagicMock())
        with mock.patch(
            'epsagon.runners.aws_lambda.StepLambdaRunner',
            side_effect=TypeError()
        ):
            assert wrapped_lambda('a', None) == 'success'
            assert len(w) == 1

    trace_mock.prepare.assert_called()
    wrap_python_function_wrapper.assert_called()
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.wrappers.python_function.wrap_python_function',
    side_effect=['success']
)
def test_step_lambda_wrapper_lambda_runner_factory_failed(
        wrap_python_function_wrapper, _):
    """Runner construction failure: warn and fall back to wrap_python_function."""
    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_lambda(event, context):
        # Doesn't matter, we are mocking wrap_python_function
        return 'something'

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        with mock.patch(
            'epsagon.runners.aws_lambda.StepLambdaRunner',
            side_effect=TypeError()
        ):
            assert wrapped_lambda('a', CONTEXT_STUB) == 'success'
            assert len(w) == 1

    trace_mock.prepare.assert_called()
    wrap_python_function_wrapper.assert_called()
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
def test_step_lambda_wrapper_result_status_code(_):
    """A dict result with 'statusCode' is copied into the runner metadata."""
    result = {'statusCode': 200}

    @epsagon.wrappers.aws_lambda.step_lambda_wrapper
    def wrapped_function(event, context):
        return result

    assert wrapped_function('a', CONTEXT_STUB) == result
    trace_mock.prepare.assert_called_once()
    runner = _get_runner_event(trace_mock, runner_type=StepLambdaRunner)
    trace_mock.send_traces.assert_called_once()
    trace_mock.add_exception.assert_not_called()
    assert (
        runner.resource['metadata']['status_code'] ==
        result['statusCode']
    )
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
def test_lambda_wrapper_with_alias_arn(
    _,
):
    """An alias suffix on the invoked ARN is exposed as function_alias."""
    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        return 'success'

    # CONTEXT_STUB is shared between tests, so restore the original ARN
    # after the invocation.
    orig_arn = CONTEXT_STUB.invoked_function_arn
    CONTEXT_STUB.invoked_function_arn = (
        'arn:aws:lambda:us-east-1:123456789012:function:TestFunction:test_alias'
    )
    assert wrapped_lambda('a', CONTEXT_STUB) == 'success'
    CONTEXT_STUB.invoked_function_arn = orig_arn
    trace_mock.prepare.assert_called()
    runner = _get_runner_event(trace_mock)
    assert runner.resource['metadata']['function_alias'] == 'test_alias'
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
def test_lambda_wrapper_single_thread(_):
    """The wrapper keeps the trace factory in single-trace mode."""
    retval = 'success'

    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        return retval

    assert wrapped_lambda('a', CONTEXT_STUB) == retval
    assert trace_factory.use_single_trace
def test_lambda_wrapper_avoid_multi_wrap():
    """Wrapping an already-instrumented handler must return it unchanged."""
    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(event, context):
        return 'success'

    new_wrapped = epsagon.wrappers.aws_lambda.lambda_wrapper(wrapped_lambda)
    # Identity is the intent here: `is` replaces the original `== True` /
    # `==` comparisons, which only passed incidentally via equality.
    assert getattr(wrapped_lambda, '__instrumented__', False) is True
    assert new_wrapped is wrapped_lambda
@mock.patch.object(LambdaRunner, 'set_exception')
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
def test_propagate_lambda_id_to_dict_sanity(
    trigger_factory_mock,
    _,
    set_exception_mock
):
    """With propagate_lambda_id, the request id is injected into dict results."""
    retval = {
        'hello': 2,
        epsagon.constants.EPSAGON_EVENT_ID_KEY: 'test_request_id'
    }
    trace_mock.propagate_lambda_id = True

    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(_event, _context):
        return {'hello': 2}

    assert wrapped_lambda('a', CONTEXT_STUB) == retval
    # Reset the shared mock so later tests are unaffected.
    trace_mock.propagate_lambda_id = False
    trace_mock.prepare.assert_called()
    runner = _get_runner_event(trace_mock)
    trigger_factory_mock.assert_called()
    set_exception_mock.assert_not_called()
    trace_mock.set_timeout_handler.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    assert not epsagon.constants.COLD_START
    assert runner.resource['metadata']['return_value'] == retval
@mock.patch.object(LambdaRunner, 'set_exception')
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'epsagon.triggers.aws_lambda.LambdaTriggerFactory.factory',
    side_effect=['trigger']
)
def test_skip_propagate_lambda_id_to_non_dict_sanity(
    trigger_factory_mock,
    _,
    set_exception_mock
):
    """Non-dict results are returned unchanged even with propagate_lambda_id."""
    retval = 'hey'
    trace_mock.propagate_lambda_id = True

    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(_event, _context):
        return 'hey'

    assert wrapped_lambda('a', CONTEXT_STUB) == retval
    # Reset the shared mock so later tests are unaffected.
    trace_mock.propagate_lambda_id = False
    trace_mock.prepare.assert_called()
    runner = _get_runner_event(trace_mock)
    trigger_factory_mock.assert_called()
    set_exception_mock.assert_not_called()
    trace_mock.set_timeout_handler.assert_called()
    trace_mock.send_traces.assert_called()
    trace_mock.add_exception.assert_not_called()
    assert not epsagon.constants.COLD_START
    assert runner.resource['metadata']['return_value'] == retval
@mock.patch(
    'epsagon.trace.trace_factory.get_or_create_trace',
    side_effect=lambda: trace_mock
)
@mock.patch(
    'time.time',
    return_value=1.5
)
def test_cold_start_duration(_, __):
    """Cold start duration should equal time.time() - COLD_START_TIME.

    ``time.time`` is mocked to 1.5 and ``COLD_START_TIME`` forced to 0, so
    the runner metadata is expected to report a 1.5 second cold start.
    """
    @epsagon.wrappers.aws_lambda.lambda_wrapper
    def wrapped_lambda(_event, _context):
        return ''

    epsagon.constants.COLD_START_TIME = 0
    assert epsagon.constants.COLD_START
    wrapped_lambda('a', CONTEXT_STUB)
    # The first invocation consumes the cold start flag.
    assert not epsagon.constants.COLD_START
    runner = _get_runner_event(trace_mock)
    assert runner.resource['metadata']['aws.lambda.cold_start_duration'] == 1.5
| 30.556657 | 93 | 0.718259 | 2,636 | 21,573 | 5.495827 | 0.064492 | 0.060882 | 0.053013 | 0.049286 | 0.903362 | 0.892179 | 0.874439 | 0.8616 | 0.855111 | 0.843791 | 0 | 0.003226 | 0.181013 | 21,573 | 705 | 94 | 30.6 | 0.816731 | 0.016919 | 0 | 0.75 | 0 | 0 | 0.165038 | 0.116603 | 0 | 0 | 0 | 0 | 0.253571 | 1 | 0.082143 | false | 0.003571 | 0.017857 | 0.032143 | 0.133929 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
72ed243c2564536322fe3e9adfb400eaf1dc1cde | 11,300 | py | Python | tests/hwsim/test_sta_dynamic.py | humdingerb/wpa_supplicant | 6b583bf48948a573421f94cc1263f921dba23ecc | [
"Unlicense"
] | 45 | 2021-07-01T10:10:07.000Z | 2022-03-07T06:52:42.000Z | tests/hwsim/test_sta_dynamic.py | kareem-wolfssl/wolfssl_hostapd | df2d4bae478c99086db2decc662ef440079fa63f | [
"Unlicense"
] | 8 | 2021-12-10T10:58:03.000Z | 2022-03-15T10:28:16.000Z | tests/hwsim/test_sta_dynamic.py | kareem-wolfssl/wolfssl_hostapd | df2d4bae478c99086db2decc662ef440079fa63f | [
"Unlicense"
] | 11 | 2016-08-02T20:01:01.000Z | 2022-02-21T09:51:05.000Z | # Dynamic wpa_supplicant interface
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import subprocess
import time
import hwsim_utils
import hostapd
from wpasupplicant import WpaSupplicant
def test_sta_dynamic(dev, apdev):
    """Dynamically added wpa_supplicant interface"""
    # WPA2-PSK AP that the dynamically created station interface will join.
    params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678")
    hostapd.add_ap(apdev[0], params)

    logger.info("Create a dynamic wpa_supplicant interface and connect")
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412")
def test_sta_ap_scan_0(dev, apdev):
    """Dynamically added wpa_supplicant interface with AP_SCAN 0 connection"""
    hostapd.add_ap(apdev[0], {"ssid": "test"})
    bssid = apdev[0]['bssid']

    logger.info("Create a dynamic wpa_supplicant interface and connect")
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")

    if "OK" not in wpas.request("AP_SCAN 0"):
        # Fixed error text: this branch configures AP_SCAN 0, not AP_SCAN 2.
        raise Exception("Failed to set AP_SCAN 0")
    id = wpas.connect("", key_mgmt="NONE", bssid=bssid,
                      only_add_network=True)
    wpas.request("ENABLE_NETWORK " + str(id) + " no-connect")
    wpas.request("SCAN")
    time.sleep(0.5)
    # In AP_SCAN 0 mode association is driven externally; trigger it via iw.
    subprocess.call(['iw', wpas.ifname, 'connect', 'test', '2412'])
    wpas.wait_connected(timeout=10)
    # A scan while connected must not disrupt the association.
    wpas.request("SCAN")
    wpas.wait_connected(timeout=5)
def test_sta_ap_scan_2(dev, apdev):
    """Dynamically added wpa_supplicant interface with AP_SCAN 2 connection"""
    hostapd.add_ap(apdev[0], {"ssid": "test"})
    bssid = apdev[0]['bssid']

    logger.info("Create a dynamic wpa_supplicant interface and connect")
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")

    # Out-of-range AP_SCAN values must be rejected before setting mode 2.
    if "FAIL" not in wpas.request("AP_SCAN -1"):
        raise Exception("Invalid AP_SCAN -1 accepted")
    if "FAIL" not in wpas.request("AP_SCAN 3"):
        raise Exception("Invalid AP_SCAN 3 accepted")
    if "OK" not in wpas.request("AP_SCAN 2"):
        raise Exception("Failed to set AP_SCAN 2")
    id = wpas.connect("", key_mgmt="NONE", bssid=bssid,
                      only_add_network=True)
    wpas.request("ENABLE_NETWORK " + str(id) + " no-connect")
    # Drive scan and association externally through iw (AP_SCAN 2 mode).
    subprocess.call(['iw', wpas.ifname, 'scan', 'trigger', 'freq', '2412'])
    time.sleep(1)
    subprocess.call(['iw', wpas.ifname, 'connect', 'test', '2412'])
    wpas.wait_connected(timeout=10)

    # Disallowing the BSSID must disconnect and block reconnection attempts.
    wpas.request("SET disallow_aps bssid " + bssid)
    wpas.wait_disconnected(timeout=10)
    subprocess.call(['iw', wpas.ifname, 'connect', 'test', '2412'])
    ev = wpas.wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
    if ev is not None:
        raise Exception("Unexpected connection reported")
def test_sta_ap_scan_2b(dev, apdev):
    """Dynamically added wpa_supplicant interface with AP_SCAN 2 operation"""
    hapd = hostapd.add_ap(apdev[0], {"ssid": "test"})
    bssid = apdev[0]['bssid']

    logger.info("Create a dynamic wpa_supplicant interface and connect")
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    # force_connect_cmd=1: presumably forces the driver wrapper onto the
    # connect-command path instead of auth/assoc — TODO confirm.
    wpas.interface_add("wlan5", drv_params="force_connect_cmd=1")

    if "OK" not in wpas.request("AP_SCAN 2"):
        raise Exception("Failed to set AP_SCAN 2")
    id = wpas.connect("test", key_mgmt="NONE", bssid=bssid)
    wpas.request("DISCONNECT")
    wpas.set_network(id, "disabled", "1")

    # Add a second network for an SSID no AP is advertising; trying to use
    # it must produce an association rejection event.
    id2 = wpas.add_network()
    wpas.set_network_quoted(id2, "ssid", "test2")
    wpas.set_network(id2, "key_mgmt", "NONE")
    wpas.set_network(id2, "disabled", "0")
    wpas.request("REASSOCIATE")
    ev = wpas.wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=15)
    if ev is None:
        raise Exception("Association rejection not reported")

    # Disable the AP and re-enable the first network: every retry must be
    # reported as an association rejection.
    hapd.disable()
    wpas.set_network(id, "disabled", "0")
    wpas.set_network(id2, "disabled", "1")
    for i in range(3):
        ev = wpas.wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=15)
        if ev is None:
            raise Exception("Association rejection not reported")
    wpas.request("DISCONNECT")
def test_sta_dynamic_down_up(dev, apdev):
    """Dynamically added wpa_supplicant interface down/up"""
    params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678")
    hapd = hostapd.add_ap(apdev[0], params)

    logger.info("Create a dynamic wpa_supplicant interface and connect")
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412")
    hapd.wait_sta()
    hwsim_utils.test_connectivity(wpas, hapd)

    # Taking the netdev down must disconnect the station and move
    # wpa_supplicant to the INTERFACE_DISABLED state.
    subprocess.call(['ifconfig', wpas.ifname, 'down'])
    wpas.wait_disconnected(timeout=10)
    if wpas.get_status_field("wpa_state") != "INTERFACE_DISABLED":
        raise Exception("Unexpected wpa_state")
    # Bringing it back up must result in an automatic reconnection.
    subprocess.call(['ifconfig', wpas.ifname, 'up'])
    wpas.wait_connected(timeout=15, error="Reconnection not reported")
    hapd.wait_sta()
    hwsim_utils.test_connectivity(wpas, hapd)
def test_sta_dynamic_ext_mac_addr_change(dev, apdev):
    """Dynamically added wpa_supplicant interface with external MAC address change"""
    params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678")
    hapd = hostapd.add_ap(apdev[0], params)

    logger.info("Create a dynamic wpa_supplicant interface and connect")
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412")
    hapd.wait_sta()
    hwsim_utils.test_connectivity(wpas, hapd)

    # The interface has to be down before the MAC address can be changed.
    subprocess.call(['ifconfig', wpas.ifname, 'down'])
    wpas.wait_disconnected(timeout=10)
    if wpas.get_status_field("wpa_state") != "INTERFACE_DISABLED":
        raise Exception("Unexpected wpa_state")
    prev_addr = wpas.p2p_interface_addr()
    new_addr = '02:11:22:33:44:55'
    try:
        # Change the address behind wpa_supplicant's back and verify that
        # it notices the new address and can reconnect using it.
        subprocess.call(['ip', 'link', 'set', 'dev', wpas.ifname,
                         'address', new_addr])
        subprocess.call(['ifconfig', wpas.ifname, 'up'])
        wpas.wait_connected(timeout=15, error="Reconnection not reported")
        if wpas.get_driver_status_field('addr') != new_addr:
            raise Exception("Address change not reported")
        hapd.wait_sta()
        hwsim_utils.test_connectivity(wpas, hapd)
        sta = hapd.get_sta(new_addr)
        if sta['addr'] != new_addr:
            raise Exception("STA association with new address not found")
    finally:
        # Always restore the original MAC address for following test cases.
        subprocess.call(['ifconfig', wpas.ifname, 'down'])
        subprocess.call(['ip', 'link', 'set', 'dev', wpas.ifname,
                         'address', prev_addr])
        subprocess.call(['ifconfig', wpas.ifname, 'up'])
def test_sta_dynamic_random_mac_addr(dev, apdev):
    """Dynamically added wpa_supplicant interface and random MAC address"""
    params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678")
    hapd = hostapd.add_ap(apdev[0], params)

    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    addr0 = wpas.get_driver_status_field("addr")
    # preassoc_mac_addr 1 / mac_addr="1": fully random local addresses;
    # rand_addr_lifetime 0 forces a fresh address for each new profile.
    wpas.request("SET preassoc_mac_addr 1")
    wpas.request("SET rand_addr_lifetime 0")

    id = wpas.connect("sta-dynamic", psk="12345678", mac_addr="1",
                      scan_freq="2412")
    addr1 = wpas.get_driver_status_field("addr")
    if addr0 == addr1:
        raise Exception("Random MAC address not used")
    # The permanent address must never have been seen by the AP.
    sta = hapd.get_sta(addr0)
    if sta['addr'] != "FAIL":
        raise Exception("Unexpected STA association with permanent address")
    sta = hapd.get_sta(addr1)
    if sta['addr'] != addr1:
        raise Exception("STA association with random address not found")

    # Reconnecting to the same network profile keeps the same random address.
    wpas.request("DISCONNECT")
    wpas.connect_network(id)
    addr2 = wpas.get_driver_status_field("addr")
    if addr1 != addr2:
        raise Exception("Random MAC address changed unexpectedly")

    # A new network profile gets a fresh random address.
    wpas.remove_network(id)
    id = wpas.connect("sta-dynamic", psk="12345678", mac_addr="1",
                      scan_freq="2412")
    addr2 = wpas.get_driver_status_field("addr")
    if addr1 == addr2:
        raise Exception("Random MAC address did not change")
def test_sta_dynamic_random_mac_addr_keep_oui(dev, apdev):
    """Dynamically added wpa_supplicant interface and random MAC address (keep OUI)"""
    params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678")
    hapd = hostapd.add_ap(apdev[0], params)

    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    addr0 = wpas.get_driver_status_field("addr")
    # preassoc_mac_addr 2 / mac_addr="2": random addresses that keep the
    # OUI; rand_addr_lifetime 0 forces a fresh address per new profile.
    wpas.request("SET preassoc_mac_addr 2")
    wpas.request("SET rand_addr_lifetime 0")

    id = wpas.connect("sta-dynamic", psk="12345678", mac_addr="2",
                      scan_freq="2412")
    addr1 = wpas.get_driver_status_field("addr")
    if addr0 == addr1:
        raise Exception("Random MAC address not used")
    # Chars 3..7 cover the second and third address octets; presumably the
    # first octet is excluded because its locally-administered bit gets
    # set — TODO confirm.
    if addr1[3:8] != addr0[3:8]:
        raise Exception("OUI was not kept")
    # The permanent address must never have been seen by the AP.
    sta = hapd.get_sta(addr0)
    if sta['addr'] != "FAIL":
        raise Exception("Unexpected STA association with permanent address")
    sta = hapd.get_sta(addr1)
    if sta['addr'] != addr1:
        raise Exception("STA association with random address not found")

    # Reconnecting to the same network profile keeps the same random address.
    wpas.request("DISCONNECT")
    wpas.connect_network(id)
    addr2 = wpas.get_driver_status_field("addr")
    if addr1 != addr2:
        raise Exception("Random MAC address changed unexpectedly")

    # A new profile gets a fresh random address with the same OUI part.
    wpas.remove_network(id)
    id = wpas.connect("sta-dynamic", psk="12345678", mac_addr="2",
                      scan_freq="2412")
    addr2 = wpas.get_driver_status_field("addr")
    if addr1 == addr2:
        raise Exception("Random MAC address did not change")
    if addr2[3:8] != addr0[3:8]:
        raise Exception("OUI was not kept")
def test_sta_dynamic_random_mac_addr_scan(dev, apdev):
    """Dynamically added wpa_supplicant interface and random MAC address for scan"""
    params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678")
    hapd = hostapd.add_ap(apdev[0], params)

    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    addr0 = wpas.get_driver_status_field("addr")
    # Randomize only pre-association (scan) addresses; the network profile
    # itself does not request a random address (no mac_addr parameter).
    wpas.request("SET preassoc_mac_addr 1")
    wpas.request("SET rand_addr_lifetime 0")
    id = wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412")
    addr1 = wpas.get_driver_status_field("addr")
    # The association itself must still use the permanent address.
    if addr0 != addr1:
        raise Exception("Random MAC address used unexpectedly")
def test_sta_dynamic_random_mac_addr_scan_keep_oui(dev, apdev):
    """Dynamically added wpa_supplicant interface and random MAC address for scan (keep OUI)"""
    params = hostapd.wpa2_params(ssid="sta-dynamic", passphrase="12345678")
    hapd = hostapd.add_ap(apdev[0], params)

    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    wpas.interface_add("wlan5")
    addr0 = wpas.get_driver_status_field("addr")
    # Randomize only pre-association (scan) addresses, keeping the OUI;
    # the network profile itself does not request a random address.
    wpas.request("SET preassoc_mac_addr 2")
    wpas.request("SET rand_addr_lifetime 0")
    id = wpas.connect("sta-dynamic", psk="12345678", scan_freq="2412")
    addr1 = wpas.get_driver_status_field("addr")
    # The association itself must still use the permanent address.
    if addr0 != addr1:
        raise Exception("Random MAC address used unexpectedly")
| 40.213523 | 95 | 0.679735 | 1,516 | 11,300 | 4.895778 | 0.122032 | 0.049043 | 0.050391 | 0.033279 | 0.876583 | 0.830773 | 0.818378 | 0.792239 | 0.760307 | 0.74131 | 0 | 0.037681 | 0.185044 | 11,300 | 280 | 96 | 40.357143 | 0.76827 | 0.075664 | 0 | 0.740909 | 0 | 0 | 0.253802 | 0.004427 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.031818 | 0.027273 | 0 | 0.072727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f40d6258ce9af40ee000f6afaea25b386ebc1928 | 166 | py | Python | plenum/test/cli/test_node_create_all.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/cli/test_node_create_all.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/cli/test_node_create_all.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | 2 | 2017-12-13T21:14:54.000Z | 2021-06-06T15:48:03.000Z | from plenum.test.cli.helper import assertAllNodesCreated
def testNodeCreateAll(cli, validNodeNames, createAllNodes):
    """Verify the createAllNodes fixture created every node in validNodeNames."""
    assertAllNodesCreated(cli, validNodeNames)
| 27.666667 | 59 | 0.837349 | 15 | 166 | 9.266667 | 0.733333 | 0.244604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096386 | 166 | 5 | 60 | 33.2 | 0.926667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.666667 | 1 | 0.333333 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
f47ea232a7948c0d9dc109211e1988a9edc297b7 | 158 | py | Python | generate_gerrit_jenkins_project/__init__.py | tom-010/generate_gerrit_jenkins_project | 499c6f4d040cfd673d96487b0c74b3ad51df5d59 | [
"Apache-2.0"
] | null | null | null | generate_gerrit_jenkins_project/__init__.py | tom-010/generate_gerrit_jenkins_project | 499c6f4d040cfd673d96487b0c74b3ad51df5d59 | [
"Apache-2.0"
] | null | null | null | generate_gerrit_jenkins_project/__init__.py | tom-010/generate_gerrit_jenkins_project | 499c6f4d040cfd673d96487b0c74b3ad51df5d59 | [
"Apache-2.0"
] | null | null | null | from generate_gerrit_jenkins_project.generate_gerrit_jenkins_project import generate_gerrit_jenkins_project
def main():
    # Thin console entry point; delegates to the package-level function.
    generate_gerrit_jenkins_project()
be7d0e95ffe734f08ea7349cb5a6d6af6eb8b86a | 170 | py | Python | madness/team.py | turtlebayjai/madness | 19268ffe3fc20f048018656e5dd990ace8f5855a | [
"MIT"
] | null | null | null | madness/team.py | turtlebayjai/madness | 19268ffe3fc20f048018656e5dd990ace8f5855a | [
"MIT"
] | null | null | null | madness/team.py | turtlebayjai/madness | 19268ffe3fc20f048018656e5dd990ace8f5855a | [
"MIT"
] | null | null | null | class Team:
def __init__(self, seed, name=None):
self.seed = seed
self.name = name
def __str__(self):
return f"({self.seed}){self.name}"
| 21.25 | 42 | 0.570588 | 23 | 170 | 3.869565 | 0.478261 | 0.269663 | 0.269663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.288235 | 170 | 7 | 43 | 24.285714 | 0.735537 | 0 | 0 | 0 | 0 | 0 | 0.141176 | 0.141176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.166667 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
beb4c3f5025bd69d3fcf662efb0d6d4fe9dae7d3 | 257 | py | Python | tests/parser/bug.19.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/bug.19.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/bug.19.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | input = """
a(2).
b(1,2).
c(2).
q(X,Y) :- a(X), c(Y).
p(X,Y,Z) :- a(X), q(Y,Z), m(X,Z).
m(X,Y) :- a(Z), p(Z,X,Y).
"""
output = """
a(2).
b(1,2).
c(2).
q(X,Y) :- a(X), c(Y).
p(X,Y,Z) :- a(X), q(Y,Z), m(X,Z).
m(X,Y) :- a(Z), p(Z,X,Y).
"""
| 12.238095 | 34 | 0.315175 | 72 | 257 | 1.125 | 0.180556 | 0.197531 | 0.148148 | 0.098765 | 0.864198 | 0.864198 | 0.864198 | 0.864198 | 0.864198 | 0.864198 | 0 | 0.040609 | 0.233463 | 257 | 20 | 35 | 12.85 | 0.370558 | 0 | 0 | 0.875 | 0 | 0.125 | 0.871369 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
fe207ee68377f2b38154dc09952e170c93068912 | 6,709 | py | Python | code/data_loading.py | katherinezhu01/TCRP | f6b3a703c24eeb8ad1698162511e506c6df8d76b | [
"MIT"
] | null | null | null | code/data_loading.py | katherinezhu01/TCRP | f6b3a703c24eeb8ad1698162511e506c6df8d76b | [
"MIT"
] | null | null | null | code/data_loading.py | katherinezhu01/TCRP | f6b3a703c24eeb8ad1698162511e506c6df8d76b | [
"MIT"
] | null | null | null | import numpy as np
import random
import copy
import torch
import torch.utils.data as du
'''
def get_unseen_data_loader( feature, label, cat_label, K ):
num_sample = feature.shape[0]
index_list = np.random.permutation( num_sample )
train_index_list = index_list[0:K]
test_index_list = index_list[K:]
train_feature = torch.FloatTensor( feature[train_index_list,:] )
train_label = torch.FloatTensor( label[train_index_list,:] )
test_feature = torch.FloatTensor( feature[test_index_list,:] )
test_label = torch.FloatTensor( label[test_index_list,:] )
cat_test_label = torch.FloatTensor( cat_label[test_index_list,:] )
train_dataset = du.TensorDataset( train_feature.cuda(), train_label.cuda() )
train_loader = du.DataLoader( train_dataset, batch_size=train_feature.size(0))
test_dataset = du.TensorDataset( test_feature.cuda(), test_label.cuda(), cat_test_label.cuda() )
test_loader = du.DataLoader( test_dataset, batch_size=test_feature.size(0))
return train_loader, test_loader
'''
def get_observed_data_loader(feature, label, tissue_index_list, K, batch_size, tissue_num):
    """Build meta-train/meta-test batch lists from observed tissues.

    Randomly picks ``tissue_num`` tissues for the train split and a disjoint
    set of ``tissue_num`` tissues for the test split, draws up to ``K``
    shuffled samples from each selected tissue, and returns the resulting
    mini-batches as lists of CUDA tensors.

    Args:
        feature: 2-D array, samples x features.
        label: 2-D array of per-sample labels.
        tissue_index_list: list of per-tissue sample-index lists.
        K: number of samples drawn from each selected tissue.
        batch_size: mini-batch size for both loaders.
        tissue_num: number of tissues in each of the two splits.

    Returns:
        (train_data_list, test_data_list): lists of (feature, label)
        CUDA tensor batch tuples.
    """
    # Deep copy so the in-place shuffles do not disturb the caller's lists.
    index_list = copy.deepcopy(tissue_index_list)
    train_sampled_index_list, test_sampled_index_list = [], []
    #print 'tissue index list..', len(tissue_index_list)
    # Disjoint random tissue split: first tissue_num for train, next for test.
    random_tissue_index = np.random.permutation( len(tissue_index_list) )
    train_tissue_index_list = random_tissue_index[0:tissue_num]
    #train_tissue_index_list = random_tissue_index[0:-1]
    test_tissue_index_list = random_tissue_index[tissue_num:tissue_num*2]
    #test_tissue_index_list = [random_tissue_index[-1]]
    #print train_tissue_index_list
    #print test_tissue_index_list
    # Draw up to K shuffled samples from every selected tissue.
    for tissue_index in train_tissue_index_list:
        sub_list = index_list[tissue_index]
        random.shuffle(sub_list)
        train_sampled_index_list += sub_list[0:K]
    for tissue_index in test_tissue_index_list:
        sub_list = index_list[tissue_index]
        random.shuffle(sub_list)
        test_sampled_index_list += sub_list[0:K]
    random.shuffle( train_sampled_index_list )
    random.shuffle( test_sampled_index_list )
    #print '===', train_sampled_index_list
    train_feature = torch.FloatTensor( feature[train_sampled_index_list,:] )
    train_label = torch.FloatTensor( label[train_sampled_index_list,:] )
    dataset = du.TensorDataset(train_feature, train_label)
    loader = du.DataLoader( dataset, batch_size=batch_size, pin_memory=True )
    train_data_list = []
    for batch_feature, batch_label in loader:
        # Skip size-1 batches; presumably to avoid problems in layers that
        # need more than one sample (e.g. batch norm) — TODO confirm.
        if batch_feature.size()[0] == 1:
            continue
        train_data_list.append( (batch_feature.cuda(), batch_label.cuda()) )
    #print '===', test_sampled_index_list,feature.shape
    test_feature = torch.FloatTensor( feature[test_sampled_index_list,:] )
    test_label = torch.FloatTensor( label[test_sampled_index_list,:] )
    dataset = du.TensorDataset( test_feature, test_label )
    loader = du.DataLoader( dataset, batch_size=batch_size, pin_memory=True )
    test_data_list = []
    for batch_feature, batch_label in loader:
        if batch_feature.size()[0] == 1:
            continue
        test_data_list.append( (batch_feature.cuda(), batch_label.cuda()) )
    return train_data_list, test_data_list
def get_observed_data_loader2(feature, label, tissue_index_list, K, batch_size):
    """Sample train/test batch lists using every tissue (no tissue hold-out).

    Per-tissue sampling policy after shuffling:
      * more than 2K samples: first K to train, next K to test (disjoint);
      * more than K samples: K to train, then an independently reshuffled
        K to test (splits may overlap);
      * otherwise: the entire tissue goes into both splits.

    Args:
        feature: 2-D array, samples x features.
        label: 2-D array of per-sample labels.
        tissue_index_list: list of per-tissue sample-index lists.
        K: per-tissue sample budget.
        batch_size: mini-batch size for both loaders.

    Returns:
        (train_data_list, test_data_list): lists of (feature, label)
        CUDA tensor batch tuples.
    """
    # Deep copy so the in-place shuffles do not disturb the caller's lists.
    index_list = copy.deepcopy(tissue_index_list)
    train_sampled_index_list, test_sampled_index_list = [], []
    for index, sub_list in enumerate(index_list):
        random.shuffle(sub_list)
        if 2*K < len(sub_list):
            train_sampled_index_list += sub_list[0:K]
            test_sampled_index_list += sub_list[K:2*K]
        elif K < len(sub_list):
            train_sampled_index_list += sub_list[0:K]
            random.shuffle(sub_list)
            test_sampled_index_list += sub_list[0:K]
        else:
            train_sampled_index_list += sub_list
            test_sampled_index_list += sub_list
    random.shuffle( train_sampled_index_list )
    random.shuffle( test_sampled_index_list )
    train_feature = torch.FloatTensor( feature[train_sampled_index_list,:] )
    train_label = torch.FloatTensor( label[train_sampled_index_list,:] )
    dataset = du.TensorDataset(train_feature, train_label)
    loader = du.DataLoader( dataset, batch_size=batch_size, pin_memory=True )
    train_data_list = []
    for batch_feature, batch_label in loader:
        train_data_list.append( (batch_feature.cuda(), batch_label.cuda()) )
    test_feature = torch.FloatTensor( feature[test_sampled_index_list,:] )
    test_label = torch.FloatTensor( label[test_sampled_index_list,:] )
    dataset = du.TensorDataset( test_feature, test_label )
    loader = du.DataLoader( dataset, batch_size=batch_size, pin_memory=True )
    test_data_list = []
    for batch_feature, batch_label in loader:
        test_data_list.append( (batch_feature.cuda(), batch_label.cuda()) )
    return train_data_list, test_data_list
def load_unseen_data_loader(train_index_file, test_index_file, feature, label, K, trial, batch_size=1):
    """Build train/test batch lists from pre-saved sample-index files.

    Unlike get_unseen_data_loader, the support/query split is read from the
    given .npy files instead of being drawn at random. Training batches are
    always of size 1.

    Args:
        train_index_file: .npy file holding the training sample indices.
        test_index_file: .npy file holding the test sample indices.
        feature: 2-D array, samples x features.
        label: array of per-sample labels.
        K: unused — presumably kept for signature parity with
            get_unseen_data_loader; TODO confirm.
        trial: unused — see K.
        batch_size: mini-batch size for the test loader only.

    Returns:
        (train_data_list, test_data_list): lists of (feature, label)
        CUDA tensor batch tuples.
    """
    train_index_list = np.load( train_index_file )
    test_index_list = np.load( test_index_file )
    train_feature = torch.FloatTensor( feature[train_index_list,:] )
    train_label = torch.FloatTensor( label[train_index_list,] )
    test_feature = torch.FloatTensor( feature[test_index_list,:] )
    test_label = torch.FloatTensor( label[test_index_list,] )
    train_dataset = du.TensorDataset( train_feature, train_label )
    test_dataset = du.TensorDataset( test_feature, test_label )
    # Training loader uses batch_size=1 regardless of the parameter.
    train_loader = du.DataLoader(train_dataset, batch_size=1)
    train_data_list = []
    for batch_feature, batch_label in train_loader:
        train_data_list.append((batch_feature.cuda(), batch_label.cuda()))
    test_loader = du.DataLoader(test_dataset, batch_size=batch_size)
    test_data_list = []
    for batch_feature, batch_label in test_loader:
        test_data_list.append((batch_feature.cuda(), batch_label.cuda()))
    return train_data_list, test_data_list
def get_unseen_data_loader(feature, label, K, batch_size=1):
    """Randomly split an unseen task into K support samples and a query set.

    The first K indices of a random permutation form the few-shot training
    split; the remaining indices form the test split. Every mini-batch is
    moved to the GPU before being returned.

    Returns:
        (train_data_list, test_data_list): lists of (feature, label)
        CUDA tensor batch tuples.
    """
    shuffled = np.random.permutation(feature.shape[0])
    support_idx = shuffled[0:K]
    query_idx = shuffled[K:]

    def _to_cuda_batches(idx):
        # Wrap the selected rows in a DataLoader and push batches to the GPU.
        ds = du.TensorDataset(torch.FloatTensor(feature[idx, :]),
                              torch.FloatTensor(label[idx, ]))
        return [(x.cuda(), y.cuda())
                for x, y in du.DataLoader(ds, batch_size=batch_size)]

    return _to_cuda_batches(support_idx), _to_cuda_batches(query_idx)
| 34.942708 | 103 | 0.781637 | 983 | 6,709 | 4.925738 | 0.061038 | 0.13197 | 0.085915 | 0.056382 | 0.855845 | 0.829822 | 0.805659 | 0.777984 | 0.730896 | 0.730896 | 0 | 0.00437 | 0.113132 | 6,709 | 191 | 104 | 35.125654 | 0.809412 | 0.04412 | 0 | 0.743119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036697 | false | 0 | 0.045872 | 0 | 0.119266 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
fe2100950d94d309a993be14fc9f6d47cb521aed | 826 | py | Python | mikhailova_daria_dz_1/task2.py | DariaShidova/1824_GB_Python_1 | 23ab1829260caa092cbf10b06f766fe95937cecf | [
"MIT"
] | null | null | null | mikhailova_daria_dz_1/task2.py | DariaShidova/1824_GB_Python_1 | 23ab1829260caa092cbf10b06f766fe95937cecf | [
"MIT"
] | null | null | null | mikhailova_daria_dz_1/task2.py | DariaShidova/1824_GB_Python_1 | 23ab1829260caa092cbf10b06f766fe95937cecf | [
"MIT"
] | null | null | null | cube_list = []
# Build the list of cubes: each even `number` is incremented first, so the
# cubed values are the odd numbers 1, 3, ..., 1001.
number = 0
while number <= 1000:
    if not number % 2 == 0:
        number = number+1
    else:
        number = number + 1
        cub = number ** 3
        cube_list.append(cub)
print(cube_list)

# Sum every cube whose decimal digit sum is divisible by 7.
i = 0
remainder_sum = 0
main_sum_7 = 0
for n in cube_list:
    i = n  # remember the original value; n is consumed by the digit loop
    while n != 0:
        remainder = n % 10
        remainder_sum = remainder + remainder_sum
        n = n//10
    if remainder_sum % 7 == 0:
        main_sum_7 += i
    remainder_sum = 0  # reset the digit sum for the next element
print(main_sum_7)

# Add 17 to every element in place, then repeat the digit-sum-of-7 filter.
cube_list[:] = [n+17 for n in cube_list]
print(cube_list)
i = 0
remainder_sum = 0
main_sum_7 = 0
for n in cube_list:
    i = n
    while n != 0:
        remainder = n % 10
        remainder_sum = remainder + remainder_sum
        n = n//10
    if remainder_sum % 7 == 0:
        main_sum_7 += i
    remainder_sum = 0
print(main_sum_7)
fe2400e9c5f9d4e597894082ae0cead0acd236bf | 91 | py | Python | tests/test_d_genome.py | odococo/bioinformatica | ba8f979140f1f5fc1ff95d7480f7699f3cd6614a | [
"MIT"
] | null | null | null | tests/test_d_genome.py | odococo/bioinformatica | ba8f979140f1f5fc1ff95d7480f7699f3cd6614a | [
"MIT"
] | null | null | null | tests/test_d_genome.py | odococo/bioinformatica | ba8f979140f1f5fc1ff95d7480f7699f3cd6614a | [
"MIT"
] | null | null | null | from bioinformatica.data_retrieval import get_genome
def test_genome():
    # Smoke test: the genome retrieval helper runs without raising.
    get_genome()
| 15.166667 | 52 | 0.791209 | 12 | 91 | 5.666667 | 0.75 | 0.264706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 91 | 5 | 53 | 18.2 | 0.871795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
fe7c9df38b1a92987284c0c5938f3d6ab4c33da9 | 2,554 | py | Python | pyaz/storage/metrics/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/storage/metrics/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/storage/metrics/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | 1 | 2022-02-03T09:12:01.000Z | 2022-02-03T09:12:01.000Z | '''
Manage storage service metrics.
'''
from ... pyaz_utils import _call_az
def update(retention, services, account_key=None, account_name=None, api=None, connection_string=None, hour=None, minute=None, sas_token=None, timeout=None):
    '''
    Update metrics settings for a storage account.

    Required Parameters:
    - retention -- None
    - services -- None

    Optional Parameters:
    - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
    - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
    - api -- None
    - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
    - hour -- None
    - minute -- None
    - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
    - timeout -- Request timeout in seconds. Applies to each call to the service.
    '''
    # locals() forwards every parameter by name to the az CLI wrapper.
    return _call_az("az storage metrics update", locals())
def show(account_key=None, account_name=None, connection_string=None, interval=None, sas_token=None, services=None, timeout=None):
    '''
    Show metrics settings for a storage account.

    Optional Parameters:
    - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY
    - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit
    - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING
    - interval -- None
    - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN
    - services -- None
    - timeout -- Request timeout in seconds. Applies to each call to the service.
    '''
    # locals() forwards every parameter by name to the az CLI wrapper.
    return _call_az("az storage metrics show", locals())
| 60.809524 | 365 | 0.758418 | 352 | 2,554 | 5.380682 | 0.213068 | 0.133052 | 0.101373 | 0.13094 | 0.843717 | 0.843717 | 0.757128 | 0.757128 | 0.757128 | 0.757128 | 0 | 0 | 0.177369 | 2,554 | 41 | 366 | 62.292683 | 0.901475 | 0.775255 | 0 | 0 | 0 | 0 | 0.105495 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0 | 0.2 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 8 |
228f85994a4fc9484455fcaa6d7b758a66e889c9 | 367 | py | Python | bitmovin_api_sdk/encoding/outputs/gcs_service_account/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/encoding/outputs/gcs_service_account/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/encoding/outputs/gcs_service_account/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | from bitmovin_api_sdk.encoding.outputs.gcs_service_account.gcs_service_account_api import GcsServiceAccountApi
from bitmovin_api_sdk.encoding.outputs.gcs_service_account.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.outputs.gcs_service_account.gcs_service_account_output_list_query_params import GcsServiceAccountOutputListQueryParams
| 91.75 | 149 | 0.93188 | 47 | 367 | 6.808511 | 0.382979 | 0.15625 | 0.265625 | 0.16875 | 0.575 | 0.575 | 0.575 | 0.575 | 0.575 | 0.41875 | 0 | 0 | 0.032698 | 367 | 3 | 150 | 122.333333 | 0.901408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
22b2fd7ef9a234a700fcd0af38b52d3c26941e88 | 4,393 | py | Python | tests/test_aws.py | prajwalchoudhry/scrapy | 2d70f3173f4533c8ae93950aecdbaffcd32b4396 | [
"BSD-3-Clause"
] | null | null | null | tests/test_aws.py | prajwalchoudhry/scrapy | 2d70f3173f4533c8ae93950aecdbaffcd32b4396 | [
"BSD-3-Clause"
] | null | null | null | tests/test_aws.py | prajwalchoudhry/scrapy | 2d70f3173f4533c8ae93950aecdbaffcd32b4396 | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
import unittest
from io import BytesIO
from email.charset import Charset
from scrapy.aws import SESender
class AWSSenderTest(unittest.TestCase):
    """Unit tests for ``scrapy.aws.SESender.construct_message``.

    Every test builds an ``SESender`` against the AWS SES SMTP endpoint
    and inspects the result dict returned by ``construct_message()``.
    The endpoint, port, and credential placeholders were previously
    duplicated in each test; they are now centralized in helpers so a
    change to the fixture only needs to be made once.
    """

    # Fixed SES SMTP endpoint/port shared by every test case.
    SES_HOST = 'email-smtp.us-east-1.amazonaws.com'
    SES_PORT = 587

    def _sender(self, username='ENTER_USERNAME_HERE',
                password='ENTER_PASSWORD_HERE',
                from_addr="test@test.com"):
        # Helper: build an SESender; defaults are the "valid" test fixture.
        return SESender(self.SES_HOST, username, password,
                        self.SES_PORT, from_addr)

    def _assert_sent(self, result, subject, body, cc):
        # Helper: common assertions for a successfully sent message.
        self.assertEqual(result['Result'], "Mail Sent")
        self.assertEqual(result['To'][0], "test@test.com")
        self.assertEqual(result['CC'], cc)
        self.assertEqual(result['Body'], body)
        self.assertEqual(result['Subject'], subject)

    def test_send(self):
        # Invalid credentials -> sending fails.
        sender = self._sender('username', 'password')
        result = sender.construct_message(['test@test.com'], "Hello from Scrapy!", "Testing Scrapy Tool To Email Using AWS SES", None, None, 'text/plain', False)
        self.assertEqual(result['Result'], "Unable to send mail")
        # Valid credentials but an unverified From address -> sending fails.
        sender = self._sender(from_addr="invalid@email.com")
        result = sender.construct_message(['test@test.com'], "Hello from Scrapy!", "Testing Scrapy Tool To Email Using AWS SES", None, None, 'text/plain', False)
        self.assertEqual(result['Result'], "Unable to send mail")
        # Valid credentials and addresses -> mail sent, fields echoed back.
        sender = self._sender()
        result = sender.construct_message(['test@test.com'], "Hello from Scrapy!", "Testing Scrapy Tool To Email Using AWS SES", None, None, 'text/plain', False)
        self._assert_sent(result, "Hello from Scrapy!",
                          "Testing Scrapy Tool To Email Using AWS SES", None)

    def test_send_single_values_to_and_cc(self):
        # A single CC address is echoed back as a scalar string.
        sender = self._sender()
        result = sender.construct_message(['test@test.com'], "Hello from Scrapy!", "Testing Scrapy Tool To Email Using AWS SES", ['test@test.com'], None, 'text/plain', False)
        self._assert_sent(result, "Hello from Scrapy!",
                          "Testing Scrapy Tool To Email Using AWS SES",
                          "test@test.com")

    def test_send_html(self):
        # HTML body with mimetype 'text/html' is passed through unchanged.
        sender = self._sender()
        result = sender.construct_message(['test@test.com'], "Hello from Scrapy!", '<p>body</p>', ['test@test.com'], None, 'text/html', False)
        self._assert_sent(result, "Hello from Scrapy!", "<p>body</p>",
                          "test@test.com")

    def test_send_utf8(self):
        # Non-ASCII subject and body survive the round trip.
        subject = u'sübjèçt'
        body = u'bödÿ-àéïöñß'
        sender = self._sender()
        result = sender.construct_message(['test@test.com'], subject, body, ['test@test.com'], None, 'text/html', False)
        self._assert_sent(result, subject, body, "test@test.com")

    def test_send_attach_utf8(self):
        # Non-ASCII content plus an (empty) attachment and explicit charset.
        subject = u'sübjèçt'
        body = u'bödÿ-àéïöñß'
        attach = BytesIO()
        attach.seek(0)
        attachs = [('attachment', 'text/plain', attach)]
        sender = self._sender()
        result = sender.construct_message(['test@test.com'], subject, body, ['test@test.com'], attachs, 'text/html', 'utf-8')
        self._assert_sent(result, subject, body, "test@test.com")
# Allow running this test module directly: `python tests/test_aws.py`.
if __name__ == "__main__":
    unittest.main()
| 54.9125 | 174 | 0.65741 | 571 | 4,393 | 4.964974 | 0.1331 | 0.142857 | 0.2 | 0.042328 | 0.869136 | 0.861023 | 0.861023 | 0.859612 | 0.859612 | 0.817989 | 0 | 0.01055 | 0.180059 | 4,393 | 79 | 175 | 55.607595 | 0.776513 | 0.038015 | 0 | 0.616667 | 0 | 0 | 0.376925 | 0.056385 | 0 | 0 | 0 | 0 | 0.45 | 1 | 0.083333 | false | 0.116667 | 0.066667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
22cfa67a2ee823932610893c4307a8da6bb0b065 | 68,625 | py | Python | benchmarks/SimResults/combinations_spec_heteroFair/cmp_bwavesgcccactusADMastar/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/combinations_spec_heteroFair/cmp_bwavesgcccactusADMastar/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/combinations_spec_heteroFair/cmp_bwavesgcccactusADMastar/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0517832,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.243361,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.287759,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.310633,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.537904,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.308503,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.15704,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.26293,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.89794,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0543638,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0112607,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.100486,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0832796,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.15485,
'Execution Unit/Register Files/Runtime Dynamic': 0.0945403,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.257001,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.666604,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.56692,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00185256,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00185256,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00162827,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000638367,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00119632,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00652971,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0172372,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0800589,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.09243,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.222057,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.271916,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.56187,
'Instruction Fetch Unit/Runtime Dynamic': 0.597799,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0704614,
'L2/Runtime Dynamic': 0.0108746,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.95852,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.31974,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0880439,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.088044,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.37598,
'Load Store Unit/Runtime Dynamic': 1.84199,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.217101,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.434203,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.07705,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0780393,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.316629,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0366073,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.608332,
'Memory Management Unit/Runtime Dynamic': 0.114647,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 23.0763,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.189662,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0181663,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.159335,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.367164,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.49939,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0284219,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.225012,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.171337,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.147248,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.237506,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.119885,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.504639,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.14214,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.39262,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0323692,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00617625,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0545769,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0456772,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0869461,
'Execution Unit/Register Files/Runtime Dynamic': 0.0518534,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.122085,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.323692,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.51057,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00109412,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00109412,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000993392,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000406664,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000656156,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00383778,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00904624,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0439106,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.79309,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.125071,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.14914,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.14716,
'Instruction Fetch Unit/Runtime Dynamic': 0.331006,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0322763,
'L2/Runtime Dynamic': 0.00688169,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.77434,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.749575,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0497329,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0497328,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.00919,
'Load Store Unit/Runtime Dynamic': 1.04457,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.122633,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.245265,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0435228,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0439726,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.173664,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0206069,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.404537,
'Memory Management Unit/Runtime Dynamic': 0.0645795,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.5753,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0851484,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00767967,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0739333,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.166761,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.12438,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.027361,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.224179,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.157328,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.148778,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.239974,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.121131,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.509883,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.146039,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.38031,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0297226,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00624043,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0549806,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0461518,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0847032,
'Execution Unit/Register Files/Runtime Dynamic': 0.0523922,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.122671,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.317344,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.50917,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00130718,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00130718,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00117957,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000479058,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000662974,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00445691,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.011068,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0443669,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.82212,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.13336,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.15069,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.1776,
'Instruction Fetch Unit/Runtime Dynamic': 0.343941,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0365157,
'L2/Runtime Dynamic': 0.00620085,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.77876,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.748485,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0498758,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0498757,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.01428,
'Load Store Unit/Runtime Dynamic': 1.04433,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.122985,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.24597,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0436478,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0441312,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.175469,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0220548,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.406557,
'Memory Management Unit/Runtime Dynamic': 0.0661859,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.6047,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0781861,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00766397,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0744877,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.160338,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.13017,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0670229,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.255331,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.414253,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.159521,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.257301,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.129877,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.546698,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.118934,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.75547,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0782613,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00669101,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0713499,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0494841,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.149611,
'Execution Unit/Register Files/Runtime Dynamic': 0.0561751,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.167074,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.43659,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.70017,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000373331,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000373331,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000338732,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000138546,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000710843,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00179624,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00309492,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0475703,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.02588,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.119547,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.16157,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.39125,
'Instruction Fetch Unit/Runtime Dynamic': 0.333579,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0375031,
'L2/Runtime Dynamic': 0.0081915,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.14781,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.92776,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0618154,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0618155,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.43972,
'Load Store Unit/Runtime Dynamic': 1.29443,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.152426,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.304853,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0540966,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0546389,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.188138,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0196601,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.437176,
'Memory Management Unit/Runtime Dynamic': 0.074299,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.6506,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.20587,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00970252,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0776454,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.293218,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.70389,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.028344809214654,
'Runtime Dynamic': 5.028344809214654,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.264435,
'Runtime Dynamic': 0.0726587,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 74.1713,
'Peak Power': 107.284,
'Runtime Dynamic': 15.5305,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 73.9069,
'Total Cores/Runtime Dynamic': 15.4578,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.264435,
'Total L3s/Runtime Dynamic': 0.0726587,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 75.082057 | 124 | 0.682157 | 8,082 | 68,625 | 5.786315 | 0.068052 | 0.123511 | 0.112905 | 0.093403 | 0.93848 | 0.931188 | 0.917203 | 0.886988 | 0.862761 | 0.842168 | 0 | 0.132209 | 0.224277 | 68,625 | 914 | 125 | 75.082057 | 0.746271 | 0 | 0 | 0.642232 | 0 | 0 | 0.657258 | 0.048087 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
22ffc4b46e7983033cdc0b7fb9863a75d1ad9381 | 3,009 | py | Python | VL-T5/src/gqa_model.py | rebedy/VL-T5 | 1799110fe55ad3badc031fe2a3718c1ba61b4fc5 | [
"MIT"
] | 1 | 2022-03-24T08:07:39.000Z | 2022-03-24T08:07:39.000Z | VL-T5/src/gqa_model.py | rebedy/VL-T5 | 1799110fe55ad3badc031fe2a3718c1ba61b4fc5 | [
"MIT"
] | null | null | null | VL-T5/src/gqa_model.py | rebedy/VL-T5 | 1799110fe55ad3badc031fe2a3718c1ba61b4fc5 | [
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
from modeling_t5 import VLT5
class VLT5GQA(VLT5):
    """VL-T5 wrapper for the GQA visual question answering task."""

    def __init__(self, config):
        super().__init__(config)

    def train_step(self, batch):
        """One training step: forward pass plus length-normalized LM loss.

        Reads 'vis_feats', 'input_ids', 'boxes' and 'target_ids' tensors
        from `batch`; returns {'loss': scalar tensor}.
        """
        device = next(self.parameters()).device

        feats = batch['vis_feats'].to(device)
        tokens = batch['input_ids'].to(device)
        boxes = batch['boxes'].to(device)
        labels = batch["target_ids"].to(device)

        out = self(
            input_ids=tokens,
            vis_inputs=(feats, boxes),
            labels=labels,
            return_dict=True,
        )
        assert 'loss' in out

        # Loss comes back per token: average over valid (non -100) positions
        # for each example, then over the batch.
        valid = labels != -100
        bsz, seq_len = labels.size()
        per_example = (out['loss'].view(bsz, seq_len) * valid).sum(dim=1)
        per_example = per_example / valid.sum(dim=1).clamp(min=1)  # shape (B,)

        return {'loss': per_example.mean()}

    def test_step(self, batch, **kwargs):
        """Generate answers for evaluation; returns {'pred_ans': [str, ...]}."""
        device = next(self.parameters()).device

        feats = batch['vis_feats'].to(device)
        tokens = batch['input_ids'].to(device)
        boxes = batch['boxes'].to(device)

        generated = self.generate(
            input_ids=tokens,
            vis_inputs=(feats, boxes),
            **kwargs,
        )
        answers = self.tokenizer.batch_decode(generated, skip_special_tokens=True)

        return {'pred_ans': answers}
from modeling_bart import VLBart
class VLBartGQA(VLBart):
    """VL-BART wrapper for the GQA visual question answering task."""

    def __init__(self, config):
        super().__init__(config)

    def train_step(self, batch):
        """One training step: forward pass plus length-normalized LM loss.

        Reads 'vis_feats', 'input_ids', 'boxes' and 'target_ids' tensors
        from `batch`; returns {'loss': scalar tensor}.
        """
        device = next(self.parameters()).device

        feats = batch['vis_feats'].to(device)
        tokens = batch['input_ids'].to(device)
        boxes = batch['boxes'].to(device)
        labels = batch["target_ids"].to(device)

        out = self(
            input_ids=tokens,
            vis_inputs=(feats, boxes),
            labels=labels,
            return_dict=True,
        )
        assert 'loss' in out

        # Loss comes back per token: average over valid (non -100) positions
        # for each example, then over the batch.
        valid = labels != -100
        bsz, seq_len = labels.size()
        per_example = (out['loss'].view(bsz, seq_len) * valid).sum(dim=1)
        per_example = per_example / valid.sum(dim=1).clamp(min=1)  # shape (B,)

        return {'loss': per_example.mean()}

    def test_step(self, batch, **kwargs):
        """Generate answers for evaluation; returns {'pred_ans': [str, ...]}."""
        device = next(self.parameters()).device

        feats = batch['vis_feats'].to(device)
        tokens = batch['input_ids'].to(device)
        boxes = batch['boxes'].to(device)

        generated = self.generate(
            input_ids=tokens,
            vis_inputs=(feats, boxes),
            **kwargs,
        )
        answers = self.tokenizer.batch_decode(generated, skip_special_tokens=True)

        return {'pred_ans': answers}
| 24.463415 | 87 | 0.567298 | 371 | 3,009 | 4.3531 | 0.185984 | 0.079257 | 0.040867 | 0.059443 | 0.918885 | 0.918885 | 0.918885 | 0.918885 | 0.918885 | 0.918885 | 0 | 0.007729 | 0.312064 | 3,009 | 122 | 88 | 24.663934 | 0.772464 | 0.000997 | 0 | 0.847059 | 0 | 0 | 0.050633 | 0 | 0 | 0 | 0 | 0 | 0.023529 | 1 | 0.070588 | false | 0 | 0.058824 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
fe1cc62dad1deaac3ae47a601ee2d1f4ff5f1bf0 | 33,276 | py | Python | tests/bravo_api/test_region_snv_query.py | statgen/bravo_ui | 78e365c9975626685b3801bbb5ac2020791be66b | [
"MIT"
] | null | null | null | tests/bravo_api/test_region_snv_query.py | statgen/bravo_ui | 78e365c9975626685b3801bbb5ac2020791be66b | [
"MIT"
] | 14 | 2021-07-12T19:04:56.000Z | 2022-03-08T20:20:44.000Z | tests/bravo_api/test_region_snv_query.py | statgen/bravo_ui | 78e365c9975626685b3801bbb5ac2020791be66b | [
"MIT"
] | null | null | null | import pytest
from bravo_api.models.readers import snv_lof2code, snv_consequence2code
import sys
@pytest.mark.integration
def test_noargs_query(client, config):
    """A /region/snv request with no parameters must fail validation (422)."""
    resp = client.get('/region/snv')
    assert resp.status_code == 422
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    for key in ('data', 'total', 'limit', 'next'):
        assert body[key] is None
    assert body['error'] is not None and len(body['error']) > 0
@pytest.mark.integration
def test_minargs_query(client, config):
    """Querying with only chrom/start/stop returns the full variant list."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    assert body['data'] is not None and len(body['data']) == 17813
    assert body['total'] is not None and body['total'] == len(body['data'])
    assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
    assert body['next'] is None
    assert body['error'] is None
@pytest.mark.integration
def test_extraargs_query(client, config):
    """Unknown query parameters must be rejected with a 422 error payload."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&iamextrakey=true')
    assert resp.status_code == 422
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    for key in ('data', 'total', 'limit', 'next'):
        assert body[key] is None
    assert body['error'] is not None and len(body['error']) > 0
@pytest.mark.integration
def test_PASS_filter_query(client, config):
    """FILTER-status queries (PASS/SVM/DISC/EXHET) must match a local partition."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200
    everything = resp.get_json()['data']

    # Partition the unfiltered result locally, keyed by FILTER membership.
    passed, nonpassed, svm = set(), set(), set()
    disc_and_exhet, disc_or_exhet = set(), set()
    for record in everything:
        vid = record['variant_id']
        (passed if 'PASS' in record['filter'] else nonpassed).add(vid)
        if 'SVM' in record['filter']:
            svm.add(vid)
        has_disc = 'DISC' in record['filter']
        has_exhet = 'EXHET' in record['filter']
        if has_disc and has_exhet:
            disc_and_exhet.add(vid)
        if has_disc or has_exhet:
            disc_or_exhet.add(vid)

    def verify(suffix, expected_ids):
        # Issue the filtered request and confirm the payload matches expectations.
        r = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}{suffix}')
        assert r.status_code == 200
        body = r.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) == len(expected_ids)
        assert body['total'] is not None and body['total'] == len(body['data'])
        assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
        assert body['next'] is None
        assert body['error'] is None
        assert expected_ids == set(v['variant_id'] for v in body['data'])

    verify('&filter=eq:PASS', passed)
    verify('&filter=ne:PASS', nonpassed)
    verify('&filter=SVM', svm)
    # Repeated values in one parameter are AND-ed; repeated parameters are OR-ed.
    verify('&filter=eq:DISC,eq:EXHET', disc_and_exhet)
    verify('&filter=eq:DISC&filter=eq:EXHET', disc_or_exhet)
@pytest.mark.integration
def test_af_filter_query(client, config):
    """Allele-frequency range filters must select the expected variants, in order."""
    chrom, start, stop = 22, 46151865, 46243776
    min_af, max_af = 0.05, 0.1
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200
    everything = resp.get_json()['data']

    below_max = [r['variant_id'] for r in everything if r['allele_freq'] <= max_af]
    above_min = [r['variant_id'] for r in everything if r['allele_freq'] > min_af]
    within = [r['variant_id'] for r in everything
              if min_af < r['allele_freq'] <= max_af]

    def verify(suffix, expected_ids):
        # Filtered request must return the same ids in the same order.
        r = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}{suffix}')
        assert r.status_code == 200
        body = r.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) == len(expected_ids)
        assert body['total'] is not None and body['total'] == len(body['data'])
        assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
        assert body['next'] is None
        assert body['error'] is None
        assert all(a == b for a, b in zip(expected_ids,
                                          (v['variant_id'] for v in body['data'])))

    verify(f'&allele_freq=lte:{max_af}', below_max)
    verify(f'&allele_freq=gt:{min_af}', above_min)
    verify(f'&allele_freq=gt:{min_af},lte:{max_af}', within)
@pytest.mark.integration
def test_lof_filter_query(client, config):
    """LoF annotation filters (HC/LC) must select the expected PASS variants."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200

    # Classify PASS variants locally by their region-level LoF calls.
    hc_lof, lc_lof, lof, no_lof = [], [], [], []
    for record in resp.get_json()['data']:
        if 'PASS' not in record['filter']:
            continue
        lof_calls = record['annotation']['region'].get('lof', [])
        has_hc = any(x == 'HC' for x in lof_calls)
        has_lc = any(x == 'LC' for x in lof_calls)
        if has_hc:
            hc_lof.append(record)
        if has_lc:
            lc_lof.append(record)
        if has_hc or has_lc:
            lof.append(record)
        else:
            no_lof.append(record)

    def verify(suffix, expected):
        # Filtered request must return exactly the expected records.
        r = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}{suffix}')
        assert r.status_code == 200
        body = r.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) == len(expected)
        assert body['total'] is not None and body['total'] == len(body['data'])
        assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
        assert body['next'] is None
        assert body['error'] is None
        assert expected == body['data']

    verify('&filter=PASS&annotation.region.lof=HC', hc_lof)
    verify('&filter=PASS&annotation.region.lof=LC', lc_lof)
    verify('&filter=PASS&annotation.region.lof=HC&annotation.region.lof=LC', lof)
    verify('&filter=PASS&annotation.region.lof=ne:HC,ne:LC', no_lof)
@pytest.mark.integration
def test_consequence_filter_query(client, config):
    """Consequence filters must select the expected variant ids."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200
    everything = resp.get_json()['data']

    splice_acceptor = {r['variant_id'] for r in everything
                       if 'splice_acceptor_variant' in r['annotation']['region']['consequence']}
    missense = {r['variant_id'] for r in everything
                if 'missense_variant' in r['annotation']['region']['consequence']}

    def verify(consequence, expected_ids):
        r = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}'
                       f'&annotation.region.consequence={consequence}')
        assert r.status_code == 200
        body = r.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) == len(expected_ids)
        assert body['total'] is not None and body['total'] == len(body['data'])
        assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
        assert body['next'] is None
        assert body['error'] is None
        assert expected_ids == set(v['variant_id'] for v in body['data'])

    verify('splice_acceptor_variant', splice_acceptor)
    verify('missense_variant', missense)
@pytest.mark.integration
def test_complex_filter_query(client, config):
    """Combined consequence + LoF filters must intersect correctly."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=eq:PASS')
    assert resp.status_code == 200
    expected = [
        rec for rec in resp.get_json()['data']
        if 'frameshift_variant' in rec['annotation']['region']['consequence']
        and 'HC' in rec['annotation']['region']['lof']
    ]
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=eq:PASS&annotation.region.consequence=eq:frameshift_variant&annotation.region.lof=eq:HC')
    assert resp.status_code == 200
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    assert body['data'] is not None and len(body['data']) == len(expected)
    assert expected == body['data']
@pytest.mark.integration
def test_hom_sort_query(client, config):
    """Sorting by hom_count:desc must match a local descending sort."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200
    unsorted_data = resp.get_json()['data']
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=hom_count:desc')
    assert resp.status_code == 200
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    assert body['data'] is not None and len(body['data']) == len(unsorted_data)
    assert body['total'] is not None and body['total'] == len(body['data'])
    assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
    assert body['next'] is None
    assert body['error'] is None
    expected = sorted(unsorted_data, key=lambda rec: rec['hom_count'], reverse=True)
    assert expected == body['data']
@pytest.mark.integration
def test_cadd_sort_query(client, config):
    """Sorting by cadd_phred:desc must match a local sort (None sorts last)."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200
    unsorted_data = resp.get_json()['data']
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=cadd_phred:desc')
    assert resp.status_code == 200
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    assert body['data'] is not None and len(body['data']) == len(unsorted_data)
    assert body['total'] is not None and body['total'] == len(body['data'])
    assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
    assert body['next'] is None
    assert body['error'] is None

    def score(rec):
        # Missing CADD values are treated as -inf so they land at the tail.
        value = rec['cadd_phred']
        return float("-inf") if value is None else value

    assert sorted(unsorted_data, key=score, reverse=True) == body['data']
@pytest.mark.integration
def test_lof_sort_query(client, config):
    """Sorting by annotation.region.lof:desc must match a local weight sort."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200
    unsorted_data = resp.get_json()['data']

    def weight(rec):
        # Distinct LoF codes, highest first, used as the sort key.
        codes = {snv_lof2code[x] for x in rec['annotation']['region'].get('lof', [])}
        return sorted(codes, reverse=True)

    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=annotation.region.lof:desc')
    assert resp.status_code == 200, resp.get_json()
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    assert body['data'] is not None and len(body['data']) == len(unsorted_data)
    assert body['total'] is not None and body['total'] == len(body['data'])
    assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
    assert body['next'] is None
    assert body['error'] is None
    assert sorted(unsorted_data, key=weight, reverse=True) == body['data']
@pytest.mark.integration
def test_consequence_sort_query(client, config):
    """Sorting by annotation.region.consequence:desc must match a local weight sort."""
    chrom, start, stop = 22, 46151865, 46243776
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    assert resp.status_code == 200
    unsorted_data = resp.get_json()['data']

    def weight(rec):
        # Distinct consequence codes, highest first, used as the sort key.
        codes = {snv_consequence2code[x]
                 for x in rec['annotation']['region'].get('consequence', [])}
        return sorted(codes, reverse=True)

    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=annotation.region.consequence:desc')
    assert resp.status_code == 200, resp.get_json()
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    assert body['data'] is not None and len(body['data']) == len(unsorted_data)
    assert body['total'] is not None and body['total'] == len(body['data'])
    assert body['limit'] is not None and body['limit'] == config['BRAVO_API_PAGE_LIMIT']
    assert body['next'] is None
    assert body['error'] is None
    assert sorted(unsorted_data, key=weight, reverse=True) == body['data']
@pytest.mark.integration
def test_limit_too_high_query(client, config):
    """A limit above BRAVO_API_PAGE_LIMIT must be rejected with 422."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&limit={config["BRAVO_API_PAGE_LIMIT"] + 1}')
    assert resp.status_code == 422
    body = resp.get_json()
    assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
    for key in ('data', 'total', 'limit', 'next'):
        assert body[key] is None
    assert body['error'] is not None and len(body['error']) > 0
@pytest.mark.integration
def test_paged_query(client, config):
    """Paging through a region must reproduce the unpaged result exactly."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}')
    all_data = resp.get_json()['data']
    page_size = 1000
    assert len(all_data) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&limit={page_size}'
    collected = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200, resp.get_json()
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(all_data)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected.extend(body['data'])
        url = body['next']
    assert all_data == collected
@pytest.mark.integration
def test_PASS_filter_paged_query(client, config):
    """Paging a PASS-filtered region must reproduce the unpaged result."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=PASS')
    passed = resp.get_json()['data']
    page_size = 100
    assert len(passed) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=PASS&limit={page_size}'
    collected = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(passed)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected.extend(body['data'])
        url = body['next']
    assert passed == collected
@pytest.mark.integration
def test_af_filter_paged_query(client, config):
    """Paging an allele-frequency-filtered region must preserve ids and order."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&allele_freq=lte:0.01')
    expected_ids = [v['variant_id'] for v in resp.get_json()['data']]
    page_size = 100
    assert len(expected_ids) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&allele_freq=lte:0.01&limit={page_size}'
    collected_ids = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(expected_ids)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected_ids.extend(v['variant_id'] for v in body['data'])
        url = body['next']
    assert len(expected_ids) == len(collected_ids)
    assert all(a == b for a, b in zip(expected_ids, collected_ids))
@pytest.mark.integration
def test_LoF_filter_paged_query(client, config):
    """Paging a LoF-filtered region (tiny pages) must reproduce the unpaged result."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=PASS&annotation.region.lof=HC&annotation.region.lof=LC')
    expected = resp.get_json()['data']
    page_size = 2
    assert len(expected) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=PASS&annotation.region.lof=HC&annotation.region.lof=LC&limit={page_size}'
    collected = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(expected)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected.extend(body['data'])
        url = body['next']
    assert expected == collected
@pytest.mark.integration
def test_consequence_filter_paged_query(client, config):
    """Paging a consequence-filtered region must reproduce the unpaged result."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=PASS&annotation.region.consequence=missense_variant')
    expected = resp.get_json()['data']
    page_size = 10
    assert len(expected) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=PASS&annotation.region.consequence=missense_variant&limit={page_size}'
    collected = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(expected)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected.extend(body['data'])
        url = body['next']
    assert expected == collected
@pytest.mark.integration
def test_hom_sorted_paged_query(client, config):
    """Paging a hom_count-sorted region must reproduce the unpaged order."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=hom_count:desc')
    expected = resp.get_json()['data']
    page_size = 100
    assert len(expected) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=hom_count:desc&limit={page_size}'
    collected = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(expected)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected.extend(body['data'])
        url = body['next']
    assert expected == collected
@pytest.mark.integration
def test_LoF_sorted_paged_query(client, config):
    """Paging a LoF-sorted region must reproduce the unpaged order."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=annotation.region.lof:desc')
    expected = resp.get_json()['data']
    page_size = 10
    assert len(expected) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=annotation.region.lof:desc&limit={page_size}'
    collected = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(expected)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected.extend(body['data'])
        url = body['next']
    assert expected == collected
@pytest.mark.integration
def test_consequence_sorted_paged_query(client, config):
    """Paging a consequence-sorted region must reproduce the unpaged order."""
    chrom, start, stop = 22, 50673415, 50734298
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=annotation.region.consequence:desc')
    expected = resp.get_json()['data']
    page_size = 100
    assert len(expected) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&sort=annotation.region.consequence:desc&limit={page_size}'
    collected = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(expected)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected.extend(body['data'])
        url = body['next']
    assert expected == collected
@pytest.mark.integration
def test_rsid_sorted_paged_query(client, config):
    """Paging an rsID-filtered, consequence-sorted region must keep order."""
    chrom, start, stop = 22, 50673415, 50734298
    rsid = 'rs5'
    resp = client.get(f'/region/snv?chrom={chrom}&start={start}&stop={stop}&rsids=like:{rsid}&sort=annotation.region.consequence:desc')
    expected = resp.get_json()['data']
    page_size = 100
    assert len(expected) > page_size

    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&rsids=like:{rsid}&sort=annotation.region.consequence:desc&limit={page_size}'
    collected = []
    while url is not None:
        resp = client.get(url)
        assert resp.status_code == 200
        body = resp.get_json()
        assert set(body) >= {'data', 'total', 'limit', 'next', 'error'}
        assert body['data'] is not None and len(body['data']) <= page_size
        assert body['total'] is not None and body['total'] == len(expected)
        assert body['limit'] is not None and body['limit'] == page_size
        assert body['error'] is None
        collected.extend(body['data'])
        url = body['next']
    assert expected == collected
@pytest.mark.integration
def test_snv_histogram_query(client, config):
    """Histogram window counts must sum to the number of PASS variants."""
    chrom, start, stop = 22, 25016233, 25267187
    page_size, windows = 10000, 1000

    # Count PASS variants by walking the paged listing.
    url = f'/region/snv?chrom={chrom}&start={start}&stop={stop}&filter=PASS&limit={page_size}'
    variant_count = 0
    while url is not None:
        body = client.get(url).get_json()
        variant_count += len(body['data'])
        url = body['next']

    resp = client.get(f'/region/snv/histogram?chrom={chrom}&start={start}&stop={stop}&filter=PASS&windows={windows}')
    assert resp.status_code == 200
    data = resp.get_json()['data']
    assert set(data) >= {'chrom', 'window-size', 'windows'}
    assert 0 < len(data['windows']) < windows
    assert variant_count == sum(entry['count'] for entry in data['windows'])
@pytest.mark.integration
def test_snv_summary_query(client, config):
    """Summary totals must be internally consistent: all = passed + failed,
    and each bucket's total equals its SNVs plus its indels."""
    chrom = 22
    start = 25016233
    stop = 25267187
    response = client.get(f'/region/snv/summary?chrom={chrom}&start={start}&stop={stop}')
    assert response.status_code == 200
    payload = response.get_json()
    assert payload['data'] is not None
    summary = payload['data']
    # Pass/fail buckets partition the full variant set.
    assert summary['all']['total'] == summary['passed']['total'] + summary['failed']['total']
    # Within each bucket, SNVs and indels partition the bucket's total.
    for bucket in ('all', 'passed', 'failed'):
        assert summary[bucket]['total'] == summary[bucket]['snv'] + summary[bucket]['indels']
    assert payload['total'] is not None and payload['total'] == len(payload['data'])
    assert payload['limit'] is None
    assert payload['error'] is None
| 46.605042 | 176 | 0.664653 | 4,714 | 33,276 | 4.55961 | 0.031184 | 0.093747 | 0.041872 | 0.049688 | 0.919001 | 0.898018 | 0.880897 | 0.857867 | 0.85047 | 0.827626 | 0 | 0.023262 | 0.182233 | 33,276 | 713 | 177 | 46.670407 | 0.76661 | 0 | 0 | 0.700787 | 0 | 0.059843 | 0.228844 | 0.128201 | 0 | 0 | 0 | 0 | 0.428346 | 1 | 0.040945 | false | 0.075591 | 0.004724 | 0.00315 | 0.048819 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
a3a95e0acf27fdd89d56cf795a9ee2515b39160c | 24,447 | py | Python | subject/tests/unit/common/test_property_utils.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | subject/tests/unit/common/test_property_utils.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | subject/tests/unit/common/test_property_utils.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from subject.api import policy
from subject.common import exception
from subject.common import property_utils
import subject.context
from subject.tests.unit import base
# Section names, in file order, expected from the test copy of
# property-protections.conf (installed by set_property_protections()).
# Used below to verify that configuration sections -- and the rules
# compiled from them -- are loaded in their original order.
CONFIG_SECTIONS = [
    '^x_owner_.*',
    'spl_create_prop',
    'spl_read_prop',
    'spl_read_only_prop',
    'spl_update_prop',
    'spl_update_only_prop',
    'spl_delete_prop',
    'spl_delete_empty_prop',
    '^x_all_permitted.*',
    '^x_none_permitted.*',
    'x_none_read',
    'x_none_update',
    'x_none_delete',
    'x_case_insensitive',
    'x_foo_matcher',
    'x_foo_*',
    '.*'
]
def create_context(policy, roles=None):
    """Return a RequestContext carrying *roles*, enforced by *policy*.

    :param policy: policy.Enforcer instance used as the context's enforcer
    :param roles: list of role names; ``None`` means no roles
    """
    effective_roles = [] if roles is None else roles
    return subject.context.RequestContext(roles=effective_roles,
                                          policy_enforcer=policy)
class TestPropertyRulesWithRoles(base.IsolatedUnitTest):
    """Property-protection rules driven by user roles.

    Each test loads the role-based test property-protections configuration
    (via ``set_property_protections()``), builds a
    ``property_utils.PropertyRules`` checker, and exercises
    ``check_property_rules(property, operation, context)`` for various
    combinations of property name, CRUD operation and role set.
    """

    def setUp(self):
        super(TestPropertyRulesWithRoles, self).setUp()
        # Install the role-based property-protections config file.
        self.set_property_protections()
        self.policy = policy.Enforcer()

    def tearDown(self):
        super(TestPropertyRulesWithRoles, self).tearDown()

    # --- feature toggle -------------------------------------------------

    def test_is_property_protections_enabled_true(self):
        self.config(property_protection_file="property-protections.conf")
        self.assertTrue(property_utils.is_property_protection_enabled())

    def test_is_property_protections_enabled_false(self):
        # Protection is disabled when no config file is set.
        self.config(property_protection_file=None)
        self.assertFalse(property_utils.is_property_protection_enabled())

    # --- invalid configurations must be rejected at load time -----------

    def test_property_protection_file_doesnt_exist(self):
        self.config(property_protection_file='fake-file.conf')
        self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
                          property_utils.PropertyRules)

    def test_property_protection_with_mutually_exclusive_rule(self):
        # '@' (allow all) and '!' (deny all) cannot appear together.
        exclusive_rules = {'.*': {'create': ['@', '!'],
                                  'read': ['fake-role'],
                                  'update': ['fake-role'],
                                  'delete': ['fake-role']}}
        self.set_property_protection_rules(exclusive_rules)
        self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
                          property_utils.PropertyRules)

    def test_property_protection_with_malformed_rule(self):
        # '^[0-9)' is an invalid regular expression.
        malformed_rules = {'^[0-9)': {'create': ['fake-role'],
                                      'read': ['fake-role'],
                                      'update': ['fake-role'],
                                      'delete': ['fake-role']}}
        self.set_property_protection_rules(malformed_rules)
        self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
                          property_utils.PropertyRules)

    def test_property_protection_with_missing_operation(self):
        # 'read' is missing from the rule's operations.
        rules_with_missing_operation = {'^[0-9]': {'create': ['fake-role'],
                                                   'update': ['fake-role'],
                                                   'delete': ['fake-role']}}
        self.set_property_protection_rules(rules_with_missing_operation)
        self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
                          property_utils.PropertyRules)

    def test_property_protection_with_misspelt_operation(self):
        # 'rade' is not a valid operation name.
        rules_with_misspelt_operation = {'^[0-9]': {'create': ['fake-role'],
                                                    'rade': ['fake-role'],
                                                    'update': ['fake-role'],
                                                    'delete': ['fake-role']}}
        self.set_property_protection_rules(rules_with_misspelt_operation)
        self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
                          property_utils.PropertyRules)

    def test_property_protection_with_whitespace(self):
        # Whitespace around comma-separated role names must be tolerated.
        rules_whitespace = {
            '^test_prop.*': {
                'create': ['member ,fake-role'],
                'read': ['fake-role, member'],
                'update': ['fake-role, member'],
                'delete': ['fake-role, member']
            }
        }
        self.set_property_protection_rules(rules_whitespace)
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules('test_prop_1',
                        'read', create_context(self.policy, ['member'])))
        self.assertTrue(self.rules_checker.check_property_rules('test_prop_1',
                        'read', create_context(self.policy, ['fake-role'])))

    # --- per-operation permission checks --------------------------------

    def test_check_property_rules_invalid_action(self):
        # 'hall' is not a recognized operation, so access is denied.
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertFalse(self.rules_checker.check_property_rules('test_prop',
                         'hall', create_context(self.policy, ['admin'])))

    def test_check_property_rules_read_permitted_admin_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertTrue(self.rules_checker.check_property_rules('test_prop',
                        'read', create_context(self.policy, ['admin'])))

    def test_check_property_rules_read_permitted_specific_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_owner_prop', 'read',
            create_context(self.policy, ['member'])))

    def test_check_property_rules_read_unpermitted_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertFalse(self.rules_checker.check_property_rules('test_prop',
                         'read', create_context(self.policy, ['member'])))

    def test_check_property_rules_create_permitted_admin_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertTrue(self.rules_checker.check_property_rules('test_prop',
                        'create', create_context(self.policy, ['admin'])))

    def test_check_property_rules_create_permitted_specific_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_owner_prop', 'create',
            create_context(self.policy, ['member'])))

    def test_check_property_rules_create_unpermitted_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertFalse(self.rules_checker.check_property_rules('test_prop',
                         'create', create_context(self.policy, ['member'])))

    def test_check_property_rules_update_permitted_admin_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertTrue(self.rules_checker.check_property_rules('test_prop',
                        'update', create_context(self.policy, ['admin'])))

    def test_check_property_rules_update_permitted_specific_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_owner_prop', 'update',
            create_context(self.policy, ['member'])))

    def test_check_property_rules_update_unpermitted_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertFalse(self.rules_checker.check_property_rules('test_prop',
                         'update', create_context(self.policy, ['member'])))

    def test_check_property_rules_delete_permitted_admin_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertTrue(self.rules_checker.check_property_rules('test_prop',
                        'delete', create_context(self.policy, ['admin'])))

    def test_check_property_rules_delete_permitted_specific_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_owner_prop', 'delete',
            create_context(self.policy, ['member'])))

    def test_check_property_rules_delete_unpermitted_role(self):
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertFalse(self.rules_checker.check_property_rules('test_prop',
                         'delete', create_context(self.policy, ['member'])))

    # --- configuration ordering ------------------------------------------

    def test_property_config_loaded_in_order(self):
        """
        Verify the order of loaded config sections matches that from the
        configuration file
        """
        self.rules_checker = property_utils.PropertyRules(self.policy)
        self.assertEqual(CONFIG_SECTIONS, property_utils.CONFIG.sections())

    def test_property_rules_loaded_in_order(self):
        """
        Verify rules are iterable in the same order as read from the config
        file
        """
        self.rules_checker = property_utils.PropertyRules(self.policy)
        for i in range(len(property_utils.CONFIG.sections())):
            self.assertEqual(property_utils.CONFIG.sections()[i],
                             self.rules_checker.rules[i][0].pattern)

    # --- '@' (all permitted) and '!' (none permitted) rules ---------------

    def test_check_property_rules_create_all_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_all_permitted', 'create', create_context(self.policy, [''])))

    def test_check_property_rules_read_all_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_all_permitted', 'read', create_context(self.policy, [''])))

    def test_check_property_rules_update_all_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_all_permitted', 'update', create_context(self.policy, [''])))

    def test_check_property_rules_delete_all_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_all_permitted', 'delete', create_context(self.policy, [''])))

    def test_check_property_rules_create_none_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_permitted', 'create', create_context(self.policy, [''])))

    def test_check_property_rules_read_none_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_permitted', 'read', create_context(self.policy, [''])))

    def test_check_property_rules_update_none_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_permitted', 'update', create_context(self.policy, [''])))

    def test_check_property_rules_delete_none_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_permitted', 'delete', create_context(self.policy, [''])))

    # --- '!' on a single operation only -----------------------------------

    def test_check_property_rules_read_none(self):
        # x_none_read: readable by no one, other operations allowed.
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_read', 'create',
            create_context(self.policy, ['admin', 'member'])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_read', 'read',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_read', 'update',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_read', 'delete',
            create_context(self.policy, [''])))

    def test_check_property_rules_update_none(self):
        # x_none_update: updatable by no one, other operations allowed.
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_update', 'create',
            create_context(self.policy, ['admin', 'member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_update', 'read',
            create_context(self.policy, ['admin', 'member'])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_update', 'update',
            create_context(self.policy, [''])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_update', 'delete',
            create_context(self.policy, ['admin', 'member'])))

    def test_check_property_rules_delete_none(self):
        # x_none_delete: deletable by no one, other operations allowed.
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_delete', 'create',
            create_context(self.policy, ['admin', 'member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_delete', 'read',
            create_context(self.policy, ['admin', 'member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_delete', 'update',
            create_context(self.policy, ['admin', 'member'])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_delete', 'delete',
            create_context(self.policy, [''])))

    def test_check_return_first_match(self):
        # 'x_foo_matcher' matches both the specific section and 'x_foo_*';
        # the first matching rule in config order must win.
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_foo_matcher', 'create',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_foo_matcher', 'read',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_foo_matcher', 'update',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_foo_matcher', 'delete',
            create_context(self.policy, [''])))

    def test_check_case_insensitive_property_rules(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_case_insensitive', 'create',
            create_context(self.policy, ['member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_case_insensitive', 'read',
            create_context(self.policy, ['member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_case_insensitive', 'update',
            create_context(self.policy, ['member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_case_insensitive', 'delete',
            create_context(self.policy, ['member'])))
class TestPropertyRulesWithPolicies(base.IsolatedUnitTest):
    """Property-protection rules driven by policy names instead of roles.

    setUp loads the policy-based test configuration
    (``set_property_protections(use_policies=True)``); access is then
    decided by evaluating the named policy against the request context.
    """

    def setUp(self):
        super(TestPropertyRulesWithPolicies, self).setUp()
        self.set_property_protections(use_policies=True)
        self.policy = policy.Enforcer()
        self.rules_checker = property_utils.PropertyRules(self.policy)

    def tearDown(self):
        super(TestPropertyRulesWithPolicies, self).tearDown()

    # --- per-operation checks against 'spl_creator_policy' ---------------

    def test_check_property_rules_create_permitted_specific_policy(self):
        self.assertTrue(self.rules_checker.check_property_rules(
            'spl_creator_policy', 'create',
            create_context(self.policy, ['spl_role'])))

    def test_check_property_rules_create_unpermitted_policy(self):
        self.assertFalse(self.rules_checker.check_property_rules(
            'spl_creator_policy', 'create',
            create_context(self.policy, ['fake-role'])))

    def test_check_property_rules_read_permitted_specific_policy(self):
        self.assertTrue(self.rules_checker.check_property_rules(
            'spl_creator_policy', 'read',
            create_context(self.policy, ['spl_role'])))

    def test_check_property_rules_read_unpermitted_policy(self):
        self.assertFalse(self.rules_checker.check_property_rules(
            'spl_creator_policy', 'read',
            create_context(self.policy, ['fake-role'])))

    def test_check_property_rules_update_permitted_specific_policy(self):
        self.assertTrue(self.rules_checker.check_property_rules(
            'spl_creator_policy', 'update',
            create_context(self.policy, ['admin'])))

    def test_check_property_rules_update_unpermitted_policy(self):
        self.assertFalse(self.rules_checker.check_property_rules(
            'spl_creator_policy', 'update',
            create_context(self.policy, ['fake-role'])))

    def test_check_property_rules_delete_permitted_specific_policy(self):
        self.assertTrue(self.rules_checker.check_property_rules(
            'spl_creator_policy', 'delete',
            create_context(self.policy, ['admin'])))

    def test_check_property_rules_delete_unpermitted_policy(self):
        self.assertFalse(self.rules_checker.check_property_rules(
            'spl_creator_policy', 'delete',
            create_context(self.policy, ['fake-role'])))

    # --- invalid configurations must be rejected at load time -------------

    def test_property_protection_with_malformed_rule(self):
        # '^[0-9)' is an invalid regular expression.
        malformed_rules = {'^[0-9)': {'create': ['fake-policy'],
                                      'read': ['fake-policy'],
                                      'update': ['fake-policy'],
                                      'delete': ['fake-policy']}}
        self.set_property_protection_rules(malformed_rules)
        self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
                          property_utils.PropertyRules)

    def test_property_protection_with_multiple_policies(self):
        # Only one policy per operation is allowed in policy mode.
        malformed_rules = {'^x_.*': {'create': ['fake-policy, another_pol'],
                                     'read': ['fake-policy'],
                                     'update': ['fake-policy'],
                                     'delete': ['fake-policy']}}
        self.set_property_protection_rules(malformed_rules)
        self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
                          property_utils.PropertyRules)

    # --- '@' (all permitted) and '!' (none permitted) rules ----------------

    def test_check_property_rules_create_all_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_all_permitted', 'create', create_context(self.policy, [''])))

    def test_check_property_rules_read_all_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_all_permitted', 'read', create_context(self.policy, [''])))

    def test_check_property_rules_update_all_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_all_permitted', 'update', create_context(self.policy, [''])))

    def test_check_property_rules_delete_all_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_all_permitted', 'delete', create_context(self.policy, [''])))

    def test_check_property_rules_create_none_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_permitted', 'create', create_context(self.policy, [''])))

    def test_check_property_rules_read_none_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_permitted', 'read', create_context(self.policy, [''])))

    def test_check_property_rules_update_none_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_permitted', 'update', create_context(self.policy, [''])))

    def test_check_property_rules_delete_none_permitted(self):
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_permitted', 'delete', create_context(self.policy, [''])))

    # --- '!' on a single operation only ------------------------------------

    def test_check_property_rules_read_none(self):
        # x_none_read: readable by no one, other operations allowed.
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_read', 'create',
            create_context(self.policy, ['admin', 'member'])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_read', 'read',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_read', 'update',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_read', 'delete',
            create_context(self.policy, [''])))

    def test_check_property_rules_update_none(self):
        # x_none_update: updatable by no one, other operations allowed.
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_update', 'create',
            create_context(self.policy, ['admin', 'member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_update', 'read',
            create_context(self.policy, ['admin', 'member'])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_update', 'update',
            create_context(self.policy, [''])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_update', 'delete',
            create_context(self.policy, ['admin', 'member'])))

    def test_check_property_rules_delete_none(self):
        # x_none_delete: deletable by no one, other operations allowed.
        self.rules_checker = property_utils.PropertyRules()
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_delete', 'create',
            create_context(self.policy, ['admin', 'member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_delete', 'read',
            create_context(self.policy, ['admin', 'member'])))
        self.assertTrue(self.rules_checker.check_property_rules(
            'x_none_delete', 'update',
            create_context(self.policy, ['admin', 'member'])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_none_delete', 'delete',
            create_context(self.policy, [''])))

    def test_check_return_first_match(self):
        # The first matching rule in config order must win over 'x_foo_*'.
        self.rules_checker = property_utils.PropertyRules()
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_foo_matcher', 'create',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_foo_matcher', 'read',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_foo_matcher', 'update',
            create_context(self.policy, [''])))
        self.assertFalse(self.rules_checker.check_property_rules(
            'x_foo_matcher', 'delete',
            create_context(self.policy, [''])))
| 49.090361 | 78 | 0.658854 | 2,618 | 24,447 | 5.778457 | 0.068755 | 0.071986 | 0.124802 | 0.104112 | 0.861581 | 0.856954 | 0.827935 | 0.82701 | 0.812533 | 0.803609 | 0 | 0.001118 | 0.231439 | 24,447 | 497 | 79 | 49.189135 | 0.804034 | 0.034033 | 0 | 0.71464 | 0 | 0 | 0.107273 | 0.001954 | 0 | 0 | 0 | 0 | 0.2134 | 1 | 0.156328 | false | 0 | 0.014888 | 0 | 0.17866 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a3d1bf63d46ac20e26ebfcb364a24b1145cd9114 | 295 | py | Python | scikit_optim/__init__.py | antoinezambelli/scikit-optim | 6553f849800e7a5feb6f6271e6bcc36738033744 | [
"MIT"
] | null | null | null | scikit_optim/__init__.py | antoinezambelli/scikit-optim | 6553f849800e7a5feb6f6271e6bcc36738033744 | [
"MIT"
] | null | null | null | scikit_optim/__init__.py | antoinezambelli/scikit-optim | 6553f849800e7a5feb6f6271e6bcc36738033744 | [
"MIT"
] | 1 | 2019-05-03T17:20:48.000Z | 2019-05-03T17:20:48.000Z | from .scikit_optim import (ModelSelector, GaussNB, MultiNB, kNN, SupportVC,
RandForest, DecTree, LogRegress, GMM)
__all__ = [
'ModelSelector',
'GaussNB',
'MultiNB',
'kNN',
'SupportVC',
'RandForest',
'DecTree',
'LogRegress',
'GMM'
] | 21.071429 | 75 | 0.566102 | 23 | 295 | 7.043478 | 0.608696 | 0.246914 | 0.333333 | 0.37037 | 0.851852 | 0.851852 | 0.851852 | 0.851852 | 0.851852 | 0 | 0 | 0 | 0.298305 | 295 | 14 | 76 | 21.071429 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0.233108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
a3db644c3767efd2a715383ab04d6d47412e279b | 5,674 | py | Python | src/wi/tests/main_test.py | cc1-cloud/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | [
"Apache-2.0"
] | 11 | 2015-05-06T14:16:54.000Z | 2022-02-08T23:21:31.000Z | src/wi/tests/main_test.py | fortress-shell/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | [
"Apache-2.0"
] | 1 | 2015-10-30T21:08:11.000Z | 2015-10-30T21:08:11.000Z | src/wi/tests/main_test.py | fortress-shell/cc1 | 8113673fa13b6fe195cea99dedab9616aeca3ae8 | [
"Apache-2.0"
] | 5 | 2016-02-12T22:01:38.000Z | 2021-12-06T16:56:54.000Z | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
# -*- coding: utf-8 -*-
"""@package src.wi.tests.main_test
@author Piotr Wójcik
@author Krzysztof Danielowski
@date 11.10.2012
"""
from wi.tests import WiTestCase
import unittest
class MainTests(WiTestCase, unittest.TestCase):
    """Selenium browser tests for the news section of the web interface.

    ``_test_*`` helpers log in as the CM admin, drive the news CRUD
    dialogs through the browser, and log out again; the ``test_*``
    methods compose them into scenarios.
    """

    def _test_news_create(self):
        """Create a news entry with topic 'witest' via the dialog."""
        driver = self.driver
        self.base_url = self.TEST_SERVER
        self.login_testuser(self.TEST_admin_cm)
        driver.get(self.base_url + "/news/")

        self.wait_for_text("//a[@id='main_create_news']", ["Create a news entry"])
        driver.find_element_by_id("main_create_news").click()

        self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Topic"])
        driver.find_element_by_id("id_topic").clear()
        driver.find_element_by_id("id_topic").send_keys("witest")
        driver.find_element_by_id("id_content").clear()
        driver.find_element_by_id("id_content").send_keys("test")
        driver.find_element_by_css_selector("button.ok-button.mid_button").click()

        self.wait_for_message(["News entry added."])
        driver.find_element_by_link_text("Logout").click()

    def _test_news_create_fail_required(self):
        """Submit the create dialog empty and expect validation errors."""
        driver = self.driver
        self.base_url = self.TEST_SERVER
        self.login_testuser(self.TEST_admin_cm)
        driver.get(self.base_url + "/news/")

        self.wait_for_text("//a[@id='main_create_news']", ["Create a news entry"])
        driver.find_element_by_id("main_create_news").click()

        self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Topic"])
        driver.find_element_by_id("id_topic").clear()
        driver.find_element_by_id("id_content").clear()
        driver.find_element_by_css_selector("button.ok-button.mid_button").click()

        # Both topic and content are mandatory.
        self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div[1]/ul/li", ["This field is required."])
        self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div[2]/ul/li", ["This field is required."])
        driver.find_element_by_link_text("Logout").click()

    def _test_news_create_sticky(self):
        """Create a sticky news entry.

        NOTE(review): not invoked by any test_* scenario below --
        apparently kept for manual/future use; confirm before removing.
        """
        driver = self.driver
        self.base_url = self.TEST_SERVER
        self.login_testuser(self.TEST_admin_cm)
        driver.get(self.base_url + "/news/")

        self.wait_for_text("//a[@id='main_create_news']", ["Create a news entry"])
        driver.find_element_by_id("main_create_news").click()

        self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Topic"])
        driver.find_element_by_id("id_topic").clear()
        driver.find_element_by_id("id_topic").send_keys("witest")
        driver.find_element_by_id("id_content").clear()
        driver.find_element_by_id("id_content").send_keys("test")
        driver.find_element_by_id("id_sticky").click()
        driver.find_element_by_css_selector("button.ok-button.mid_button").click()

        self.wait_for_message(["News entry added."])
        driver.find_element_by_link_text("Logout").click()

    def _test_news_edit(self, topic):
        """Edit the existing 'witest' entry, setting a new *topic*."""
        driver = self.driver
        self.base_url = self.TEST_SERVER
        self.login_testuser(self.TEST_admin_cm)
        driver.get(self.base_url + "/news/")

        self.wait_for_text("//div[@id='item-list']/div/div[2]", ["witest"])
        driver.find_element_by_id("main_edit_news").click()

        self.wait_for_text("//div[@id='dialog-div']/form/div/fieldset/div/span", ["Topic"])
        driver.find_element_by_id("id_topic").clear()
        driver.find_element_by_id("id_topic").send_keys(topic)
        driver.find_element_by_id("id_content").clear()
        driver.find_element_by_id("id_content").send_keys("test2")
        driver.find_element_by_id("id_sticky").click()
        driver.find_element_by_css_selector("button.ok-button.mid_button").click()

        self.wait_for_message(["News entry edited."])
        driver.find_element_by_link_text("Logout").click()

    def _test_news_remove(self, topic):
        """Remove the entry whose topic is *topic*, confirming the dialog."""
        driver = self.driver
        self.base_url = self.TEST_SERVER
        self.login_testuser(self.TEST_admin_cm)
        driver.get(self.base_url + "/news/")

        self.wait_for_text("//div[@id='item-list']/div/div[2]", [topic])
        driver.find_element_by_id("main_remove_news").click()

        self.wait_for_text("//div[@id='dialog-div']/p", ["Do you want to delete news entry"])
        driver.find_element_by_css_selector("button.ok-button.mid_button").click()

        self.wait_for_message(["You have successfully removed news entry"])
        driver.find_element_by_link_text("Logout").click()

    def test_1_simple(self):
        # Full create -> edit -> remove round trip with an ASCII topic.
        self._test_news_create()
        topic = 'witest'
        self._test_news_edit(topic)
        self._test_news_remove(topic)

    def test_2_fails(self):
        # Validation path: empty form submission must be rejected.
        self._test_news_create_fail_required()

    def test_3_utf8_edit(self):
        # Same round trip, but edit to a topic containing non-ASCII text.
        self._test_news_create()
        topic = u'ąśłęąĄŁŁ'
        self._test_news_edit(topic)
        self._test_news_remove(topic)
| 35.4625 | 113 | 0.679591 | 818 | 5,674 | 4.397311 | 0.195599 | 0.086183 | 0.146511 | 0.163748 | 0.768974 | 0.756186 | 0.706422 | 0.705032 | 0.705032 | 0.705032 | 0 | 0.006652 | 0.17871 | 5,674 | 159 | 114 | 35.685535 | 0.765236 | 0.138174 | 0 | 0.712644 | 0 | 0.022989 | 0.244399 | 0.126413 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091954 | false | 0 | 0.022989 | 0 | 0.126437 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
430ec633d20a69582157780fcbac48e4e937f1e3 | 244 | py | Python | weight/curves/__init__.py | VictorArnaud/sdcurve | d5397b0193fb01e94dc93c9fad5e2db195754384 | [
"MIT"
] | null | null | null | weight/curves/__init__.py | VictorArnaud/sdcurve | d5397b0193fb01e94dc93c9fad5e2db195754384 | [
"MIT"
] | 8 | 2018-05-17T22:50:18.000Z | 2018-05-19T01:15:26.000Z | weight/curves/__init__.py | VWApplications/sdcurve | d5397b0193fb01e94dc93c9fad5e2db195754384 | [
"MIT"
] | 1 | 2018-06-27T00:18:21.000Z | 2018-06-27T00:18:21.000Z | from weight.curves.male_years import WeightCurveMaleYears
from weight.curves.male_months import WeightCurveMaleMonths
from weight.curves.female_years import WeightCurveFemaleYears
from weight.curves.female_months import WeightCurveFemaleMonths
| 48.8 | 63 | 0.901639 | 28 | 244 | 7.714286 | 0.428571 | 0.185185 | 0.296296 | 0.185185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065574 | 244 | 4 | 64 | 61 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
4312b4138062bcb8406461c492d399f24471e6a6 | 3,331 | py | Python | migrations/versions/68a62bb9bff4_nullable_fields.py | pierous/sarna | f7d92193c98f25cc0d0516b3aedc6d13b7ab2a0d | [
"MIT"
] | 1 | 2022-03-15T10:55:55.000Z | 2022-03-15T10:55:55.000Z | migrations/versions/68a62bb9bff4_nullable_fields.py | pierous/sarna | f7d92193c98f25cc0d0516b3aedc6d13b7ab2a0d | [
"MIT"
] | null | null | null | migrations/versions/68a62bb9bff4_nullable_fields.py | pierous/sarna | f7d92193c98f25cc0d0516b3aedc6d13b7ab2a0d | [
"MIT"
] | 1 | 2022-03-15T10:56:58.000Z | 2022-03-15T10:56:58.000Z | """Nullable fields
Revision ID: 68a62bb9bff4
Revises: 44c6c91dd3c4
Create Date: 2020-09-29 16:50:52.247676
"""
from alembic import op
import sqlalchemy as sa
import sarna
# revision identifiers, used by Alembic.
revision = '68a62bb9bff4'
down_revision = '44c6c91dd3c4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('assessment', 'platform',
existing_type=sa.VARCHAR(length=64),
nullable=True)
op.alter_column('finding', 'business_risk',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding', 'dissemination',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding', 'exploitability',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding', 'solution_complexity',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding', 'tech_risk',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding_template', 'business_risk',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding_template', 'dissemination',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding_template', 'exploitability',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding_template', 'solution_complexity',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('finding_template', 'tech_risk',
existing_type=sa.INTEGER(),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('finding_template', 'tech_risk',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding_template', 'solution_complexity',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding_template', 'exploitability',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding_template', 'dissemination',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding_template', 'business_risk',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding', 'tech_risk',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding', 'solution_complexity',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding', 'exploitability',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding', 'dissemination',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('finding', 'business_risk',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('assessment', 'platform',
existing_type=sa.VARCHAR(length=64),
nullable=False)
# ### end Alembic commands ###
| 35.43617 | 62 | 0.602222 | 334 | 3,331 | 5.799401 | 0.188623 | 0.079504 | 0.147651 | 0.206505 | 0.843573 | 0.843573 | 0.841507 | 0.841507 | 0.839442 | 0.839442 | 0 | 0.020825 | 0.279195 | 3,331 | 93 | 63 | 35.817204 | 0.785923 | 0.089162 | 0 | 0.88 | 0 | 0 | 0.187521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.04 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
431c2a5fead263f306331f1fc5fc6df09e74d350 | 85 | py | Python | thing/tests/__init__.py | Gillingham/evething | e00b722cf00a6a3a25e1fff3014ed3365c7ef3e4 | [
"BSD-2-Clause"
] | 33 | 2015-02-18T00:07:57.000Z | 2020-06-09T15:19:05.000Z | thing/tests/__init__.py | Gillingham/evething | e00b722cf00a6a3a25e1fff3014ed3365c7ef3e4 | [
"BSD-2-Clause"
] | 19 | 2015-03-09T19:51:43.000Z | 2019-10-19T12:04:23.000Z | thing/tests/__init__.py | Gillingham/evething | e00b722cf00a6a3a25e1fff3014ed3365c7ef3e4 | [
"BSD-2-Clause"
] | 20 | 2015-02-20T17:53:17.000Z | 2022-02-11T06:29:11.000Z | from thing.tests.models import * # NOPEP8
from thing.tests.views import * # NOPEP8
| 28.333333 | 42 | 0.741176 | 12 | 85 | 5.25 | 0.583333 | 0.285714 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028169 | 0.164706 | 85 | 2 | 43 | 42.5 | 0.859155 | 0.152941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
432208e64badd98a6cf094bac136ff67f6bdb1e9 | 523 | py | Python | ramda/reject_test.py | Rafi993/pyramda | 4fa7fe28d5eaa798b702d28bdd3948515cb88f48 | [
"MIT"
] | 56 | 2018-08-06T08:44:58.000Z | 2022-03-17T09:49:03.000Z | ramda/reject_test.py | Rafi993/pyramda | 4fa7fe28d5eaa798b702d28bdd3948515cb88f48 | [
"MIT"
] | 28 | 2019-06-17T11:09:52.000Z | 2022-02-18T16:59:21.000Z | ramda/reject_test.py | slavaGanzin/pyramda | 4fa7fe28d5eaa798b702d28bdd3948515cb88f48 | [
"MIT"
] | 5 | 2019-09-18T09:24:38.000Z | 2021-07-21T08:40:23.000Z | from .reject import reject
def test_reject_filters_out_unwanted_items_in_iterable():
assert reject(lambda x: x % 2 == 1, [1, 2, 3, 4]) == [2, 4]
def test_curry_reject_filters_out_unwanted_items_in_iterable():
assert reject(lambda x: x % 2 == 1)([1, 2, 3, 4]) == [2, 4]
def test_reject_does_not_remove_duplicates():
assert reject(lambda x: x % 2 == 1, [1, 2, 3, 4, 4]) == [2, 4, 4]
def test_curry_reject_does_not_remove_duplicates():
assert reject(lambda x: x % 2 == 1)([1, 2, 3, 4, 4]) == [2, 4, 4]
| 29.055556 | 69 | 0.646272 | 94 | 523 | 3.319149 | 0.244681 | 0.089744 | 0.230769 | 0.24359 | 0.907051 | 0.852564 | 0.852564 | 0.852564 | 0.852564 | 0.852564 | 0 | 0.084706 | 0.187381 | 523 | 17 | 70 | 30.764706 | 0.649412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.444444 | 1 | 0.444444 | true | 0 | 0.111111 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 12 |
4a50aff3f4b2b661cd1d47b96f9e7c1f490a7943 | 1,614 | py | Python | hublib/ui/test/test_groups.py | hzclarksm/hublib | e8f2168d80464b6343b980e30fdd552d1b0c2479 | [
"MIT"
] | 6 | 2017-05-23T19:17:29.000Z | 2022-02-24T00:36:46.000Z | hublib/ui/test/test_groups.py | hzclarksm/hublib | e8f2168d80464b6343b980e30fdd552d1b0c2479 | [
"MIT"
] | 1 | 2019-02-13T13:35:57.000Z | 2019-02-13T13:35:57.000Z | hublib/ui/test/test_groups.py | hzclarksm/hublib | e8f2168d80464b6343b980e30fdd552d1b0c2479 | [
"MIT"
] | 6 | 2017-09-12T19:51:12.000Z | 2021-01-13T23:43:57.000Z | from __future__ import print_function
import pytest
import sys
import ipywidgets as widgets
from . import setup_test_comm, teardown_test_comm
import hublib.ui as ui
class TestForm:
@classmethod
def setup_class(cls):
setup_test_comm()
@classmethod
def teardown_class(cls):
teardown_test_comm()
def test_form(self):
wlist = []
for i in range(4):
wlist.append(ui.String(name='Name%s' % i, description='', value='Description of #%s' % i))
f = ui.Form(wlist, name='My Form')
assert f.disabled is False
for w in wlist:
assert w.disabled is False
f.disabled = True
assert f.disabled is True
for w in wlist:
assert w.disabled is True
f.disabled = False
assert f.disabled is False
for w in wlist:
assert w.disabled is False
class TestTab:
@classmethod
def setup_class(cls):
setup_test_comm()
@classmethod
def teardown_class(cls):
teardown_test_comm()
def test_form(self):
wlist = []
for i in range(4):
wlist.append(ui.String(name='Name%s' % i, description='', value='Description of #%s' % i))
f = ui.Tab(wlist)
assert f.disabled is False
for w in wlist:
assert w.disabled is False
f.disabled = True
assert f.disabled is True
for w in wlist:
assert w.disabled is True
f.disabled = False
assert f.disabled is False
for w in wlist:
assert w.disabled is False
| 21.236842 | 102 | 0.58798 | 217 | 1,614 | 4.267281 | 0.211982 | 0.12959 | 0.12959 | 0.110151 | 0.801296 | 0.801296 | 0.801296 | 0.801296 | 0.801296 | 0.801296 | 0 | 0.001855 | 0.332094 | 1,614 | 75 | 103 | 21.52 | 0.857143 | 0 | 0 | 0.807692 | 0 | 0 | 0.03414 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 1 | 0.115385 | false | 0 | 0.115385 | 0 | 0.269231 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4a74308c67da808159a70976576a5ce83144b60e | 2,154 | py | Python | tests/test_multi_value_mark.py | mwbutcher/flake8-pytest-mark | 5dee18225d980cbc830773dc18a30b84fa2eb294 | [
"Apache-2.0"
] | null | null | null | tests/test_multi_value_mark.py | mwbutcher/flake8-pytest-mark | 5dee18225d980cbc830773dc18a30b84fa2eb294 | [
"Apache-2.0"
] | null | null | null | tests/test_multi_value_mark.py | mwbutcher/flake8-pytest-mark | 5dee18225d980cbc830773dc18a30b84fa2eb294 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# args to only use checks that raise an 'M' prefixed error
extra_args = ['--select', 'M']
config = """
[flake8]
pytest_mark1 = name=jira,value_regex=[a-zA-Z]*-\d*,allow_multiple_args=true
"""
def test_with_valid_test_id_marks(flake8dir):
flake8dir.make_setup_cfg(config)
flake8dir.make_example_py("""
@pytest.mark.jira('ASC-123', 'ASC-124', 'ASC-125')
def test_happy_path():
pass
""")
result = flake8dir.run_flake8(extra_args)
assert result.out_lines == []
def test_with_invalid_test_id_mark(flake8dir):
flake8dir.make_setup_cfg(config)
flake8dir.make_example_py("""
@pytest.mark.jira('ASC-123', 'not_good', 'ASC-125')
def test_happy_path():
pass
""")
result = flake8dir.run_flake8(extra_args)
assert result.out_lines == ["./example.py:1:1: M601 the mark values '['not_good']' do not match the configuration specified by pytest_mark1, Configured regex: '[a-zA-Z]*-\\d*'"] # noqa
def test_with_multiple_invalid_test_id_mark(flake8dir):
flake8dir.make_setup_cfg(config)
flake8dir.make_example_py("""
@pytest.mark.jira('bad', 'not_good', 'really bad')
def test_happy_path():
pass
""")
result = flake8dir.run_flake8(extra_args)
assert result.out_lines == ["./example.py:1:1: M601 the mark values '['bad', 'not_good', 'really bad']' do not match the configuration specified by pytest_mark1, Configured regex: '[a-zA-Z]*-\\d*'"] # noqa
def test_values_that_are_not_strings(flake8dir):
flake8dir.make_setup_cfg(config)
flake8dir.make_example_py("""
@pytest.mark.jira(['ASC-123', 'ASC-124', 'ASC-125'])
def test_happy_path():
pass
""")
result = flake8dir.run_flake8(extra_args)
assert result.out_lines == [u'./example.py:1:1: M701 mark values must be strings']
def test_multiple_values_that_are_not_strings(flake8dir):
flake8dir.make_setup_cfg(config)
flake8dir.make_example_py("""
@pytest.mark.jira('ASC-432', ['ASC-123', 'ASC-124'], 'RE-234')
def test_happy_path():
pass
""")
result = flake8dir.run_flake8(extra_args)
assert result.out_lines == [u'./example.py:1:1: M701 mark values must be strings']
| 32.636364 | 210 | 0.6987 | 319 | 2,154 | 4.45768 | 0.253919 | 0.049226 | 0.077356 | 0.094937 | 0.841069 | 0.807314 | 0.807314 | 0.807314 | 0.807314 | 0.807314 | 0 | 0.046562 | 0.142526 | 2,154 | 65 | 211 | 33.138462 | 0.723335 | 0.040854 | 0 | 0.64 | 0 | 0.1 | 0.467734 | 0.095585 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0.1 | 0 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
4a7b4c40cb7e8cdd143123a4835e4df091827df5 | 7,293 | py | Python | pyxform/tests/xlsform_spec_test.py | loginphpcom/pyxform | 4da80774db25935d894fcbf42d2c423648205e80 | [
"BSD-2-Clause"
] | 10 | 2015-06-19T07:40:31.000Z | 2021-12-27T00:03:16.000Z | pyxform/tests/xlsform_spec_test.py | loginphpcom/pyxform | 4da80774db25935d894fcbf42d2c423648205e80 | [
"BSD-2-Clause"
] | 1 | 2019-12-27T18:26:48.000Z | 2019-12-27T18:26:48.000Z | pyxform/tests/xlsform_spec_test.py | loginphpcom/pyxform | 4da80774db25935d894fcbf42d2c423648205e80 | [
"BSD-2-Clause"
] | 21 | 2015-02-26T20:15:23.000Z | 2019-12-27T18:13:04.000Z | """
Some tests for the new (v0.9) spec is properly implemented.
"""
import unittest2 as unittest
import codecs
import os
import sys
# Hack to make sure that pyxform is on the python import path
parentdir = os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, parentdir)
import pyxform
from pyxform.errors import PyXFormError
from .utils import XFormTestCase
DIR = os.path.dirname(__file__)
class main_test(XFormTestCase):
maxDiff = None
def runTest(self):
filename = "xlsform_spec_test.xlsx"
path_to_excel_file = os.path.join(DIR, "example_xls", filename)
# Get the xform output path:
root_filename, ext = os.path.splitext(filename)
output_path = os.path.join(DIR, "test_output", root_filename + ".xml")
expected_output_path = os.path.join(DIR, "test_expected_output",
root_filename + ".xml")
# Do the conversion:
warnings = []
json_survey = pyxform.xls2json.parse_file_to_json(
path_to_excel_file, warnings=warnings)
survey = pyxform.create_survey_element_from_dict(json_survey)
survey.print_xform_to_file(output_path, warnings=warnings)
# print warnings
# Compare with the expected output:
with codecs.open(expected_output_path, 'rb', encoding="utf-8") \
as expected_file:
with codecs.open(output_path, 'rb', encoding="utf-8") \
as actual_file:
self.assertXFormEqual(expected_file.read(), actual_file.read())
class flat_xlsform_test(XFormTestCase):
maxDiff = None
def runTest(self):
filename = "flat_xlsform_test.xlsx"
path_to_excel_file = os.path.join(DIR, "example_xls", filename)
# Get the xform output path:
root_filename, ext = os.path.splitext(filename)
output_path = os.path.join(DIR, "test_output", root_filename + ".xml")
expected_output_path = os.path.join(
DIR, "test_expected_output", root_filename + ".xml")
# Do the conversion:
warnings = []
json_survey = pyxform.xls2json.parse_file_to_json(
path_to_excel_file, warnings=warnings)
survey = pyxform.create_survey_element_from_dict(json_survey)
survey.print_xform_to_file(output_path, warnings=warnings)
# print warnings
# Compare with the expected output:
with codecs.open(expected_output_path, 'rb', encoding="utf-8") \
as expected_file:
with codecs.open(output_path, 'rb', encoding="utf-8") \
as actual_file:
self.assertXFormEqual(expected_file.read(), actual_file.read())
class test_new_widgets(XFormTestCase):
maxDiff = None
def runTest(self):
filename = "widgets.xls"
path_to_excel_file = os.path.join(DIR, "example_xls", filename)
# Get the xform output path:
root_filename, ext = os.path.splitext(filename)
output_path = os.path.join(DIR, "test_output", root_filename + ".xml")
expected_output_path = os.path.join(
DIR, "test_expected_output", root_filename + ".xml")
# Do the conversion:
warnings = []
json_survey = pyxform.xls2json.parse_file_to_json(path_to_excel_file,
warnings=warnings)
survey = pyxform.create_survey_element_from_dict(json_survey)
survey.print_xform_to_file(output_path, warnings=warnings)
# print warnings
# Compare with the expected output:
with codecs.open(expected_output_path, 'rb', encoding="utf-8") \
as expected_file:
with codecs.open(output_path, 'rb', encoding="utf-8") \
as actual_file:
self.assertXFormEqual(expected_file.read(), actual_file.read())
class warnings_test(unittest.TestCase):
"""
Just checks that the number of warnings thrown when reading warnings.xls
doesn't change
"""
def runTest(self):
filename = "warnings.xls"
path_to_excel_file = os.path.join(DIR, "example_xls", filename)
warnings = []
pyxform.xls2json.parse_file_to_json(
path_to_excel_file, warnings=warnings)
self.assertEquals(
len(warnings), 21, "Found " + str(len(warnings)) + " warnings")
class calculate_without_calculation_test(unittest.TestCase):
"""
Just checks that calculate field without calculation raises a PyXFormError.
"""
def runTest(self):
filename = "calculate_without_calculation.xls"
path_to_excel_file = os.path.join(DIR, "example_xls", filename)
self.assertRaises(PyXFormError, pyxform.xls2json.parse_file_to_json,
path_to_excel_file)
class PullDataTest(XFormTestCase):
maxDiff = None
def runTest(self):
filename = "pull_data.xlsx"
path_to_excel_file = os.path.join(DIR, "example_xls", filename)
# Get the xform output path:
root_filename, ext = os.path.splitext(filename)
output_path = os.path.join(DIR, "test_output", root_filename + ".xml")
expected_output_path = os.path.join(DIR, "test_expected_output",
root_filename + ".xml")
# Do the conversion:
warnings = []
json_survey = pyxform.xls2json.parse_file_to_json(
path_to_excel_file, warnings=warnings)
survey = pyxform.create_survey_element_from_dict(json_survey)
survey.print_xform_to_file(output_path, warnings=warnings)
# Compare with the expected output:
with codecs.open(expected_output_path, 'rb', encoding="utf-8") \
as expected_file:
with codecs.open(output_path, 'rb', encoding="utf-8") \
as actual_file:
self.assertXFormEqual(expected_file.read(), actual_file.read())
# cleanup
os.remove(output_path)
class SeachAndSelectTest(XFormTestCase):
maxDiff = None
def runTest(self):
filename = "search_and_select.xlsx"
path_to_excel_file = os.path.join(DIR, "example_xls", filename)
# Get the xform output path:
root_filename, ext = os.path.splitext(filename)
output_path = os.path.join(DIR, "test_output", root_filename + ".xml")
expected_output_path = os.path.join(DIR, "test_expected_output",
root_filename + ".xml")
# Do the conversion:
warnings = []
json_survey = pyxform.xls2json.parse_file_to_json(
path_to_excel_file, warnings=warnings)
survey = pyxform.create_survey_element_from_dict(json_survey)
survey.print_xform_to_file(output_path, warnings=warnings)
# Compare with the expected output:
with codecs.open(expected_output_path, 'rb', encoding="utf-8") \
as expected_file:
with codecs.open(output_path, 'rb', encoding="utf-8") \
as actual_file:
self.assertXFormEqual(expected_file.read(), actual_file.read())
# cleanup
os.remove(output_path)
if __name__ == '__main__':
unittest.main()
| 38.792553 | 79 | 0.638695 | 870 | 7,293 | 5.082759 | 0.136782 | 0.072365 | 0.038444 | 0.049977 | 0.829037 | 0.829037 | 0.813659 | 0.782451 | 0.749661 | 0.749661 | 0 | 0.004283 | 0.263678 | 7,293 | 187 | 80 | 39 | 0.819181 | 0.102153 | 0 | 0.728 | 0 | 0 | 0.077243 | 0.015264 | 0 | 0 | 0 | 0 | 0.056 | 1 | 0.056 | false | 0 | 0.056 | 0 | 0.208 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
434ae04015881f5c4c4ef67d708adbb366752bd5 | 14,995 | py | Python | dexbuytools/helpers/data/cro.py | Minh-Trng/dex-buy-tools | 8470dfb6546285773cfc0ba1f4aad843bd8c303a | [
"MIT"
] | 6 | 2022-02-02T09:16:37.000Z | 2022-02-24T06:26:22.000Z | dexbuytools/helpers/data/cro.py | Minh-Trng/dex-buy-tools | 8470dfb6546285773cfc0ba1f4aad843bd8c303a | [
"MIT"
] | 5 | 2022-02-07T13:02:29.000Z | 2022-02-10T21:21:17.000Z | dexbuytools/helpers/data/cro.py | Minh-Trng/dex-buy-tools | 8470dfb6546285773cfc0ba1f4aad843bd8c303a | [
"MIT"
] | 2 | 2022-02-21T05:20:11.000Z | 2022-03-28T02:53:20.000Z | chain_data = {
'MAIN_TOKEN_ADDRESS': '0x5C7F8A570d578ED84E63fdFA7b1eE72dEae1AE23',
'MAIN_TOKEN_ABI': '[{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"constant":true,"inputs":[{"internalType":"address","name":"_owner","type":"address"},{"internalType":"address","name":"spender","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getOwner","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"internalType":"s
tring","name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]',
'ROUTER_ADDRESS_MMF': "0x145677FC4d9b8F19B5D56d1820c48e0443049a30",
'ROUTER_ABI_MMF': '[{"inputs":[{"internalType":"address","name":"_factory","type":"address"},{"internalType":"address","name":"_WETH","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"WETH","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"tokenA","type":"address"},{"internalType":"address","name":"tokenB","type":"address"},{"internalType":"uint256","name":"amountADesired","type":"uint256"},{"internalType":"uint256","name":"amountBDesired","type":"uint256"},{"internalType":"uint256","name":"amountAMin","type":"uint256"},{"internalType":"uint256","name":"amountBMin","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"addLiquidity","outputs":[{"internalType":"uint256","name":"amountA","type":"uint256"},{"internalType":"uint256","name":"amountB","type":"uint256"},{"internalType":"uint256","name":"liquidity","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"amountTokenDesired","type":"uint256"},{"internalType":"uint256","name":"amountTokenMin","type":"uint256"},{"internalType":"uint256","name":"amountETHMin","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"addLiquidityETH","outputs":[{"internalType":"uint256","name":"amountToken","type":"uint256"},{"internalType":"uint256","name":"amountETH","type":"uint256"},{"internalType":"uint256","name":"liquidity","type":"uint256"}],"stateMutability":"payable","type":"function"},{"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountOut","type":"
uint256"},{"internalType":"uint256","name":"reserveIn","type":"uint256"},{"internalType":"uint256","name":"reserveOut","type":"uint256"}],"name":"getAmountIn","outputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"reserveIn","type":"uint256"},{"internalType":"uint256","name":"reserveOut","type":"uint256"}],"name":"getAmountOut","outputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"}],"name":"getAmountsIn","outputs":[{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"}],"name":"getAmountsOut","outputs":[{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountA","type":"uint256"},{"internalType":"uint256","name":"reserveA","type":"uint256"},{"internalType":"uint256","name":"reserveB","type":"uint256"}],"name":"quote","outputs":[{"internalType":"uint256","name":"amountB","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"address","name":"tokenA","type":"address"},{"internalType":"address","name":"tokenB","type":"address"},{"internalType":"uint256","name":"liquidity","type":"uint256"},{"internalType":"uint256","name":"amountAMin","type":"uint256"},{"internalType":"uint256","name":"amountBMin","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"removeLiquidity","outputs":[{"internalType"
:"uint256","name":"amountA","type":"uint256"},{"internalType":"uint256","name":"amountB","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"liquidity","type":"uint256"},{"internalType":"uint256","name":"amountTokenMin","type":"uint256"},{"internalType":"uint256","name":"amountETHMin","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"removeLiquidityETH","outputs":[{"internalType":"uint256","name":"amountToken","type":"uint256"},{"internalType":"uint256","name":"amountETH","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"liquidity","type":"uint256"},{"internalType":"uint256","name":"amountTokenMin","type":"uint256"},{"internalType":"uint256","name":"amountETHMin","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"removeLiquidityETHSupportingFeeOnTransferTokens","outputs":[{"internalType":"uint256","name":"amountETH","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"liquidity","type":"uint256"},{"internalType":"uint256","name":"amountTokenMin","type":"uint256"},{"internalType":"uint256","name":"amountETHMin","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"bool","name":"approveMax","type":"bool"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"removeLiquidityETHWithPermit","outputs":[{"internalType":
"uint256","name":"amountToken","type":"uint256"},{"internalType":"uint256","name":"amountETH","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"liquidity","type":"uint256"},{"internalType":"uint256","name":"amountTokenMin","type":"uint256"},{"internalType":"uint256","name":"amountETHMin","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"bool","name":"approveMax","type":"bool"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"removeLiquidityETHWithPermitSupportingFeeOnTransferTokens","outputs":[{"internalType":"uint256","name":"amountETH","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"tokenA","type":"address"},{"internalType":"address","name":"tokenB","type":"address"},{"internalType":"uint256","name":"liquidity","type":"uint256"},{"internalType":"uint256","name":"amountAMin","type":"uint256"},{"internalType":"uint256","name":"amountBMin","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"bool","name":"approveMax","type":"bool"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"removeLiquidityWithPermit","outputs":[{"internalType":"uint256","name":"amountA","type":"uint256"},{"internalType":"uint256","name":"amountB","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address"
,"name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapETHForExactTokens","outputs":[{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountOutMin","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapExactETHForTokens","outputs":[{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountOutMin","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapExactETHForTokensSupportingFeeOnTransferTokens","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMin","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapExactTokensForETH","outputs":[{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMin","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapExactTokensForETHSupportingFeeOnTransferTokens","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","n
ame":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMin","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapExactTokensForTokens","outputs":[{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountIn","type":"uint256"},{"internalType":"uint256","name":"amountOutMin","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapExactTokensForTokensSupportingFeeOnTransferTokens","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"uint256","name":"amountInMax","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapTokensForExactETH","outputs":[{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amountOut","type":"uint256"},{"internalType":"uint256","name":"amountInMax","type":"uint256"},{"internalType":"address[]","name":"path","type":"address[]"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"deadline","type":"uint256"}],"name":"swapTokensForExactTokens","outputs":[{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"stateMutability":"nonpayable","type":"function"},{"stateMutability":"payable","type":"receive"}]',
} | 2,499.166667 | 11,900 | 0.675559 | 1,407 | 14,995 | 7.191187 | 0.075338 | 0.136984 | 0.23641 | 0.109705 | 0.890295 | 0.879126 | 0.853825 | 0.839494 | 0.82032 | 0.797193 | 0 | 0.0471 | 0.001801 | 14,995 | 6 | 11,901 | 2,499.166667 | 0.628875 | 0 | 0 | 0 | 0 | 0.333333 | 0.995666 | 0.991398 | 0 | 0 | 0.005601 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 |
43b4382521927fda47782eb8c779a4eb86cd9cbd | 14,449 | py | Python | tests/test_datasets/test_top_down_dataset.py | filipkro/mmpose | b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4 | [
"Apache-2.0"
] | 1 | 2020-09-22T03:39:47.000Z | 2020-09-22T03:39:47.000Z | tests/test_datasets/test_top_down_dataset.py | filipkro/mmpose | b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4 | [
"Apache-2.0"
] | null | null | null | tests/test_datasets/test_top_down_dataset.py | filipkro/mmpose | b4b6eda3fe3c2470ab0e44936f4bf7f82db6d3e4 | [
"Apache-2.0"
] | null | null | null | import copy
import tempfile
from unittest.mock import MagicMock
import json_tricks as json
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from mmpose.datasets import DATASETS
def load_json_to_output(json_name):
    """Convert ground-truth annotations into model-output format.

    Reads a COCO-style annotation file and, for each (image, annotation)
    pair, builds the ``(keypoints, box, image_path)`` tuple that
    ``dataset.evaluate`` expects, using the ground-truth keypoints as the
    "predictions" so metrics should come out perfect.

    Args:
        json_name (str): Path to the annotation JSON file.

    Returns:
        list[tuple]: One ``(keypoints, box, img_path)`` tuple per
        annotation, where ``keypoints`` is a (1, K, 3) float32 array,
        ``box`` is an all-zero (1, 6) float32 array and ``img_path`` is
        the file name expanded to a list of characters (the format the
        evaluator consumes).
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `json.load(open(...))` leaked it until GC).
    with open(json_name, 'r') as f:
        data = json.load(f)
    outputs = []
    for image_info, anno in zip(data['images'], data['annotations']):
        keypoints = np.array(
            anno['keypoints'], dtype=np.float32).reshape((1, -1, 3))
        box = np.array([0, 0, 0, 0, 0, 0], dtype=np.float32).reshape(1, -1)
        # list(str) is the idiomatic spelling of `img_path[:0] = str`.
        img_path = list(image_info['file_name'])
        outputs.append((keypoints, box, img_path))
    return outputs
def test_top_down_COCO_dataset():
    """Smoke-test TopDownCocoDataset with detected and ground-truth bboxes."""
    dataset_class = DATASETS.get('TopDownCocoDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # All 17 COCO keypoints are used for both output and inference.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[list(range(17))],
        inference_channel=list(range(17)))

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        bbox_thr=1.0,
        use_gt_bbox=True,
        image_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )

    # Detected-bbox mode: same config with the gt-bbox flag flipped.
    det_cfg = copy.deepcopy(data_cfg)
    det_cfg['use_gt_bbox'] = False
    for in_test_mode in (True, False):
        _ = dataset_class(
            ann_file='tests/data/coco/test_coco.json',
            img_prefix='tests/data/coco/',
            data_cfg=det_cfg,
            pipeline=[],
            test_mode=in_test_mode)

    # Ground-truth-bbox mode.
    custom_dataset = dataset_class(
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert 785 in custom_dataset.image_set_index
    assert len(custom_dataset.image_set_index) == 4
    _ = custom_dataset[0]
def test_top_down_COCO_wholebody_dataset():
    """Smoke-test TopDownCocoWholeBodyDataset with det and gt bboxes."""
    dataset_class = DATASETS.get('TopDownCocoWholeBodyDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # COCO-WholeBody has 133 keypoints, all in use.
    channel_cfg = dict(
        num_output_channels=133,
        dataset_joints=133,
        dataset_channel=[list(range(133))],
        inference_channel=list(range(133)))

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        bbox_thr=1.0,
        use_gt_bbox=True,
        image_thr=0.0,
        bbox_file='tests/data/coco/test_coco_det_AP_H_56.json',
    )

    # Detected-bbox mode: same config with the gt-bbox flag flipped.
    det_cfg = copy.deepcopy(data_cfg)
    det_cfg['use_gt_bbox'] = False
    for in_test_mode in (True, False):
        _ = dataset_class(
            ann_file='tests/data/coco/test_coco_wholebody.json',
            img_prefix='tests/data/coco/',
            data_cfg=det_cfg,
            pipeline=[],
            test_mode=in_test_mode)

    # Ground-truth-bbox mode.
    custom_dataset = dataset_class(
        ann_file='tests/data/coco/test_coco_wholebody.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)

    assert custom_dataset.test_mode is True
    assert 785 in custom_dataset.image_set_index
    assert len(custom_dataset.image_set_index) == 4
    _ = custom_dataset[0]
def test_top_down_OCHuman_dataset():
    """Smoke-test TopDownOCHumanDataset: det-bbox mode must raise (no
    ``bbox_file`` is provided), while gt-bbox mode loads the test data."""
    dataset = 'TopDownOCHumanDataset'
    # test OCHuman datasets
    dataset_class = DATASETS.get(dataset)
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()
    # 17 keypoints (COCO layout), all used for output and inference.
    channel_cfg = dict(
        num_output_channels=17,
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
        ])
    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        bbox_thr=1.0,
        use_gt_bbox=True,
        image_thr=0.0,
        # Empty on purpose: det-bbox mode needs a bbox file, so the
        # construction below is expected to fail an assertion.
        bbox_file='',
    )
    with pytest.raises(AssertionError):
        # Test det bbox
        data_cfg_copy = copy.deepcopy(data_cfg)
        data_cfg_copy['use_gt_bbox'] = False
        _ = dataset_class(
            ann_file='tests/data/ochuman/test_ochuman.json',
            img_prefix='tests/data/ochuman/',
            data_cfg=data_cfg_copy,
            pipeline=[],
            test_mode=True)
    # Test gt bbox
    custom_dataset = dataset_class(
        ann_file='tests/data/ochuman/test_ochuman.json',
        img_prefix='tests/data/ochuman/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)
    assert custom_dataset.test_mode is True
    image_id = 1
    assert image_id in custom_dataset.image_set_index
    assert len(custom_dataset.image_set_index) == 3
    _ = custom_dataset[0]
def test_top_down_OneHand10K_dataset():
    """Smoke-test TopDownOneHand10KDataset construction and evaluation."""
    dataset_class = DATASETS.get('TopDownOneHand10KDataset')

    # 21 hand keypoints, all in use.
    channel_cfg = dict(
        num_output_channels=21,
        dataset_joints=21,
        dataset_channel=[list(range(21))],
        inference_channel=list(range(21)))
    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    cfg = copy.deepcopy(data_cfg)
    _ = dataset_class(
        ann_file='tests/data/onehand10k/test_onehand10k.json',
        img_prefix='tests/data/onehand10k/',
        data_cfg=cfg,
        pipeline=[],
        test_mode=True)
    custom_dataset = dataset_class(
        ann_file='tests/data/onehand10k/test_onehand10k.json',
        img_prefix='tests/data/onehand10k/',
        data_cfg=cfg,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 4
    _ = custom_dataset[0]

    # Predictions equal to the ground truth must yield perfect metrics.
    outputs = load_json_to_output('tests/data/onehand10k/test_onehand10k.json')
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK', 'EPE', 'AUC'])
        assert_almost_equal(infos['PCK'], 1.0)
        assert_almost_equal(infos['AUC'], 0.95)
        assert_almost_equal(infos['EPE'], 0.0)

        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def test_top_down_FreiHand_dataset():
    """Smoke-test TopDownFreiHandDataset construction and evaluation."""
    dataset_class = DATASETS.get('TopDownFreiHandDataset')

    # 21 hand keypoints, all in use.
    channel_cfg = dict(
        num_output_channels=21,
        dataset_joints=21,
        dataset_channel=[list(range(21))],
        inference_channel=list(range(21)))
    data_cfg = dict(
        image_size=[224, 224],
        heatmap_size=[56, 56],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    cfg = copy.deepcopy(data_cfg)
    _ = dataset_class(
        ann_file='tests/data/freihand/test_freihand.json',
        img_prefix='tests/data/freihand/',
        data_cfg=cfg,
        pipeline=[],
        test_mode=True)
    custom_dataset = dataset_class(
        ann_file='tests/data/freihand/test_freihand.json',
        img_prefix='tests/data/freihand/',
        data_cfg=cfg,
        pipeline=[],
        test_mode=False)

    assert custom_dataset.test_mode is False
    assert custom_dataset.num_images == 8
    _ = custom_dataset[0]

    # Predictions equal to the ground truth must yield perfect metrics.
    outputs = load_json_to_output('tests/data/freihand/test_freihand.json')
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK', 'EPE', 'AUC'])
        assert_almost_equal(infos['PCK'], 1.0)
        assert_almost_equal(infos['AUC'], 0.95)
        assert_almost_equal(infos['EPE'], 0.0)

        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
def test_top_down_MPII_dataset():
    """Smoke-test TopDownMpiiDataset construction and indexing."""
    dataset_class = DATASETS.get('TopDownMpiiDataset')
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()

    # 16 MPII keypoints, all in use.
    channel_cfg = dict(
        num_output_channels=16,
        dataset_joints=16,
        dataset_channel=[list(range(16))],
        inference_channel=list(range(16)))
    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
    )

    custom_dataset = dataset_class(
        ann_file='tests/data/mpii/test_mpii.json',
        img_prefix='tests/data/mpii/',
        data_cfg=copy.deepcopy(data_cfg),
        pipeline=[])

    assert len(custom_dataset) == 5
    _ = custom_dataset[0]
def test_top_down_MPII_TRB_dataset():
    """Smoke-test TopDownMpiiTrbDataset in train and test modes."""
    dataset_class = DATASETS.get('TopDownMpiiTrbDataset')

    # MPII-TRB uses 40 channels, all in use.
    channel_cfg = dict(
        num_output_channels=40,
        dataset_joints=40,
        dataset_channel=[list(range(40))],
        inference_channel=list(range(40)))
    data_cfg = dict(
        image_size=[256, 256],
        heatmap_size=[64, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'])

    _ = dataset_class(
        ann_file='tests/data/mpii/test_mpii_trb.json',
        img_prefix='tests/data/mpii/',
        data_cfg=copy.deepcopy(data_cfg),
        pipeline=[],
        test_mode=False)
    custom_dataset = dataset_class(
        ann_file='tests/data/mpii/test_mpii_trb.json',
        img_prefix='tests/data/mpii/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)

    assert custom_dataset.test_mode is True
    _ = custom_dataset[0]
def test_top_down_AIC_dataset():
    """Smoke-test TopDownAicDataset: det-bbox mode must raise (empty
    ``bbox_file``), while gt-bbox mode loads and indexes the test data."""
    dataset = 'TopDownAicDataset'
    # test AIC datasets
    dataset_class = DATASETS.get(dataset)
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()
    # AIC has 14 keypoints, all used for output and inference.
    channel_cfg = dict(
        num_output_channels=14,
        dataset_joints=14,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
        ],
        inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        bbox_thr=1.0,
        use_gt_bbox=True,
        image_thr=0.0,
        # Empty on purpose: det-bbox mode needs a bbox file, so the
        # constructions below are expected to fail an assertion.
        bbox_file='')
    with pytest.raises(AssertionError):
        # Test det bbox
        data_cfg_copy = copy.deepcopy(data_cfg)
        data_cfg_copy['use_gt_bbox'] = False
        _ = dataset_class(
            ann_file='tests/data/aic/test_aic.json',
            img_prefix='tests/data/aic/',
            data_cfg=data_cfg_copy,
            pipeline=[],
            test_mode=True)
        # NOTE(review): pytest.raises exits as soon as the first
        # construction raises, so this second construction never
        # executes — it looks like dead code; confirm intent.
        _ = dataset_class(
            ann_file='tests/data/aic/test_aic.json',
            img_prefix='tests/data/aic/',
            data_cfg=data_cfg_copy,
            pipeline=[],
            test_mode=False)
    # Test gt bbox
    custom_dataset = dataset_class(
        ann_file='tests/data/aic/test_aic.json',
        img_prefix='tests/data/aic/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)
    assert custom_dataset.test_mode is True
    image_id = 1
    assert image_id in custom_dataset.image_set_index
    assert len(custom_dataset.image_set_index) == 3
    _ = custom_dataset[0]
| 30.742553 | 79 | 0.621427 | 1,891 | 14,449 | 4.434691 | 0.078794 | 0.053422 | 0.032793 | 0.036728 | 0.883139 | 0.878846 | 0.86716 | 0.858812 | 0.858812 | 0.8544 | 0 | 0.051193 | 0.263202 | 14,449 | 469 | 80 | 30.808102 | 0.736521 | 0.017371 | 0 | 0.820779 | 0 | 0 | 0.134626 | 0.066784 | 0 | 0 | 0 | 0 | 0.07013 | 1 | 0.023377 | false | 0 | 0.020779 | 0 | 0.046753 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
78e9bb1f4fe718f80fd132a61fc02006bd6442e8 | 271 | py | Python | colour/graph/__init__.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 1,380 | 2015-01-10T12:30:33.000Z | 2022-03-30T10:19:57.000Z | colour/graph/__init__.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 638 | 2015-01-02T10:49:05.000Z | 2022-03-29T10:16:22.000Z | colour/graph/__init__.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 250 | 2015-01-21T15:27:19.000Z | 2022-03-30T10:23:58.000Z | # -*- coding: utf-8 -*-
from .conversion import (CONVERSION_GRAPH, CONVERSION_GRAPH_NODE_LABELS,
describe_conversion_path, convert)
# Public API of this package, re-exported from colour.graph.conversion.
__all__ = [
    'CONVERSION_GRAPH', 'CONVERSION_GRAPH_NODE_LABELS',
    'describe_conversion_path', 'convert'
]
| 27.1 | 72 | 0.690037 | 27 | 271 | 6.333333 | 0.481481 | 0.350877 | 0.292398 | 0.350877 | 0.807018 | 0.807018 | 0.807018 | 0.807018 | 0.807018 | 0.807018 | 0 | 0.00463 | 0.202952 | 271 | 9 | 73 | 30.111111 | 0.787037 | 0.077491 | 0 | 0 | 0 | 0 | 0.302419 | 0.209677 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
60193e9ede0836c32d8d83d41d201242d7f00544 | 17,524 | py | Python | dingtalk/python/alibabacloud_dingtalk/alitrip_1_0/client.py | yndu13/dingtalk-sdk | 700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f | [
"Apache-2.0"
] | null | null | null | dingtalk/python/alibabacloud_dingtalk/alitrip_1_0/client.py | yndu13/dingtalk-sdk | 700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f | [
"Apache-2.0"
] | null | null | null | dingtalk/python/alibabacloud_dingtalk/alitrip_1_0/client.py | yndu13/dingtalk-sdk | 700fb7bb49c4d3167f84afc5fcb5e7aa5a09735f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.alitrip_1_0 import models as dingtalkalitrip__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
    """Client for the DingTalk Alitrip (business travel) 1.0 OpenAPI.

    Auto-generated wrapper around ``/v1.0/alitrip/cityCarApprovals``.
    Each operation (add / approve / query) comes in a sync and an async
    variant, plus ``*_with_options`` variants that take explicit headers
    and runtime options; the short variants delegate with defaults.
    """
    def __init__(
        self,
        config: open_api_models.Config,
    ):
        # Standard OpenAPI client setup, then fall back to the default
        # DingTalk gateway endpoint when none was configured.
        super().__init__(config)
        self._endpoint_rule = ''
        if UtilClient.empty(self._endpoint):
            self._endpoint = 'api.dingtalk.com'
    def add_city_car_apply(
        self,
        request: dingtalkalitrip__1__0_models.AddCityCarApplyRequest,
    ) -> dingtalkalitrip__1__0_models.AddCityCarApplyResponse:
        """Create a city car-use approval using default headers/runtime."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkalitrip__1__0_models.AddCityCarApplyHeaders()
        return self.add_city_car_apply_with_options(request, headers, runtime)
    async def add_city_car_apply_async(
        self,
        request: dingtalkalitrip__1__0_models.AddCityCarApplyRequest,
    ) -> dingtalkalitrip__1__0_models.AddCityCarApplyResponse:
        """Async variant of :meth:`add_city_car_apply`."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkalitrip__1__0_models.AddCityCarApplyHeaders()
        return await self.add_city_car_apply_with_options_async(request, headers, runtime)
    def add_city_car_apply_with_options(
        self,
        request: dingtalkalitrip__1__0_models.AddCityCarApplyRequest,
        headers: dingtalkalitrip__1__0_models.AddCityCarApplyHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkalitrip__1__0_models.AddCityCarApplyResponse:
        """Create a city car-use approval (POST cityCarApprovals).

        Only request fields that are actually set are copied into the
        wire payload; unset fields are omitted entirely.
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.cause):
            body['cause'] = request.cause
        if not UtilClient.is_unset(request.city):
            body['city'] = request.city
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.date):
            body['date'] = request.date
        if not UtilClient.is_unset(request.project_code):
            body['projectCode'] = request.project_code
        if not UtilClient.is_unset(request.project_name):
            body['projectName'] = request.project_name
        if not UtilClient.is_unset(request.status):
            body['status'] = request.status
        if not UtilClient.is_unset(request.third_part_apply_id):
            body['thirdPartApplyId'] = request.third_part_apply_id
        if not UtilClient.is_unset(request.third_part_cost_center_id):
            body['thirdPartCostCenterId'] = request.third_part_cost_center_id
        if not UtilClient.is_unset(request.third_part_invoice_id):
            body['thirdPartInvoiceId'] = request.third_part_invoice_id
        if not UtilClient.is_unset(request.times_total):
            body['timesTotal'] = request.times_total
        if not UtilClient.is_unset(request.times_type):
            body['timesType'] = request.times_type
        if not UtilClient.is_unset(request.times_used):
            body['timesUsed'] = request.times_used
        if not UtilClient.is_unset(request.title):
            body['title'] = request.title
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.ding_suite_key):
            body['dingSuiteKey'] = request.ding_suite_key
        if not UtilClient.is_unset(request.ding_corp_id):
            body['dingCorpId'] = request.ding_corp_id
        if not UtilClient.is_unset(request.ding_token_grant_type):
            body['dingTokenGrantType'] = request.ding_token_grant_type
        if not UtilClient.is_unset(request.finished_date):
            body['finishedDate'] = request.finished_date
        # Caller-supplied common headers win; the access token is always
        # carried in the dedicated x-acs-dingtalk-access-token header.
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkalitrip__1__0_models.AddCityCarApplyResponse(),
            self.do_roarequest('AddCityCarApply', 'alitrip_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
        )
    async def add_city_car_apply_with_options_async(
        self,
        request: dingtalkalitrip__1__0_models.AddCityCarApplyRequest,
        headers: dingtalkalitrip__1__0_models.AddCityCarApplyHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkalitrip__1__0_models.AddCityCarApplyResponse:
        """Async variant of :meth:`add_city_car_apply_with_options`."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.cause):
            body['cause'] = request.cause
        if not UtilClient.is_unset(request.city):
            body['city'] = request.city
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.date):
            body['date'] = request.date
        if not UtilClient.is_unset(request.project_code):
            body['projectCode'] = request.project_code
        if not UtilClient.is_unset(request.project_name):
            body['projectName'] = request.project_name
        if not UtilClient.is_unset(request.status):
            body['status'] = request.status
        if not UtilClient.is_unset(request.third_part_apply_id):
            body['thirdPartApplyId'] = request.third_part_apply_id
        if not UtilClient.is_unset(request.third_part_cost_center_id):
            body['thirdPartCostCenterId'] = request.third_part_cost_center_id
        if not UtilClient.is_unset(request.third_part_invoice_id):
            body['thirdPartInvoiceId'] = request.third_part_invoice_id
        if not UtilClient.is_unset(request.times_total):
            body['timesTotal'] = request.times_total
        if not UtilClient.is_unset(request.times_type):
            body['timesType'] = request.times_type
        if not UtilClient.is_unset(request.times_used):
            body['timesUsed'] = request.times_used
        if not UtilClient.is_unset(request.title):
            body['title'] = request.title
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.ding_suite_key):
            body['dingSuiteKey'] = request.ding_suite_key
        if not UtilClient.is_unset(request.ding_corp_id):
            body['dingCorpId'] = request.ding_corp_id
        if not UtilClient.is_unset(request.ding_token_grant_type):
            body['dingTokenGrantType'] = request.ding_token_grant_type
        if not UtilClient.is_unset(request.finished_date):
            body['finishedDate'] = request.finished_date
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkalitrip__1__0_models.AddCityCarApplyResponse(),
            await self.do_roarequest_async('AddCityCarApply', 'alitrip_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
        )
    def approve_city_car_apply(
        self,
        request: dingtalkalitrip__1__0_models.ApproveCityCarApplyRequest,
    ) -> dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse:
        """Update a car-use approval's status with default headers/runtime."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkalitrip__1__0_models.ApproveCityCarApplyHeaders()
        return self.approve_city_car_apply_with_options(request, headers, runtime)
    async def approve_city_car_apply_async(
        self,
        request: dingtalkalitrip__1__0_models.ApproveCityCarApplyRequest,
    ) -> dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse:
        """Async variant of :meth:`approve_city_car_apply`."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkalitrip__1__0_models.ApproveCityCarApplyHeaders()
        return await self.approve_city_car_apply_with_options_async(request, headers, runtime)
    def approve_city_car_apply_with_options(
        self,
        request: dingtalkalitrip__1__0_models.ApproveCityCarApplyRequest,
        headers: dingtalkalitrip__1__0_models.ApproveCityCarApplyHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse:
        """Update a car-use approval's status (PUT cityCarApprovals).

        Only request fields that are actually set are copied into the
        wire payload; unset fields are omitted entirely.
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.operate_time):
            body['operateTime'] = request.operate_time
        if not UtilClient.is_unset(request.remark):
            body['remark'] = request.remark
        if not UtilClient.is_unset(request.status):
            body['status'] = request.status
        if not UtilClient.is_unset(request.third_part_apply_id):
            body['thirdPartApplyId'] = request.third_part_apply_id
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.ding_suite_key):
            body['dingSuiteKey'] = request.ding_suite_key
        if not UtilClient.is_unset(request.ding_corp_id):
            body['dingCorpId'] = request.ding_corp_id
        if not UtilClient.is_unset(request.ding_token_grant_type):
            body['dingTokenGrantType'] = request.ding_token_grant_type
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse(),
            self.do_roarequest('ApproveCityCarApply', 'alitrip_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
        )
    async def approve_city_car_apply_with_options_async(
        self,
        request: dingtalkalitrip__1__0_models.ApproveCityCarApplyRequest,
        headers: dingtalkalitrip__1__0_models.ApproveCityCarApplyHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse:
        """Async variant of :meth:`approve_city_car_apply_with_options`."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.corp_id):
            body['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.operate_time):
            body['operateTime'] = request.operate_time
        if not UtilClient.is_unset(request.remark):
            body['remark'] = request.remark
        if not UtilClient.is_unset(request.status):
            body['status'] = request.status
        if not UtilClient.is_unset(request.third_part_apply_id):
            body['thirdPartApplyId'] = request.third_part_apply_id
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.ding_suite_key):
            body['dingSuiteKey'] = request.ding_suite_key
        if not UtilClient.is_unset(request.ding_corp_id):
            body['dingCorpId'] = request.ding_corp_id
        if not UtilClient.is_unset(request.ding_token_grant_type):
            body['dingTokenGrantType'] = request.ding_token_grant_type
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkalitrip__1__0_models.ApproveCityCarApplyResponse(),
            await self.do_roarequest_async('ApproveCityCarApply', 'alitrip_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
        )
    def query_city_car_apply(
        self,
        request: dingtalkalitrip__1__0_models.QueryCityCarApplyRequest,
    ) -> dingtalkalitrip__1__0_models.QueryCityCarApplyResponse:
        """Query car-use approvals using default headers/runtime."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkalitrip__1__0_models.QueryCityCarApplyHeaders()
        return self.query_city_car_apply_with_options(request, headers, runtime)
    async def query_city_car_apply_async(
        self,
        request: dingtalkalitrip__1__0_models.QueryCityCarApplyRequest,
    ) -> dingtalkalitrip__1__0_models.QueryCityCarApplyResponse:
        """Async variant of :meth:`query_city_car_apply`."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkalitrip__1__0_models.QueryCityCarApplyHeaders()
        return await self.query_city_car_apply_with_options_async(request, headers, runtime)
    def query_city_car_apply_with_options(
        self,
        request: dingtalkalitrip__1__0_models.QueryCityCarApplyRequest,
        headers: dingtalkalitrip__1__0_models.QueryCityCarApplyHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkalitrip__1__0_models.QueryCityCarApplyResponse:
        """Query car-use approvals (GET cityCarApprovals).

        Set request fields become URL query parameters; unset fields
        are omitted entirely.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.corp_id):
            query['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.created_end_at):
            query['createdEndAt'] = request.created_end_at
        if not UtilClient.is_unset(request.created_start_at):
            query['createdStartAt'] = request.created_start_at
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.third_part_apply_id):
            query['thirdPartApplyId'] = request.third_part_apply_id
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkalitrip__1__0_models.QueryCityCarApplyResponse(),
            self.do_roarequest('QueryCityCarApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
        )
    async def query_city_car_apply_with_options_async(
        self,
        request: dingtalkalitrip__1__0_models.QueryCityCarApplyRequest,
        headers: dingtalkalitrip__1__0_models.QueryCityCarApplyHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkalitrip__1__0_models.QueryCityCarApplyResponse:
        """Async variant of :meth:`query_city_car_apply_with_options`."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.corp_id):
            query['corpId'] = request.corp_id
        if not UtilClient.is_unset(request.created_end_at):
            query['createdEndAt'] = request.created_end_at
        if not UtilClient.is_unset(request.created_start_at):
            query['createdStartAt'] = request.created_start_at
        if not UtilClient.is_unset(request.page_number):
            query['pageNumber'] = request.page_number
        if not UtilClient.is_unset(request.page_size):
            query['pageSize'] = request.page_size
        if not UtilClient.is_unset(request.third_part_apply_id):
            query['thirdPartApplyId'] = request.third_part_apply_id
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkalitrip__1__0_models.QueryCityCarApplyResponse(),
            await self.do_roarequest_async('QueryCityCarApply', 'alitrip_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/alitrip/cityCarApprovals', 'json', req, runtime)
        )
| 50.794203 | 158 | 0.696188 | 2,002 | 17,524 | 5.717782 | 0.075924 | 0.035817 | 0.107452 | 0.121779 | 0.954049 | 0.9378 | 0.936752 | 0.928103 | 0.928103 | 0.924959 | 0 | 0.00823 | 0.216503 | 17,524 | 344 | 159 | 50.94186 | 0.825492 | 0.004565 | 0 | 0.842593 | 1 | 0 | 0.075617 | 0.022031 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021605 | false | 0 | 0.021605 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
60493a29b14e5ea09aef8cab8613e2a0cc7dcca3 | 12,006 | py | Python | seq2seq_data_generation.py | manluow/d3p | 23a33195d6fc4c0db60b24f3f871094a1f2cf8ab | [
"MIT"
] | 3 | 2021-04-20T13:22:45.000Z | 2021-04-27T09:46:28.000Z | seq2seq_data_generation.py | manluow/d3p | 23a33195d6fc4c0db60b24f3f871094a1f2cf8ab | [
"MIT"
] | null | null | null | seq2seq_data_generation.py | manluow/d3p | 23a33195d6fc4c0db60b24f3f871094a1f2cf8ab | [
"MIT"
] | 1 | 2021-04-22T15:22:13.000Z | 2021-04-22T15:22:13.000Z | # -*- coding: utf-8 -*-
import os
import json
import pickle
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from utils import day_of_month
def is_first_day(shop_timeline, seq, dt):
    '''Return True when ``dt`` is the opening day of one of the shop's
    operating intervals (half-open [start, end) ranges).
    '''
    return any(start == dt and dt < end
               for start, end in shop_timeline[seq])
def is_open(shop_timeline, seq, dt):
    '''Return True when the shop is open on ``dt``, i.e. the date falls
    inside one of its half-open [start, end) operating intervals.
    '''
    return any(start <= dt < end
               for start, end in shop_timeline[seq])
def get_prev(shop_id, cache_orders, cache_masks, cache_ext, cur_mon, cur_day, start_mon, n_day):
    '''Slice the ``n_day`` days of history immediately before the
    current date out of the flat per-day caches.

    Arg:
        shop_id: the id of the station
        cur_mon: the current month
        cur_day: the current day
        start_mon: the first month covered by the caches
        n_day: how many trailing days to return
    '''
    # Day index of the day before `cur_day`, counted from the cache start.
    month_offset = sum(day_of_month(m) for m in range(start_mon, cur_mon))
    end_idx = month_offset + cur_day - 1
    start_idx = end_idx - n_day
    return (cache_orders[shop_id, start_idx:end_idx],
            cache_masks[shop_id, start_idx:end_idx],
            cache_ext[shop_id, start_idx:end_idx])
def get_next(shop_id, cache_orders, cache_masks, cache_ext, cur_mon, cur_day, start_mon, n_day=14):
    '''Slice the ``n_day`` days starting at the current date out of the
    flat per-day caches.

    Arg:
        shop_id: the id of the station
        cur_mon: the current month
        cur_day: the current day
        start_mon: the first month covered by the caches
        n_day: how many upcoming days to return (default 14)
    '''
    month_offset = sum(day_of_month(m) for m in range(start_mon, cur_mon))
    start_idx = month_offset + cur_day - 1
    end_idx = start_idx + n_day
    # The orders slice starts one day early: the decoder is seeded with
    # the day that precedes the prediction window.
    r_orders = cache_orders[shop_id, start_idx - 1:end_idx]
    r_masks = cache_masks[shop_id, start_idx:end_idx]
    r_ext = cache_ext[shop_id, start_idx:end_idx]
    return r_orders, r_masks, r_ext
def _build_day_caches(n_shops, id2seq, shop_timeline, months):
    """Build the per-day order/mask/weekday caches for every shop.

    Returns three arrays indexed ``[shop, day]``:
      * orders: number of returns per day (-1.0 when the shop is closed)
      * masks: 1.0 on open days, 0.0 otherwise
      * ext: one-hot day-of-week features, shape (n_shops, n_days, 7)
    """
    cache_orders = []
    cache_masks = []
    cache_ext = []
    for month in months:
        mats = np.load('cache/mat%d.npy' % month)
        for day in range(1, day_of_month(month) + 1):
            tmp_order = np.full((n_shops,), -1.0, dtype=np.float32)
            tmp_mask = np.full((n_shops,), 0.0, dtype=np.float32)
            tmp_ext = np.zeros((n_shops, 7), dtype=np.float32)
            mat = mats[day]
            n_returns = np.sum(mat, 1)
            cur_dt = datetime(1991, month, day)
            for i in range(n_shops):
                seq = id2seq[i]
                # Closed shops keep the -1.0 sentinel and a zero mask.
                if not is_open(shop_timeline, seq, cur_dt):
                    continue
                tmp_order[i] = n_returns[i]
                tmp_mask[i] = 1.0
                tmp_ext[i, cur_dt.weekday()] = 1.0
            cache_orders.append(tmp_order)
            cache_masks.append(tmp_mask)
            cache_ext.append(tmp_ext)
    # Re-order axes so the shop dimension comes first.
    cache_orders = np.transpose(np.array(cache_orders))
    cache_masks = np.transpose(np.array(cache_masks))
    cache_ext = np.transpose(np.array(cache_ext), [1, 0, 2])
    return cache_orders, cache_masks, cache_ext


def _build_split(out_dir, n_shops, shops, id2seq, n_parks, shop_timeline,
                 cache_orders, cache_masks, cache_ext, months):
    """Assemble the seq2seq tensors for one dataset split and save them.

    Saves x, o, ext_inp, ext_oup, mask1, mask2, mask3 and y as .npy
    files under ``out_dir``.
    """
    x = []        # The input features
    o = []        # The number of orders in previous weeks
    ext_inp = []  # The day of the week (encoder side)
    ext_oup = []  # The day of the week (decoder side)
    mask1 = []    # The mask of the graph
    mask2 = []    # The mask of the prediction
    mask3 = []    # The mask of the new stations
    y = []        # The prediction target
    for month in months:
        pickup_amounts = np.load('cache/pickup_amounts%d.npy' % month)
        return_amounts = np.load('cache/return_amounts%d.npy' % month)
        for day in range(15, day_of_month(month) + 1):
            print(month, day)
            pickup_amount = pickup_amounts[day]
            return_amount = return_amounts[day]
            cur_dt = datetime(1991, month, day)
            # Only keep samples from 1991-01-01 (incl.) to 1991-01-16 (excl.).
            if not (datetime(1991, 1, 1) <= cur_dt < datetime(1991, 1, 16)):
                continue
            t_x = np.zeros((n_shops, 649), dtype=np.float32)
            t_o = np.zeros((n_shops, 14, 1), dtype=np.float32)
            t_ext_inp = np.zeros((n_shops, 14, 7), dtype=np.float32)
            t_ext_oup = np.zeros((n_shops, 7, 7), dtype=np.float32)
            t_y = np.zeros((n_shops, 8, 1), dtype=np.float32)
            t_mask1 = np.zeros((n_shops, 1), dtype=np.float32)
            t_mask2 = np.zeros((n_shops, 7, 1), dtype=np.float32)
            t_mask3 = np.zeros((n_shops, 1), dtype=np.float32)
            for i in range(n_shops):
                seq = id2seq[i]
                # Static shop features plus today's pickup/return amounts
                # and parking capacity.
                t_x[i, :646] = shops[i, 227:]
                t_x[i, 646] = pickup_amount[i]
                t_x[i, 647] = return_amount[i]
                t_x[i, 648] = n_parks[seq]
                if is_first_day(shop_timeline, seq, cur_dt):
                    t_mask3[i] = 1.0
                a, _, c = get_prev(i, cache_orders, cache_masks, cache_ext, month, day, 1, 14)
                t_o[i] = np.expand_dims(a, 1)
                t_mask1[i] = 1.0
                t_ext_inp[i] = c
                a, b, c = get_next(i, cache_orders, cache_masks, cache_ext, month, day, 1, 7)
                t_y[i] = np.expand_dims(a, 1)
                t_mask2[i] = np.expand_dims(b, 1)
                t_ext_oup[i] = c
            x.append(t_x)
            o.append(t_o)
            ext_inp.append(t_ext_inp)
            ext_oup.append(t_ext_oup)
            mask1.append(t_mask1)
            mask2.append(t_mask2)
            mask3.append(t_mask3)
            y.append(t_y)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Note: the original script skipped the np.array conversion for
    # ext_inp/ext_oup (test split) — np.save converts implicitly, so the
    # explicit conversion here is behavior-identical but consistent.
    np.save(os.path.join(out_dir, 'x'), np.array(x))
    np.save(os.path.join(out_dir, 'o'), np.array(o))
    np.save(os.path.join(out_dir, 'ext_inp'), np.array(ext_inp))
    np.save(os.path.join(out_dir, 'ext_oup'), np.array(ext_oup))
    np.save(os.path.join(out_dir, 'mask1'), np.array(mask1))
    np.save(os.path.join(out_dir, 'mask2'), np.array(mask2))
    np.save(os.path.join(out_dir, 'mask3'), np.array(mask3))
    np.save(os.path.join(out_dir, 'y'), np.array(y))


def main():
    """Generate the seq2seq training and testing datasets.

    Reads the shop features, open/close timelines and cached daily
    matrices, then writes the encoder/decoder tensors for the train and
    test splits under dev/train and dev/test.
    """
    # weather = pickle.load(open('cache/weather.pkl', 'rb'))
    shops = np.genfromtxt('data/shops_example.csv', delimiter=',')
    n_shops = shops.shape[0]
    id2seq = {idx: seq for idx, seq in enumerate(shops[:, 0].astype(np.int32))}
    seq2id = {seq: idx for idx, seq in enumerate(shops[:, 0].astype(np.int32))}
    # Processing online and offline events
    with open('cache/timeline.pkl', 'rb') as fh:
        shop_timeline = pickle.load(fh)
    shop_info = pd.read_csv('data/shop_info_example.csv')
    restrict = {}
    n_parks = {}
    for idx, shop in shop_info.iterrows():
        restrict[shop['SHOP_SEQ']] = shop['IS_RESTRICT']
        n_parks[shop['SHOP_SEQ']] = shop['PARK_NUM']
    # Symmetric inverse-distance matrix and POI features are prepared
    # here but not used below; kept so missing cache files fail early.
    dis = np.load('cache/dis.npy')
    for i in range(n_shops - 1):
        for j in range(i + 1, n_shops):
            dis[i, j] = 1 / dis[i, j]
            dis[j, i] = dis[i, j]
    poi = np.load('cache/poi.npy')

    months = [1]
    # The original script rebuilt identical caches for the test split;
    # one build serves both splits.
    caches = _build_day_caches(n_shops, id2seq, shop_timeline, months)
    # Prepare the training dataset
    _build_split('dev/train', n_shops, shops, id2seq, n_parks, shop_timeline,
                 *caches, months)
    # Prepare the testing dataset
    _build_split('dev/test', n_shops, shops, id2seq, n_parks, shop_timeline,
                 *caches, months)


if __name__ == "__main__":
    main()
| 32.187668 | 99 | 0.550308 | 1,799 | 12,006 | 3.4597 | 0.101167 | 0.027956 | 0.049486 | 0.037596 | 0.838689 | 0.795951 | 0.791613 | 0.777153 | 0.755623 | 0.744698 | 0 | 0.04075 | 0.329585 | 12,006 | 372 | 100 | 32.274194 | 0.732513 | 0.109112 | 0 | 0.736 | 0 | 0 | 0.04855 | 0.014357 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.028 | 0 | 0.072 | 0.008 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
606f875bf7200f4691166adae65ab3b5c936bd64 | 15,642 | py | Python | tests/dhcpv4/relay/test_v4_request_relay_part1.py | shawnmullaney/forge | aaaef0a0645f73d24666aab6a400f3604e753aac | [
"0BSD"
] | null | null | null | tests/dhcpv4/relay/test_v4_request_relay_part1.py | shawnmullaney/forge | aaaef0a0645f73d24666aab6a400f3604e753aac | [
"0BSD"
] | null | null | null | tests/dhcpv4/relay/test_v4_request_relay_part1.py | shawnmullaney/forge | aaaef0a0645f73d24666aab6a400f3604e753aac | [
"0BSD"
] | null | null | null | """DHCPv4 address request process"""
# pylint: disable=invalid-name,line-too-long
import pytest
import misc
import srv_msg
import srv_control
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.relay
@pytest.mark.request
def test_v4_request_relay_selecting_success_chaddr():
    """Relayed DORA exchange for a client identified by chaddr only.

    With a single-address pool, the relayed DISCOVER must be answered
    with an OFFER for that address and the REQUEST must be ACKed with
    the same address.
    """
    # Server setup: one-address pool, push config, start the DHCP service.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # DISCOVER: emulate a relay agent (source port 67, giaddr set, hops=1)
    # unicasting to the server address.
    misc.test_procedure()
    srv_msg.network_variable('source_port', '67')
    srv_msg.network_variable('source_address', '$(GIADDR4)')
    srv_msg.network_variable('destination_address', '$(SRV4_ADDR)')
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    # OFFER must carry the pool address and the subnet mask (option 1).
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    # REQUEST the offered address, still through the relay.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('REQUEST')
    # ACK must confirm the same lease.
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ACK')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.relay
@pytest.mark.request
def test_v4_request_relay_selecting_success_chaddr_empty_pool():
    """Relayed DORA (chaddr-identified client) drains a one-address pool.

    After the first client obtains the only address, a DISCOVER from a
    different chaddr must get no response.
    """
    # Server setup: one-address pool, push config, start the DHCP service.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # DISCOVER via relay (giaddr set, hops=1, source port 67).
    misc.test_procedure()
    srv_msg.network_variable('source_port', '67')
    srv_msg.network_variable('source_address', '$(GIADDR4)')
    srv_msg.network_variable('destination_address', '$(SRV4_ADDR)')
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    # OFFER must carry the pool address, subnet mask (1) and server id (54).
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
    # REQUEST the offered address.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('REQUEST')
    # ACK confirms the lease; the pool is now empty.
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ACK')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
    # A second client (different chaddr) must be ignored: no free leases.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_requests_option('1')
    srv_msg.client_sets_value('Client', 'chaddr', 'ff:01:02:03:ff:04')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.relay
@pytest.mark.request
def test_v4_request_relay_selecting_success_client_id():
    """Relayed DORA for a client identified by client-id (option 61).

    chaddr is zeroed so the server must key the lease on the client
    identifier; the ACK must echo option 61 back.
    """
    # Server setup: one-address pool, push config, start the DHCP service.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # DISCOVER via relay with zero chaddr and an explicit client-id.
    misc.test_procedure()
    srv_msg.network_variable('source_port', '67')
    srv_msg.network_variable('source_address', '$(GIADDR4)')
    srv_msg.network_variable('destination_address', '$(SRV4_ADDR)')
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    # OFFER must include the mask (1), server id (54) and client id (61).
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_include_option('Response', None, '61')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    srv_msg.response_check_option_content('Response', '61', None, 'value', '00010203040506')
    # REQUEST the offered address with the same client-id.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('REQUEST')
    # ACK must confirm the lease and echo option 61.
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ACK')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_include_option('Response', None, '61')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    srv_msg.response_check_option_content('Response', '61', None, 'value', '00010203040506')
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.relay
@pytest.mark.request
def test_v4_request_relay_selecting_success_client_id_empty_pool():
    """Relayed DORA (client-id-identified) drains a one-address pool.

    After the lease is taken, a DISCOVER with a different client-id must
    get no response.
    """
    # Server setup: one-address pool, push config, start the DHCP service.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # DISCOVER via relay with zero chaddr and an explicit client-id.
    misc.test_procedure()
    srv_msg.network_variable('source_port', '67')
    srv_msg.network_variable('source_address', '$(GIADDR4)')
    srv_msg.network_variable('destination_address', '$(SRV4_ADDR)')
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    # OFFER must include the mask (1), server id (54) and client id (61).
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_include_option('Response', None, '61')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    srv_msg.response_check_option_content('Response', '61', None, 'value', '00010203040506')
    # REQUEST the offered address with the same client-id.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('REQUEST')
    # ACK confirms the lease; the pool is now empty.
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ACK')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_include_option('Response', None, '61')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    srv_msg.response_check_option_content('Response', '61', None, 'value', '00010203040506')
    # A different client-id must be ignored: no free leases left.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00020304050607')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.relay
@pytest.mark.request
def test_v4_request_relay_selecting_success_client_id_chaddr_empty_pool():
    """Relayed DORA drains the pool; later clients get no response.

    After the lease is taken by a client-id-identified client, DISCOVERs
    from a new client-id — whether with a new chaddr or the same zero
    chaddr — must both be ignored.
    """
    # Server setup: one-address pool, push config, start the DHCP service.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # DISCOVER via relay with zero chaddr and an explicit client-id.
    misc.test_procedure()
    srv_msg.network_variable('source_port', '67')
    srv_msg.network_variable('source_address', '$(GIADDR4)')
    srv_msg.network_variable('destination_address', '$(SRV4_ADDR)')
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    # OFFER must include the mask (1), server id (54) and client id (61).
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_include_option('Response', None, '61')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    srv_msg.response_check_option_content('Response', '61', None, 'value', '00010203040506')
    # REQUEST the offered address with the same client-id.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_does_include_with_value('client_id', '00010203040506')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('REQUEST')
    # ACK confirms the lease; the pool is now empty.
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ACK')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_include_option('Response', None, '61')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    srv_msg.response_check_option_content('Response', '61', None, 'value', '00010203040506')
    # New client-id with a new chaddr: must be ignored.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:11')
    srv_msg.client_does_include_with_value('client_id', '11020304050607')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
    # New client-id with the original zero chaddr: must also be ignored.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_does_include_with_value('client_id', '11020304050607')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
@pytest.mark.v4
@pytest.mark.dhcp4
@pytest.mark.relay
@pytest.mark.request
def test_v4_request_relay_selecting_success_second_request_fail():
    """Relayed DORA succeeds; a REQUEST from another chaddr is NAKed.

    Once the only address is leased, a second client requesting the same
    address must receive a NAK carrying the server identifier.
    """
    # Server setup: one-address pool, push config, start the DHCP service.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # DISCOVER via relay (giaddr set, hops=1, zero chaddr).
    misc.test_procedure()
    srv_msg.network_variable('source_port', '67')
    srv_msg.network_variable('source_address', '$(GIADDR4)')
    srv_msg.network_variable('destination_address', '$(SRV4_ADDR)')
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('DISCOVER')
    # OFFER must carry the pool address and the subnet mask (option 1).
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'OFFER')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    # REQUEST the offered address.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_requests_option('1')
    srv_msg.client_send_msg('REQUEST')
    # ACK confirms the lease for the first client.
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ACK')
    srv_msg.response_check_content('Response', None, 'yiaddr', '192.168.50.1')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_option_content('Response', '1', None, 'value', '255.255.255.0')
    # A second client requesting the same (leased) address must be NAKed.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:22:11:00')
    srv_msg.client_copy_option('server_id')
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
    srv_msg.client_requests_option('1')
    srv_msg.client_sets_value('Client', 'giaddr', '$(GIADDR4)')
    srv_msg.client_sets_value('Client', 'hops', '1')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'NAK')
    srv_msg.response_check_include_option('Response', None, '54')
    srv_msg.response_check_option_content('Response', '54', None, 'value', '$(SRV4_ADDR)')
| 44.4375 | 92 | 0.726825 | 2,295 | 15,642 | 4.568627 | 0.044444 | 0.115021 | 0.119027 | 0.110539 | 0.984359 | 0.984359 | 0.984359 | 0.984359 | 0.983786 | 0.983786 | 0 | 0.071819 | 0.111623 | 15,642 | 351 | 93 | 44.564103 | 0.682714 | 0.004731 | 0 | 0.94863 | 0 | 0 | 0.231076 | 0.009639 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020548 | true | 0.058219 | 0.013699 | 0 | 0.034247 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
609ce34e825e4f2f0b691fb63af6eb6d31136780 | 2,316 | py | Python | build/rosapi/cmake/rosapi-genmsg-context.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | build/rosapi/cmake/rosapi-genmsg-context.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | build/rosapi/cmake/rosapi-genmsg-context.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/msg/TypeDef.msg"
services_str = "/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/DeleteParam.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/GetActionServers.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/GetParam.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/GetParamNames.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/GetTime.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/HasParam.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/MessageDetails.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/Nodes.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/NodeDetails.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/Publishers.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/SearchParam.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/ServiceHost.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/ServiceNode.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/ServiceProviders.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/ServiceRequestDetails.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/ServiceResponseDetails.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/Services.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/ServicesForType.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/ServiceType.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/SetParam.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/Subscribers.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/Topics.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/TopicsForType.srv;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/srv/TopicType.srv"
pkg_name = "rosapi"
dependencies_str = ""
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "rosapi;/home/kalyco/mfp_workspace/src/rosbridge_suite/rosapi/msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 193 | 1,804 | 0.847582 | 339 | 2,316 | 5.589971 | 0.215339 | 0.137203 | 0.178364 | 0.301847 | 0.698153 | 0.698153 | 0.698153 | 0.698153 | 0.698153 | 0.618997 | 0 | 0 | 0.01468 | 2,316 | 11 | 1,805 | 210.545455 | 0.830412 | 0.021157 | 0 | 0 | 1 | 0.111111 | 0.910375 | 0.897572 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
60bde60bb065c2df3b555757eebd33aba9f281de | 32,708 | py | Python | infoblox_netmri/api/broker/v3_8_0/device_filter_stat_broker.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 12 | 2016-02-19T12:37:54.000Z | 2022-03-04T20:11:08.000Z | infoblox_netmri/api/broker/v3_8_0/device_filter_stat_broker.py | azinfoblox/infoblox-netmri | 02372c5231e2677ab6299cb659a73c9a41b4b0f4 | [
"Apache-2.0"
] | 18 | 2015-11-12T18:37:00.000Z | 2021-05-19T07:59:55.000Z | infoblox_netmri/api/broker/v3_8_0/device_filter_stat_broker.py | azinfoblox/infoblox-netmri | 02372c5231e2677ab6299cb659a73c9a41b4b0f4 | [
"Apache-2.0"
] | 18 | 2016-01-07T12:04:34.000Z | 2022-03-31T11:05:41.000Z | from ..broker import Broker
class DeviceFilterStatBroker(Broker):
controller = "device_filter_stats"
def show(self, **kwargs):
"""Shows the details for the specified device filter stat.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceFilterStatsID: The internal NetMRI identifier of the device filter stat.
:type DeviceFilterStatsID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter stat methods. The listed methods will be called on each device filter stat returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filter_stat: The device filter stat identified by the specified DeviceFilterStatsID.
:rtype device_filter_stat: DeviceFilterStat
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available device filter stats. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceFilterStatsID: No description is available for DeviceFilterStatsID.
:type DeviceFilterStatsID: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: No description is available for DeviceID.
:type DeviceID: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Timestamp: No description is available for Timestamp.
:type Timestamp: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the device filter stats with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the device filter stats with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of device filter stat methods. The listed methods will be called on each device filter stat returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceFilterStatsID
:param sort: The data field(s) to use for sorting the output. Default is DeviceFilterStatsID. Valid values are DeviceFilterStatsID, DataSourceID, DeviceID, DeviceFilterID, Timestamp, HitCount.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceFilterStat. Valid values are DeviceFilterStatsID, DataSourceID, DeviceID, DeviceFilterID, Timestamp, HitCount. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_filter_stats: An array of the DeviceFilterStat objects that match the specified input criteria.
:rtype device_filter_stats: Array of DeviceFilterStat
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
    """List the available device filter stats matching the input criteria.

    This method provides a more flexible search interface than the index
    method, but searching with it is more demanding on the system and will
    not perform to the same level as the index method. The index method's
    filter fields are accepted here as well, plus an optional query string
    and XML filter.

    Filter inputs (all optional):
        DataSourceID, DeviceFilterID, DeviceFilterStatsID, DeviceID,
        HitCount, Timestamp (Array of String, api >= 2.6): match records
        on these DeviceFilterStat attributes.
        DeviceGroupID (Array of Integer): internal NetMRI identifiers of
        the device groups to which to limit the results.
        starttime (DateTime, default today) / endtime (DateTime, default
        tomorrow): lower / upper date-time boundary for the returned
        stats; if omitted, the most recently collected data is returned.
        query (String): matched against the searched attributes
        (DataSourceID, DeviceFilterID, DeviceFilterStatsID, DeviceID,
        HitCount, Timestamp); surround the value with '/' and '/' to
        perform a regular-expression search instead of containment.
        xml_filter (String, api >= 2.3): a SetFilter XML structure applied
        AFTER any search query or field values but before the limit
        options; limit and pagination are enforced after the filter.

    Output-shaping inputs (all optional):
        methods, include (Array of String): per-record methods to call /
        associations to include. Available: data_source, device.
        start (Integer, default 0) / limit (Integer, default 1000,
        maximum 10000): pagination controls.
        sort (Array of String, default DeviceFilterStatsID) /
        dir (Array of String, default 'asc'; valid 'asc', 'desc'):
        output ordering.
        select (Array): attributes to return for each DeviceFilterStat;
        all attributes if empty or omitted.
        goto_field / goto_value (String, api >= 2.8): NIOS GOTO inputs
        used for locating a row position of records.

    :return device_filter_stats: An array of the DeviceFilterStat objects
        that match the specified input criteria.
    :rtype device_filter_stats: Array of DeviceFilterStat
    """
    # Resolve the fully-qualified broker method name, then forward the
    # caller's filter parameters unchanged as the request payload.
    full_method_name = self._get_method_fullname("search")
    return self.api_list_request(full_method_name, kwargs)
def find(self, **kwargs):
    """List the available device filter stats matching the input specification.

    This provides the most flexible search specification of all the query
    mechanisms, enabling searching using comparison operations other than
    equality. However, it is more complex to use and will not perform as
    efficiently as the index or search methods. Below, 'field names'
    refers to: DataSourceID, DeviceFilterID, DeviceFilterStatsID,
    DeviceID, HitCount, Timestamp.

    Comparison inputs (all optional) — for each field name F:
        op_F (String): the operator to apply to F. Valid values are: =,
        <>, rlike, not rlike, >, >=, <, <=, like, not like, is null,
        is not null, between. For the between operator a comma-delimited
        string is treated as an Array and must contain an even number of
        values.
        val_f_F (String): if op_F is specified, the field named in this
        input is compared to F using the operator — i.e. the value is
        treated as another field name, not a constant.
        val_c_F (String): if op_F is specified, this explicit constant
        value is compared to F using the operator.
        Either val_f_F or val_c_F must be specified whenever op_F is.

    Common filter inputs (all optional):
        DeviceGroupID (Array of Integer): internal NetMRI identifiers of
        the device groups to which to limit the results.
        starttime (DateTime, default today) / endtime (DateTime, default
        tomorrow): lower / upper date-time boundary for the returned
        stats; if omitted, the most recently collected data is returned.
        xml_filter (String, api >= 2.3): a SetFilter XML structure applied
        AFTER any search query or field values but before the limit
        options; limit and pagination are enforced after the filter.

    Output-shaping inputs (all optional):
        methods, include (Array of String): per-record methods to call /
        associations to include. Available: data_source, device.
        start (Integer, default 0) / limit (Integer, default 1000,
        maximum 10000): pagination controls.
        sort (Array of String, default DeviceFilterStatsID) /
        dir (Array of String, default 'asc'; valid 'asc', 'desc'):
        output ordering.
        select (Array): attributes to return for each DeviceFilterStat;
        all attributes if empty or omitted.
        goto_field / goto_value (String, api >= 2.8): NIOS GOTO inputs
        used for locating a row position of records.

    :return device_filter_stats: An array of the DeviceFilterStat objects
        that match the specified input criteria.
    :rtype device_filter_stats: Array of DeviceFilterStat
    """
    # Resolve the fully-qualified broker method name, then forward the
    # caller's comparison/filter parameters unchanged as the payload.
    full_method_name = self._get_method_fullname("find")
    return self.api_list_request(full_method_name, kwargs)
| 51.589905 | 498 | 0.606457 | 3,971 | 32,708 | 4.951901 | 0.068245 | 0.074247 | 0.048261 | 0.06311 | 0.919701 | 0.917667 | 0.893867 | 0.860456 | 0.858218 | 0.858218 | 0 | 0.004306 | 0.311269 | 32,708 | 633 | 499 | 51.671406 | 0.868602 | 0.815917 | 0 | 0 | 0 | 0 | 0.064189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.363636 | false | 0 | 0.090909 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
60e1b39b8aeec13d33f9c7332c271fec25b4551d | 8,135 | py | Python | exercises/house/house_test.py | mbernardes19/python | 57f799f19e3821db5b84af42a7e98becf62f055b | [
"MIT"
] | 9 | 2020-12-12T03:29:33.000Z | 2021-08-11T13:08:06.000Z | exercises/house/house_test.py | mbernardes19/python | 57f799f19e3821db5b84af42a7e98becf62f055b | [
"MIT"
] | null | null | null | exercises/house/house_test.py | mbernardes19/python | 57f799f19e3821db5b84af42a7e98becf62f055b | [
"MIT"
] | 1 | 2020-11-02T10:40:06.000Z | 2020-11-02T10:40:06.000Z | import unittest
from house import recite
# Tests adapted from `problem-specifications//canonical-data.json`
class HouseTest(unittest.TestCase):
def test_verse_one_the_house_that_jack_built(self):
self.assertEqual(recite(1, 1), ["This is the house that Jack built."])
def test_verse_two_the_malt_that_lay(self):
self.assertEqual(
recite(2, 2), ["This is the malt that lay in the house that Jack built."]
)
def test_verse_three_the_rat_that_ate(self):
self.assertEqual(
recite(3, 3),
[
"This is the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_four_the_cat_that_killed(self):
self.assertEqual(
recite(4, 4),
[
"This is the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_five_the_dog_that_worried(self):
self.assertEqual(
recite(5, 5),
[
"This is the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_six_the_cow_with_the_crumpled_horn(self):
self.assertEqual(
recite(6, 6),
[
"This is the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_seven_the_maiden_all_forlorn(self):
self.assertEqual(
recite(7, 7),
[
"This is the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_eight_the_man_all_tattered_and_torn(self):
self.assertEqual(
recite(8, 8),
[
"This is the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_nine_the_priest_all_shaven_and_shorn(self):
self.assertEqual(
recite(9, 9),
[
"This is the priest all shaven and shorn that married the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_10_the_rooster_that_crowed_in_the_morn(self):
self.assertEqual(
recite(10, 10),
[
"This is the rooster that crowed in the morn that woke the priest all shaven and shorn that married the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_11_the_farmer_sowing_his_corn(self):
self.assertEqual(
recite(11, 11),
[
"This is the farmer sowing his corn that kept the rooster that crowed in the morn that woke the priest all shaven and shorn that married the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_verse_12_the_horse_and_the_hound_and_the_horn(self):
self.assertEqual(
recite(12, 12),
[
"This is the horse and the hound and the horn that belonged to the farmer sowing his corn that kept the rooster that crowed in the morn that woke the priest all shaven and shorn that married the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built."
],
)
def test_multiple_verses(self):
self.assertEqual(
recite(4, 8),
[
"This is the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
],
)
def test_full_rhyme(self):
self.assertEqual(
recite(1, 12),
[
"This is the house that Jack built.",
"This is the malt that lay in the house that Jack built.",
"This is the rat that ate the malt that lay in the house that Jack built.",
"This is the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the priest all shaven and shorn that married the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the rooster that crowed in the morn that woke the priest all shaven and shorn that married the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the farmer sowing his corn that kept the rooster that crowed in the morn that woke the priest all shaven and shorn that married the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
"This is the horse and the hound and the horn that belonged to the farmer sowing his corn that kept the rooster that crowed in the morn that woke the priest all shaven and shorn that married the man all tattered and torn that kissed the maiden all forlorn that milked the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built.",
],
)
if __name__ == "__main__":
unittest.main()
| 62.099237 | 434 | 0.669945 | 1,312 | 8,135 | 4.078506 | 0.071646 | 0.03177 | 0.067277 | 0.089703 | 0.901327 | 0.864511 | 0.850495 | 0.842833 | 0.827322 | 0.827322 | 0 | 0.007125 | 0.292686 | 8,135 | 130 | 435 | 62.576923 | 0.922836 | 0.007867 | 0 | 0.318182 | 0 | 0.209091 | 0.674309 | 0 | 0 | 0 | 0 | 0 | 0.127273 | 1 | 0.127273 | false | 0 | 0.018182 | 0 | 0.154545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
714ac466126e69742cdc60d76e37f433ff54fbc8 | 46,671 | py | Python | nova/tests/unit/scheduler/test_ironic_host_manager.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/scheduler/test_ironic_host_manager.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/scheduler/test_ironic_host_manager.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z | begin_unit
comment|'# Copyright (c) 2014 OpenStack Foundation'
nl|'\n'
comment|'# Copyright (c) 2011 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
string|'"""\nTests For IronicHostManager\n"""'
newline|'\n'
nl|'\n'
name|'import'
name|'mock'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'base'
name|'as'
name|'obj_base'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'scheduler'
name|'import'
name|'filters'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'scheduler'
name|'import'
name|'host_manager'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'scheduler'
name|'import'
name|'ironic_host_manager'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'scheduler'
name|'import'
name|'ironic_fakes'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|FakeFilterClass1
name|'class'
name|'FakeFilterClass1'
op|'('
name|'filters'
op|'.'
name|'BaseHostFilter'
op|')'
op|':'
newline|'\n'
DECL|member|host_passes
indent|' '
name|'def'
name|'host_passes'
op|'('
name|'self'
op|','
name|'host_state'
op|','
name|'filter_properties'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|FakeFilterClass2
dedent|''
dedent|''
name|'class'
name|'FakeFilterClass2'
op|'('
name|'filters'
op|'.'
name|'BaseHostFilter'
op|')'
op|':'
newline|'\n'
DECL|member|host_passes
indent|' '
name|'def'
name|'host_passes'
op|'('
name|'self'
op|','
name|'host_state'
op|','
name|'filter_properties'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|IronicHostManagerTestCase
dedent|''
dedent|''
name|'class'
name|'IronicHostManagerTestCase'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test case for IronicHostManager class."""'
newline|'\n'
nl|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_instance_info'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_aggregates'"
op|')'
newline|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|','
name|'mock_init_agg'
op|','
name|'mock_init_inst'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'IronicHostManagerTestCase'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'='
name|'ironic_host_manager'
op|'.'
name|'IronicHostManager'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_instance_info'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_aggregates'"
op|')'
newline|'\n'
DECL|member|test_manager_public_api_signatures
name|'def'
name|'test_manager_public_api_signatures'
op|'('
name|'self'
op|','
name|'mock_init_aggs'
op|','
nl|'\n'
name|'mock_init_inst'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertPublicAPISignatures'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|'('
op|')'
op|','
nl|'\n'
name|'self'
op|'.'
name|'host_manager'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_state_public_api_signatures
dedent|''
name|'def'
name|'test_state_public_api_signatures'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertPublicAPISignatures'
op|'('
nl|'\n'
name|'host_manager'
op|'.'
name|'HostState'
op|'('
string|'"dummy"'
op|','
nl|'\n'
string|'"dummy"'
op|')'
op|','
nl|'\n'
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|'('
string|'"dummy"'
op|','
nl|'\n'
string|'"dummy"'
op|')'
nl|'\n'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.ServiceList.get_by_binary'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.ComputeNodeList.get_all'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.InstanceList.get_by_host'"
op|')'
newline|'\n'
DECL|member|test_get_all_host_states
name|'def'
name|'test_get_all_host_states'
op|'('
name|'self'
op|','
name|'mock_get_by_host'
op|','
name|'mock_get_all'
op|','
nl|'\n'
name|'mock_get_by_binary'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_get_all'
op|'.'
name|'return_value'
op|'='
name|'ironic_fakes'
op|'.'
name|'COMPUTE_NODES'
newline|'\n'
name|'mock_get_by_binary'
op|'.'
name|'return_value'
op|'='
name|'ironic_fakes'
op|'.'
name|'SERVICES'
newline|'\n'
name|'context'
op|'='
string|"'fake_context'"
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_all_host_states'
op|'('
name|'context'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'mock_get_by_host'
op|'.'
name|'call_count'
op|')'
newline|'\n'
name|'host_states_map'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'host_state_map'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'host_states_map'
op|')'
op|','
number|'4'
op|')'
newline|'\n'
nl|'\n'
name|'for'
name|'i'
name|'in'
name|'range'
op|'('
number|'4'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute_node'
op|'='
name|'ironic_fakes'
op|'.'
name|'COMPUTE_NODES'
op|'['
name|'i'
op|']'
newline|'\n'
name|'host'
op|'='
name|'compute_node'
op|'.'
name|'host'
newline|'\n'
name|'node'
op|'='
name|'compute_node'
op|'.'
name|'hypervisor_hostname'
newline|'\n'
name|'state_key'
op|'='
op|'('
name|'host'
op|','
name|'node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'host_states_map'
op|'['
name|'state_key'
op|']'
op|'.'
name|'service'
op|','
nl|'\n'
name|'obj_base'
op|'.'
name|'obj_to_primitive'
op|'('
nl|'\n'
name|'ironic_fakes'
op|'.'
name|'get_service_by_host'
op|'('
name|'host'
op|')'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'compute_node'
op|'.'
name|'stats'
op|','
nl|'\n'
name|'host_states_map'
op|'['
name|'state_key'
op|']'
op|'.'
name|'stats'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'compute_node'
op|'.'
name|'free_ram_mb'
op|','
nl|'\n'
name|'host_states_map'
op|'['
name|'state_key'
op|']'
op|'.'
name|'free_ram_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'compute_node'
op|'.'
name|'free_disk_gb'
op|'*'
number|'1024'
op|','
nl|'\n'
name|'host_states_map'
op|'['
name|'state_key'
op|']'
op|'.'
name|'free_disk_mb'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|IronicHostManagerChangedNodesTestCase
dedent|''
dedent|''
dedent|''
name|'class'
name|'IronicHostManagerChangedNodesTestCase'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test case for IronicHostManager class."""'
newline|'\n'
nl|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_instance_info'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_aggregates'"
op|')'
newline|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|','
name|'mock_init_agg'
op|','
name|'mock_init_inst'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'IronicHostManagerChangedNodesTestCase'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'='
name|'ironic_host_manager'
op|'.'
name|'IronicHostManager'
op|'('
op|')'
newline|'\n'
name|'ironic_driver'
op|'='
string|'"nova.virt.ironic.driver.IronicDriver"'
newline|'\n'
name|'supported_instances'
op|'='
op|'['
nl|'\n'
name|'objects'
op|'.'
name|'HVSpec'
op|'.'
name|'from_list'
op|'('
op|'['
string|'"i386"'
op|','
string|'"baremetal"'
op|','
string|'"baremetal"'
op|']'
op|')'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'compute_node'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
nl|'\n'
name|'id'
op|'='
number|'1'
op|','
name|'local_gb'
op|'='
number|'10'
op|','
name|'memory_mb'
op|'='
number|'1024'
op|','
name|'vcpus'
op|'='
number|'1'
op|','
nl|'\n'
name|'vcpus_used'
op|'='
number|'0'
op|','
name|'local_gb_used'
op|'='
number|'0'
op|','
name|'memory_mb_used'
op|'='
number|'0'
op|','
nl|'\n'
name|'updated_at'
op|'='
name|'None'
op|','
name|'cpu_info'
op|'='
string|"'baremetal cpu'"
op|','
nl|'\n'
name|'stats'
op|'='
name|'dict'
op|'('
nl|'\n'
name|'ironic_driver'
op|'='
name|'ironic_driver'
op|','
nl|'\n'
name|'cpu_arch'
op|'='
string|"'i386'"
op|')'
op|','
nl|'\n'
name|'supported_hv_specs'
op|'='
name|'supported_instances'
op|','
nl|'\n'
name|'free_disk_gb'
op|'='
number|'10'
op|','
name|'free_ram_mb'
op|'='
number|'1024'
op|','
nl|'\n'
name|'hypervisor_type'
op|'='
string|"'ironic'"
op|','
nl|'\n'
name|'hypervisor_version'
op|'='
number|'1'
op|','
nl|'\n'
name|'hypervisor_hostname'
op|'='
string|"'fake_host'"
op|','
nl|'\n'
name|'cpu_allocation_ratio'
op|'='
number|'16.0'
op|','
name|'ram_allocation_ratio'
op|'='
number|'1.5'
op|','
nl|'\n'
name|'disk_allocation_ratio'
op|'='
number|'1.0'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|','
string|"'__init__'"
op|')'
newline|'\n'
DECL|member|test_create_ironic_node_state
name|'def'
name|'test_create_ironic_node_state'
op|'('
name|'self'
op|','
name|'init_mock'
op|')'
op|':'
newline|'\n'
indent|' '
name|'init_mock'
op|'.'
name|'return_value'
op|'='
name|'None'
newline|'\n'
name|'compute'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
op|'**'
op|'{'
string|"'hypervisor_type'"
op|':'
string|"'ironic'"
op|'}'
op|')'
newline|'\n'
name|'host_state'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'host_state_cls'
op|'('
string|"'fake-host'"
op|','
string|"'fake-node'"
op|','
nl|'\n'
name|'compute'
op|'='
name|'compute'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIs'
op|'('
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|','
name|'type'
op|'('
name|'host_state'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostState'
op|','
string|"'__init__'"
op|')'
newline|'\n'
DECL|member|test_create_non_ironic_host_state
name|'def'
name|'test_create_non_ironic_host_state'
op|'('
name|'self'
op|','
name|'init_mock'
op|')'
op|':'
newline|'\n'
indent|' '
name|'init_mock'
op|'.'
name|'return_value'
op|'='
name|'None'
newline|'\n'
name|'compute'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
op|'**'
op|'{'
string|"'cpu_info'"
op|':'
string|"'other cpu'"
op|'}'
op|')'
newline|'\n'
name|'host_state'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'host_state_cls'
op|'('
string|"'fake-host'"
op|','
string|"'fake-node'"
op|','
nl|'\n'
name|'compute'
op|'='
name|'compute'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIs'
op|'('
name|'host_manager'
op|'.'
name|'HostState'
op|','
name|'type'
op|'('
name|'host_state'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostState'
op|','
string|"'__init__'"
op|')'
newline|'\n'
DECL|member|test_create_host_state_null_compute
name|'def'
name|'test_create_host_state_null_compute'
op|'('
name|'self'
op|','
name|'init_mock'
op|')'
op|':'
newline|'\n'
indent|' '
name|'init_mock'
op|'.'
name|'return_value'
op|'='
name|'None'
newline|'\n'
name|'host_state'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'host_state_cls'
op|'('
string|"'fake-host'"
op|','
string|"'fake-node'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIs'
op|'('
name|'host_manager'
op|'.'
name|'HostState'
op|','
name|'type'
op|'('
name|'host_state'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.ServiceList.get_by_binary'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.ComputeNodeList.get_all'"
op|')'
newline|'\n'
DECL|member|test_get_all_host_states_after_delete_one
name|'def'
name|'test_get_all_host_states_after_delete_one'
op|'('
name|'self'
op|','
name|'mock_get_all'
op|','
nl|'\n'
name|'mock_get_by_binary'
op|')'
op|':'
newline|'\n'
indent|' '
name|'getter'
op|'='
op|'('
name|'lambda'
name|'n'
op|':'
name|'n'
op|'.'
name|'hypervisor_hostname'
nl|'\n'
name|'if'
string|"'hypervisor_hostname'"
name|'in'
name|'n'
name|'else'
name|'None'
op|')'
newline|'\n'
name|'running_nodes'
op|'='
op|'['
name|'n'
name|'for'
name|'n'
name|'in'
name|'ironic_fakes'
op|'.'
name|'COMPUTE_NODES'
nl|'\n'
name|'if'
name|'getter'
op|'('
name|'n'
op|')'
op|'!='
string|"'node4uuid'"
op|']'
newline|'\n'
nl|'\n'
name|'mock_get_all'
op|'.'
name|'side_effect'
op|'='
op|'['
nl|'\n'
name|'ironic_fakes'
op|'.'
name|'COMPUTE_NODES'
op|','
name|'running_nodes'
op|']'
newline|'\n'
name|'mock_get_by_binary'
op|'.'
name|'side_effect'
op|'='
op|'['
nl|'\n'
name|'ironic_fakes'
op|'.'
name|'SERVICES'
op|','
name|'ironic_fakes'
op|'.'
name|'SERVICES'
op|']'
newline|'\n'
name|'context'
op|'='
string|"'fake_context'"
newline|'\n'
nl|'\n'
comment|'# first call: all nodes'
nl|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_all_host_states'
op|'('
name|'context'
op|')'
newline|'\n'
name|'host_states_map'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'host_state_map'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'4'
op|','
name|'len'
op|'('
name|'host_states_map'
op|')'
op|')'
newline|'\n'
nl|'\n'
comment|'# second call: just running nodes'
nl|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_all_host_states'
op|'('
name|'context'
op|')'
newline|'\n'
name|'host_states_map'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'host_state_map'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'3'
op|','
name|'len'
op|'('
name|'host_states_map'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.ServiceList.get_by_binary'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.ComputeNodeList.get_all'"
op|')'
newline|'\n'
DECL|member|test_get_all_host_states_after_delete_all
name|'def'
name|'test_get_all_host_states_after_delete_all'
op|'('
name|'self'
op|','
name|'mock_get_all'
op|','
nl|'\n'
name|'mock_get_by_binary'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_get_all'
op|'.'
name|'side_effect'
op|'='
op|'['
nl|'\n'
name|'ironic_fakes'
op|'.'
name|'COMPUTE_NODES'
op|','
op|'['
op|']'
op|']'
newline|'\n'
name|'mock_get_by_binary'
op|'.'
name|'side_effect'
op|'='
op|'['
nl|'\n'
name|'ironic_fakes'
op|'.'
name|'SERVICES'
op|','
name|'ironic_fakes'
op|'.'
name|'SERVICES'
op|']'
newline|'\n'
name|'context'
op|'='
string|"'fake_context'"
newline|'\n'
nl|'\n'
comment|'# first call: all nodes'
nl|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_all_host_states'
op|'('
name|'context'
op|')'
newline|'\n'
name|'host_states_map'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'host_state_map'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'host_states_map'
op|')'
op|','
number|'4'
op|')'
newline|'\n'
nl|'\n'
comment|'# second call: no nodes'
nl|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_all_host_states'
op|'('
name|'context'
op|')'
newline|'\n'
name|'host_states_map'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'host_state_map'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'len'
op|'('
name|'host_states_map'
op|')'
op|','
number|'0'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_compute_node
dedent|''
name|'def'
name|'test_update_from_compute_node'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'host'
op|'='
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|'('
string|'"fakehost"'
op|','
string|'"fakenode"'
op|')'
newline|'\n'
name|'host'
op|'.'
name|'update'
op|'('
name|'compute'
op|'='
name|'self'
op|'.'
name|'compute_node'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1024'
op|','
name|'host'
op|'.'
name|'free_ram_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1024'
op|','
name|'host'
op|'.'
name|'total_usable_ram_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'10240'
op|','
name|'host'
op|'.'
name|'free_disk_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'host'
op|'.'
name|'vcpus_total'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'host'
op|'.'
name|'vcpus_used'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'self'
op|'.'
name|'compute_node'
op|'.'
name|'stats'
op|','
name|'host'
op|'.'
name|'stats'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'ironic'"
op|','
name|'host'
op|'.'
name|'hypervisor_type'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'host'
op|'.'
name|'hypervisor_version'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
string|"'fake_host'"
op|','
name|'host'
op|'.'
name|'hypervisor_hostname'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_consume_identical_instance_from_compute
dedent|''
name|'def'
name|'test_consume_identical_instance_from_compute'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'host'
op|'='
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|'('
string|'"fakehost"'
op|','
string|'"fakenode"'
op|')'
newline|'\n'
name|'host'
op|'.'
name|'update'
op|'('
name|'compute'
op|'='
name|'self'
op|'.'
name|'compute_node'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'host'
op|'.'
name|'updated'
op|')'
newline|'\n'
name|'spec_obj'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'flavor'
op|'='
name|'objects'
op|'.'
name|'Flavor'
op|'('
name|'root_gb'
op|'='
number|'10'
op|','
name|'ephemeral_gb'
op|'='
number|'0'
op|','
name|'memory_mb'
op|'='
number|'1024'
op|','
nl|'\n'
name|'vcpus'
op|'='
number|'1'
op|')'
op|','
nl|'\n'
name|'uuid'
op|'='
string|"'fake-uuid'"
op|')'
newline|'\n'
name|'host'
op|'.'
name|'consume_from_request'
op|'('
name|'spec_obj'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'host'
op|'.'
name|'vcpus_used'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'host'
op|'.'
name|'free_ram_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'host'
op|'.'
name|'free_disk_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNotNone'
op|'('
name|'host'
op|'.'
name|'updated'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_consume_larger_instance_from_compute
dedent|''
name|'def'
name|'test_consume_larger_instance_from_compute'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'host'
op|'='
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|'('
string|'"fakehost"'
op|','
string|'"fakenode"'
op|')'
newline|'\n'
name|'host'
op|'.'
name|'update'
op|'('
name|'compute'
op|'='
name|'self'
op|'.'
name|'compute_node'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'host'
op|'.'
name|'updated'
op|')'
newline|'\n'
name|'spec_obj'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'flavor'
op|'='
name|'objects'
op|'.'
name|'Flavor'
op|'('
name|'root_gb'
op|'='
number|'20'
op|','
name|'ephemeral_gb'
op|'='
number|'0'
op|','
name|'memory_mb'
op|'='
number|'2048'
op|','
nl|'\n'
name|'vcpus'
op|'='
number|'2'
op|')'
op|')'
newline|'\n'
name|'host'
op|'.'
name|'consume_from_request'
op|'('
name|'spec_obj'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'host'
op|'.'
name|'vcpus_used'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'host'
op|'.'
name|'free_ram_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'host'
op|'.'
name|'free_disk_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNotNone'
op|'('
name|'host'
op|'.'
name|'updated'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_consume_smaller_instance_from_compute
dedent|''
name|'def'
name|'test_consume_smaller_instance_from_compute'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'host'
op|'='
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|'('
string|'"fakehost"'
op|','
string|'"fakenode"'
op|')'
newline|'\n'
name|'host'
op|'.'
name|'update'
op|'('
name|'compute'
op|'='
name|'self'
op|'.'
name|'compute_node'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertIsNone'
op|'('
name|'host'
op|'.'
name|'updated'
op|')'
newline|'\n'
name|'spec_obj'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'flavor'
op|'='
name|'objects'
op|'.'
name|'Flavor'
op|'('
name|'root_gb'
op|'='
number|'5'
op|','
name|'ephemeral_gb'
op|'='
number|'0'
op|','
name|'memory_mb'
op|'='
number|'512'
op|','
nl|'\n'
name|'vcpus'
op|'='
number|'1'
op|')'
op|')'
newline|'\n'
name|'host'
op|'.'
name|'consume_from_request'
op|'('
name|'spec_obj'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'host'
op|'.'
name|'vcpus_used'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'host'
op|'.'
name|'free_ram_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0'
op|','
name|'host'
op|'.'
name|'free_disk_mb'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsNotNone'
op|'('
name|'host'
op|'.'
name|'updated'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|IronicHostManagerTestFilters
dedent|''
dedent|''
name|'class'
name|'IronicHostManagerTestFilters'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Test filters work for IronicHostManager."""'
newline|'\n'
nl|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_instance_info'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_aggregates'"
op|')'
newline|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|','
name|'mock_init_agg'
op|','
name|'mock_init_inst'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'IronicHostManagerTestFilters'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'scheduler_available_filters'
op|'='
op|'['
string|"'%s.%s'"
op|'%'
op|'('
name|'__name__'
op|','
name|'cls'
op|')'
name|'for'
nl|'\n'
name|'cls'
name|'in'
op|'['
string|"'FakeFilterClass1'"
op|','
nl|'\n'
string|"'FakeFilterClass2'"
op|']'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'scheduler_default_filters'
op|'='
op|'['
string|"'FakeFilterClass1'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'baremetal_scheduler_default_filters'
op|'='
op|'['
string|"'FakeFilterClass2'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'='
name|'ironic_host_manager'
op|'.'
name|'IronicHostManager'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'fake_hosts'
op|'='
op|'['
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|'('
nl|'\n'
string|"'fake_host%s'"
op|'%'
name|'x'
op|','
string|"'fake-node'"
op|')'
name|'for'
name|'x'
name|'in'
name|'range'
op|'('
number|'1'
op|','
number|'5'
op|')'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'fake_hosts'
op|'+='
op|'['
name|'ironic_host_manager'
op|'.'
name|'IronicNodeState'
op|'('
nl|'\n'
string|"'fake_multihost'"
op|','
string|"'fake-node%s'"
op|'%'
name|'x'
op|')'
name|'for'
name|'x'
name|'in'
name|'range'
op|'('
number|'1'
op|','
number|'5'
op|')'
op|']'
newline|'\n'
nl|'\n'
DECL|member|test_default_filters
dedent|''
name|'def'
name|'test_default_filters'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'default_filters'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'default_filters'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'default_filters'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsInstance'
op|'('
name|'default_filters'
op|'['
number|'0'
op|']'
op|','
name|'FakeFilterClass1'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_choose_host_filters_not_found
dedent|''
name|'def'
name|'test_choose_host_filters_not_found'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'SchedulerHostFilterNotFound'
op|','
nl|'\n'
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'_choose_host_filters'
op|','
nl|'\n'
string|"'FakeFilterClass3'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_choose_host_filters
dedent|''
name|'def'
name|'test_choose_host_filters'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# Test we return 1 correct filter object'
nl|'\n'
indent|' '
name|'host_filters'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'_choose_host_filters'
op|'('
nl|'\n'
op|'['
string|"'FakeFilterClass2'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'host_filters'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsInstance'
op|'('
name|'host_filters'
op|'['
number|'0'
op|']'
op|','
name|'FakeFilterClass2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_host_manager_default_filters
dedent|''
name|'def'
name|'test_host_manager_default_filters'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'default_filters'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'default_filters'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'default_filters'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsInstance'
op|'('
name|'default_filters'
op|'['
number|'0'
op|']'
op|','
name|'FakeFilterClass1'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_instance_info'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'host_manager'
op|'.'
name|'HostManager'
op|','
string|"'_init_aggregates'"
op|')'
newline|'\n'
DECL|member|test_host_manager_default_filters_uses_baremetal
name|'def'
name|'test_host_manager_default_filters_uses_baremetal'
op|'('
name|'self'
op|','
name|'mock_init_agg'
op|','
nl|'\n'
name|'mock_init_inst'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'scheduler_use_baremetal_filters'
op|'='
name|'True'
op|')'
newline|'\n'
name|'host_manager'
op|'='
name|'ironic_host_manager'
op|'.'
name|'IronicHostManager'
op|'('
op|')'
newline|'\n'
nl|'\n'
comment|'# ensure the defaults come from baremetal_scheduler_default_filters'
nl|'\n'
comment|'# and not scheduler_default_filters'
nl|'\n'
name|'default_filters'
op|'='
name|'host_manager'
op|'.'
name|'default_filters'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'default_filters'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertIsInstance'
op|'('
name|'default_filters'
op|'['
number|'0'
op|']'
op|','
name|'FakeFilterClass2'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_load_filters
dedent|''
name|'def'
name|'test_load_filters'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# without scheduler_use_baremetal_filters'
nl|'\n'
indent|' '
name|'filters'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'_load_filters'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
string|"'FakeFilterClass1'"
op|']'
op|','
name|'filters'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_load_filters_baremetal
dedent|''
name|'def'
name|'test_load_filters_baremetal'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# with scheduler_use_baremetal_filters'
nl|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'scheduler_use_baremetal_filters'
op|'='
name|'True'
op|')'
newline|'\n'
name|'filters'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'_load_filters'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
string|"'FakeFilterClass2'"
op|']'
op|','
name|'filters'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_mock_get_filtered_hosts
dedent|''
name|'def'
name|'_mock_get_filtered_hosts'
op|'('
name|'self'
op|','
name|'info'
op|')'
op|':'
newline|'\n'
indent|' '
name|'info'
op|'['
string|"'got_objs'"
op|']'
op|'='
op|'['
op|']'
newline|'\n'
name|'info'
op|'['
string|"'got_fprops'"
op|']'
op|'='
op|'['
op|']'
newline|'\n'
nl|'\n'
DECL|function|fake_filter_one
name|'def'
name|'fake_filter_one'
op|'('
name|'_self'
op|','
name|'obj'
op|','
name|'filter_props'
op|')'
op|':'
newline|'\n'
indent|' '
name|'info'
op|'['
string|"'got_objs'"
op|']'
op|'.'
name|'append'
op|'('
name|'obj'
op|')'
newline|'\n'
name|'info'
op|'['
string|"'got_fprops'"
op|']'
op|'.'
name|'append'
op|'('
name|'filter_props'
op|')'
newline|'\n'
name|'return'
name|'True'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'stub_out'
op|'('
name|'__name__'
op|'+'
string|"'.FakeFilterClass1._filter_one'"
op|','
nl|'\n'
name|'fake_filter_one'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_verify_result
dedent|''
name|'def'
name|'_verify_result'
op|'('
name|'self'
op|','
name|'info'
op|','
name|'result'
op|','
name|'filters'
op|'='
name|'True'
op|')'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'x'
name|'in'
name|'info'
op|'['
string|"'got_fprops'"
op|']'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'x'
op|','
name|'info'
op|'['
string|"'expected_fprops'"
op|']'
op|')'
newline|'\n'
dedent|''
name|'if'
name|'filters'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
name|'info'
op|'['
string|"'expected_objs'"
op|']'
op|')'
op|','
name|'set'
op|'('
name|'info'
op|'['
string|"'got_objs'"
op|']'
op|')'
op|')'
newline|'\n'
dedent|''
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'set'
op|'('
name|'info'
op|'['
string|"'expected_objs'"
op|']'
op|')'
op|','
name|'set'
op|'('
name|'result'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts
dedent|''
name|'def'
name|'test_get_filtered_hosts'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_ignore
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_ignore'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
string|"'fake_host1'"
op|','
string|"'fake_host3'"
op|','
nl|'\n'
string|"'fake_host5'"
op|','
string|"'fake_multihost'"
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
nl|'\n'
comment|'# [1] and [3] are host2 and host4'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'1'
op|']'
op|','
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'3'
op|']'
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_force_hosts
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_force_hosts'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
string|"'fake_host1'"
op|','
string|"'fake_host3'"
op|','
string|"'fake_host5'"
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
nl|'\n'
comment|'# [0] and [2] are host1 and host3'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'0'
op|']'
op|','
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'2'
op|']'
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_no_matching_force_hosts
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_no_matching_force_hosts'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
string|"'fake_host5'"
op|','
string|"'fake_host6'"
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_ignore_and_force_hosts
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_ignore_and_force_hosts'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# Ensure ignore_hosts processed before force_hosts in host filters.'
nl|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
string|"'fake_host1'"
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
string|"'fake_host3'"
op|','
string|"'fake_host1'"
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
nl|'\n'
comment|'# only fake_host3 should be left.'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'2'
op|']'
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_force_host_and_many_nodes
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_force_host_and_many_nodes'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# Ensure all nodes returned for a host with many nodes'
nl|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
string|"'fake_multihost'"
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'4'
op|']'
op|','
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'5'
op|']'
op|','
nl|'\n'
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'6'
op|']'
op|','
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'7'
op|']'
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_force_nodes
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_force_nodes'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
string|"'fake-node2'"
op|','
string|"'fake-node4'"
op|','
string|"'fake-node9'"
op|']'
op|')'
newline|'\n'
nl|'\n'
comment|'# [5] is fake-node2, [7] is fake-node4'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'5'
op|']'
op|','
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'7'
op|']'
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_force_hosts_and_nodes
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_force_hosts_and_nodes'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# Ensure only overlapping results if both force host and node'
nl|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
string|"'fake_host1'"
op|','
string|"'fake_multihost'"
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
string|"'fake-node2'"
op|','
string|"'fake-node9'"
op|']'
op|')'
newline|'\n'
nl|'\n'
comment|'# [5] is fake-node2'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'5'
op|']'
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_force_hosts_and_wrong_nodes
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_force_hosts_and_wrong_nodes'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# Ensure non-overlapping force_node and force_host yield no result'
nl|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
string|"'fake_multihost'"
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
string|"'fake-node'"
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_ignore_hosts_and_force_nodes
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_ignore_hosts_and_force_nodes'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# Ensure ignore_hosts can coexist with force_nodes'
nl|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
string|"'fake_host1'"
op|','
string|"'fake_host2'"
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
string|"'fake-node4'"
op|','
string|"'fake-node2'"
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'5'
op|']'
op|','
name|'self'
op|'.'
name|'fake_hosts'
op|'['
number|'7'
op|']'
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes
dedent|''
name|'def'
name|'test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# Ensure ignore_hosts is processed before force_nodes'
nl|'\n'
indent|' '
name|'fake_properties'
op|'='
name|'objects'
op|'.'
name|'RequestSpec'
op|'('
nl|'\n'
name|'instance_uuid'
op|'='
string|"'fake-uuid'"
op|','
nl|'\n'
name|'ignore_hosts'
op|'='
op|'['
string|"'fake_multihost'"
op|']'
op|','
nl|'\n'
name|'force_hosts'
op|'='
op|'['
op|']'
op|','
nl|'\n'
name|'force_nodes'
op|'='
op|'['
string|"'fake_node4'"
op|','
string|"'fake_node2'"
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'info'
op|'='
op|'{'
string|"'expected_objs'"
op|':'
op|'['
op|']'
op|','
nl|'\n'
string|"'expected_fprops'"
op|':'
name|'fake_properties'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'_mock_get_filtered_hosts'
op|'('
name|'info'
op|')'
newline|'\n'
nl|'\n'
name|'result'
op|'='
name|'self'
op|'.'
name|'host_manager'
op|'.'
name|'get_filtered_hosts'
op|'('
name|'self'
op|'.'
name|'fake_hosts'
op|','
nl|'\n'
name|'fake_properties'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_verify_result'
op|'('
name|'info'
op|','
name|'result'
op|','
name|'False'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 12.793586 | 88 | 0.620814 | 6,958 | 46,671 | 4.008479 | 0.044697 | 0.160267 | 0.082464 | 0.082822 | 0.901832 | 0.869743 | 0.837331 | 0.81754 | 0.783658 | 0.756624 | 0 | 0.004339 | 0.091342 | 46,671 | 3,647 | 89 | 12.797094 | 0.653344 | 0 | 0 | 0.953386 | 0 | 0 | 0.385678 | 0.052688 | 0 | 0 | 0 | 0 | 0.014532 | 0 | null | null | 0.001645 | 0.002468 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
7194e61647ae08f129e0b9a4bba1c544fff93674 | 22,214 | py | Python | tests/test_parser.py | joshuaskelly/wick | a539621300c6c01a78007b92451906146620575b | [
"MIT"
] | 4 | 2018-08-21T22:11:53.000Z | 2020-04-13T14:30:30.000Z | tests/test_parser.py | joshuaskelly/wick | a539621300c6c01a78007b92451906146620575b | [
"MIT"
] | null | null | null | tests/test_parser.py | joshuaskelly/wick | a539621300c6c01a78007b92451906146620575b | [
"MIT"
] | 1 | 2021-12-20T19:16:30.000Z | 2021-12-20T19:16:30.000Z | import unittest
from wick.parser.common import Range
from wick.parser.parser import parse
class TestParser(unittest.TestCase):
def assertPositionsEqual(self, first, second, msg=None):
"""Fail if the two positions are not equal"""
self.assertEqual(first.line, second.line, 'Lines should be "equal"')
self.assertEqual(first.character, second.character, 'Characters should be "equal"')
def assertRangesEqual(self, first, second, msg=None):
"""Fail if the two ranges are not equal"""
self.assertPositionsEqual(first.start, second.start, 'Starts should be "equal"')
self.assertPositionsEqual(first.end, second.end, 'Ends should be "equal"')
def assertSymbolsEqual(self, first, second, msg=None):
self.assertEqual(first.type, second.type, 'Types should be "equal"')
self.assertEqual(first.value, second.value, 'Values should be "equal"')
self.assertRangesEqual(first.range, second.range, 'Ranges should be "equal"')
def get_symbol(self, scope, symbol_value):
matches = [s for s in scope.definitions.values() if s.value == symbol_value]
return matches[0] if matches else None
def test_empty_struct(self):
source_text = 'struct empty;'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'empty')
self.assertEqual(symbol.value, 'empty', 'Value should be "empty"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNone(symbol.inner_scope, 'Inner scope should be "None"')
self.assertRangesEqual(symbol.range, Range((0, 7), (0, 12)))
def test_single_char_member(self):
source_text = 'struct single { char x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((0, 21), (0, 22)))
def test_single_signed_char_member(self):
source_text = 'struct single { signed char x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'signed char', 'Type should be "signed char"')
self.assertRangesEqual(symbol.range, Range((0, 28), (0, 29)))
def test_single_unsigned_char_member(self):
source_text = 'struct single { unsigned char x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'unsigned char', 'Type should be "unsigned "char')
self.assertRangesEqual(symbol.range, Range((0, 30), (0, 31)))
def test_single_bool_member(self):
source_text = 'struct single { bool x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'bool', 'Type should be "bool"')
self.assertRangesEqual(symbol.range, Range((0, 21), (0, 22)))
def test_single_short_member(self):
source_text = 'struct single { short x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'short', 'Type should be "short"')
self.assertRangesEqual(symbol.range, Range((0, 22), (0, 23)))
def test_single_unsigned_short_member(self):
source_text = 'struct single { unsigned short x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'unsigned short', 'Type should be "unsigned "short')
self.assertRangesEqual(symbol.range, Range((0, 31), (0, 32)))
def test_single_int_member(self):
source_text = 'struct single { int x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'int', 'Type should be "int"')
self.assertRangesEqual(symbol.range, Range((0, 20), (0, 21)))
def test_single_unsigned_int_member(self):
source_text = 'struct single { unsigned int x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'unsigned int', 'Type should be "unsigned "int')
self.assertRangesEqual(symbol.range, Range((0, 29), (0, 30)))
def test_single_long_long_member(self):
source_text = 'struct single { long long x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'long long', 'Type should be "long "long')
self.assertRangesEqual(symbol.range, Range((0, 26), (0, 27)))
def test_single_unsigned_long_long_member(self):
source_text = 'struct single { unsigned long long x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'unsigned long long', 'Type should be "unsigned "long long')
self.assertRangesEqual(symbol.range, Range((0, 35), (0, 36)))
def test_single_long_member(self):
source_text = 'struct single { long x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'long', 'Type should be "long"')
self.assertRangesEqual(symbol.range, Range((0, 21), (0, 22)))
def test_single_unsigned_long_member(self):
source_text = 'struct single { unsigned long x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'unsigned long', 'Type should be "unsigned "long')
self.assertRangesEqual(symbol.range, Range((0, 30), (0, 31)))
def test_single_float_member(self):
source_text = 'struct single { float x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'float', 'Type should be "float"')
self.assertRangesEqual(symbol.range, Range((0, 22), (0, 23)))
def test_single_double_member(self):
source_text = 'struct single { double x; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
symbol = self.get_symbol(parse_tree.scope, 'single')
self.assertEqual(symbol.value, 'single', 'Value should be "single"')
self.assertEqual(symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'double', 'Type should be "double"')
self.assertRangesEqual(symbol.range, Range((0, 23), (0, 24)))
def test_multiple_char_members(self):
source_text = 'struct multi { char x; char y; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
struct_symbol = self.get_symbol(parse_tree.scope, 'multi')
self.assertEqual(struct_symbol.value, 'multi', 'Value should be "multi"')
self.assertEqual(struct_symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(struct_symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(struct_symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((0, 20), (0, 21)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'y')
self.assertEqual(symbol.value, 'y', 'Value should be "y"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((0, 28), (0, 29)))
def test_multiple_comma_separated_char_members(self):
source_text = 'struct multi { char x, y; };'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
struct_symbol = self.get_symbol(parse_tree.scope, 'multi')
self.assertEqual(struct_symbol.value, 'multi', 'Value should be "multi"')
self.assertEqual(struct_symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(struct_symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(struct_symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((0, 20), (0, 21)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'y')
self.assertEqual(symbol.value, 'y', 'Value should be "y"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((0, 23), (0, 24)))
def test_typedef_struct(self):
source_text = 'typedef struct td { char x; char y; }alias;'
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
struct_symbol = self.get_symbol(parse_tree.scope, 'td')
self.assertEqual(struct_symbol.value, 'td', 'Value should be "td"')
self.assertEqual(struct_symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(struct_symbol.inner_scope, 'Inner scope should not be None')
symbol = self.get_symbol(struct_symbol.inner_scope, 'x')
self.assertEqual(symbol.value, 'x', 'Value should be "x"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((0, 25), (0, 26)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'y')
self.assertEqual(symbol.value, 'y', 'Value should be "y"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((0, 33), (0, 34)))
alias = struct_symbol.alias
self.assertEqual(alias.value, 'alias', 'Alias value should be "alias"')
self.assertEqual(alias.type.value, 'struct', 'Type should be "struct"')
def test_multiple_structs(self):
source_text = """
struct A {
int a;
};
struct B {
long b;
};
"""
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
struct_symbol = self.get_symbol(parse_tree.scope, 'A')
self.assertEqual(struct_symbol.value, 'A', 'Value should be "A"')
self.assertEqual(struct_symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(struct_symbol.inner_scope, 'Inner scope should not be None')
self.assertRangesEqual(struct_symbol.range, Range((1, 7), (1, 8)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'a')
self.assertEqual(symbol.value, 'a', 'Value should be "a"')
self.assertEqual(symbol.type.value, 'int', 'Type should be "int"')
self.assertRangesEqual(symbol.range, Range((2, 8), (2, 9)))
struct_symbol = self.get_symbol(parse_tree.scope, 'B')
self.assertEqual(struct_symbol.value, 'B', 'Value should be "B"')
self.assertEqual(struct_symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(struct_symbol.inner_scope, 'Inner scope should not be None')
self.assertRangesEqual(struct_symbol.range, Range((5, 7), (5, 8)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'b')
self.assertEqual(symbol.value, 'b', 'Value should be "b"')
self.assertEqual(symbol.type.value, 'long', 'Type should be "long"')
self.assertRangesEqual(symbol.range, Range((6, 9), (6, 10)))
def test_comments(self):
source_text = """
/* Description of A */
struct A {
// Description of a
int a;
// This is a description
// of b. It is two lines
char b;
};
"""
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
struct_symbol = self.get_symbol(parse_tree.scope, 'A')
self.assertEqual(struct_symbol.value, 'A', 'Value should be "A"')
self.assertEqual(struct_symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(struct_symbol.inner_scope, 'Inner scope should not be None')
self.assertRangesEqual(struct_symbol.range, Range((2, 7), (2, 8)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'a')
self.assertEqual(symbol.value, 'a', 'Value should be "a"')
self.assertEqual(symbol.type.value, 'int', 'Type should be "int"')
self.assertRangesEqual(symbol.range, Range((4, 8), (4, 9)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'b')
self.assertEqual(symbol.value, 'b', 'Value should be "b"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((8, 9), (8, 10)))
comment = parse_tree.comments[0]
self.assertEqual(comment.value, '/* Description of A */', 'Comment text should not change')
self.assertRangesEqual(comment.range, Range((1, 0), (1, 22)))
comment = parse_tree.comments[1]
self.assertEqual(comment.value, '// Description of a', 'Comment text should not change')
self.assertRangesEqual(comment.range, Range((3, 4), (3, 23)))
comment = parse_tree.comments[2]
self.assertEqual(comment.value, '// This is a description\n // of b. It is two lines')
self.assertRangesEqual(comment.range, Range((6, 4), (7, 28)))
def test_c_strings(self):
source_text = """
struct A {
char name[32];
};"""
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
struct_symbol = self.get_symbol(parse_tree.scope, 'A')
self.assertEqual(struct_symbol.value, 'A', 'Value should be "A"')
self.assertEqual(struct_symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(struct_symbol.inner_scope, 'Inner scope should not be None')
self.assertRangesEqual(struct_symbol.range, Range((1, 7), (1, 8)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'name')
self.assertEqual(symbol.value, 'name', 'Value should be "name"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((2, 9), (2, 13)))
self.assertIsNotNone(symbol.dimension, 'Dimension should not be None')
self.assertEqual(symbol.dimension.value, '32', 'Dimension should be "32"')
def test_typedef_alias_only(self):
source_text = """
typedef struct {
char name[32];
}A;"""
parse_tree = parse(source_text)
self.assertFalse(parse_tree.errors, 'Errors during parsing')
struct_symbol = self.get_symbol(parse_tree.scope, 'A')
self.assertEqual(struct_symbol.value, 'A', 'Value should be "A"')
self.assertEqual(struct_symbol.type.value, 'struct', 'Type should be "struct"')
self.assertIsNotNone(struct_symbol.inner_scope, 'Inner scope should not be None')
self.assertRangesEqual(struct_symbol.range, Range((3, 1), (3, 2)))
symbol = self.get_symbol(struct_symbol.inner_scope, 'name')
self.assertEqual(symbol.value, 'name', 'Value should be "name"')
self.assertEqual(symbol.type.value, 'char', 'Type should be "char"')
self.assertRangesEqual(symbol.range, Range((2, 9), (2, 13)))
self.assertIsNotNone(symbol.dimension, 'Dimension should not be None')
self.assertEqual(symbol.dimension.value, '32', 'Dimension should be "32"')
def test_dont_parse_variable_declarations_outside_a_struct(self):
source_text = """float d;"""
parse_tree = parse(source_text)
self.assertEqual(len(parse_tree.errors), 3, 'Three errors should be present')
if __name__ == '__main__':
unittest.main()
| 47.669528 | 104 | 0.664806 | 2,851 | 22,214 | 5.05577 | 0.050509 | 0.061607 | 0.122381 | 0.06459 | 0.875468 | 0.8575 | 0.834258 | 0.806091 | 0.791453 | 0.772652 | 0 | 0.011518 | 0.198749 | 22,214 | 465 | 105 | 47.772043 | 0.798303 | 0.003421 | 0 | 0.669516 | 0 | 0 | 0.237221 | 0 | 0 | 0 | 0 | 0 | 0.564103 | 1 | 0.076923 | false | 0 | 0.008547 | 0 | 0.091168 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
71d72b30fe55e858fee3d9bd3a38327089176e7b | 5,863 | py | Python | swae/models/mnist.py | vitchyr/swae-pytorch | c59cbffc3f093b98084d56271d4743072e9bf5d5 | [
"MIT"
] | 89 | 2018-06-06T02:14:14.000Z | 2022-03-04T17:06:02.000Z | swae/models/mnist.py | vitchyr/swae-pytorch | c59cbffc3f093b98084d56271d4743072e9bf5d5 | [
"MIT"
] | 5 | 2018-07-20T16:31:53.000Z | 2019-09-05T16:39:37.000Z | swae/models/mnist.py | vitchyr/swae-pytorch | c59cbffc3f093b98084d56271d4743072e9bf5d5 | [
"MIT"
] | 13 | 2018-06-28T01:38:01.000Z | 2021-09-14T11:47:13.000Z | import torch.nn as nn
import torch.nn.functional as F
class MNISTEncoder(nn.Module):
""" MNIST Encoder from Original Paper's Keras based Implementation.
Args:
init_num_filters (int): initial number of filters from encoder image channels
lrelu_slope (float): positive number indicating LeakyReLU negative slope
inter_fc_dim (int): intermediate fully connected dimensionality prior to embedding layer
embedding_dim (int): embedding dimensionality
"""
def __init__(self, init_num_filters=16, lrelu_slope=0.2, inter_fc_dim=128, embedding_dim=2):
super(MNISTEncoder, self).__init__()
self.init_num_filters_ = init_num_filters
self.lrelu_slope_ = lrelu_slope
self.inter_fc_dim_ = inter_fc_dim
self.embedding_dim_ = embedding_dim
self.features = nn.Sequential(
nn.Conv2d(1, self.init_num_filters_ * 1, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Conv2d(self.init_num_filters_ * 1, self.init_num_filters_ * 1, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.AvgPool2d(kernel_size=2, padding=0),
nn.Conv2d(self.init_num_filters_ * 1, self.init_num_filters_ * 2, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Conv2d(self.init_num_filters_ * 2, self.init_num_filters_ * 2, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.AvgPool2d(kernel_size=2, padding=0),
nn.Conv2d(self.init_num_filters_ * 2, self.init_num_filters_ * 4, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Conv2d(self.init_num_filters_ * 4, self.init_num_filters_ * 4, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.AvgPool2d(kernel_size=2, padding=1)
)
self.fc = nn.Sequential(
nn.Linear(self.init_num_filters_ * 4 * 4 * 4, self.inter_fc_dim_),
nn.ReLU(inplace=True),
nn.Linear(self.inter_fc_dim_, self.embedding_dim_)
)
def forward(self, x):
x = self.features(x)
x = x.view(-1, self.init_num_filters_ * 4 * 4 * 4)
x = self.fc(x)
return x
class MNISTDecoder(nn.Module):
""" MNIST Decoder from Original Paper's Keras based Implementation.
Args:
init_num_filters (int): initial number of filters from encoder image channels
lrelu_slope (float): positive number indicating LeakyReLU negative slope
inter_fc_dim (int): intermediate fully connected dimensionality prior to embedding layer
embedding_dim (int): embedding dimensionality
"""
def __init__(self, init_num_filters=16, lrelu_slope=0.2, inter_fc_dim=128, embedding_dim=2):
super(MNISTDecoder, self).__init__()
self.init_num_filters_ = init_num_filters
self.lrelu_slope_ = lrelu_slope
self.inter_fc_dim_ = inter_fc_dim
self.embedding_dim_ = embedding_dim
self.fc = nn.Sequential(
nn.Linear(self.embedding_dim_, self.inter_fc_dim_),
nn.Linear(self.inter_fc_dim_, self.init_num_filters_ * 4 * 4 * 4),
nn.ReLU(inplace=True)
)
self.features = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(self.init_num_filters_ * 4, self.init_num_filters_ * 4, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Conv2d(self.init_num_filters_ * 4, self.init_num_filters_ * 4, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Upsample(scale_factor=2),
nn.Conv2d(self.init_num_filters_ * 4, self.init_num_filters_ * 4, kernel_size=3, padding=0),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Conv2d(self.init_num_filters_ * 4, self.init_num_filters_ * 4, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Upsample(scale_factor=2),
nn.Conv2d(self.init_num_filters_ * 4, self.init_num_filters_ * 2, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Conv2d(self.init_num_filters_ * 2, self.init_num_filters_ * 2, kernel_size=3, padding=1),
nn.LeakyReLU(self.lrelu_slope_, inplace=True),
nn.Conv2d(self.init_num_filters_ * 2, 1, kernel_size=3, padding=1)
)
def forward(self, z):
z = self.fc(z)
z = z.view(-1, 4 * self.init_num_filters_, 4, 4)
z = self.features(z)
return F.sigmoid(z)
class MNISTAutoencoder(nn.Module):
""" MNIST Autoencoder from Original Paper's Keras based Implementation.
Args:
init_num_filters (int): initial number of filters from encoder image channels
lrelu_slope (float): positive number indicating LeakyReLU negative slope
inter_fc_dim (int): intermediate fully connected dimensionality prior to embedding layer
embedding_dim (int): embedding dimensionality
"""
def __init__(self, init_num_filters=16, lrelu_slope=0.2, inter_fc_dim=128, embedding_dim=2):
super(MNISTAutoencoder, self).__init__()
self.init_num_filters_ = init_num_filters
self.lrelu_slope_ = lrelu_slope
self.inter_fc_dim_ = inter_fc_dim
self.embedding_dim_ = embedding_dim
self.encoder = MNISTEncoder(init_num_filters, lrelu_slope, inter_fc_dim, embedding_dim)
self.decoder = MNISTDecoder(init_num_filters, lrelu_slope, inter_fc_dim, embedding_dim)
def forward(self, x):
z = self.encoder(x)
return self.decoder(z), z
| 43.110294 | 104 | 0.662289 | 797 | 5,863 | 4.542033 | 0.100376 | 0.081215 | 0.162431 | 0.169061 | 0.883425 | 0.862155 | 0.839227 | 0.794199 | 0.794199 | 0.794199 | 0 | 0.025802 | 0.239809 | 5,863 | 135 | 105 | 43.42963 | 0.786403 | 0.19137 | 0 | 0.53012 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072289 | false | 0 | 0.024096 | 0 | 0.168675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
71de53f5bc6fed510d60e9169075a45b5971237e | 207 | py | Python | src/mempool.py | lenechains/arbor-blockchain | 0563ecc4d978335604003106fbdf3a04e33d154d | [
"MIT"
] | null | null | null | src/mempool.py | lenechains/arbor-blockchain | 0563ecc4d978335604003106fbdf3a04e33d154d | [
"MIT"
] | null | null | null | src/mempool.py | lenechains/arbor-blockchain | 0563ecc4d978335604003106fbdf3a04e33d154d | [
"MIT"
] | null | null | null | class Mempool:
def __init__(self):
self.mempool = {}
def add(self, tx):
self.mempool[tx.header['tx_id']] = tx
def remove(self, tx):
del self.mempool[tx.header['tx_id']]
| 20.7 | 45 | 0.570048 | 29 | 207 | 3.862069 | 0.37931 | 0.294643 | 0.232143 | 0.339286 | 0.410714 | 0.410714 | 0 | 0 | 0 | 0 | 0 | 0 | 0.270531 | 207 | 9 | 46 | 23 | 0.741722 | 0 | 0 | 0 | 0 | 0 | 0.048309 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0 | 0 | 0 | 0.571429 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 7 |
e0f76ae47faee21176be2c186dc7636c89512b6d | 14,789 | py | Python | tensorlayer/layers/convolution/simplified_conv.py | KuKuXia/tensorlayer | 654de4a37892cde54495350f99f5f3b38b2c6eb3 | [
"Apache-2.0"
] | null | null | null | tensorlayer/layers/convolution/simplified_conv.py | KuKuXia/tensorlayer | 654de4a37892cde54495350f99f5f3b38b2c6eb3 | [
"Apache-2.0"
] | null | null | null | tensorlayer/layers/convolution/simplified_conv.py | KuKuXia/tensorlayer | 654de4a37892cde54495350f99f5f3b38b2c6eb3 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorlayer as tl
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
from tensorlayer.layers.core import Layer
from tensorlayer.layers.utils import get_collection_trainable
__all__ = [
'Conv1d',
'Conv2d',
'Conv3d',
]
class Conv1d(Layer):
"""Simplified version of :class:`Conv1dLayer`.
Parameters
----------
n_filter : int
The number of filters
filter_size : int
The filter size
stride : int
The stride step
dilation_rate : int
Specifying the dilation rate to use for dilated convolution.
act : activation function
The function that is applied to the layer activations
padding : str
The padding algorithm type: "SAME" or "VALID".
data_format : str
"channel_last" (NWC, default) or "channels_first" (NCW).
W_init : initializer
The initializer for the weight matrix.
b_init : initializer or None
The initializer for the bias vector. If None, skip biases.
in_channels : int
The number of in channels.
name : None or str
A unique layer name
Examples
--------
With TensorLayer
>>> net = tl.layers.Input([8, 100, 1], name='input')
>>> conv1d = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, b_init=None, in_channels=1, name='conv1d_1')
>>> print(conv1d)
>>> tensor = tl.layers.Conv1d(n_filter=32, filter_size=5, stride=2, act=tf.nn.relu, name='conv1d_2')(net)
>>> print(tensor)
"""
def __init__(
self,
n_filter=32,
filter_size=5,
stride=1,
act=None,
padding='SAME',
data_format="channels_last",
dilation_rate=1,
W_init=tl.initializers.truncated_normal(stddev=0.02),
b_init=tl.initializers.constant(value=0.0),
in_channels=None,
name=None # 'conv1d'
):
super().__init__(name, act=act)
self.n_filter = n_filter
self.filter_size = filter_size
self.stride = stride
self.padding = padding
self.data_format = data_format
self.dilation_rate = dilation_rate
self.W_init = W_init
self.b_init = b_init
self.in_channels = in_channels
if self.in_channels:
self.build(None)
self._built = True
logging.info(
"Conv1d %s: n_filter: %d filter_size: %s stride: %d pad: %s act: %s" % (
self.name, n_filter, filter_size, stride, padding,
self.act.__name__ if self.act is not None else 'No Activation'
)
)
def __repr__(self):
actstr = self.act.__name__ if self.act is not None else 'No Activation'
s = (
'{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
', stride={stride}, padding={padding}'
)
if self.dilation_rate != 1:
s += ', dilation={dilation_rate}'
if self.b_init is None:
s += ', bias=False'
s += (', ' + actstr)
if self.name is not None:
s += ', name=\'{name}\''
s += ')'
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs_shape):
if self.data_format == 'channels_last':
self.data_format = 'NWC'
if self.in_channels is None:
self.in_channels = inputs_shape[-1]
elif self.data_format == 'channels_first':
self.data_format = 'NCW'
if self.in_channels is None:
self.in_channels = inputs_shape[1]
else:
raise Exception("data_format should be either channels_last or channels_first")
self.filter_shape = (self.filter_size, self.in_channels, self.n_filter)
# TODO : check
self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
if self.b_init:
self.b = self._get_weights("biases", shape=(self.n_filter), init=self.b_init)
def forward(self, inputs):
outputs = tf.nn.conv1d(
input=inputs,
filters=self.W,
stride=self.stride,
padding=self.padding,
data_format=self.data_format,
dilations=self.dilation_rate,
name=self.name,
)
if self.b_init:
outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add')
if self.act:
outputs = self.act(outputs)
return outputs
class Conv2d(Layer):
"""Simplified version of :class:`Conv2dLayer`.
Parameters
----------
n_filter : int
The number of filters.
filter_size : tuple of int
The filter size (height, width).
strides : tuple of int
The sliding window strides of corresponding input dimensions.
It must be in the same order as the ``shape`` parameter.
dilation_rate : tuple of int
Specifying the dilation rate to use for dilated convolution.
act : activation function
The activation function of this layer.
padding : str
The padding algorithm type: "SAME" or "VALID".
data_format : str
"channels_last" (NHWC, default) or "channels_first" (NCHW).
W_init : initializer
The initializer for the the weight matrix.
b_init : initializer or None
The initializer for the the bias vector. If None, skip biases.
in_channels : int
The number of in channels.
name : None or str
A unique layer name.
Examples
--------
With TensorLayer
>>> net = tl.layers.Input([8, 400, 400, 3], name='input')
>>> conv2d = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), stride=(2, 2), b_init=None, in_channels=3, name='conv2d_1')
>>> print(conv2d)
>>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3), stride=(2, 2), act=tf.nn.relu, name='conv2d_2')(net)
>>> print(tensor)
"""
def __init__(
self,
n_filter=32,
filter_size=(3, 3),
strides=(1, 1),
act=None,
padding='SAME',
data_format='channels_last',
dilation_rate=(1, 1),
W_init=tl.initializers.truncated_normal(stddev=0.02),
b_init=tl.initializers.constant(value=0.0),
in_channels=None,
name=None # 'conv2d',
):
super().__init__(name, act=act)
self.n_filter = n_filter
self.filter_size = filter_size
self._strides = self.strides = strides
self.padding = padding
self.data_format = data_format
self._dilation_rate = self.dilation_rate = dilation_rate
self.W_init = W_init
self.b_init = b_init
self.in_channels = in_channels
if self.in_channels:
self.build(None)
self._built = True
logging.info(
"Conv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
self.name, n_filter, str(filter_size), str(strides), padding,
self.act.__name__ if self.act is not None else 'No Activation'
)
)
def __repr__(self):
actstr = self.act.__name__ if self.act is not None else 'No Activation'
s = (
'{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
', strides={strides}, padding={padding}'
)
if self.dilation_rate != (1, ) * len(self.dilation_rate):
s += ', dilation={dilation_rate}'
if self.b_init is None:
s += ', bias=False'
s += (', ' + actstr)
if self.name is not None:
s += ', name=\'{name}\''
s += ')'
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs_shape):
if self.data_format == 'channels_last':
self.data_format = 'NHWC'
if self.in_channels is None:
self.in_channels = inputs_shape[-1]
self._strides = [1, self._strides[0], self._strides[1], 1]
self._dilation_rate = [1, self._dilation_rate[0], self._dilation_rate[1], 1]
elif self.data_format == 'channels_first':
self.data_format = 'NCHW'
if self.in_channels is None:
self.in_channels = inputs_shape[1]
self._strides = [1, 1, self._strides[0], self._strides[1]]
self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1]]
else:
raise Exception("data_format should be either channels_last or channels_first")
self.filter_shape = (self.filter_size[0], self.filter_size[1], self.in_channels, self.n_filter)
self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
if self.b_init:
self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init)
def forward(self, inputs):
outputs = tf.nn.conv2d(
input=inputs,
filters=self.W,
strides=self._strides,
padding=self.padding,
data_format=self.data_format, #'NHWC',
dilations=self._dilation_rate, #[1, 1, 1, 1],
name=self.name,
)
if self.b_init:
outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add')
if self.act:
outputs = self.act(outputs)
return outputs
class Conv3d(Layer):
"""Simplified version of :class:`Conv3dLayer`.
Parameters
----------
n_filter : int
The number of filters.
filter_size : tuple of int
The filter size (height, width).
strides : tuple of int
The sliding window strides of corresponding input dimensions.
It must be in the same order as the ``shape`` parameter.
dilation_rate : tuple of int
Specifying the dilation rate to use for dilated convolution.
act : activation function
The activation function of this layer.
padding : str
The padding algorithm type: "SAME" or "VALID".
data_format : str
"channels_last" (NDHWC, default) or "channels_first" (NCDHW).
W_init : initializer
The initializer for the the weight matrix.
b_init : initializer or None
The initializer for the the bias vector. If None, skip biases.
in_channels : int
The number of in channels.
name : None or str
A unique layer name.
Examples
--------
With TensorLayer
>>> net = tl.layers.Input([8, 20, 20, 20, 3], name='input')
>>> conv3d = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3, 3), stride=(2, 2, 2), b_init=None, in_channels=3, name='conv3d_1')
>>> print(conv3d)
>>> tensor = tl.layers.Conv2d(n_filter=32, filter_size=(3, 3, 3), stride=(2, 2, 2), act=tf.nn.relu, name='conv3d_2')(net)
>>> print(tensor)
"""
def __init__(
self,
n_filter=32,
filter_size=(3, 3, 3),
strides=(1, 1, 1),
act=None,
padding='SAME',
data_format='channels_last',
dilation_rate=(1, 1, 1),
W_init=tl.initializers.truncated_normal(stddev=0.02),
b_init=tl.initializers.constant(value=0.0),
in_channels=None,
name=None # 'conv3d',
):
super().__init__(name, act=act)
self.n_filter = n_filter
self.filter_size = filter_size
self._strides = self.strides = strides
self.padding = padding
self.data_format = data_format
self._dilation_rate = self.dilation_rate = dilation_rate
self.W_init = W_init
self.b_init = b_init
self.in_channels = in_channels
if self.in_channels:
self.build(None)
self._built = True
logging.info(
"Conv3d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
self.name, n_filter, str(filter_size), str(strides), padding,
self.act.__name__ if self.act is not None else 'No Activation'
)
)
def __repr__(self):
actstr = self.act.__name__ if self.act is not None else 'No Activation'
s = (
'{classname}(in_channels={in_channels}, out_channels={n_filter}, kernel_size={filter_size}'
', strides={strides}, padding={padding}'
)
if self.dilation_rate != (1, ) * len(self.dilation_rate):
s += ', dilation={dilation_rate}'
if self.b_init is None:
s += ', bias=False'
s += (', ' + actstr)
if self.name is not None:
s += ', name=\'{name}\''
s += ')'
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs_shape):
if self.data_format == 'channels_last':
self.data_format = 'NDHWC'
if self.in_channels is None:
self.in_channels = inputs_shape[-1]
self._strides = [1, self._strides[0], self._strides[1], self._strides[2], 1]
self._dilation_rate = [1, self.dilation_rate[0], self.dilation_rate[1], self.dilation_rate[2], 1]
elif self.data_format == 'channels_first':
self.data_format = 'NCDHW'
if self.in_channels is None:
self.in_channels = inputs_shape[1]
self._strides = [1, 1, self._strides[0], self._strides[1], self._strides[2]]
self._dilation_rate = [1, 1, self._dilation_rate[0], self._dilation_rate[1], self._dilation_rate[2]]
else:
raise Exception("data_format should be either channels_last or channels_first")
self.filter_shape = (
self.filter_size[0], self.filter_size[1], self.filter_size[2], self.in_channels, self.n_filter
)
self.W = self._get_weights("filters", shape=self.filter_shape, init=self.W_init)
if self.b_init:
self.b = self._get_weights("biases", shape=(self.n_filter, ), init=self.b_init)
def forward(self, inputs):
outputs = tf.nn.conv3d(
input=inputs,
filters=self.W,
strides=self._strides,
padding=self.padding,
data_format=self.data_format, #'NDHWC',
dilations=self._dilation_rate, #[1, 1, 1, 1, 1],
name=self.name,
)
if self.b_init:
outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add')
if self.act:
outputs = self.act(outputs)
return outputs
| 35.808717 | 132 | 0.586517 | 1,899 | 14,789 | 4.339652 | 0.089521 | 0.061158 | 0.052421 | 0.026817 | 0.898313 | 0.883145 | 0.875986 | 0.861061 | 0.846863 | 0.841039 | 0 | 0.019355 | 0.297789 | 14,789 | 412 | 133 | 35.895631 | 0.774194 | 0.253296 | 0 | 0.732558 | 0 | 0.011628 | 0.115102 | 0.031391 | 0 | 0 | 0 | 0.002427 | 0 | 1 | 0.046512 | false | 0 | 0.023256 | 0 | 0.104651 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1ca83bac4be5b07d0d52903134745e8b094036ee | 35,630 | py | Python | sdk/python/pulumi_rancher2/app.py | pulumi/pulumi-rancher2 | 7a98af8cf598b711084a7f46c0fe71b43ed7a8ac | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-03-23T15:59:11.000Z | 2021-01-29T00:37:32.000Z | sdk/python/pulumi_rancher2/app.py | pulumi/pulumi-rancher2 | 7a98af8cf598b711084a7f46c0fe71b43ed7a8ac | [
"ECL-2.0",
"Apache-2.0"
] | 76 | 2020-01-16T20:00:25.000Z | 2022-03-31T20:30:08.000Z | sdk/python/pulumi_rancher2/app.py | pulumi/pulumi-rancher2 | 7a98af8cf598b711084a7f46c0fe71b43ed7a8ac | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-03-27T17:39:59.000Z | 2020-11-24T23:09:24.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AppArgs', 'App']
@pulumi.input_type
class AppArgs:
def __init__(__self__, *,
catalog_name: pulumi.Input[str],
project_id: pulumi.Input[str],
target_namespace: pulumi.Input[str],
template_name: pulumi.Input[str],
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
answers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
force_upgrade: Optional[pulumi.Input[bool]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
revision_id: Optional[pulumi.Input[str]] = None,
template_version: Optional[pulumi.Input[str]] = None,
values_yaml: Optional[pulumi.Input[str]] = None,
wait: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a App resource.
:param pulumi.Input[str] catalog_name: Catalog name of the app. If modified, app will be upgraded. For use scoped catalogs:
* add cluster ID before name, `local:<name>` or `c-XXXXX:<name>`
* add project ID before name, `p-XXXXX:<name>`
:param pulumi.Input[str] project_id: The project id where the app will be installed (string)
:param pulumi.Input[str] target_namespace: The namespace id where the app will be installed (string)
:param pulumi.Input[str] template_name: Template name of the app. If modified, app will be upgraded (string)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for App object (map)
:param pulumi.Input[Mapping[str, Any]] answers: Answers for the app template. If modified, app will be upgraded (map)
:param pulumi.Input[str] description: Description for the app (string)
:param pulumi.Input[bool] force_upgrade: Force app upgrade (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for App object (map)
:param pulumi.Input[str] name: The name of the app (string)
:param pulumi.Input[str] revision_id: Current revision id for the app. If modified, If this argument is provided or modified, app will be rollbacked to `revision_id` (string)
:param pulumi.Input[str] template_version: Template version of the app. If modified, app will be upgraded. Default: `latest` (string)
:param pulumi.Input[str] values_yaml: values.yaml base64 encoded file content for the app template. If modified, app will be upgraded (string)
:param pulumi.Input[bool] wait: Wait until app is deployed and active. Default: `true` (bool)
"""
pulumi.set(__self__, "catalog_name", catalog_name)
pulumi.set(__self__, "project_id", project_id)
pulumi.set(__self__, "target_namespace", target_namespace)
pulumi.set(__self__, "template_name", template_name)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if answers is not None:
pulumi.set(__self__, "answers", answers)
if description is not None:
pulumi.set(__self__, "description", description)
if force_upgrade is not None:
pulumi.set(__self__, "force_upgrade", force_upgrade)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if revision_id is not None:
pulumi.set(__self__, "revision_id", revision_id)
if template_version is not None:
pulumi.set(__self__, "template_version", template_version)
if values_yaml is not None:
pulumi.set(__self__, "values_yaml", values_yaml)
if wait is not None:
pulumi.set(__self__, "wait", wait)
@property
@pulumi.getter(name="catalogName")
def catalog_name(self) -> pulumi.Input[str]:
"""
Catalog name of the app. If modified, app will be upgraded. For use scoped catalogs:
* add cluster ID before name, `local:<name>` or `c-XXXXX:<name>`
* add project ID before name, `p-XXXXX:<name>`
"""
return pulumi.get(self, "catalog_name")
@catalog_name.setter
def catalog_name(self, value: pulumi.Input[str]):
pulumi.set(self, "catalog_name", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Input[str]:
"""
The project id where the app will be installed (string)
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="targetNamespace")
def target_namespace(self) -> pulumi.Input[str]:
"""
The namespace id where the app will be installed (string)
"""
return pulumi.get(self, "target_namespace")
@target_namespace.setter
def target_namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "target_namespace", value)
    @property
    @pulumi.getter(name="templateName")
    def template_name(self) -> pulumi.Input[str]:
        """
        Template name of the app. If modified, app will be upgraded (string)
        """
        return pulumi.get(self, "template_name")

    @template_name.setter
    def template_name(self, value: pulumi.Input[str]):
        """Set the template name (required input)."""
        pulumi.set(self, "template_name", value)
    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Annotations for App object (map)
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        """Set the annotations map (optional input)."""
        pulumi.set(self, "annotations", value)
    @property
    @pulumi.getter
    def answers(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Answers for the app template. If modified, app will be upgraded (map)
        """
        return pulumi.get(self, "answers")

    @answers.setter
    def answers(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        """Set the template answers map (optional input)."""
        pulumi.set(self, "answers", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description for the app (string)
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        """Set the app description (optional input)."""
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="forceUpgrade")
    def force_upgrade(self) -> Optional[pulumi.Input[bool]]:
        """
        Force app upgrade (bool)
        """
        return pulumi.get(self, "force_upgrade")

    @force_upgrade.setter
    def force_upgrade(self, value: Optional[pulumi.Input[bool]]):
        """Set the force-upgrade flag (optional input)."""
        pulumi.set(self, "force_upgrade", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Labels for App object (map)
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        """Set the labels map (optional input)."""
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the app (string)
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        """Set the app name (optional input; auto-named when omitted)."""
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="revisionId")
    def revision_id(self) -> Optional[pulumi.Input[str]]:
        """
        Current revision id for the app. If this argument is provided or modified, the app will be rolled back to `revision_id` (string)
        """
        return pulumi.get(self, "revision_id")

    @revision_id.setter
    def revision_id(self, value: Optional[pulumi.Input[str]]):
        """Set the revision id to roll the app back to (optional input)."""
        pulumi.set(self, "revision_id", value)
    @property
    @pulumi.getter(name="templateVersion")
    def template_version(self) -> Optional[pulumi.Input[str]]:
        """
        Template version of the app. If modified, app will be upgraded. Default: `latest` (string)
        """
        return pulumi.get(self, "template_version")

    @template_version.setter
    def template_version(self, value: Optional[pulumi.Input[str]]):
        """Set the template version (optional input)."""
        pulumi.set(self, "template_version", value)
    @property
    @pulumi.getter(name="valuesYaml")
    def values_yaml(self) -> Optional[pulumi.Input[str]]:
        """
        values.yaml base64 encoded file content for the app template. If modified, app will be upgraded (string)
        """
        return pulumi.get(self, "values_yaml")

    @values_yaml.setter
    def values_yaml(self, value: Optional[pulumi.Input[str]]):
        """Set the base64-encoded values.yaml content (optional input)."""
        pulumi.set(self, "values_yaml", value)
    @property
    @pulumi.getter
    def wait(self) -> Optional[pulumi.Input[bool]]:
        """
        Wait until app is deployed and active. Default: `true` (bool)
        """
        return pulumi.get(self, "wait")

    @wait.setter
    def wait(self, value: Optional[pulumi.Input[bool]]):
        """Set the wait-for-deployment flag (optional input)."""
        pulumi.set(self, "wait", value)
@pulumi.input_type
class _AppState:
    """
    State properties for an existing ``App`` resource.

    Unlike the creation arguments, every property here is optional because
    state lookups may match on any subset of fields; ``external_id`` is also
    exposed since it is computed by the provider.
    """
    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 answers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 catalog_name: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 external_id: Optional[pulumi.Input[str]] = None,
                 force_upgrade: Optional[pulumi.Input[bool]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 revision_id: Optional[pulumi.Input[str]] = None,
                 target_namespace: Optional[pulumi.Input[str]] = None,
                 template_name: Optional[pulumi.Input[str]] = None,
                 template_version: Optional[pulumi.Input[str]] = None,
                 values_yaml: Optional[pulumi.Input[str]] = None,
                 wait: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering App resources.
        :param pulumi.Input[Mapping[str, Any]] annotations: Annotations for App object (map)
        :param pulumi.Input[Mapping[str, Any]] answers: Answers for the app template. If modified, app will be upgraded (map)
        :param pulumi.Input[str] catalog_name: Catalog name of the app. If modified, app will be upgraded. For use scoped catalogs:
               * add cluster ID before name, `local:<name>` or `c-XXXXX:<name>`
               * add project ID before name, `p-XXXXX:<name>`
        :param pulumi.Input[str] description: Description for the app (string)
        :param pulumi.Input[str] external_id: (Computed) The url of the app template on a catalog (string)
        :param pulumi.Input[bool] force_upgrade: Force app upgrade (bool)
        :param pulumi.Input[Mapping[str, Any]] labels: Labels for App object (map)
        :param pulumi.Input[str] name: The name of the app (string)
        :param pulumi.Input[str] project_id: The project id where the app will be installed (string)
        :param pulumi.Input[str] revision_id: Current revision id for the app. If this argument is provided or modified, the app will be rolled back to `revision_id` (string)
        :param pulumi.Input[str] target_namespace: The namespace id where the app will be installed (string)
        :param pulumi.Input[str] template_name: Template name of the app. If modified, app will be upgraded (string)
        :param pulumi.Input[str] template_version: Template version of the app. If modified, app will be upgraded. Default: `latest` (string)
        :param pulumi.Input[str] values_yaml: values.yaml base64 encoded file content for the app template. If modified, app will be upgraded (string)
        :param pulumi.Input[bool] wait: Wait until app is deployed and active. Default: `true` (bool)
        """
        # Only set properties that were actually supplied so that unset
        # fields do not participate in the state lookup/filter.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if answers is not None:
            pulumi.set(__self__, "answers", answers)
        if catalog_name is not None:
            pulumi.set(__self__, "catalog_name", catalog_name)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if external_id is not None:
            pulumi.set(__self__, "external_id", external_id)
        if force_upgrade is not None:
            pulumi.set(__self__, "force_upgrade", force_upgrade)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if project_id is not None:
            pulumi.set(__self__, "project_id", project_id)
        if revision_id is not None:
            pulumi.set(__self__, "revision_id", revision_id)
        if target_namespace is not None:
            pulumi.set(__self__, "target_namespace", target_namespace)
        if template_name is not None:
            pulumi.set(__self__, "template_name", template_name)
        if template_version is not None:
            pulumi.set(__self__, "template_version", template_version)
        if values_yaml is not None:
            pulumi.set(__self__, "values_yaml", values_yaml)
        if wait is not None:
            pulumi.set(__self__, "wait", wait)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Annotations for App object (map)
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter
    def answers(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Answers for the app template. If modified, app will be upgraded (map)
        """
        return pulumi.get(self, "answers")

    @answers.setter
    def answers(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "answers", value)

    @property
    @pulumi.getter(name="catalogName")
    def catalog_name(self) -> Optional[pulumi.Input[str]]:
        """
        Catalog name of the app. If modified, app will be upgraded. For use scoped catalogs:
        * add cluster ID before name, `local:<name>` or `c-XXXXX:<name>`
        * add project ID before name, `p-XXXXX:<name>`
        """
        return pulumi.get(self, "catalog_name")

    @catalog_name.setter
    def catalog_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "catalog_name", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description for the app (string)
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> Optional[pulumi.Input[str]]:
        """
        (Computed) The url of the app template on a catalog (string)
        """
        return pulumi.get(self, "external_id")

    @external_id.setter
    def external_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "external_id", value)

    @property
    @pulumi.getter(name="forceUpgrade")
    def force_upgrade(self) -> Optional[pulumi.Input[bool]]:
        """
        Force app upgrade (bool)
        """
        return pulumi.get(self, "force_upgrade")

    @force_upgrade.setter
    def force_upgrade(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force_upgrade", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        Labels for App object (map)
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the app (string)
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> Optional[pulumi.Input[str]]:
        """
        The project id where the app will be installed (string)
        """
        return pulumi.get(self, "project_id")

    @project_id.setter
    def project_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_id", value)

    @property
    @pulumi.getter(name="revisionId")
    def revision_id(self) -> Optional[pulumi.Input[str]]:
        """
        Current revision id for the app. If this argument is provided or modified, the app will be rolled back to `revision_id` (string)
        """
        return pulumi.get(self, "revision_id")

    @revision_id.setter
    def revision_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "revision_id", value)

    @property
    @pulumi.getter(name="targetNamespace")
    def target_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The namespace id where the app will be installed (string)
        """
        return pulumi.get(self, "target_namespace")

    @target_namespace.setter
    def target_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_namespace", value)

    @property
    @pulumi.getter(name="templateName")
    def template_name(self) -> Optional[pulumi.Input[str]]:
        """
        Template name of the app. If modified, app will be upgraded (string)
        """
        return pulumi.get(self, "template_name")

    @template_name.setter
    def template_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_name", value)

    @property
    @pulumi.getter(name="templateVersion")
    def template_version(self) -> Optional[pulumi.Input[str]]:
        """
        Template version of the app. If modified, app will be upgraded. Default: `latest` (string)
        """
        return pulumi.get(self, "template_version")

    @template_version.setter
    def template_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_version", value)

    @property
    @pulumi.getter(name="valuesYaml")
    def values_yaml(self) -> Optional[pulumi.Input[str]]:
        """
        values.yaml base64 encoded file content for the app template. If modified, app will be upgraded (string)
        """
        return pulumi.get(self, "values_yaml")

    @values_yaml.setter
    def values_yaml(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "values_yaml", value)

    @property
    @pulumi.getter
    def wait(self) -> Optional[pulumi.Input[bool]]:
        """
        Wait until app is deployed and active. Default: `true` (bool)
        """
        return pulumi.get(self, "wait")

    @wait.setter
    def wait(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "wait", value)
class App(pulumi.CustomResource):
    """
    A Rancher2 App resource (Pulumi type `rancher2:index/app:App`).

    Apps can be imported using the app ID in the format
    `<project_id>:<app_name>`.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 answers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 catalog_name: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 force_upgrade: Optional[pulumi.Input[bool]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 revision_id: Optional[pulumi.Input[str]] = None,
                 target_namespace: Optional[pulumi.Input[str]] = None,
                 template_name: Optional[pulumi.Input[str]] = None,
                 template_version: Optional[pulumi.Input[str]] = None,
                 values_yaml: Optional[pulumi.Input[str]] = None,
                 wait: Optional[pulumi.Input[bool]] = None,
                 __props__=None):
        """
        ## Import
        Apps can be imported using the app ID in the format `<project_id>:<app_name>`
        ```sh
        $ pulumi import rancher2:index/app:App foo <PROJECT_ID>:<APP_NAME>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Mapping[str, Any]] annotations: Annotations for App object (map)
        :param pulumi.Input[Mapping[str, Any]] answers: Answers for the app template. If modified, app will be upgraded (map)
        :param pulumi.Input[str] catalog_name: Catalog name of the app. If modified, app will be upgraded. For use scoped catalogs:
               * add cluster ID before name, `local:<name>` or `c-XXXXX:<name>`
               * add project ID before name, `p-XXXXX:<name>`
        :param pulumi.Input[str] description: Description for the app (string)
        :param pulumi.Input[bool] force_upgrade: Force app upgrade (bool)
        :param pulumi.Input[Mapping[str, Any]] labels: Labels for App object (map)
        :param pulumi.Input[str] name: The name of the app (string)
        :param pulumi.Input[str] project_id: The project id where the app will be installed (string)
        :param pulumi.Input[str] revision_id: Current revision id for the app. If this argument is provided or modified, the app will be rolled back to `revision_id` (string)
        :param pulumi.Input[str] target_namespace: The namespace id where the app will be installed (string)
        :param pulumi.Input[str] template_name: Template name of the app. If modified, app will be upgraded (string)
        :param pulumi.Input[str] template_version: Template version of the app. If modified, app will be upgraded. Default: `latest` (string)
        :param pulumi.Input[str] values_yaml: values.yaml base64 encoded file content for the app template. If modified, app will be upgraded (string)
        :param pulumi.Input[bool] wait: Wait until app is deployed and active. Default: `true` (bool)
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: AppArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Import
        Apps can be imported using the app ID in the format `<project_id>:<app_name>`
        ```sh
        $ pulumi import rancher2:index/app:App foo <PROJECT_ID>:<APP_NAME>
        ```
        :param str resource_name: The name of the resource.
        :param AppArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an AppArgs bundle
        # or keyword arguments for each property.
        resource_args, opts = _utilities.get_resource_args_opts(AppArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       answers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       catalog_name: Optional[pulumi.Input[str]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       force_upgrade: Optional[pulumi.Input[bool]] = None,
                       labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       project_id: Optional[pulumi.Input[str]] = None,
                       revision_id: Optional[pulumi.Input[str]] = None,
                       target_namespace: Optional[pulumi.Input[str]] = None,
                       template_name: Optional[pulumi.Input[str]] = None,
                       template_version: Optional[pulumi.Input[str]] = None,
                       values_yaml: Optional[pulumi.Input[str]] = None,
                       wait: Optional[pulumi.Input[bool]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the full property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AppArgs.__new__(AppArgs)
            __props__.__dict__["annotations"] = annotations
            __props__.__dict__["answers"] = answers
            # Required properties must be present unless a URN is supplied
            # (i.e. the resource is being looked up rather than created).
            if catalog_name is None and not opts.urn:
                raise TypeError("Missing required property 'catalog_name'")
            __props__.__dict__["catalog_name"] = catalog_name
            __props__.__dict__["description"] = description
            __props__.__dict__["force_upgrade"] = force_upgrade
            __props__.__dict__["labels"] = labels
            __props__.__dict__["name"] = name
            if project_id is None and not opts.urn:
                raise TypeError("Missing required property 'project_id'")
            __props__.__dict__["project_id"] = project_id
            __props__.__dict__["revision_id"] = revision_id
            if target_namespace is None and not opts.urn:
                raise TypeError("Missing required property 'target_namespace'")
            __props__.__dict__["target_namespace"] = target_namespace
            if template_name is None and not opts.urn:
                raise TypeError("Missing required property 'template_name'")
            __props__.__dict__["template_name"] = template_name
            __props__.__dict__["template_version"] = template_version
            __props__.__dict__["values_yaml"] = values_yaml
            __props__.__dict__["wait"] = wait
            # external_id is computed by the provider, never set by callers.
            __props__.__dict__["external_id"] = None
        super(App, __self__).__init__(
            'rancher2:index/app:App',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            answers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            catalog_name: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            external_id: Optional[pulumi.Input[str]] = None,
            force_upgrade: Optional[pulumi.Input[bool]] = None,
            labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            project_id: Optional[pulumi.Input[str]] = None,
            revision_id: Optional[pulumi.Input[str]] = None,
            target_namespace: Optional[pulumi.Input[str]] = None,
            template_name: Optional[pulumi.Input[str]] = None,
            template_version: Optional[pulumi.Input[str]] = None,
            values_yaml: Optional[pulumi.Input[str]] = None,
            wait: Optional[pulumi.Input[bool]] = None) -> 'App':
        """
        Get an existing App resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Mapping[str, Any]] annotations: Annotations for App object (map)
        :param pulumi.Input[Mapping[str, Any]] answers: Answers for the app template. If modified, app will be upgraded (map)
        :param pulumi.Input[str] catalog_name: Catalog name of the app. If modified, app will be upgraded. For use scoped catalogs:
               * add cluster ID before name, `local:<name>` or `c-XXXXX:<name>`
               * add project ID before name, `p-XXXXX:<name>`
        :param pulumi.Input[str] description: Description for the app (string)
        :param pulumi.Input[str] external_id: (Computed) The url of the app template on a catalog (string)
        :param pulumi.Input[bool] force_upgrade: Force app upgrade (bool)
        :param pulumi.Input[Mapping[str, Any]] labels: Labels for App object (map)
        :param pulumi.Input[str] name: The name of the app (string)
        :param pulumi.Input[str] project_id: The project id where the app will be installed (string)
        :param pulumi.Input[str] revision_id: Current revision id for the app. If this argument is provided or modified, the app will be rolled back to `revision_id` (string)
        :param pulumi.Input[str] target_namespace: The namespace id where the app will be installed (string)
        :param pulumi.Input[str] template_name: Template name of the app. If modified, app will be upgraded (string)
        :param pulumi.Input[str] template_version: Template version of the app. If modified, app will be upgraded. Default: `latest` (string)
        :param pulumi.Input[str] values_yaml: values.yaml base64 encoded file content for the app template. If modified, app will be upgraded (string)
        :param pulumi.Input[bool] wait: Wait until app is deployed and active. Default: `true` (bool)
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _AppState.__new__(_AppState)
        __props__.__dict__["annotations"] = annotations
        __props__.__dict__["answers"] = answers
        __props__.__dict__["catalog_name"] = catalog_name
        __props__.__dict__["description"] = description
        __props__.__dict__["external_id"] = external_id
        __props__.__dict__["force_upgrade"] = force_upgrade
        __props__.__dict__["labels"] = labels
        __props__.__dict__["name"] = name
        __props__.__dict__["project_id"] = project_id
        __props__.__dict__["revision_id"] = revision_id
        __props__.__dict__["target_namespace"] = target_namespace
        __props__.__dict__["template_name"] = template_name
        __props__.__dict__["template_version"] = template_version
        __props__.__dict__["values_yaml"] = values_yaml
        __props__.__dict__["wait"] = wait
        return App(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def annotations(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        Annotations for App object (map)
        """
        return pulumi.get(self, "annotations")

    @property
    @pulumi.getter
    def answers(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        """
        Answers for the app template. If modified, app will be upgraded (map)
        """
        return pulumi.get(self, "answers")

    @property
    @pulumi.getter(name="catalogName")
    def catalog_name(self) -> pulumi.Output[str]:
        """
        Catalog name of the app. If modified, app will be upgraded. For use scoped catalogs:
        * add cluster ID before name, `local:<name>` or `c-XXXXX:<name>`
        * add project ID before name, `p-XXXXX:<name>`
        """
        return pulumi.get(self, "catalog_name")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        Description for the app (string)
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> pulumi.Output[str]:
        """
        (Computed) The url of the app template on a catalog (string)
        """
        return pulumi.get(self, "external_id")

    @property
    @pulumi.getter(name="forceUpgrade")
    def force_upgrade(self) -> pulumi.Output[Optional[bool]]:
        """
        Force app upgrade (bool)
        """
        return pulumi.get(self, "force_upgrade")

    @property
    @pulumi.getter
    def labels(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        Labels for App object (map)
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the app (string)
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Output[str]:
        """
        The project id where the app will be installed (string)
        """
        return pulumi.get(self, "project_id")

    @property
    @pulumi.getter(name="revisionId")
    def revision_id(self) -> pulumi.Output[str]:
        """
        Current revision id for the app. If this argument is provided or modified, the app will be rolled back to `revision_id` (string)
        """
        return pulumi.get(self, "revision_id")

    @property
    @pulumi.getter(name="targetNamespace")
    def target_namespace(self) -> pulumi.Output[str]:
        """
        The namespace id where the app will be installed (string)
        """
        return pulumi.get(self, "target_namespace")

    @property
    @pulumi.getter(name="templateName")
    def template_name(self) -> pulumi.Output[str]:
        """
        Template name of the app. If modified, app will be upgraded (string)
        """
        return pulumi.get(self, "template_name")

    @property
    @pulumi.getter(name="templateVersion")
    def template_version(self) -> pulumi.Output[str]:
        """
        Template version of the app. If modified, app will be upgraded. Default: `latest` (string)
        """
        return pulumi.get(self, "template_version")

    @property
    @pulumi.getter(name="valuesYaml")
    def values_yaml(self) -> pulumi.Output[Optional[str]]:
        """
        values.yaml base64 encoded file content for the app template. If modified, app will be upgraded (string)
        """
        return pulumi.get(self, "values_yaml")

    @property
    @pulumi.getter
    def wait(self) -> pulumi.Output[Optional[bool]]:
        """
        Wait until app is deployed and active. Default: `true` (bool)
        """
        return pulumi.get(self, "wait")
| 43.610771 | 182 | 0.632108 | 4,314 | 35,630 | 5.038248 | 0.044738 | 0.09717 | 0.080515 | 0.07389 | 0.927168 | 0.909363 | 0.892294 | 0.877479 | 0.861238 | 0.845733 | 0 | 0.000678 | 0.254364 | 35,630 | 816 | 183 | 43.664216 | 0.817443 | 0.306371 | 0 | 0.803719 | 1 | 0 | 0.088671 | 0.000962 | 0 | 0 | 0 | 0 | 0 | 1 | 0.165289 | false | 0.002066 | 0.010331 | 0 | 0.274793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
1caa408f9322cd1d9fb9ea91d4ab8c6d782059d3 | 48,048 | py | Python | amazon/paapi.py | geoffjukes/amazon-paapi5 | 41ffcf67e144c15b992eb50f2b4fc6dd3aa0068c | [
"MIT"
] | null | null | null | amazon/paapi.py | geoffjukes/amazon-paapi5 | 41ffcf67e144c15b992eb50f2b4fc6dd3aa0068c | [
"MIT"
] | null | null | null | amazon/paapi.py | geoffjukes/amazon-paapi5 | 41ffcf67e144c15b992eb50f2b4fc6dd3aa0068c | [
"MIT"
] | null | null | null | """
.. module:: paapi
"""
from paapi5_python_sdk.api.default_api import DefaultApi
from paapi5_python_sdk.api_client import ApiClient
from paapi5_python_sdk.configuration import Configuration
from paapi5_python_sdk.partner_type import PartnerType
from paapi5_python_sdk.rest import ApiException
from paapi5_python_sdk.get_items_request import GetItemsRequest
from paapi5_python_sdk.search_items_request import SearchItemsRequest
from paapi5_python_sdk.get_variations_request import GetVariationsRequest
from paapi5_python_sdk.get_browse_nodes_request import GetBrowseNodesRequest
import time, json, pickle, pprint, os
from urllib.parse import quote as urllib_quote
from .entities import AmazonProduct, AmazonBrowseNode
from .constant import *
from .exception import AmazonException
def _quote_query(query):
"""Turn a dictionary into a query string in a URL, with keys
in alphabetical order."""
return "&".join("%s=%s" % (
k, urllib_quote(
str(query[k]).encode('utf-8'), safe='~'))
for k in sorted(query))
def parse_response_browse_node(browse_nodes_response_list):
    """
    Index a GetBrowseNodes response list by browse-node identifier.
    params
        *browse_nodes_response_list*
            List of BrowseNodes in GetBrowseNodes response
    return
        Dict of BrowseNodeID to AmazonBrowseNode object
    """
    return {node.id: node for node in browse_nodes_response_list}
def parse_response_item(item_response_list):
    """
    Index a GetItemsResponse item list by ASIN.
    params:
        *item_response_list*
            List of Items in GetItemsResponse
    return
        Dict of ASIN to AmazonProduct object
    """
    return {product.asin: product for product in item_response_list}
class AmazonAPI:
"""
Creates an instance containing your API credentials.
params:
*access_key (string)*
Your API key
*secret_key (string)*
Your API secret
*partner_tag (string)*
The tag you want to use for the URL
*country (string)*
Country code
*throttling (float, optional)*
Reduce this value to wait longer between API calls
*CacheReader (function)*
Write a function to read the stored responses from previous api calls
*CacheWriter (function)*
Write a function to save the responses returned by amazon api calls
"""
def __init__(self, access_key=None, secret_key=None, partner_tag=None, country='US', throttling=0.9, CacheReader=None, CacheWriter=None):
"""
init AmazonApi. It is necessary to specify *access_key, secret_key, partner_tag, country* parameters
By default the throttling parameter is set to 0.9. Increse or descrease this number to manage the time among different calls
params:
*access_key (string)*
amazon key of AWS account
*secret_key (string)*
amazon secret of AWS account
*partner_tag*
tag of the service Amazon Product Advertising account
*country (string)*
possible values are defined in `amazon.constant.REGIONS`
*throttling (float)*
value in the range (0,1] to wait among calls
*CacheReader (function)*
function to read from cache
*CacheWriter (function)*
function to write results into the cache
"""
self.access_key = access_key or os.environ.get('AWS_ACCESS_KEY_ID')
self.secret_key = secret_key or os.environ.get('AWS_SECRET_ACCESS_KEY')
self.partner_tag = partner_tag or os.environ.get('AWS_ASSOCIATE_TAG')
if not all([self.access_key, self.secret_key, self.partner_tag]):
raise AmazonException('Missing Credentials (AccessKey, SecretKey or AssociateTag). Please specify credentials.')
self.throttling = throttling
self.country = country
self.host = 'webservices.amazon.' + DOMAINS[country]
self.region = REGIONS[country]
self.marketplace = 'www.amazon.' + DOMAINS[country]
self.last_query_time = time.time()
self.CacheReader = CacheReader
self.CacheWriter = CacheWriter
self.default_api = DefaultApi(
access_key=self.access_key, secret_key=self.secret_key, host=self.host, region=self.region
)
def _cache_url(self, query):
"""
return a url used to identify the call and retrieve it from the cache if CacheReader and CacheWriter are set.
"""
return self.host + "?" + _quote_query(query)
def search_items(self, actor=None, artist=None, author=None, availability=None, brand=None, browse_node_id=None, condition=None, currency_of_preference=None, delivery_flags=None, item_count=10, item_page=1, keywords=None, languages_of_preference=None, max_price=None, merchant="All", min_price=None, min_reviews_rating=None, min_saving_percent=None, offer_count=1, search_index="All", sort_by=None, title=None, http_info=False, async_req=False, search_items_resource=SEARCH_RESOURCES):
    """
    Search products based on keywords.
    Choose resources you want from SEARCH_RESOURCES enum.
    For more details, refer: https://webservices.amazon.com/paapi5/documentation/search-items.html#resources-parameter

    args:
        *actor (string)*
            actor to search products
        *artist (string)*
            artist to search products
        *author (string)*
            author to search products
        *availability (string)*
            availability to search products. Admitted values: "Available", "IncludeOutOfStock"
        *brand (string, optional)*
            filter the products based on the brand
        *browse_node_id (string)*
            search products into a specific browse node
        *condition (enum, optional)*
            filter the products based on the condition
        *currency_of_preference (string)*
            Currency of preference in which the prices information should be returned in response. By default the prices are returned in the default currency of the marketplace. Expected currency code format is the ISO 4217 currency code (i.e. USD, EUR etc.)
        *delivery_flags (list of string)*
            The delivery flag filters items which satisfy a certain delivery program promoted by the specific Amazon Marketplace. For example, Prime DeliveryFlag will return items having at least one offer which is Prime Eligible.
        *item_count (integer)*
            number of products returned. Values in the range [1,10]. Default 10
        *item_page (integer)*
            can be used to fetch the specific set/page of items to be returned from the available Search Results. The number of items returned in a page is determined by the item_count parameter.
        *keywords (string)*
            keywords to search products
        *languages_of_preference (list of string)*
            Languages in order of preference in which the item information should be returned in response. By default the item information is returned in the default language of the marketplace.
        *max_price (positive integers)*
            Filters search results to items with at least one offer price below the specified value. Prices appear in lowest currency denomination. For example, in US marketplace, 3241 is $32.41.
        *merchant (string)*
            Filters search results to return items having at least one offer sold by target merchant. By default the value "All" is passed.
        *min_price (positive integers)*
            Filters search results to items with at least one offer price above the specified value. Prices appear in lowest currency denomination. For example, in US marketplace, 3241 is $32.41.
        *min_reviews_rating (positive integers less than 5)*
            Filters search results to items with customer review ratings above specified value.
        *min_saving_percent (integers less than 100)*
            Filters search results to items with at least one offer having saving percentage above the specified value
        *offer_count (integer)*
            The number of offers desired for each item in the search results. Default: 1
        *search_index (string)*
            search products based on an index. Default value "All"
        *sort_by (string, optional)*
            sort the results based on the specification defined at https://webservices.amazon.com/paapi5/documentation/search-items.html#sortby-parameter
        *title (string)*
            Title associated with the item. Title searches are subset of Keywords searches. Use a Keywords search if a Title search does not return desired items.
        *http_info (boolean)*
            specify if http header should be returned
        *async_req (boolean)*
            specify if a thread should be created to run the request
        *search_items_resource (list)*
            For more details, refer: https://webservices.amazon.com/paapi5/documentation/search-items.html#resources-parameter. By default all possible resources are requested.

    return
        Dict with
            *data*
                contains the AmazonProduct list
            *http_info*
                contains the http header information if requested. By default None
    """
    # Phase 1: build the request, serving the call from cache when possible.
    try:
        # Clamp item_count into the API-supported range [1, 10].
        if item_count > 10 or item_count < 1:
            item_count = 10
        # Key uniquely identifying this exact query for CacheReader/CacheWriter.
        cache_url = self._cache_url(
            {'partner_tag': self.partner_tag,
             'partner_type': PartnerType.ASSOCIATES,
             'keywords': keywords,
             'search_index': search_index,
             'item_count': item_count,
             'condition': condition,
             'browse_node_id': browse_node_id,
             'brand': brand,
             'sort_by': sort_by,
             'actor': actor,
             'artist': artist,
             'author': author,
             'availability': availability,
             'currency_of_preference': currency_of_preference,
             'delivery_flags': delivery_flags,
             'item_page': item_page,
             'languages_of_preference': languages_of_preference,
             'max_price': max_price,
             'merchant': merchant,
             'min_price': min_price,
             'min_reviews_rating': min_reviews_rating,
             'min_saving_percent': min_saving_percent,
             'offer_count': offer_count,
             'title': title
             }
        )
        if self.CacheReader:
            cached_response_text = self.CacheReader(cache_url)
            if cached_response_text is not None:
                # Cache hit: entries store pickled 'data' and 'http_info' blobs.
                return {'data': pickle.loads(cached_response_text['data']), 'http_info': pickle.loads(cached_response_text['http_info'])}
        search_items_request = SearchItemsRequest(
            partner_tag=self.partner_tag,
            partner_type=PartnerType.ASSOCIATES,
            actor=actor,
            artist=artist,
            author=author,
            availability=availability,
            brand=brand,
            browse_node_id=browse_node_id,
            condition=condition,
            currency_of_preference=currency_of_preference,
            delivery_flags=delivery_flags,
            item_count=item_count,
            item_page=item_page,
            keywords=keywords,
            languages_of_preference=languages_of_preference,
            max_price=max_price,
            merchant=merchant,
            min_price=min_price,
            min_reviews_rating=min_reviews_rating,
            min_saving_percent=min_saving_percent,
            offer_count=offer_count,
            resources=search_items_resource,
            search_index=search_index,
            sort_by=sort_by,
            title=title
        )
    except ValueError as exception:
        raise AmazonException("ValueError", exception)
    except AmazonException as exception:
        # Re-raise preserving the original status/reason.
        raise AmazonException(exception.status, exception.reason)
    # Phase 2: send the request and parse the response.
    try:
        """ Sending request """
        # Throttle: keep at least 1/throttling seconds between consecutive calls.
        wait_time = 1 / self.throttling - (time.time() - self.last_query_time)
        if wait_time > 0:
            time.sleep(wait_time)
        self.last_query_time = time.time()
        resp_http = None
        if http_info:
            # *_with_http_info returns a (body, status, headers) tuple; keep body and headers.
            response_with_http_info = self.default_api.search_items_with_http_info(search_items_request)
            """ Parse response """
            if response_with_http_info is not None:
                response = response_with_http_info[0]
                resp_http = response_with_http_info[2]
                if response.search_result is not None:
                    resp = [AmazonProduct(item) for item in response.search_result.items]
                    if self.CacheWriter:
                        self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
                    return {'data': resp, 'http_info': resp_http}
                if response.errors is not None:
                    # Surface only the first error object returned by the API.
                    raise AmazonException(response.errors[0].code, response.errors[0].message)
        else:
            if async_req:
                # async_req=True returns a thread-like handle; block on .get() for the result.
                thread = self.default_api.search_items(search_items_request, async_req=True)
                response = thread.get()
            else:
                response = self.default_api.search_items(search_items_request)
            """ Parse response """
            if response.search_result is not None:
                resp = [AmazonProduct(item) for item in response.search_result.items]
                if self.CacheWriter:
                    self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
                return {'data': resp, 'http_info': resp_http}
            if response.errors is not None:
                raise AmazonException(response.errors[0].code, response.errors[0].message)
    except ApiException as exception:
        raise AmazonException("ApiException", exception.body)
    except TypeError as exception:
        raise AmazonException("TypeError", exception)
    except ValueError as exception:
        # NOTE(review): this passes the ValueError *class* (not the string
        # "ValueError") as the status, unlike the other handlers — looks
        # unintentional; confirm before relying on the status value.
        raise AmazonException(ValueError, exception)
    except AmazonException as exception:
        raise AmazonException(exception.status, exception.reason)
    except Exception as exception:
        raise AmazonException("General", exception)
def search_items_pool(self, actor=None, artist=None, author=None, availability=None, brand=None, browse_node_id=None, condition=None, currency_of_preference=None, delivery_flags=None, item_count=10, item_page=1, keywords=None, languages_of_preference=None, max_price=None, merchant="All", min_price=None, min_reviews_rating=None, min_saving_percent=None, offer_count=1, search_index="All", sort_by=None, title=None, search_items_resource=SEARCH_RESOURCES, connetion_pool_max_size=12):
    """
    Search products based on keywords using a dedicated connection pool.
    You can specify max connection pool size here. We recommend a value equal to cpu_count * 5.
    Choose resources you want from SEARCH_RESOURCES enum.
    For more details, refer: https://webservices.amazon.com/paapi5/documentation/search-items.html#resources-parameter

    args:
        Same search parameters as `search_items` (actor, artist, author,
        availability, brand, browse_node_id, condition, currency_of_preference,
        delivery_flags, item_count, item_page, keywords,
        languages_of_preference, max_price, merchant, min_price,
        min_reviews_rating, min_saving_percent, offer_count, search_index,
        sort_by, title, search_items_resource), plus:
        *connetion_pool_max_size (integer)*
            size of the connection pool. Default 12

    return
        Dict with
            *data*
                contains the AmazonProduct list
            *http_info*
                always None for this method (http headers are not requested)
    """
    # The generated Configuration class may behave as a singleton (swagger-style
    # default-instance metaclass), so the pool size is applied by re-running
    # __init__ on the instance rather than by passing it to the constructor.
    configuration = Configuration()
    configuration.__init__(connetion_pool_max_size)
    # Dedicated client/API bound to the pooled configuration (instead of the
    # shared self.default_api used by the other request methods).
    api_client = ApiClient(
        access_key=self.access_key,
        secret_key=self.secret_key,
        host=self.host,
        region=self.region,
        configuration=configuration,
    )
    default_api = DefaultApi(api_client=api_client)
    # Phase 1: build the request, serving the call from cache when possible.
    try:
        # Clamp item_count into the API-supported range [1, 10].
        if item_count > 10 or item_count < 1:
            item_count = 10
        cache_url = self._cache_url(
            {'partner_tag': self.partner_tag,
             'partner_type': PartnerType.ASSOCIATES,
             'keywords': keywords,
             'search_index': search_index,
             'item_count': item_count,
             'condition': condition,
             'browse_node_id': browse_node_id,
             'brand': brand,
             'sort_by': sort_by,
             'actor': actor,
             'artist': artist,
             'author': author,
             'availability': availability,
             'currency_of_preference': currency_of_preference,
             'delivery_flags': delivery_flags,
             'item_page': item_page,
             'languages_of_preference': languages_of_preference,
             'max_price': max_price,
             'merchant': merchant,
             'min_price': min_price,
             'min_reviews_rating': min_reviews_rating,
             'min_saving_percent': min_saving_percent,
             'offer_count': offer_count,
             'title': title
             }
        )
        if self.CacheReader:
            cached_response_text = self.CacheReader(cache_url)
            if cached_response_text is not None:
                # Cache hit: entries store pickled 'data' and 'http_info' blobs.
                return {'data': pickle.loads(cached_response_text['data']), 'http_info': pickle.loads(cached_response_text['http_info'])}
        search_items_request = SearchItemsRequest(
            partner_tag=self.partner_tag,
            partner_type=PartnerType.ASSOCIATES,
            actor=actor,
            artist=artist,
            author=author,
            availability=availability,
            brand=brand,
            browse_node_id=browse_node_id,
            condition=condition,
            currency_of_preference=currency_of_preference,
            delivery_flags=delivery_flags,
            item_count=item_count,
            item_page=item_page,
            keywords=keywords,
            languages_of_preference=languages_of_preference,
            max_price=max_price,
            merchant=merchant,
            min_price=min_price,
            min_reviews_rating=min_reviews_rating,
            min_saving_percent=min_saving_percent,
            offer_count=offer_count,
            resources=search_items_resource,
            search_index=search_index,
            sort_by=sort_by,
            title=title
        )
    except ValueError as exception:
        raise AmazonException("ValueError", exception)
    except AmazonException as exception:
        # Parity with search_items: preserve the original status/reason.
        raise AmazonException(exception.status, exception.reason)
    # Phase 2: send the request and parse the response.
    try:
        # Throttle: keep at least 1/throttling seconds between consecutive calls.
        wait_time = 1 / self.throttling - (time.time() - self.last_query_time)
        if wait_time > 0:
            time.sleep(wait_time)
        self.last_query_time = time.time()
        resp_http = None
        response = default_api.search_items(search_items_request)
        if response.search_result is not None:
            resp = [AmazonProduct(item) for item in response.search_result.items]
            if self.CacheWriter:
                self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
            return {'data': resp, 'http_info': resp_http}
        if response.errors is not None:
            # Surface only the first error object returned by the API.
            raise AmazonException(response.errors[0].code, response.errors[0].message)
    except ApiException as exception:
        raise AmazonException("ApiException", exception.body)
    except TypeError as exception:
        raise AmazonException("TypeError", exception)
    except ValueError as exception:
        # Fixed: was `AmazonException(ValueError, ...)` (the class object);
        # use the string status for consistency with the other handlers.
        raise AmazonException("ValueError", exception)
    except AmazonException as exception:
        raise AmazonException(exception.status, exception.reason)
    except Exception as exception:
        raise AmazonException("General", exception)
    # The original trailing `raise Exception(exception)` was removed: at that
    # point `exception` is not in scope (it would be a NameError), and the
    # sibling search_items method simply falls through in this no-result path.
""" Choose resources you want from GetVariationsResource enum """
""" For more details, refer: https://webservices.amazon.com/paapi5/documentation/get-variations.html#resources-parameter """
def get_variations(self, asin, condition=None, currency_of_preference=None, languages_of_preference=None, merchant="All", offer_count=1, variation_count=10, variation_page=1, async_req=False, http_info=False, get_variations_resources=VARIATION_RESOURCES):
    """
    Get product variations using the asin of the original product.
    Choose resources you want from VARIATION_RESOURCES enum.
    For more details, refer: https://webservices.amazon.com/paapi5/documentation/get-variations.html#request-parameters

    args:
        *asin (string)*
            asin of the product for which we want the variations
        *condition (enum, optional)*
            filter the products based on the condition
        *currency_of_preference (string)*
            specify the currency of returned results
        *languages_of_preference (list of string)*
            specify the language of returned results
        *merchant (string)*
            Filters search results to return items having at least one offer sold by target merchant. By default the value "All" is passed.
        *offer_count (integer)*
            The number of offers desired for each item in the search results. Default: 1
        *variation_count (integer)*
            Number of variations to be returned per page. Default: 10
        *variation_page (integer)*
            Page number of variations returned by get_variations. Default: 1
        *http_info (boolean)*
            specify if http header should be returned
        *async_req (boolean)*
            specify if a thread should be created to run the request
        *get_variations_resources (list)*
            For more details, refer: https://webservices.amazon.com/paapi5/documentation/get-variations.html#request-parameters. By default all possible resources are requested.

    return
        Dict with
            *data*
                contains the AmazonProduct list
            *http_info*
                contains the http header information if requested. By default None
    """
    # Phase 1: build the request, serving the call from cache when possible.
    try:
        # Key uniquely identifying this exact query for CacheReader/CacheWriter.
        cache_url = self._cache_url(
            {'partner_tag': self.partner_tag,
             'partner_type': PartnerType.ASSOCIATES,
             'asin': asin,
             'condition': condition,
             'currency_of_preference': currency_of_preference,
             'languages_of_preference': languages_of_preference,
             'merchant': merchant,
             'offer_count': offer_count,
             'variation_count': variation_count,
             'variation_page': variation_page
             }
        )
        if self.CacheReader:
            cached_response_text = self.CacheReader(cache_url)
            if cached_response_text is not None:
                # Cache hit: entries store pickled 'data' and 'http_info' blobs.
                return {'data': pickle.loads(cached_response_text['data']), 'http_info': pickle.loads(cached_response_text['http_info'])}
        get_variations_request = GetVariationsRequest(
            partner_tag=self.partner_tag,
            partner_type=PartnerType.ASSOCIATES,
            marketplace=self.marketplace,
            asin=asin,
            condition=condition,
            currency_of_preference=currency_of_preference,
            languages_of_preference=languages_of_preference,
            merchant=merchant,
            offer_count=offer_count,
            variation_count=variation_count,
            variation_page=variation_page,
            resources=get_variations_resources
        )
    except ValueError as exception:
        raise AmazonException("ValueError", exception)
    # Phase 2: send the request and parse the response.
    try:
        # Throttle: keep at least 1/throttling seconds between consecutive calls.
        wait_time = 1 / self.throttling - (time.time() - self.last_query_time)
        if wait_time > 0:
            time.sleep(wait_time)
        self.last_query_time = time.time()
        resp_http = None
        """ Sending request """
        if http_info:
            # *_with_http_info returns a (body, status, headers) tuple; keep body and headers.
            response_with_http_info = self.default_api.get_variations_with_http_info(get_variations_request)
            """ Parse response """
            if response_with_http_info is not None:
                response = response_with_http_info[0]
                resp_http = response_with_http_info[2]
                if response.variations_result is not None:
                    resp = [AmazonProduct(item) for item in response.variations_result.items]
                    if self.CacheWriter:
                        self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
                    return {'data': resp, 'http_info': resp_http}
                if response.errors is not None:
                    # Surface only the first error object returned by the API.
                    raise AmazonException(response.errors[0].code, response.errors[0].message)
        else:
            if async_req:
                # async_req=True returns a thread-like handle; block on .get() for the result.
                thread = self.default_api.get_variations(get_variations_request, async_req=True)
                response = thread.get()
            else:
                response = self.default_api.get_variations(get_variations_request)
            """ Parse response """
            if response.variations_result is not None:
                resp = [AmazonProduct(item) for item in response.variations_result.items]
                if self.CacheWriter:
                    self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
                return {'data': resp, 'http_info': resp_http}
            if response.errors is not None:
                raise AmazonException(response.errors[0].code, response.errors[0].message)
    except ApiException as exception:
        raise AmazonException("ApiException", exception.body)
    except TypeError as exception:
        raise AmazonException("TypeError", exception)
    except ValueError as exception:
        # NOTE(review): passes the ValueError *class* (not the string
        # "ValueError") as the status, unlike the first handler — confirm.
        raise AmazonException(ValueError, exception)
    except AmazonException as exception:
        raise AmazonException(exception.status, exception.reason)
    except Exception as exception:
        raise AmazonException("General", exception)
""" Choose resources you want from GetItemsResource enum """
""" For more details, refer: https://webservices.amazon.com/paapi5/documentation/get-items.html#resources-parameter """
def get_items(self, item_ids=[], condition=None, currency_of_preference=None, item_id_type="ASIN", languages_of_preference=None, merchant="All", offer_count=1, http_info=False, async_req=False, get_items_resource=ITEM_RESOURCES):
    # NOTE(review): `item_ids=[]` is a mutable default; it is never mutated
    # here so it is benign, but `None` + normalization would be safer.
    """
    Get items' information.
    Choose resources you want from ITEM_RESOURCES enum.
    For more details, refer: https://webservices.amazon.com/paapi5/documentation/get-items.html#ItemLookup-rp

    args:
        *item_ids (list of string)*
            list of asin of the products of interest
        *condition (enum, optional)*
            filter the products based on the condition
        *currency_of_preference (string)*
            specify the currency of returned results
        *item_id_type (string)*
            Type of item identifier used to look up an item. Default: ASIN
        *languages_of_preference (list of string)*
            Languages in order of preference in which the item information should be returned in response. By default the item information is returned in the default language of the marketplace
        *merchant (string)*
            Filters search results to return items having at least one offer sold by target merchant. By default the value "All" is passed.
        *offer_count (integer)*
            The number of offers desired for each item in the search results. Default: 1
        *http_info (boolean)*
            specify if http header should be returned
        *async_req (boolean)*
            specify if a thread should be created to run the request
        *get_items_resource (list)*
            For more details, refer: https://webservices.amazon.com/paapi5/documentation/get-items.html#ItemLookup-rp. By default all possible resources are requested.

    return
        Dict with
            *data*
                Dict of ASIN to AmazonProduct object
            *http_info*
                contains the http header information if requested. By default None
    """
    if len(item_ids) == 0:
        raise Exception('No item ids specified')
    """ Forming request """
    # Phase 1: build the request, serving the call from cache when possible.
    try:
        # Key uniquely identifying this exact query for CacheReader/CacheWriter.
        cache_url = self._cache_url(
            {'partner_tag': self.partner_tag,
             'partner_type': PartnerType.ASSOCIATES,
             'item_ids': item_ids,
             'condition': condition,
             'currency_of_preference': currency_of_preference,
             'item_id_type': item_id_type,
             'languages_of_preference': languages_of_preference,
             'merchant': merchant,
             'offer_count': offer_count
             }
        )
        if self.CacheReader:
            cached_response_text = self.CacheReader(cache_url)
            if cached_response_text is not None:
                # Cache hit: the cached payload is the raw product list; it is
                # re-keyed by ASIN via parse_response_item on the way out.
                return {'data': parse_response_item(pickle.loads(cached_response_text['data'])), 'http_info': pickle.loads(cached_response_text['http_info'])}
        get_items_request = GetItemsRequest(
            partner_tag=self.partner_tag,
            partner_type=PartnerType.ASSOCIATES,
            marketplace=self.marketplace,
            item_ids=item_ids,
            condition=condition,
            currency_of_preference=currency_of_preference,
            item_id_type=item_id_type,
            languages_of_preference=languages_of_preference,
            merchant=merchant,
            offer_count=offer_count,
            resources=get_items_resource
        )
    except ValueError as exception:
        raise AmazonException("ValueError", exception)
    # Phase 2: send the request and parse the response.
    try:
        # Throttle: keep at least 1/throttling seconds between consecutive calls.
        wait_time = 1 / self.throttling - (time.time() - self.last_query_time)
        if wait_time > 0:
            time.sleep(wait_time)
        self.last_query_time = time.time()
        resp_http = None
        if http_info:
            # *_with_http_info returns a (body, status, headers) tuple; keep body and headers.
            response_with_http_info = self.default_api.get_items_with_http_info(
                get_items_request
            )
            """ Parse response """
            if response_with_http_info is not None:
                response = response_with_http_info[0]
                resp_http = response_with_http_info[2]
                if response.items_result is not None:
                    resp = [AmazonProduct(item) for item in response.items_result.items]
                    if self.CacheWriter:
                        # The raw list is cached; re-keying happens on read/return.
                        self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
                    return {'data': parse_response_item(resp), 'http_info': resp_http}
                if response.errors is not None:
                    # Surface only the first error object returned by the API.
                    raise AmazonException(response.errors[0].code, response.errors[0].message)
        else:
            """ Sending request """
            if async_req:
                # async_req=True returns a thread-like handle; block on .get() for the result.
                thread = self.default_api.get_items(get_items_request, async_req=True)
                response = thread.get()
            else:
                response = self.default_api.get_items(get_items_request)
            """ Parse response """
            if response.items_result is not None:
                resp = [AmazonProduct(item) for item in response.items_result.items]
                if self.CacheWriter:
                    self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
                return {'data': parse_response_item(resp), 'http_info': resp_http}
            if response.errors is not None:
                raise AmazonException(response.errors[0].code, response.errors[0].message)
    except ApiException as exception:
        raise AmazonException("ApiException", exception.body)
    except TypeError as exception:
        raise AmazonException("TypeError", exception)
    except ValueError as exception:
        # NOTE(review): passes the ValueError *class* (not the string
        # "ValueError") as the status, unlike the first handler — confirm.
        raise AmazonException(ValueError, exception)
    except AmazonException as exception:
        raise AmazonException(exception.status, exception.reason)
    except Exception as exception:
        raise AmazonException("General", exception)
""" Choose resources you want from GetBrowseNodesResource enum """
""" For more details, refer: https://webservices.amazon.com/paapi5/documentation/getbrowsenodes.html#resources-parameter """
def get_browse_nodes(self, browse_node_ids=[], languages_of_preference=None, http_info=False, async_req=False, get_browse_node_resources=BROWSE_RESOURCES):
    # NOTE(review): `browse_node_ids=[]` is a mutable default; it is never
    # mutated here so it is benign, but `None` + normalization would be safer.
    """
    Get browse nodes' information.
    Choose resources you want from BROWSE_RESOURCES enum.
    For more details, refer: https://webservices.amazon.com/paapi5/documentation/getbrowsenodes.html#request-parameters

    args:
        *browse_node_ids (list of string)*
            list of browse node ids
        *languages_of_preference (list of string)*
            specify the language of returned results
        *http_info (boolean)*
            specify if http header should be returned
        *async_req (boolean)*
            specify if a thread should be created to run the request
        *get_browse_node_resources (list)*
            For more details, refer: https://webservices.amazon.com/paapi5/documentation/getbrowsenodes.html#request-parameters. By default all possible resources are requested.

    return
        Dict with
            *data*
                Dict of BrowseNodeID to AmazonBrowseNode object
            *http_info*
                contains the http header information if requested. By default None
    """
    if isinstance(browse_node_ids, list) == False or len(browse_node_ids) == 0:
        raise Exception('Browse node ids are not in the right format')
    """ Forming request """
    # Phase 1: build the request, serving the call from cache when possible.
    try:
        # Key uniquely identifying this exact query for CacheReader/CacheWriter.
        cache_url = self._cache_url(
            {'partner_tag': self.partner_tag,
             'partner_type': PartnerType.ASSOCIATES,
             'browse_node_ids': browse_node_ids,
             'languages_of_preference': languages_of_preference}
        )
        if self.CacheReader:
            cached_response_text = self.CacheReader(cache_url)
            if cached_response_text is not None:
                # Cache hit: the cached payload is the raw node list; it is
                # re-keyed by node id via parse_response_browse_node on the way out.
                return {'data': parse_response_browse_node(pickle.loads(cached_response_text['data'])), 'http_info': pickle.loads(cached_response_text['http_info'])}
        get_browse_node_request = GetBrowseNodesRequest(
            partner_tag=self.partner_tag,
            partner_type=PartnerType.ASSOCIATES,
            marketplace=self.marketplace,
            languages_of_preference=languages_of_preference,
            browse_node_ids=browse_node_ids,
            resources=get_browse_node_resources,
        )
    except ValueError as exception:
        raise AmazonException("ValueError", exception)
    # Phase 2: send the request and parse the response.
    try:
        # Throttle: keep at least 1/throttling seconds between consecutive calls.
        wait_time = 1 / self.throttling - (time.time() - self.last_query_time)
        if wait_time > 0:
            time.sleep(wait_time)
        self.last_query_time = time.time()
        resp_http = None
        if http_info:
            # *_with_http_info returns a (body, status, headers) tuple; keep body and headers.
            response_with_http_info = self.default_api.get_browse_nodes_with_http_info(get_browse_node_request)
            """ Parse response """
            if response_with_http_info is not None:
                response = response_with_http_info[0]
                resp_http = response_with_http_info[2]
                if response.browse_nodes_result is not None:
                    resp = [AmazonBrowseNode(node) for node in response.browse_nodes_result.browse_nodes]
                    if self.CacheWriter:
                        # The raw list is cached; re-keying happens on read/return.
                        self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
                    return {'data': parse_response_browse_node(resp), 'http_info': resp_http}
                if response.errors is not None:
                    # Surface only the first error object returned by the API.
                    raise AmazonException(response.errors[0].code, response.errors[0].message)
        else:
            """ Sending request """
            if async_req:
                # async_req=True returns a thread-like handle; block on .get() for the result.
                thread = self.default_api.get_browse_nodes(get_browse_node_request, async_req=True)
                response = thread.get()
            else:
                response = self.default_api.get_browse_nodes(get_browse_node_request)
            """ Parse response """
            if response.browse_nodes_result is not None:
                resp = [AmazonBrowseNode(item) for item in response.browse_nodes_result.browse_nodes]
                if self.CacheWriter:
                    self.CacheWriter(cache_url, pickle.dumps(resp), pickle.dumps(resp_http))
                return {'data': parse_response_browse_node(resp), 'http_info': resp_http}
            if response.errors is not None:
                raise AmazonException(response.errors[0].code, response.errors[0].message)
    except ApiException as exception:
        raise AmazonException("ApiException", exception.body)
    except TypeError as exception:
        raise AmazonException("TypeError", exception)
    except ValueError as exception:
        # NOTE(review): passes the ValueError *class* (not the string
        # "ValueError") as the status, unlike the first handler — confirm.
        raise AmazonException(ValueError, exception)
    except AmazonException as exception:
        raise AmazonException(exception.status, exception.reason)
    except Exception as exception:
        raise AmazonException("General", exception)
| 49.790674 | 491 | 0.606997 | 5,195 | 48,048 | 5.440039 | 0.072955 | 0.025052 | 0.019108 | 0.012101 | 0.854605 | 0.833233 | 0.807969 | 0.802095 | 0.795973 | 0.788012 | 0 | 0.006121 | 0.319993 | 48,048 | 964 | 492 | 49.842324 | 0.858844 | 0.362367 | 0 | 0.753165 | 0 | 0 | 0.056197 | 0.008164 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021097 | false | 0 | 0.029536 | 0 | 0.090717 | 0.00211 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1cc2185cb948841af621d33664decd08e512a7c8 | 9,450 | py | Python | tests/model/test_linear_model.py | chezou/molehill | 02254e6bf2185174112aad7c607f60305ce9b20c | [
"Apache-2.0"
] | 3 | 2019-03-13T09:01:10.000Z | 2022-03-25T16:34:54.000Z | tests/model/test_linear_model.py | chezou/molehill | 02254e6bf2185174112aad7c607f60305ce9b20c | [
"Apache-2.0"
] | 9 | 2019-02-15T08:53:37.000Z | 2019-03-13T06:48:05.000Z | tests/model/test_linear_model.py | chezou/molehill | 02254e6bf2185174112aad7c607f60305ce9b20c | [
"Apache-2.0"
] | null | null | null | import molehill
from molehill.model import train_classifier, train_regressor
from molehill.model import predict_classifier, predict_regressor
class TestTrainClassifier:
    """Verify the exact SQL text emitted by molehill.model.train_classifier.

    Each test builds the expected query as an f-string (only the client
    version header is interpolated) and compares it verbatim against the
    generator's output.
    """

    def test_train_classifier(self):
        # Baseline: plain training query with no oversampling/bias/hashing.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
select
train_classifier(
features
, target_val
) as (feature, weight)
from
src_tbl
;
"""
        assert train_classifier("src_tbl", "target_val") == ret_sql

    def test_train_classifier_pos_oversampling(self):
        # Positive-class oversampling: negative rows pass through unchanged,
        # positive rows are replicated via amplify(), and the resulting
        # per-feature weights are averaged. ${{...}} renders as a literal
        # ${...} digdag template variable in the SQL.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with train_oversampled as (
select
features
, target_val
from
src_tbl
where target_val = 0
union all
select
features
, target_val
from
(
select
amplify(${{oversample_pos_n_times}}, features, target_val) as (features, target_val)
from
src_tbl
where target_val = 1
) t0
),
model_oversampled as (
select
train_classifier(
features
, target_val
) as (feature, weight)
from
train_oversampled
)
-- DIGDAG_INSERT_LINE
select
feature
, avg(weight) as weight
from
model_oversampled
group by
feature
;
"""
        assert train_classifier("src_tbl", "target_val", oversample_pos_n_times="${oversample_pos_n_times}") == ret_sql

    def test_train_classifier_oversampling(self):
        # Whole-dataset oversampling: every row is amplified, then shuffled
        # with CLUSTER BY rand(43) before training; weights are averaged.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with amplified as (
select
amplify(${{oversample_n_times}}, features, target_val) as (features, target_val)
from
src_tbl
),
train_oversampled as (
select
features
, target_val
from
amplified
CLUSTER BY rand(43)
),
model_oversampled as (
select
train_classifier(
features
, target_val
) as (feature, weight)
from
train_oversampled
)
-- DIGDAG_INSERT_LINE
select
feature
, avg(weight) as weight
from
model_oversampled
group by
feature
;
"""
        assert train_classifier("src_tbl", "target_val", oversample_n_times="${oversample_n_times}") == ret_sql

    def test_train_classifier_bias(self):
        # bias=True wraps the feature vector in add_bias().
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
select
train_classifier(
add_bias(features)
, target_val
) as (feature, weight)
from
src_tbl
;
"""
        assert train_classifier("src_tbl", "target_val", bias=True) == ret_sql

    def test_train_classifier_hashing(self):
        # hashing=True wraps the feature vector in feature_hashing().
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
select
train_classifier(
feature_hashing(features)
, target_val
) as (feature, weight)
from
src_tbl
;
"""
        assert train_classifier("src_tbl", "target_val", hashing=True) == ret_sql

    def test_train_classifier_bias_hashing(self):
        # Both flags: hashing is applied first, then the bias term is added.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
select
train_classifier(
add_bias(feature_hashing(features))
, target_val
) as (feature, weight)
from
src_tbl
;
"""
        assert train_classifier("src_tbl", "target_val", bias=True, hashing=True) == ret_sql
def test_train_regressor():
    """train_regressor emits a plain regressor training query verbatim."""
    expected = f"""\
-- client: molehill/{molehill.__version__}
select
train_regressor(
features
, target_val
) as (feature, weight)
from
src_tbl
;
"""
    actual = train_regressor("src_tbl", "target_val")
    assert actual == expected
class TestPredictClassifier:
    """Verify the SQL and prediction column name from predict_classifier.

    The generated query explodes each row's feature vector, joins the
    exploded features against the trained model table, and aggregates the
    weighted sum per id (wrapped in sigmoid() unless sigmoid=False).
    """

    def test_predict_classifier(self):
        # Baseline: sigmoid of the weighted feature sum, column "probability".
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(features) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sigmoid(sum(m1.weight * t1.value)) as probability
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_classifier("target_tbl", "id", "model_tbl")
        assert pred_sql == ret_sql
        assert pred_col == "probability"

    def test_predict_classifier_bias(self):
        # bias=True: add_bias() is applied before exploding the features.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(add_bias(features)) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sigmoid(sum(m1.weight * t1.value)) as probability
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_classifier("target_tbl", "id", "model_tbl", bias=True)
        assert pred_sql == ret_sql
        assert pred_col == "probability"

    def test_predict_classifier_hashing(self):
        # hashing=True: feature_hashing() is applied before exploding.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(feature_hashing(features)) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sigmoid(sum(m1.weight * t1.value)) as probability
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_classifier("target_tbl", "id", "model_tbl", hashing=True)
        assert pred_sql == ret_sql
        assert pred_col == "probability"

    def test_predict_classifier_bias_hashing(self):
        # Both flags: hashing first, then the bias term, matching training.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(add_bias(feature_hashing(features))) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sigmoid(sum(m1.weight * t1.value)) as probability
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_classifier("target_tbl", "id", "model_tbl", bias=True, hashing=True)
        assert pred_sql == ret_sql
        assert pred_col == "probability"

    def test_predict_classifier_wo_sigmoid(self):
        # sigmoid=False: raw weighted sum is returned as "total_weight".
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(features) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sum(m1.weight * t1.value) as total_weight
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_classifier("target_tbl", "id", "model_tbl", sigmoid=False)
        assert pred_sql == ret_sql
        assert pred_col == "total_weight"
class TestPredictRegressor:
    """Verify the SQL and prediction column name from predict_regressor.

    Same explode/join/aggregate structure as the classifier queries, but
    the raw weighted sum is emitted under the caller-supplied target
    column name (no sigmoid).
    """

    def test_predict_regressor(self):
        # Baseline: weighted feature sum aliased to the given column "target".
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(features) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sum(m1.weight * t1.value) as target
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_regressor("target_tbl", "id", "model_tbl", "target")
        assert pred_sql == ret_sql
        assert pred_col == "target"

    def test_predict_regressor_bias(self):
        # bias=True: add_bias() wraps the features before exploding.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(add_bias(features)) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sum(m1.weight * t1.value) as target
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_regressor("target_tbl", "id", "model_tbl", "target", bias=True)
        assert pred_sql == ret_sql
        assert pred_col == "target"

    def test_predict_regressor_hashing(self):
        # hashing=True: feature_hashing() wraps the features before exploding.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(feature_hashing(features)) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sum(m1.weight * t1.value) as target
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_regressor("target_tbl", "id", "model_tbl", "target", hashing=True)
        assert pred_sql == ret_sql
        assert pred_col == "target"

    def test_predict_regressor_bias_hashing(self):
        # Both flags: hashing first, then the bias term, matching training.
        ret_sql = f"""\
-- client: molehill/{molehill.__version__}
with features_exploded as (
select
id
, extract_feature(fv) as feature
, extract_weight(fv) as value
from
target_tbl t1
LATERAL VIEW explode(add_bias(feature_hashing(features))) t2 as fv
)
-- DIGDAG_INSERT_LINE
select
t1.id
, sum(m1.weight * t1.value) as target
from
features_exploded t1
left outer join model_tbl m1
on (t1.feature = m1.feature)
group by
t1.id
;
"""
        pred_sql, pred_col = predict_regressor("target_tbl", "id", "model_tbl", "target", bias=True, hashing=True)
        assert pred_sql == ret_sql
        assert pred_col == "target"
| 22.393365 | 119 | 0.684127 | 1,274 | 9,450 | 4.77865 | 0.063579 | 0.031537 | 0.018397 | 0.034166 | 0.925591 | 0.921156 | 0.913108 | 0.900624 | 0.874507 | 0.854468 | 0 | 0.012824 | 0.216085 | 9,450 | 421 | 120 | 22.446556 | 0.80899 | 0 | 0 | 0.78934 | 0 | 0 | 0.673545 | 0.102011 | 0 | 0 | 0 | 0 | 0.063452 | 1 | 0.040609 | false | 0 | 0.007614 | 0 | 0.055838 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
1cd51612f627b7a8e1b29a302f5889341b462891 | 7,113 | py | Python | tests/LeNet5/memoryTest.py | GIS-PuppetMaster/tinyflow | 9351a95957b68c37f480c169ff9dd012b68b4b1e | [
"MIT"
] | null | null | null | tests/LeNet5/memoryTest.py | GIS-PuppetMaster/tinyflow | 9351a95957b68c37f480c169ff9dd012b68b4b1e | [
"MIT"
] | null | null | null | tests/LeNet5/memoryTest.py | GIS-PuppetMaster/tinyflow | 9351a95957b68c37f480c169ff9dd012b68b4b1e | [
"MIT"
] | 1 | 2020-09-27T07:27:47.000Z | 2020-09-27T07:27:47.000Z | import numpy as np
from pycode.tinyflow import autodiff as ad
from pycode.tinyflow import gpu_op
from pycode.tinyflow import ndarray
from pycode.tinyflow import train
def test_dense():
    """Train a bias-augmented conv2d op for many iterations to stress GPU memory.

    Removed unused locals from the original (`y_` placeholder, `y_val`
    buffer) — neither participated in the graph or the feed dict.
    """
    inputs = ad.Placeholder("inputs")
    filters = ad.Variable("filters")
    b = ad.Variable("b")
    # Build host-side values and move the large ones to GPU 0.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))
    filters_val = np.ones((32, 1, 5, 5)) * 0.001
    b_val = np.ones((32,))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    # Convolution with bias: NCHW layout, VALID padding, 1x1 stride.
    outputs = ad.conv2withbias(inputs, filters, b, "NCHW", "VALID", 1, 1)
    learning_rate = 0.001
    t = train.Adam_minimize(outputs, learning_rate)
    t.init_Variable({filters: filters_val, b: b_val})
    for i in range(100000):
        if i % 100 == 0:
            print(i)  # progress heartbeat every 100 iterations
        loss_val = t.run(feed_dict={inputs: x_val, b: b_val})
    print(loss_val[0].asnumpy())
def test_pool():
    """Train conv2d + max pooling for many iterations to stress GPU memory.

    Removed unused locals from the original (`y_` placeholder, `y_val`
    buffer) — neither participated in the graph or the feed dict.
    """
    inputs = ad.Placeholder("inputs")
    filters = ad.Variable("filters")
    b = ad.Variable("b")
    # Build host-side values and move the large ones to GPU 0.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))
    filters_val = np.ones((32, 1, 5, 5)) * 0.001
    b_val = np.ones((32,))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    outputs = ad.conv2withbias(inputs, filters, b, "NCHW", "VALID", 1, 1)
    # Max pooling on top of the convolution output.
    outputs_pool = ad.pooling_2d_forward_op(outputs, "NCHW", "max", 0, 0, 1, 1, 2, 2)
    learning_rate = 0.001
    t = train.Adam_minimize(outputs_pool, learning_rate)
    t.init_Variable({filters: filters_val, b: b_val})
    for i in range(100000):
        if i % 100 == 0:
            print(i)  # progress heartbeat every 100 iterations
        loss_val = t.run(feed_dict={inputs: x_val, b: b_val})
    print(loss_val[0].asnumpy())
def test_bn():
    """Train conv2d + pooling + batch norm for many iterations (GPU memory test).

    Removed unused locals from the original (`y_` placeholder, `y_val`
    buffer) — neither participated in the graph or the feed dict.
    """
    inputs = ad.Placeholder("inputs")
    filters = ad.Variable("filters")
    b = ad.Variable("b")
    # Build host-side values and move the large ones to GPU 0.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))
    filters_val = np.ones((32, 1, 5, 5)) * 0.001
    b_val = np.ones((32,))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    outputs = ad.conv2withbias(inputs, filters, b, "NCHW", "VALID", 1, 1)
    outputs_pool = ad.pooling_2d_forward_op(outputs, "NCHW", "max", 0, 0, 1, 1, 2, 2)
    # Batch normalisation in "pre_activation" mode on the pooled output.
    outputs_bn = ad.bn_forward_op(outputs_pool, "NCHW", "pre_activation")
    learning_rate = 0.001
    t = train.Adam_minimize(outputs_bn, learning_rate)
    t.init_Variable({filters: filters_val, b: b_val})
    for i in range(100000):
        if i % 100 == 0:
            print(i)  # progress heartbeat every 100 iterations
        loss_val = t.run(feed_dict={inputs: x_val, b: b_val})
    print(loss_val[0].asnumpy())
def test_flat():
    """Train conv2d + pooling + flatten for many iterations (GPU memory test).

    Removed unused locals from the original (`y_` placeholder, `y_val`
    buffer) — neither participated in the graph or the feed dict.
    """
    inputs = ad.Placeholder("inputs")
    filters = ad.Variable("filters")
    b = ad.Variable("b")
    # Build host-side values and move the large ones to GPU 0.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))
    filters_val = np.ones((32, 1, 5, 5)) * 0.001
    b_val = np.ones((32,))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    outputs = ad.conv2withbias(inputs, filters, b, "NCHW", "VALID", 1, 1)
    outputs_pool = ad.pooling_2d_forward_op(outputs, "NCHW", "max", 0, 0, 1, 1, 2, 2)
    # Flatten the pooled feature maps into a 2-D matrix.
    outputs_flat = ad.flatten_op(outputs_pool)
    learning_rate = 0.001
    t = train.Adam_minimize(outputs_flat, learning_rate)
    t.init_Variable({filters: filters_val, b: b_val})
    for i in range(100000):
        if i % 100 == 0:
            print(i)  # progress heartbeat every 100 iterations
        loss_val = t.run(feed_dict={inputs: x_val, b: b_val})
    print(loss_val[0].asnumpy())
def test_bnfully():
    """Train conv2d + pooling + flatten + fully-connected BN (GPU memory test).

    Removed unused locals from the original (`y_` placeholder, `y_val`
    buffer) — neither participated in the graph or the feed dict.
    """
    inputs = ad.Placeholder("inputs")
    filters = ad.Variable("filters")
    b = ad.Variable("b")
    # Build host-side values and move the large ones to GPU 0.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 1000, 320000).reshape((3200, 1, 10, 10))
    filters_val = np.ones((32, 1, 5, 5)) * 0.001
    b_val = np.ones((32,))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    outputs = ad.conv2withbias(inputs, filters, b, "NCHW", "VALID", 1, 1)
    outputs_pool = ad.pooling_2d_forward_op(outputs, "NCHW", "max", 0, 0, 1, 1, 2, 2)
    outputs_flat = ad.flatten_op(outputs_pool)
    # Batch norm over the flattened (fully-connected) representation.
    outputs_bn = ad.fullybn_forward_op(outputs_flat, "NCHW")
    learning_rate = 0.001
    t = train.Adam_minimize(outputs_bn, learning_rate)
    t.init_Variable({filters: filters_val, b: b_val})
    for i in range(100000):
        if i % 100 == 0:
            print(i)  # progress heartbeat every 100 iterations
        loss_val = t.run(feed_dict={inputs: x_val, b: b_val})
    print(loss_val[0].asnumpy())
def test_matmul():
    """Train a large (3000x3000) matmul for many iterations (GPU memory test).

    Removed unused locals from the original (`y_` placeholder, `y_val`
    buffer) — neither participated in the graph or the feed dict.
    """
    inputs = ad.Placeholder("inputs")
    w = ad.Variable("w")
    b = ad.Variable("b")
    # Two large 3000x3000 matrices (~9M doubles each) moved to GPU 0.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 1000, 9000000).reshape((3000, 3000))
    w_val = np.linspace(0, 1000, 9000000).reshape((3000, 3000))
    b_val = np.ones((32,))
    x_val = ndarray.array(x_val, ctx)
    w_val = ndarray.array(w_val, ctx)
    xw = ad.matmul_op(inputs, w)
    learning_rate = 0.001
    t = train.Adam_minimize(xw, learning_rate)
    # NOTE(review): `b` is not part of the graph but was initialised and fed
    # in the original; kept as-is to preserve behaviour of the memory test.
    t.init_Variable({w: w_val, b: b_val})
    for i in range(100000):
        if i % 100 == 0:
            print(i)  # progress heartbeat every 100 iterations
        loss_val = t.run(feed_dict={inputs: x_val, b: b_val})
    print(loss_val[0].asnumpy())


# Runs at import time, matching the original script's behaviour.
test_matmul()
1ce1c755e66be7ed278e309993456ad5470ad157 | 49,694 | py | Python | 2017/python/src/gridday14.py | johncoleman83/aoc-solutions | 0c8c09a9fc94af0722e028a24e4795bba8f952ce | [
"MIT"
] | 1 | 2018-12-06T06:15:59.000Z | 2018-12-06T06:15:59.000Z | 2017/python/src/gridday14.py | johncoleman83/aoc-solutions | 0c8c09a9fc94af0722e028a24e4795bba8f952ce | [
"MIT"
] | null | null | null | 2017/python/src/gridday14.py | johncoleman83/aoc-solutions | 0c8c09a9fc94af0722e028a24e4795bba8f952ce | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Advent of Code 2017: Day #
"""
GRID = [
[0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1],
[1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1],
[1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1],
[0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1],
[1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1],
[1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1],
[1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1],
[0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0],
[1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
[1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1],
[1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1],
[1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1],
[1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1],
[1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0],
[1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1],
[1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1],
[1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0],
[1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0],
[1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1],
[1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1],
[0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0],
[1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
[1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1],
[1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1],
[1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1],
[0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]
]
| 368.103704 | 389 | 0.330483 | 16,394 | 49,694 | 1.001769 | 0.000732 | 0.499665 | 0.380686 | 0.262559 | 0.997625 | 0.997625 | 0.997625 | 0.997625 | 0.997625 | 0.997625 | 0 | 0.495465 | 0.334366 | 49,694 | 134 | 390 | 370.850746 | 0.001028 | 0.000966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 |
1cfa1dcaf6927b72539e49bf9c78b32db229b454 | 2,059 | py | Python | algoexpert.io/python/Search_For_Range.py | XSoyOscar/Algorithms | 6e1626d4b0f7804494f0a651698966ad6fd0fe18 | [
"MIT"
] | 80 | 2020-07-02T20:47:21.000Z | 2022-03-22T06:52:59.000Z | algoexpert.io/python/Search_For_Range.py | XSoyOscar/Algorithms | 6e1626d4b0f7804494f0a651698966ad6fd0fe18 | [
"MIT"
] | 1 | 2020-10-05T19:22:10.000Z | 2020-10-05T19:22:10.000Z | algoexpert.io/python/Search_For_Range.py | XSoyOscar/Algorithms | 6e1626d4b0f7804494f0a651698966ad6fd0fe18 | [
"MIT"
] | 73 | 2020-04-09T22:28:01.000Z | 2022-02-26T19:22:25.000Z |
# Solution #1 - Recursive Solution
# O(logn) time | O(logn) space
def searchForRange(array, target):
    """Return the inclusive [firstIdx, lastIdx] range of target in the sorted
    array, or [-1, -1] when target does not occur."""
    bounds = [-1, -1]
    lastIdx = len(array) - 1
    # One search pass for the left edge of the range, one for the right edge.
    for goLeft in (True, False):
        alteredBinarySearch(array, target, 0, lastIdx, bounds, goLeft)
    return bounds
def alteredBinarySearch(array, target, left, right, finalRange, goLeft):
    """Recursively locate one extreme occurrence of target in the sorted array.

    When goLeft is True the leftmost matching index is written to
    finalRange[0]; otherwise the rightmost matching index is written to
    finalRange[1].  finalRange is left untouched when target is absent.
    """
    if left > right:
        return  # empty window: target is not in this range
    mid = (left + right) // 2
    value = array[mid]
    if value < target:
        # Target can only lie strictly to the right of mid.
        alteredBinarySearch(array, target, mid + 1, right, finalRange, goLeft)
        return
    if value > target:
        # Target can only lie strictly to the left of mid.
        alteredBinarySearch(array, target, left, mid - 1, finalRange, goLeft)
        return
    # Hit: record mid if it is the extreme occurrence, otherwise keep going.
    if goLeft:
        if mid > 0 and array[mid - 1] == target:
            alteredBinarySearch(array, target, left, mid - 1, finalRange, goLeft)
        else:
            finalRange[0] = mid
    else:
        if mid < len(array) - 1 and array[mid + 1] == target:
            alteredBinarySearch(array, target, mid + 1, right, finalRange, goLeft)
        else:
            finalRange[1] = mid
# Solution #2 - Iterative Solution
# O(logn) time | O(1) space
def searchForRange(array, target):
    """Return the inclusive [start, end] index range of target within the
    sorted array; both entries stay -1 when the value is absent."""
    finalRange = [-1, -1]
    hi = len(array) - 1
    # First pass pins the leftmost occurrence, second pass the rightmost.
    alteredBinarySearch(array, target, 0, hi, finalRange, goLeft=True)
    alteredBinarySearch(array, target, 0, hi, finalRange, goLeft=False)
    return finalRange
def alteredBinarySearch(array, target, left, right, finalRange, goLeft):
    """Iteratively locate one extreme occurrence of target in the sorted array.

    Writes the leftmost matching index into finalRange[0] when goLeft is True,
    or the rightmost matching index into finalRange[1] otherwise.  finalRange
    is left untouched when target is absent.

    :param array: sorted list of comparable values
    :param target: value to search for
    :param left: inclusive lower bound of the search window
    :param right: inclusive upper bound of the search window
    :param finalRange: two-element list mutated in place with the result
    :param goLeft: True to find the first occurrence, False for the last
    """
    while left <= right:
        mid = (left + right) // 2
        if array[mid] < target:
            left = mid + 1
        elif array[mid] > target:
            right = mid - 1
        else:
            if goLeft:
                if mid == 0 or array[mid - 1] != target:
                    finalRange[0] = mid
                    # BUG FIX: must stop here -- the original fell through
                    # without updating left/right, looping forever.
                    return
                else:
                    right = mid - 1
            else:
                if mid == len(array) - 1 or array[mid + 1] != target:
                    finalRange[1] = mid
                    # BUG FIX: terminate once the rightmost occurrence is found.
                    return
                else:
                    left = mid + 1
| 29.84058 | 86 | 0.544925 | 225 | 2,059 | 4.986667 | 0.128889 | 0.117647 | 0.26738 | 0.110517 | 0.901961 | 0.853832 | 0.841355 | 0.841355 | 0.691622 | 0.691622 | 0 | 0.027428 | 0.344828 | 2,059 | 68 | 87 | 30.279412 | 0.804299 | 0.063137 | 0 | 0.93617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0 | 0 | 0.148936 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1cfa3172e1f0c8d7af2382d40980cadd485f3543 | 219 | py | Python | dzdp-server/fetcher/source/managers/__init__.py | Onekki/dzdp | a4625b8ef998ed09845442837b8a7b6369011f5d | [
"MIT"
] | null | null | null | dzdp-server/fetcher/source/managers/__init__.py | Onekki/dzdp | a4625b8ef998ed09845442837b8a7b6369011f5d | [
"MIT"
] | null | null | null | dzdp-server/fetcher/source/managers/__init__.py | Onekki/dzdp | a4625b8ef998ed09845442837b8a7b6369011f5d | [
"MIT"
] | null | null | null | from fetcher.source.managers.db import DbManager
from fetcher.source.managers.logger import LoggerManager
from fetcher.source.managers.fonts import FontManager
from fetcher.source.managers.request import RequestManager
| 43.8 | 58 | 0.872146 | 28 | 219 | 6.821429 | 0.464286 | 0.230366 | 0.356021 | 0.52356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073059 | 219 | 4 | 59 | 54.75 | 0.940887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
e817cdeaab98336b719c7fd69b1faec77db4cb4d | 123,797 | py | Python | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/dds_client.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/dds_client.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/dds_client.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class DdsClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
    def __init__(self):
        """Initialize the DDS client on top of the core SDK Client."""
        super(DdsClient, self).__init__()
        # Model classes used to deserialize API responses are resolved from
        # the versioned model package at runtime.
        self.model_package = importlib.import_module("huaweicloudsdkdds.v3.model")
        # Default headers attached to every outgoing request.
        self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "DdsClient":
raise TypeError("client type error, support client type is DdsClient")
return ClientBuilder(clazz)
    def add_sharding_node(self, request):
        """Scale out the number of nodes of a cluster instance.

        Adds shard nodes to the specified cluster instance.

        :param AddShardingNodeRequest request
        :return: AddShardingNodeResponse
        """
        return self.add_sharding_node_with_http_info(request)
    def add_sharding_node_with_http_info(self, request):
        """Scale out the number of nodes of a cluster instance.

        Adds shard nodes to the specified cluster instance.

        :param AddShardingNodeRequest request
        :return: AddShardingNodeResponse
        """
        # NOTE(review): all_params is unused; kept as emitted by the SDK code generator.
        all_params = ['instance_id', 'enlarge_instance_request_body']
        # Copy only the attributes that were actually set on the request object.
        local_var_params = {}
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # instance_id is substituted into the URL path template below.
        path_params = {}
        if 'instance_id' in local_var_params:
            path_params['instance_id'] = local_var_params['instance_id']
        query_params = []
        header_params = {}
        form_params = {}
        # Request body: JSON payload, or a file stream for streaming uploads.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        # Delegate the actual HTTP call to the core Client implementation.
        return self.call_api(
            resource_path='/v3/{project_id}/instances/{instance_id}/enlarge',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='AddShardingNodeResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def attach_eip(self, request):
        """Bind an elastic public IP (EIP).

        Binds an EIP to a node of the instance.

        :param AttachEipRequest request
        :return: AttachEipResponse
        """
        return self.attach_eip_with_http_info(request)
    def attach_eip_with_http_info(self, request):
        """Bind an elastic public IP (EIP).

        Binds an EIP to a node of the instance.

        :param AttachEipRequest request
        :return: AttachEipResponse
        """
        # NOTE(review): all_params is unused; kept as emitted by the SDK code generator.
        all_params = ['node_id', 'attach_eip_request_body']
        # Copy only the attributes that were actually set on the request object.
        local_var_params = {}
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # node_id is substituted into the URL path template below.
        path_params = {}
        if 'node_id' in local_var_params:
            path_params['node_id'] = local_var_params['node_id']
        query_params = []
        header_params = {}
        form_params = {}
        # Request body: JSON payload, or a file stream for streaming uploads.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        # Delegate the actual HTTP call to the core Client implementation.
        return self.call_api(
            resource_path='/v3/{project_id}/nodes/{node_id}/bind-eip',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='AttachEipResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def attach_internal_ip(self, request):
        """Modify the private IP address of an instance.

        Changes the internal (VPC) IP address of the instance.

        :param AttachInternalIpRequest request
        :return: AttachInternalIpResponse
        """
        return self.attach_internal_ip_with_http_info(request)
    def attach_internal_ip_with_http_info(self, request):
        """Modify the private IP address of an instance.

        Changes the internal (VPC) IP address of the instance.

        :param AttachInternalIpRequest request
        :return: AttachInternalIpResponse
        """
        # NOTE(review): all_params is unused; kept as emitted by the SDK code generator.
        all_params = ['instance_id', 'attach_internal_ip_request_body']
        # Copy only the attributes that were actually set on the request object.
        local_var_params = {}
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # instance_id is substituted into the URL path template below.
        path_params = {}
        if 'instance_id' in local_var_params:
            path_params['instance_id'] = local_var_params['instance_id']
        query_params = []
        header_params = {}
        form_params = {}
        # Request body: JSON payload, or a file stream for streaming uploads.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        # Delegate the actual HTTP call to the core Client implementation.
        return self.call_api(
            resource_path='/v3/{project_id}/instances/{instance_id}/modify-internal-ip',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='AttachInternalIpResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def batch_tag_action(self, request):
        """Add or delete resource tags in batches.

        Adds or deletes tags of the specified instance in batches.

        :param BatchTagActionRequest request
        :return: BatchTagActionResponse
        """
        return self.batch_tag_action_with_http_info(request)
    def batch_tag_action_with_http_info(self, request):
        """Add or delete resource tags in batches.

        Adds or deletes tags of the specified instance in batches.

        :param BatchTagActionRequest request
        :return: BatchTagActionResponse
        """
        # NOTE(review): all_params is unused; kept as emitted by the SDK code generator.
        all_params = ['instance_id', 'batch_operate_instance_tag_request_body']
        # Copy only the attributes that were actually set on the request object.
        local_var_params = {}
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # instance_id is substituted into the URL path template below.
        path_params = {}
        if 'instance_id' in local_var_params:
            path_params['instance_id'] = local_var_params['instance_id']
        query_params = []
        header_params = {}
        form_params = {}
        # Request body: JSON payload, or a file stream for streaming uploads.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        # Delegate the actual HTTP call to the core Client implementation.
        return self.call_api(
            resource_path='/v3/{project_id}/instances/{instance_id}/tags/action',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='BatchTagActionResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def cancel_eip(self, request):
        """Unbind an elastic public IP (EIP).

        Unbinds the EIP that is bound to a node of the instance.

        :param CancelEipRequest request
        :return: CancelEipResponse
        """
        return self.cancel_eip_with_http_info(request)
    def cancel_eip_with_http_info(self, request):
        """Unbind an elastic public IP (EIP).

        Unbinds the EIP that is bound to a node of the instance.

        :param CancelEipRequest request
        :return: CancelEipResponse
        """
        # NOTE(review): all_params is unused; kept as emitted by the SDK code generator.
        all_params = ['node_id']
        # Copy only the attributes that were actually set on the request object.
        local_var_params = {}
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)
        collection_formats = {}
        # node_id is substituted into the URL path template below.
        path_params = {}
        if 'node_id' in local_var_params:
            path_params['node_id'] = local_var_params['node_id']
        query_params = []
        header_params = {}
        form_params = {}
        # No JSON body for this operation; only a stream body is possible.
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        # Delegate the actual HTTP call to the core Client implementation.
        return self.call_api(
            resource_path='/v3/{project_id}/nodes/{node_id}/unbind-eip',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CancelEipResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def check_password(self, request):
        """Check the database password.

        Verifies the database password of the instance.

        :param CheckPasswordRequest request
        :return: CheckPasswordResponse
        """
        return self.check_password_with_http_info(request)
def check_password_with_http_info(self, request):
    """Check a database password.

    Verifies the database password.

    :param CheckPasswordRequest request
    :return: CheckPasswordResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/check-password',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CheckPasswordResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_database_role(self, request):
    """Create a database role.

    Creates a database role.

    :param CreateDatabaseRoleRequest request
    :return: CreateDatabaseRoleResponse
    """
    return self.create_database_role_with_http_info(request)
def create_database_role_with_http_info(self, request):
    """Create a database role.

    Creates a database role.

    :param CreateDatabaseRoleRequest request
    :return: CreateDatabaseRoleResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/db-role',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateDatabaseRoleResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_database_user(self, request):
    """Create a database user.

    Creates a database user.

    :param CreateDatabaseUserRequest request
    :return: CreateDatabaseUserResponse
    """
    return self.create_database_user_with_http_info(request)
def create_database_user_with_http_info(self, request):
    """Create a database user.

    Creates a database user.

    :param CreateDatabaseUserRequest request
    :return: CreateDatabaseUserResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/db-user',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateDatabaseUserResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_instance(self, request):
    """Create an instance.

    Creates a DDS instance: cluster, replica set, or single node.

    :param CreateInstanceRequest request
    :return: CreateInstanceResponse
    """
    return self.create_instance_with_http_info(request)
def create_instance_with_http_info(self, request):
    """Create an instance.

    Creates a DDS instance: cluster, replica set, or single node.

    :param CreateInstanceRequest request
    :return: CreateInstanceResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateInstanceResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_ip(self, request):
    """Enable the Shard/Config IP switch of a cluster.

    Turns on the Shard/Config IP switch of a cluster instance.

    :param CreateIpRequest request
    :return: CreateIpResponse
    """
    return self.create_ip_with_http_info(request)
def create_ip_with_http_info(self, request):
    """Enable the Shard/Config IP switch of a cluster.

    Turns on the Shard/Config IP switch of a cluster instance.

    :param CreateIpRequest request
    :return: CreateIpResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/create-ip',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateIpResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_manual_backup(self, request):
    """Create a manual backup.

    Creates a manual backup of a database instance.

    :param CreateManualBackupRequest request
    :return: CreateManualBackupResponse
    """
    return self.create_manual_backup_with_http_info(request)
def create_manual_backup_with_http_info(self, request):
    """Create a manual backup.

    Creates a manual backup of a database instance.

    :param CreateManualBackupRequest request
    :return: CreateManualBackupResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/backups',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateManualBackupResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_database_role(self, request):
    """Delete a database role.

    Deletes a database role.

    :param DeleteDatabaseRoleRequest request
    :return: DeleteDatabaseRoleResponse
    """
    return self.delete_database_role_with_http_info(request)
def delete_database_role_with_http_info(self, request):
    """Delete a database role.

    Deletes a database role.

    :param DeleteDatabaseRoleRequest request
    :return: DeleteDatabaseRoleResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/db-role',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteDatabaseRoleResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_database_user(self, request):
    """Delete a database user.

    Deletes a database user.

    :param DeleteDatabaseUserRequest request
    :return: DeleteDatabaseUserResponse
    """
    return self.delete_database_user_with_http_info(request)
def delete_database_user_with_http_info(self, request):
    """Delete a database user.

    Deletes a database user.

    :param DeleteDatabaseUserRequest request
    :return: DeleteDatabaseUserResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/db-user',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteDatabaseUserResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_instance(self, request):
    """Delete an instance.

    Deletes a database instance.

    :param DeleteInstanceRequest request
    :return: DeleteInstanceResponse
    """
    return self.delete_instance_with_http_info(request)
def delete_instance_with_http_info(self, request):
    """Delete an instance.

    Deletes a database instance.

    :param DeleteInstanceRequest request
    :return: DeleteInstanceResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteInstanceResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_manual_backup(self, request):
    """Delete a manual backup.

    Deletes a manual backup of a database instance.

    :param DeleteManualBackupRequest request
    :return: DeleteManualBackupResponse
    """
    return self.delete_manual_backup_with_http_info(request)
def delete_manual_backup_with_http_info(self, request):
    """Delete a manual backup.

    Deletes a manual backup of a database instance.

    :param DeleteManualBackupRequest request
    :return: DeleteManualBackupResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'backup_id' in local_var_params:
        path_params['backup_id'] = local_var_params['backup_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/backups/{backup_id}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteManualBackupResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_session(self, request):
    """Terminate sessions on an instance node.

    Terminates sessions on an instance node.

    :param DeleteSessionRequest request
    :return: DeleteSessionResponse
    """
    return self.delete_session_with_http_info(request)
def delete_session_with_http_info(self, request):
    """Terminate sessions on an instance node.

    Terminates sessions on an instance node.

    :param DeleteSessionRequest request
    :return: DeleteSessionResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'node_id' in local_var_params:
        path_params['node_id'] = local_var_params['node_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/nodes/{node_id}/session',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteSessionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def download_errorlog(self, request):
    """Get error-log download links.

    Obtains download links for the error logs.

    :param DownloadErrorlogRequest request
    :return: DownloadErrorlogResponse
    """
    return self.download_errorlog_with_http_info(request)
def download_errorlog_with_http_info(self, request):
    """Get error-log download links.

    Obtains download links for the error logs.

    :param DownloadErrorlogRequest request
    :return: DownloadErrorlogResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/errorlog-download',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DownloadErrorlogResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def download_slowlog(self, request):
    """Get slow-log download links.

    Obtains download links for the slow query logs.

    :param DownloadSlowlogRequest request
    :return: DownloadSlowlogResponse
    """
    return self.download_slowlog_with_http_info(request)
def download_slowlog_with_http_info(self, request):
    """Get slow-log download links.

    Obtains download links for the slow query logs.

    :param DownloadSlowlogRequest request
    :return: DownloadSlowlogResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/slowlog-download',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DownloadSlowlogResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_auditlog_links(self, request):
    """Get audit-log download links.

    Obtains download links for the audit logs.

    :param ListAuditlogLinksRequest request
    :return: ListAuditlogLinksResponse
    """
    return self.list_auditlog_links_with_http_info(request)
def list_auditlog_links_with_http_info(self, request):
    """Get audit-log download links.

    Obtains download links for the audit logs.

    :param ListAuditlogLinksRequest request
    :return: ListAuditlogLinksResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        # A streaming request overrides any explicit body.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/auditlog-links',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListAuditlogLinksResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_auditlogs(self, request):
    """List audit logs.

    Obtains the audit log list.

    :param ListAuditlogsRequest request
    :return: ListAuditlogsResponse
    """
    return self.list_auditlogs_with_http_info(request)
def list_auditlogs_with_http_info(self, request):
    """List audit logs.

    Obtains the audit log list.

    :param ListAuditlogsRequest request
    :return: ListAuditlogsResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Preserve the documented query-parameter order.
    query_params = []
    for param in ('node_id', 'start_time', 'end_time', 'offset', 'limit'):
        if param in local_var_params:
            query_params.append((param, local_var_params[param]))

    header_params = {}
    if 'x_language' in local_var_params:
        header_params['X-Language'] = local_var_params['x_language']

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/auditlog',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListAuditlogsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_az2_migrate(self, request):
    """List availability zones the instance can migrate to.

    Queries the AZs the instance can be migrated to.

    :param ListAz2MigrateRequest request
    :return: ListAz2MigrateResponse
    """
    return self.list_az2_migrate_with_http_info(request)
def list_az2_migrate_with_http_info(self, request):
    """List availability zones the instance can migrate to.

    Queries the AZs the instance can be migrated to.

    :param ListAz2MigrateRequest request
    :return: ListAz2MigrateResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/migrate/az',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListAz2MigrateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_backups(self, request):
    """List backups.

    Queries the backup list by the given filter conditions.

    :param ListBackupsRequest request
    :return: ListBackupsResponse
    """
    return self.list_backups_with_http_info(request)
def list_backups_with_http_info(self, request):
    """List backups.

    Queries the backup list by the given filter conditions.

    :param ListBackupsRequest request
    :return: ListBackupsResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}

    # Preserve the documented query-parameter order.
    query_params = []
    for param in ('instance_id', 'backup_id', 'backup_type', 'offset',
                  'limit', 'begin_time', 'end_time', 'mode'):
        if param in local_var_params:
            query_params.append((param, local_var_params[param]))

    header_params = {}

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/backups',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListBackupsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_database_roles(self, request):
    """List database roles.

    Queries the database role list.

    :param ListDatabaseRolesRequest request
    :return: ListDatabaseRolesResponse
    """
    return self.list_database_roles_with_http_info(request)
def list_database_roles_with_http_info(self, request):
    """List database roles.

    Queries the database role list.

    :param ListDatabaseRolesRequest request
    :return: ListDatabaseRolesResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Preserve the documented query-parameter order.
    query_params = []
    for param in ('role_name', 'db_name', 'offset', 'limit'):
        if param in local_var_params:
            query_params.append((param, local_var_params[param]))

    header_params = {}

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/db-roles',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListDatabaseRolesResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_database_users(self, request):
    """List database users.

    Queries the database user list.

    :param ListDatabaseUsersRequest request
    :return: ListDatabaseUsersResponse
    """
    return self.list_database_users_with_http_info(request)
def list_database_users_with_http_info(self, request):
    """List database users.

    Queries the database user list.

    :param ListDatabaseUsersRequest request
    :return: ListDatabaseUsersResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Preserve the documented query-parameter order.
    query_params = []
    for param in ('user_name', 'db_name', 'offset', 'limit'):
        if param in local_var_params:
            query_params.append((param, local_var_params[param]))

    header_params = {}

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/db-user/detail',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListDatabaseUsersResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_datastore_versions(self, request):
    """List database versions.

    Queries the database versions of the specified instance type.

    :param ListDatastoreVersionsRequest request
    :return: ListDatastoreVersionsResponse
    """
    return self.list_datastore_versions_with_http_info(request)
def list_datastore_versions_with_http_info(self, request):
    """List database versions.

    Queries the database versions of the specified instance type.

    :param ListDatastoreVersionsRequest request
    :return: ListDatastoreVersionsResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'datastore_name' in local_var_params:
        path_params['datastore_name'] = local_var_params['datastore_name']

    query_params = []

    header_params = {}

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/datastores/{datastore_name}/versions',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListDatastoreVersionsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_error_logs(self, request):
    """Query database error logs.

    Queries database error information.

    :param ListErrorLogsRequest request
    :return: ListErrorLogsResponse
    """
    return self.list_error_logs_with_http_info(request)
def list_error_logs_with_http_info(self, request):
    """Query database error logs.

    Queries database error information.

    :param ListErrorLogsRequest request
    :return: ListErrorLogsResponse
    """
    # Forward only the attributes actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Preserve the documented query-parameter order.
    query_params = []
    for param in ('start_date', 'end_date', 'node_id', 'type',
                  'offset', 'limit'):
        if param in local_var_params:
            query_params.append((param, local_var_params[param]))

    header_params = {}

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        # Streaming requests supply the body as a file stream.
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/errorlog',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListErrorLogsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_flavors(self, request):
    """Query all instance specifications.

    Queries all instance specifications (flavors) matching the given conditions.

    :param ListFlavorsRequest request
    :return: ListFlavorsResponse
    """
    return self.list_flavors_with_http_info(request)
def list_flavors_with_http_info(self, request):
    """Query all instance specifications.

    Queries all instance specifications (flavors) matching the given conditions.

    :param ListFlavorsRequest request
    :return: ListFlavorsResponse
    """
    all_params = ['region', 'engine_name']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}

    query_params = []
    for name in ('region', 'engine_name'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/flavors',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListFlavorsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_instance_tags(self, request):
    """Query resource tags.

    Queries tag information of a specified instance.

    :param ListInstanceTagsRequest request
    :return: ListInstanceTagsResponse
    """
    return self.list_instance_tags_with_http_info(request)
def list_instance_tags_with_http_info(self, request):
    """Query resource tags.

    Queries tag information of a specified instance.

    :param ListInstanceTagsRequest request
    :return: ListInstanceTagsResponse
    """
    all_params = ['instance_id']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}
    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/tags',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListInstanceTagsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_instances(self, request):
    """Query the instance list.

    Queries instances matching the given conditions.

    :param ListInstancesRequest request
    :return: ListInstancesResponse
    """
    return self.list_instances_with_http_info(request)
def list_instances_with_http_info(self, request):
    """Query the instance list.

    Queries instances matching the given conditions.

    :param ListInstancesRequest request
    :return: ListInstancesResponse
    """
    all_params = ['id', 'name', 'mode', 'datastore_type', 'vpc_id', 'subnet_id', 'offset', 'limit']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}

    query_params = []
    for name in ('id', 'name', 'mode', 'datastore_type', 'vpc_id', 'subnet_id', 'offset', 'limit'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListInstancesResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_instances_by_tags(self, request):
    """Query resource instances.

    Queries DB instances by tag.

    :param ListInstancesByTagsRequest request
    :return: ListInstancesByTagsResponse
    """
    return self.list_instances_by_tags_with_http_info(request)
def list_instances_by_tags_with_http_info(self, request):
    """Query resource instances.

    Queries DB instances by tag.

    :param ListInstancesByTagsRequest request
    :return: ListInstancesByTagsResponse
    """
    all_params = ['list_instances_by_tags_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/action',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListInstancesByTagsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_project_tags(self, request):
    """Query project tags.

    Queries the set of all instance tags under a specified project ID.

    :param ListProjectTagsRequest request
    :return: ListProjectTagsResponse
    """
    return self.list_project_tags_with_http_info(request)
def list_project_tags_with_http_info(self, request):
    """Query project tags.

    Queries the set of all instance tags under a specified project ID.

    :param ListProjectTagsRequest request
    :return: ListProjectTagsResponse
    """
    all_params = []

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    query_params = []
    header_params = {}
    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/tags',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListProjectTagsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_restore_collections(self, request):
    """Get the list of restorable database collections.

    Gets the list of database collections that can be restored.

    :param ListRestoreCollectionsRequest request
    :return: ListRestoreCollectionsResponse
    """
    return self.list_restore_collections_with_http_info(request)
def list_restore_collections_with_http_info(self, request):
    """Get the list of restorable database collections.

    Gets the list of database collections that can be restored.

    :param ListRestoreCollectionsRequest request
    :return: ListRestoreCollectionsResponse
    """
    all_params = ['instance_id', 'db_name', 'restore_time', 'x_language', 'offset', 'limit']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    for name in ('db_name', 'restore_time', 'offset', 'limit'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    if 'x_language' in local_var_params:
        header_params['X-Language'] = local_var_params['x_language']

    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/restore-collection',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRestoreCollectionsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_restore_databases(self, request):
    """Get the list of restorable databases.

    Gets the list of databases that can be restored.

    :param ListRestoreDatabasesRequest request
    :return: ListRestoreDatabasesResponse
    """
    return self.list_restore_databases_with_http_info(request)
def list_restore_databases_with_http_info(self, request):
    """Get the list of restorable databases.

    Gets the list of databases that can be restored.

    :param ListRestoreDatabasesRequest request
    :return: ListRestoreDatabasesResponse
    """
    all_params = ['instance_id', 'restore_time', 'x_language', 'offset', 'limit']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    for name in ('restore_time', 'offset', 'limit'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    if 'x_language' in local_var_params:
        header_params['X-Language'] = local_var_params['x_language']

    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/restore-database',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRestoreDatabasesResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_restore_times(self, request):
    """Query restorable time windows.

    Queries the time windows within which an instance can be restored.

    :param ListRestoreTimesRequest request
    :return: ListRestoreTimesResponse
    """
    return self.list_restore_times_with_http_info(request)
def list_restore_times_with_http_info(self, request):
    """Query restorable time windows.

    Queries the time windows within which an instance can be restored.

    :param ListRestoreTimesRequest request
    :return: ListRestoreTimesResponse
    """
    all_params = ['instance_id', 'date', 'x_language']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    if 'date' in local_var_params:
        query_params.append(('date', local_var_params['date']))

    header_params = {}
    if 'x_language' in local_var_params:
        header_params['X-Language'] = local_var_params['x_language']

    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/restore-time',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRestoreTimesResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_sessions(self, request):
    """Query sessions on an instance node.

    Queries sessions on an instance node.

    :param ListSessionsRequest request
    :return: ListSessionsResponse
    """
    return self.list_sessions_with_http_info(request)
def list_sessions_with_http_info(self, request):
    """Query sessions on an instance node.

    Queries sessions on an instance node.

    :param ListSessionsRequest request
    :return: ListSessionsResponse
    """
    all_params = ['node_id', 'offset', 'limit', 'plan_summary', 'type', 'namespace', 'cost_time']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'node_id' in local_var_params:
        path_params['node_id'] = local_var_params['node_id']

    query_params = []
    for name in ('offset', 'limit', 'plan_summary', 'type', 'namespace', 'cost_time'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/nodes/{node_id}/sessions',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListSessionsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_slow_logs(self, request):
    """Query database slow logs.

    Queries slow-log information of a DB instance.

    :param ListSlowLogsRequest request
    :return: ListSlowLogsResponse
    """
    return self.list_slow_logs_with_http_info(request)
def list_slow_logs_with_http_info(self, request):
    """Query database slow logs.

    Queries slow-log information of a DB instance.

    :param ListSlowLogsRequest request
    :return: ListSlowLogsResponse
    """
    all_params = ['instance_id', 'start_date', 'end_date', 'node_id', 'type', 'offset', 'limit']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    for name in ('start_date', 'end_date', 'node_id', 'type', 'offset', 'limit'):
        if name in local_var_params:
            query_params.append((name, local_var_params[name]))

    header_params = {}
    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/slowlog',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListSlowLogsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_storage_type(self, request):
    """Query database disk types.

    Queries the database disk types available in the current region.

    :param ListStorageTypeRequest request
    :return: ListStorageTypeResponse
    """
    return self.list_storage_type_with_http_info(request)
def list_storage_type_with_http_info(self, request):
    """Query database disk types.

    Queries the database disk types available in the current region.

    :param ListStorageTypeRequest request
    :return: ListStorageTypeResponse
    """
    all_params = ['engine_name']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'engine_name' in local_var_params:
        query_params.append(('engine_name', local_var_params['engine_name']))

    header_params = {}
    form_params = {}

    # A streaming request supplies its payload as a file stream.
    body_params = request.get_file_stream() if isinstance(request, SdkStreamRequest) else None

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/storage-type',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListStorageTypeResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def migrate_az(self, request):
    """Migrate an instance across availability zones.

    Migrates an instance to another availability zone.

    :param MigrateAzRequest request
    :return: MigrateAzResponse
    """
    return self.migrate_az_with_http_info(request)
def migrate_az_with_http_info(self, request):
    """Migrate an instance across availability zones.

    Migrates an instance to another availability zone.

    :param MigrateAzRequest request
    :return: MigrateAzResponse
    """
    all_params = ['instance_id', 'migrate_az_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/migrate',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='MigrateAzResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def reset_password(self, request):
    """Change a database user's password.

    Changes the password of a database user.

    :param ResetPasswordRequest request
    :return: ResetPasswordResponse
    """
    return self.reset_password_with_http_info(request)
def reset_password_with_http_info(self, request):
    """Change a database user's password.

    Changes the password of a database user.

    :param ResetPasswordRequest request
    :return: ResetPasswordResponse
    """
    all_params = ['instance_id', 'reset_password_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/reset-password',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ResetPasswordResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def resize_instance(self, request):
    """Change instance specifications.

    Changes the specifications (flavor) of an instance.

    :param ResizeInstanceRequest request
    :return: ResizeInstanceResponse
    """
    return self.resize_instance_with_http_info(request)
def resize_instance_with_http_info(self, request):
    """Change instance specifications.

    Changes the specifications (flavor) of an instance.

    :param ResizeInstanceRequest request
    :return: ResizeInstanceResponse
    """
    all_params = ['instance_id', 'resize_instance_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/resize',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ResizeInstanceResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def resize_instance_volume(self, request):
    """Scale up instance storage.

    Expands the storage capacity of an instance.

    :param ResizeInstanceVolumeRequest request
    :return: ResizeInstanceVolumeResponse
    """
    return self.resize_instance_volume_with_http_info(request)
def resize_instance_volume_with_http_info(self, request):
    """Scale up instance storage.

    Expands the storage capacity of an instance.

    :param ResizeInstanceVolumeRequest request
    :return: ResizeInstanceVolumeResponse
    """
    all_params = ['instance_id', 'resize_instance_volume_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/enlarge-volume',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ResizeInstanceVolumeResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def restart_instance(self, request):
    """Restart an instance.

    Restarts the database service of an instance.

    :param RestartInstanceRequest request
    :return: RestartInstanceResponse
    """
    return self.restart_instance_with_http_info(request)
def restart_instance_with_http_info(self, request):
    """Restart an instance.

    Restarts the database service of an instance.

    :param RestartInstanceRequest request
    :return: RestartInstanceResponse
    """
    all_params = ['instance_id', 'restart_instance_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/restart',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='RestartInstanceResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def restore_instance(self, request):
    """Restore to the current instance.

    Restores backup data to the current instance.

    :param RestoreInstanceRequest request
    :return: RestoreInstanceResponse
    """
    return self.restore_instance_with_http_info(request)
def restore_instance_with_http_info(self, request):
    """Restore to the current instance.

    Restores backup data to the current instance.

    :param RestoreInstanceRequest request
    :return: RestoreInstanceResponse
    """
    all_params = ['restore_instance_request_body', 'x_language']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    query_params = []

    header_params = {}
    if 'x_language' in local_var_params:
        header_params['X-Language'] = local_var_params['x_language']

    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/recovery',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='RestoreInstanceResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def restore_instance_from_collection(self, request):
    """Database/collection-level point-in-time recovery.

    Performs point-in-time recovery at database/collection level.

    :param RestoreInstanceFromCollectionRequest request
    :return: RestoreInstanceFromCollectionResponse
    """
    return self.restore_instance_from_collection_with_http_info(request)
def restore_instance_from_collection_with_http_info(self, request):
    """Database/collection-level point-in-time recovery.

    Performs point-in-time recovery at database/collection level.

    :param RestoreInstanceFromCollectionRequest request
    :return: RestoreInstanceFromCollectionResponse
    """
    all_params = ['instance_id', 'restore_instance_from_collection_request_body', 'x_language']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}
    if 'x_language' in local_var_params:
        header_params['X-Language'] = local_var_params['x_language']

    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/restore/collections',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='RestoreInstanceFromCollectionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def restore_new_instance(self, request):
    """Restore to a new instance.

    Restores a backup to a new instance.

    :param RestoreNewInstanceRequest request
    :return: RestoreNewInstanceResponse
    """
    return self.restore_new_instance_with_http_info(request)
def restore_new_instance_with_http_info(self, request):
    """Restore to a new instance.

    Restores a backup to a new instance.

    :param RestoreNewInstanceRequest request
    :return: RestoreNewInstanceResponse
    """
    all_params = ['restore_new_instance_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='RestoreNewInstanceResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def set_auditlog_policy(self, request):
    """Set the audit-log policy.

    Sets the audit-log policy of an instance.

    :param SetAuditlogPolicyRequest request
    :return: SetAuditlogPolicyResponse
    """
    return self.set_auditlog_policy_with_http_info(request)
def set_auditlog_policy_with_http_info(self, request):
    """Set the audit-log policy.

    Sets the audit-log policy of an instance.

    :param SetAuditlogPolicyRequest request
    :return: SetAuditlogPolicyResponse
    """
    all_params = ['instance_id', 'set_auditlog_policy_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/auditlog-policy',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='SetAuditlogPolicyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def set_backup_policy(self, request):
    """Set the automated backup policy.

    Sets the automated backup policy of an instance.

    :param SetBackupPolicyRequest request
    :return: SetBackupPolicyResponse
    """
    return self.set_backup_policy_with_http_info(request)
def set_backup_policy_with_http_info(self, request):
    """Set the automated backup policy.

    Sets the automated backup policy of an instance.

    :param SetBackupPolicyRequest request
    :return: SetBackupPolicyResponse
    """
    all_params = ['instance_id', 'set_backup_policy_request_body']

    # Pick up every attribute that is actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []
    header_params = {}
    form_params = {}

    # Explicit payload if present; a streaming request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/backups/policy',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='SetBackupPolicyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def set_balancer_switch(self, request):
    """Toggle the cluster balancer switch (设置集群均衡开关).

    Thin convenience wrapper delegating to
    :meth:`set_balancer_switch_with_http_info`.

    :param SetBalancerSwitchRequest request: request object
    :return: SetBalancerSwitchResponse
    """
    response = self.set_balancer_switch_with_http_info(request)
    return response
def set_balancer_switch_with_http_info(self, request):
    """Toggle the cluster balancer switch and return the raw HTTP response.

    :param SetBalancerSwitchRequest request: request object
    :return: SetBalancerSwitchResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'action']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    # Both the instance id and the on/off action are URL path segments.
    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']
    if 'action' in local_var_params:
        path_params['action'] = local_var_params['action']

    query_params = []

    header_params = {}

    form_params = {}

    # No JSON body for this operation; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/balancer/{action}',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='SetBalancerSwitchResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def set_balancer_window(self, request):
    """Set the cluster balancer activity window (设置集群均衡活动时间窗).

    Thin convenience wrapper delegating to
    :meth:`set_balancer_window_with_http_info`.

    :param SetBalancerWindowRequest request: request object
    :return: SetBalancerWindowResponse
    """
    response = self.set_balancer_window_with_http_info(request)
    return response
def set_balancer_window_with_http_info(self, request):
    """Set the cluster balancer activity window and return the raw HTTP response.

    :param SetBalancerWindowRequest request: request object
    :return: SetBalancerWindowResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'balancer_active_window']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    # Explicit body attribute, or a raw stream for SdkStreamRequest.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/balancer/active-window',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='SetBalancerWindowResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_auditlog_policy(self, request):
    """Query the audit log policy (查询审计日志策略).

    Thin convenience wrapper delegating to
    :meth:`show_auditlog_policy_with_http_info`.

    :param ShowAuditlogPolicyRequest request: request object
    :return: ShowAuditlogPolicyResponse
    """
    response = self.show_auditlog_policy_with_http_info(request)
    return response
def show_auditlog_policy_with_http_info(self, request):
    """Query the audit log policy and return the raw HTTP response.

    :param ShowAuditlogPolicyRequest request: request object
    :return: ShowAuditlogPolicyResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'x_language']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    # Optional response-language header.
    header_params = {}
    if 'x_language' in local_var_params:
        header_params['X-Language'] = local_var_params['x_language']

    form_params = {}

    # GET request: no JSON body; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/auditlog-policy',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowAuditlogPolicyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_backup_download_link(self, request):
    """Get a backup download link (获取备份下载链接).

    Thin convenience wrapper delegating to
    :meth:`show_backup_download_link_with_http_info`.

    :param ShowBackupDownloadLinkRequest request: request object
    :return: ShowBackupDownloadLinkResponse
    """
    response = self.show_backup_download_link_with_http_info(request)
    return response
def show_backup_download_link_with_http_info(self, request):
    """Get a backup download link and return the raw HTTP response.

    :param ShowBackupDownloadLinkRequest request: request object
    :return: ShowBackupDownloadLinkResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'backup_id', 'x_language']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}

    # Both identifiers travel as query-string parameters for this endpoint.
    query_params = []
    if 'instance_id' in local_var_params:
        query_params.append(('instance_id', local_var_params['instance_id']))
    if 'backup_id' in local_var_params:
        query_params.append(('backup_id', local_var_params['backup_id']))

    # Optional response-language header.
    header_params = {}
    if 'x_language' in local_var_params:
        header_params['X-Language'] = local_var_params['x_language']

    form_params = {}

    # GET request: no JSON body; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/backups/download-file',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowBackupDownloadLinkResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_backup_policy(self, request):
    """Query the automated backup policy (查询自动备份策略).

    Thin convenience wrapper delegating to
    :meth:`show_backup_policy_with_http_info`.

    :param ShowBackupPolicyRequest request: request object
    :return: ShowBackupPolicyResponse
    """
    response = self.show_backup_policy_with_http_info(request)
    return response
def show_backup_policy_with_http_info(self, request):
    """Query the automated backup policy and return the raw HTTP response.

    :param ShowBackupPolicyRequest request: request object
    :return: ShowBackupPolicyResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    # GET request: no JSON body; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/backups/policy',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowBackupPolicyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_connection_statistics(self, request):
    """Query instance connection statistics (查询实例连接数统计信息).

    Thin convenience wrapper delegating to
    :meth:`show_connection_statistics_with_http_info`.

    :param ShowConnectionStatisticsRequest request: request object
    :return: ShowConnectionStatisticsResponse
    """
    response = self.show_connection_statistics_with_http_info(request)
    return response
def show_connection_statistics_with_http_info(self, request):
    """Query instance connection statistics and return the raw HTTP response.

    :param ShowConnectionStatisticsRequest request: request object
    :return: ShowConnectionStatisticsResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'node_id']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    # Optional node filter travels in the query string.
    query_params = []
    if 'node_id' in local_var_params:
        query_params.append(('node_id', local_var_params['node_id']))

    header_params = {}

    form_params = {}

    # GET request: no JSON body; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/conn-statistics',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowConnectionStatisticsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_sharding_balancer(self, request):
    """Query the cluster balancer settings (查询集群均衡设置).

    Thin convenience wrapper delegating to
    :meth:`show_sharding_balancer_with_http_info`.

    :param ShowShardingBalancerRequest request: request object
    :return: ShowShardingBalancerResponse
    """
    response = self.show_sharding_balancer_with_http_info(request)
    return response
def show_sharding_balancer_with_http_info(self, request):
    """Query the cluster balancer settings and return the raw HTTP response.

    :param ShowShardingBalancerRequest request: request object
    :return: ShowShardingBalancerResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    # GET request: no JSON body; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/balancer',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowShardingBalancerResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def switch_ssl(self, request):
    """Toggle the instance SSL switch (切换SSL开关).

    Thin convenience wrapper delegating to
    :meth:`switch_ssl_with_http_info`.

    :param SwitchSslRequest request: request object
    :return: SwitchSslResponse
    """
    response = self.switch_ssl_with_http_info(request)
    return response
def switch_ssl_with_http_info(self, request):
    """Toggle the instance SSL switch and return the raw HTTP response.

    :param SwitchSslRequest request: request object
    :return: SwitchSslResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'switch_ssl_request_body']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    # Explicit body attribute, or a raw stream for SdkStreamRequest.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/switch-ssl',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='SwitchSslResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def switchover_replica_set(self, request):
    """Switch primary/secondary nodes of a replica set (切换副本集实例的主备节点).

    Thin convenience wrapper delegating to
    :meth:`switchover_replica_set_with_http_info`.

    :param SwitchoverReplicaSetRequest request: request object
    :return: SwitchoverReplicaSetResponse
    """
    response = self.switchover_replica_set_with_http_info(request)
    return response
def switchover_replica_set_with_http_info(self, request):
    """Switch primary/secondary nodes of a replica set; return the raw HTTP response.

    :param SwitchoverReplicaSetRequest request: request object
    :return: SwitchoverReplicaSetResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    # No JSON body for this operation; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/switchover',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='SwitchoverReplicaSetResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_instance_name(self, request):
    """Rename a DB instance (修改实例名称).

    Thin convenience wrapper delegating to
    :meth:`update_instance_name_with_http_info`.

    :param UpdateInstanceNameRequest request: request object
    :return: UpdateInstanceNameResponse
    """
    response = self.update_instance_name_with_http_info(request)
    return response
def update_instance_name_with_http_info(self, request):
    """Rename a DB instance and return the raw HTTP response.

    :param UpdateInstanceNameRequest request: request object
    :return: UpdateInstanceNameResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'update_name_request_body']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    # Explicit body attribute, or a raw stream for SdkStreamRequest.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/modify-name',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateInstanceNameResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_instance_port(self, request):
    """Change the database port (修改数据库端口).

    Thin convenience wrapper delegating to
    :meth:`update_instance_port_with_http_info`.

    :param UpdateInstancePortRequest request: request object
    :return: UpdateInstancePortResponse
    """
    response = self.update_instance_port_with_http_info(request)
    return response
def update_instance_port_with_http_info(self, request):
    """Change the database port and return the raw HTTP response.

    :param UpdateInstancePortRequest request: request object
    :return: UpdateInstancePortResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'update_port_request_body']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    # Explicit body attribute, or a raw stream for SdkStreamRequest.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/modify-port',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateInstancePortResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_security_group(self, request):
    """Change the security group bound to an instance (变更实例安全组).

    Thin convenience wrapper delegating to
    :meth:`update_security_group_with_http_info`.

    :param UpdateSecurityGroupRequest request: request object
    :return: UpdateSecurityGroupResponse
    """
    response = self.update_security_group_with_http_info(request)
    return response
def update_security_group_with_http_info(self, request):
    """Change the instance's security group and return the raw HTTP response.

    :param UpdateSecurityGroupRequest request: request object
    :return: UpdateSecurityGroupResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['instance_id', 'update_security_group_request_body']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'instance_id' in local_var_params:
        path_params['instance_id'] = local_var_params['instance_id']

    query_params = []

    header_params = {}

    form_params = {}

    # Explicit body attribute, or a raw stream for SdkStreamRequest.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/v3/{project_id}/instances/{instance_id}/modify-security-group',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='UpdateSecurityGroupResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_api_version(self, request):
    """List all supported API versions (查询当前支持的API版本信息列表).

    Thin convenience wrapper delegating to
    :meth:`list_api_version_with_http_info`.

    :param ListApiVersionRequest request: request object
    :return: ListApiVersionResponse
    """
    response = self.list_api_version_with_http_info(request)
    return response
def list_api_version_with_http_info(self, request):
    """List all supported API versions and return the raw HTTP response.

    :param ListApiVersionRequest request: request object
    :return: ListApiVersionResponse
    """
    # This operation takes no request attributes (kept for SDK parity).
    all_params = []

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = {}

    # GET request: no JSON body; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListApiVersionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_api_version(self, request):
    """Query one API version (查询指定API版本信息).

    Thin convenience wrapper delegating to
    :meth:`show_api_version_with_http_info`.

    :param ShowApiVersionRequest request: request object
    :return: ShowApiVersionResponse
    """
    response = self.show_api_version_with_http_info(request)
    return response
def show_api_version_with_http_info(self, request):
    """Query one API version and return the raw HTTP response.

    :param ShowApiVersionRequest request: request object
    :return: ShowApiVersionResponse
    """
    # Attribute names recognized by this operation (kept for SDK parity).
    all_params = ['version']

    # Snapshot the attributes actually set on the request object.
    local_var_params = {
        attr: getattr(request, attr)
        for attr in request.attribute_map
        if hasattr(request, attr)
    }

    collection_formats = {}

    path_params = {}
    if 'version' in local_var_params:
        path_params['version'] = local_var_params['version']

    query_params = []

    header_params = {}

    form_params = {}

    # GET request: no JSON body; only a raw stream is possible.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version}',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowApiVersionResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
             post_params=None, response_type=None, response_headers=None, auth_settings=None,
             collection_formats=None, request_type=None):
    """Make the HTTP request and return the deserialized data.

    :param resource_path: Path to method endpoint.
    :param method: HTTP method to use.
    :param path_params: Path parameters in the url.
    :param query_params: Query parameters in the url.
    :param header_params: Header parameters placed in the request header.
    :param body: Request body.
    :param post_params dict: Request post form parameters, for
        `application/x-www-form-urlencoded` and `multipart/form-data`.
    :param auth_settings list: Auth Settings names for the request.
    :param response_type: Response data type.
    :param response_headers: Headers to be added to response data.
    :param collection_formats: dict of collection formats for path, query,
        header, and post parameters.
    :param request_type: Request data type.
    :return: the response, returned directly.
    """
    # Collect everything once, then hand off to the underlying transport.
    request_kwargs = dict(
        method=method,
        resource_path=resource_path,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body,
        post_params=post_params,
        response_type=response_type,
        response_headers=response_headers,
        collection_formats=collection_formats,
        request_type=request_type,
    )
    return self.do_http_request(**request_kwargs)
| 30.135589 | 117 | 0.621477 | 12,261 | 123,797 | 5.862572 | 0.037436 | 0.047189 | 0.082581 | 0.033611 | 0.914665 | 0.901796 | 0.869799 | 0.844716 | 0.838441 | 0.685299 | 0 | 0.000798 | 0.291445 | 123,797 | 4,107 | 118 | 30.142927 | 0.818667 | 0.104138 | 0 | 0.821338 | 0 | 0 | 0.110515 | 0.047793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052921 | false | 0.00508 | 0.004234 | 0 | 0.111346 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1c1594f9fcf5265122c6f692d7a118db69ffd4ee | 5,888 | py | Python | dfirtrack_main/tests/generic_views/test_main_overview_urls.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | [
"Apache-2.0"
] | null | null | null | dfirtrack_main/tests/generic_views/test_main_overview_urls.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | [
"Apache-2.0"
] | null | null | null | dfirtrack_main/tests/generic_views/test_main_overview_urls.py | cclauss/dfirtrack | 2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a | [
"Apache-2.0"
] | null | null | null | import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from dfirtrack_config.models import MainConfigModel
def set_main_overview(main_overview):
    """Point the 'MainConfig' main_overview setting at the given overview page."""
    config = MainConfigModel.objects.get(main_config_name='MainConfig')
    config.main_overview = f'main_overview_{main_overview}'
    config.save()
class MainOverviewViewTestCase(TestCase):
    """Main overview view tests.

    Seven near-identical tests (config value -> expected redirect target)
    share the set-config / login / reverse / redirect boilerplate, so it is
    factored into a single private helper.
    """

    @classmethod
    def setUpTestData(cls):
        # create user for login-protected requests
        User.objects.create_user(
            username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
        )

    def _assert_main_overview_redirect(self, main_overview, destination):
        """Set config to `main_overview`, log in, and assert that
        '/main_overview/' reverses correctly and redirects to `destination`."""
        # change config
        set_main_overview(main_overview)
        # login testuser
        self.client.login(
            username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'
        )
        # compare reverse url
        self.assertEqual(reverse('main_overview'), '/main_overview/')
        # get response
        response = self.client.get('/main_overview/')
        # compare redirect
        self.assertRedirects(
            response,
            urllib.parse.quote(destination),
            status_code=302,
            target_status_code=200,
        )

    def test_main_overview_not_logged_in(self):
        """ test main overview """
        # create url
        destination = '/login/?next=' + urllib.parse.quote('/main_overview/', safe='')
        # get response
        response = self.client.get('/main_overview/', follow=True)
        # compare
        self.assertRedirects(response, destination, status_code=302, target_status_code=200)

    def test_main_overview_system_url(self):
        """ test main overview url and redirect """
        self._assert_main_overview_redirect('system', '/system/')

    def test_main_overview_artifact_url(self):
        """ test main overview url and redirect """
        self._assert_main_overview_redirect('artifact', '/artifacts/artifact/')

    def test_main_overview_case_url(self):
        """ test main overview url and redirect """
        self._assert_main_overview_redirect('case', '/case/')

    def test_main_overview_status_url(self):
        """ test main overview url and redirect """
        self._assert_main_overview_redirect('status', '/config/status/')

    def test_main_overview_tag_url(self):
        """ test main overview url and redirect """
        self._assert_main_overview_redirect('tag', '/tag/')

    def test_main_overview_task_url(self):
        """ test main overview url and redirect """
        self._assert_main_overview_redirect('task', '/task/')

    def test_main_overview_default_url(self):
        """ test main overview url and redirect """
        # unknown config value falls back to the system overview
        self._assert_main_overview_redirect('foobar', '/system/')
| 34.432749 | 100 | 0.652514 | 630 | 5,888 | 5.906349 | 0.119048 | 0.193496 | 0.068799 | 0.060199 | 0.823166 | 0.814566 | 0.799516 | 0.799516 | 0.787691 | 0.787691 | 0 | 0.012601 | 0.245245 | 5,888 | 170 | 101 | 34.635294 | 0.824707 | 0.182405 | 0 | 0.513514 | 0 | 0 | 0.175319 | 0.043617 | 0 | 0 | 0 | 0 | 0.202703 | 1 | 0.135135 | false | 0.108108 | 0.067568 | 0 | 0.22973 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
1c1cdec51f37c92febc3016cca8f58d4c6829958 | 203 | py | Python | aitoolbox/torchtrain/train_loop/__init__.py | mv1388/AIToolbox | c64ac4810a02d230ce471d86b758e82ea232a7e7 | [
"MIT"
] | 3 | 2019-10-12T12:24:09.000Z | 2020-08-02T02:42:43.000Z | aitoolbox/torchtrain/train_loop/__init__.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | 3 | 2020-04-10T14:07:07.000Z | 2020-04-22T19:04:38.000Z | aitoolbox/torchtrain/train_loop/__init__.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | null | null | null | from aitoolbox.torchtrain.train_loop.train_loop import TrainLoop
from aitoolbox.torchtrain.train_loop.train_loop_tracking import (
TrainLoopCheckpoint, TrainLoopEndSave, TrainLoopCheckpointEndSave
)
| 40.6 | 69 | 0.871921 | 21 | 203 | 8.190476 | 0.52381 | 0.209302 | 0.267442 | 0.325581 | 0.476744 | 0.476744 | 0.476744 | 0 | 0 | 0 | 0 | 0 | 0.078818 | 203 | 4 | 70 | 50.75 | 0.919786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
1c70cff8cb26ec6b22dbbef53b91f5d00f83ddb3 | 33 | py | Python | function_20373952.py | YJoJ/Study-16 | 4d2b6d0d50185a904be856b7bb69e310dbe7df0f | [
"MIT"
] | 1 | 2022-03-19T08:09:36.000Z | 2022-03-19T08:09:36.000Z | function_20373952.py | YJoJ/Study-16 | 4d2b6d0d50185a904be856b7bb69e310dbe7df0f | [
"MIT"
] | null | null | null | function_20373952.py | YJoJ/Study-16 | 4d2b6d0d50185a904be856b7bb69e310dbe7df0f | [
"MIT"
] | 1 | 2022-03-19T08:09:48.000Z | 2022-03-19T08:09:48.000Z | print('My student_id: 20373952')
| 16.5 | 32 | 0.757576 | 5 | 33 | 4.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.266667 | 0.090909 | 33 | 1 | 33 | 33 | 0.533333 | 0 | 0 | 0 | 0 | 0 | 0.69697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
1c7ffeda408e632f7ea1ff7b80809903bce9c2e2 | 6,340 | py | Python | hyperglass/api/fake_output.py | blkmajik/hyperglass | c52a6f609843177671d38bcad59b8bd658f46b64 | [
"BSD-3-Clause-Clear"
] | 298 | 2019-06-17T13:51:46.000Z | 2021-06-23T18:09:51.000Z | hyperglass/api/fake_output.py | blkmajik/hyperglass | c52a6f609843177671d38bcad59b8bd658f46b64 | [
"BSD-3-Clause-Clear"
] | 137 | 2019-06-18T12:59:37.000Z | 2021-06-19T05:50:58.000Z | hyperglass/api/fake_output.py | blkmajik/hyperglass | c52a6f609843177671d38bcad59b8bd658f46b64 | [
"BSD-3-Clause-Clear"
] | 42 | 2019-06-18T07:25:23.000Z | 2021-06-18T17:40:20.000Z | """Return fake, static data for development purposes."""
# Standard Library
from typing import Dict, Union
PLAIN = r"""
BGP routing table entry for 4.0.0.0/9, version 1017877672
BGP Bestpath: deterministic-med
Paths: (10 available, best #9, table default)
Advertised to update-groups:
50
1299 3356, (aggregated by 3356 4.69.130.24)
216.250.230.1 (metric 2000) from 216.250.230.1 (216.250.230.1)
Origin IGP, metric 0, localpref 100, weight 100, valid, internal, atomic-aggregate
Community: 1299:25000 14525:0 14525:40 14525:601 14525:1021 14525:2840 14525:3003 14525:4002 14525:9003
1299 3356, (aggregated by 3356 4.69.130.24), (received-only)
216.250.230.1 (metric 2000) from 216.250.230.1 (216.250.230.1)
Origin IGP, metric 0, localpref 150, valid, internal, atomic-aggregate
Community: 1299:25000 14525:0 14525:40 14525:601 14525:1021 14525:2840 14525:3003 14525:4002 14525:9003
1299 3356, (aggregated by 3356 4.69.130.184)
199.34.92.9 (metric 1000) from 199.34.92.9 (199.34.92.9)
Origin IGP, metric 0, localpref 100, weight 100, valid, internal, atomic-aggregate
Community: 1299:25000 14525:0 14525:40 14525:601 14525:1021 14525:2840 14525:3001 14525:4001 14525:9003
1299 3356, (aggregated by 3356 4.69.130.184), (received-only)
199.34.92.9 (metric 1000) from 199.34.92.9 (199.34.92.9)
Origin IGP, metric 0, localpref 150, valid, internal, atomic-aggregate
Community: 1299:25000 14525:0 14525:40 14525:601 14525:1021 14525:2840 14525:3001 14525:4001 14525:9003
174 3356, (aggregated by 3356 4.69.130.4)
199.34.92.10 (metric 1000) from 199.34.92.10 (199.34.92.10)
Origin IGP, metric 0, localpref 100, weight 100, valid, internal, atomic-aggregate
Community: 174:21000 174:22013 14525:0 14525:40 14525:601 14525:1021 14525:2840 14525:3001 14525:4001 14525:9001
174 3356, (aggregated by 3356 4.69.130.4), (received-only)
199.34.92.10 (metric 1000) from 199.34.92.10 (199.34.92.10)
Origin IGP, metric 0, localpref 150, valid, internal, atomic-aggregate
Community: 174:21000 174:22013 14525:0 14525:40 14525:601 14525:1021 14525:2840 14525:3001 14525:4001 14525:9001
209 3356, (aggregated by 3356 4.69.130.2)
199.34.92.5 (metric 101) from 199.34.92.5 (199.34.92.5)
Origin IGP, metric 8006570, localpref 150, weight 200, valid, internal, atomic-aggregate
Community: 209:88 209:888 3356:0 3356:3 3356:100 3356:123 3356:575 3356:2011 14525:0 14525:40 14525:1021 14525:2840 14525:3002 14525:4003 14525:9005
209 3356, (aggregated by 3356 4.69.130.2), (received-only)
199.34.92.5 (metric 101) from 199.34.92.5 (199.34.92.5)
Origin IGP, metric 8006570, localpref 150, valid, internal, atomic-aggregate
Community: 209:88 209:888 3356:0 3356:3 3356:100 3356:123 3356:575 3356:2011 14525:0 14525:40 14525:1021 14525:2840 14525:3002 14525:4003 14525:9005
6939 3356, (aggregated by 3356 4.69.130.4)
184.105.247.177 from 184.105.247.177 (216.218.252.234)
Origin IGP, localpref 150, weight 200, valid, external, atomic-aggregate, best
Community: 6939:7016 6939:8840 6939:9001 14525:0 14525:40 14525:1021 14525:2840 14525:3002 14525:4003 14525:9002
6939 3356, (aggregated by 3356 4.69.130.4), (received-only)
184.105.247.177 from 184.105.247.177 (216.218.252.234)
Origin IGP, localpref 100, valid, external, atomic-aggregate
Community: 6939:7016 6939:8840 6939:9001
""" # noqa: W291,E501
ROUTES = [
{
"prefix": "1.1.1.0/24",
"active": True,
"age": 1025337,
"weight": 170,
"med": 0,
"local_preference": 175,
"as_path": [1299, 13335],
"communities": [
"1299:35000",
"14525:0",
"14525:41",
"14525:600",
"14525:1021",
"14525:2840",
"14525:3001",
"14525:4001",
"14525:9003",
],
"next_hop": "62.115.189.136",
"source_as": 13335,
"source_rid": "141.101.72.1",
"peer_rid": "2.255.254.43",
"rpki_state": 1,
},
{
"prefix": "1.1.1.0/24",
"active": False,
"age": 1584622,
"weight": 200,
"med": 0,
"local_preference": 250,
"as_path": [13335],
"communities": [
"14525:0",
"14525:20",
"14525:600",
"14525:1021",
"14525:2840",
"14525:3002",
"14525:4003",
"14525:9009",
],
"next_hop": "",
"source_as": 13335,
"source_rid": "172.68.129.1",
"peer_rid": "199.34.92.5",
"rpki_state": 3,
},
{
"prefix": "1.1.1.0/24",
"active": False,
"age": 982517,
"weight": 200,
"med": 0,
"local_preference": 250,
"as_path": [13335],
"communities": [
"14525:0",
"14525:20",
"14525:600",
"14525:1021",
"14525:2840",
"14525:3002",
"14525:4003",
"14525:9009",
],
"next_hop": "",
"source_as": 13335,
"source_rid": "172.68.129.1",
"peer_rid": "199.34.92.6",
"rpki_state": 3,
},
{
"prefix": "1.1.1.0/24",
"active": False,
"age": 1000101,
"weight": 200,
"med": 0,
"local_preference": 250,
"as_path": [13335],
"communities": [
"13335:10014",
"13335:19000",
"13335:20050",
"13335:20500",
"13335:20530",
"14525:0",
"14525:20",
"14525:600",
"14525:1021",
"14525:2840",
"14525:3003",
"14525:4002",
"14525:9009",
],
"next_hop": "",
"source_as": 13335,
"source_rid": "141.101.73.1",
"peer_rid": "216.250.230.2",
"rpki_state": 3,
},
]
STRUCTURED = {
"vrf": "default",
"count": len(ROUTES),
"routes": ROUTES,
"winning_weight": "high",
}
async def fake_output(structured: bool) -> Union[str, Dict]:
"""Bypass the standard execution process and return static, fake output."""
output = PLAIN
if structured:
output = STRUCTURED
return output
| 36.228571 | 154 | 0.579338 | 892 | 6,340 | 4.084081 | 0.200673 | 0.02745 | 0.03843 | 0.064233 | 0.78891 | 0.775185 | 0.775185 | 0.740598 | 0.740598 | 0.674993 | 0 | 0.41596 | 0.274606 | 6,340 | 174 | 155 | 36.436782 | 0.376169 | 0.013249 | 0 | 0.552147 | 0 | 0.116564 | 0.692906 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.006135 | 0 | 0.01227 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
98f3e0cffabbba3d3413940b974d580fd014224c | 12,551 | py | Python | verification/testcases/unit_testcases/test_verification_service.py | vinthedark/snet-marketplace-service | 66ed9d093b00f09d3e28ef4d86c4e4c125037d06 | [
"MIT"
] | null | null | null | verification/testcases/unit_testcases/test_verification_service.py | vinthedark/snet-marketplace-service | 66ed9d093b00f09d3e28ef4d86c4e4c125037d06 | [
"MIT"
] | null | null | null | verification/testcases/unit_testcases/test_verification_service.py | vinthedark/snet-marketplace-service | 66ed9d093b00f09d3e28ef4d86c4e4c125037d06 | [
"MIT"
] | null | null | null | import unittest
import json
from unittest.mock import patch
from verification.services.verification_service import VerificationService
from requests import Response
class VerificationServiceTestCase(unittest.TestCase):
@patch("requests.get")
def test_get_fields(self, mock_requests):
mock_response_json = [
{
"PersonInfo": {
"title": "PersonInfo",
"type": "object",
"properties": {
"FirstGivenName": {
"type": "string",
"description": "First name of the individual to be verified",
"label": "First Name"
},
"MiddleName": {
"type": "string",
"description": "Second given name of the individual to be verified",
"label": "Middle Name"
},
"FirstSurName": {
"type": "string",
"description": "Last name of the individual to be verified",
"label": "Last Name"
},
"DayOfBirth": {
"type": "int",
"description": "Day of birth date (e.g. 23 for a date of birth of 23/11/1975)",
"label": "Day Of Birth"
},
"MonthOfBirth": {
"type": "int",
"description": "Month of birth date (e.g. 11 for a date of birth of 23/11/1975)",
"label": "Month Of Birth"
},
"YearOfBirth": {
"type": "int",
"description": "Year of birth date (e.g. 1975 for a date of birth of 23/11/1975)",
"label": "Year Of Birth"
}
},
"required": [
"DayOfBirth",
"FirstGivenName",
"FirstSurName",
"MonthOfBirth",
"YearOfBirth"
]
},
"Location": {
"title": "Location",
"type": "object",
"properties": {
"BuildingNumber": {
"type": "string",
"description": "Street number of primary residence",
"label": "Street Number"
},
"UnitNumber": {
"type": "string",
"description": "Flat/Unit/Apartment number of primary residence",
"label": "Unit Number"
},
"StreetName": {
"type": "string",
"description": "Street name of primary residence",
"label": "Street Name"
},
"StreetType": {
"type": "string",
"description": "Street type of primary residence (e.g. St, Rd, etc.)",
"label": "Street Type"
},
"Suburb": {
"type": "string",
"description": "City or Suburb of primary residence",
"label": "Suburb"
},
"StateProvinceCode": {
"type": "string",
"description": "State of primary residence. US sources expect 2 characters. Australian sources expect 2 or 3 characters.",
"label": "State"
},
"PostalCode": {
"type": "string",
"description": "ZIP Code or Postal Code of primary residence",
"label": "Postal Code"
}
},
"required": [
"PostalCode",
"StreetName"
]
},
"Communication": {
"title": "Communication",
"type": "object",
"properties": {
"MobileNumber": {
"type": "string",
"description": "Cellular phone number",
"label": "Cell Number"
},
"Telephone": {
"type": "string",
"description": "Telephone number of the individual to be verified",
"label": "Telephone"
}
},
"required": []
},
"Passport": {
"title": "Passport",
"type": "object",
"properties": {
"Number": {
"type": "string",
"description": "Passport number of the individual to be verified",
"label": "Passport Number"
}
},
"required": [
"Number"
]
},
"CountrySpecific": {
"title": "CountrySpecific",
"type": "object",
"properties": {
"AU": {
"title": "AU",
"type": "object",
"properties": {
"PassportCountry": {
"type": "string",
"description": "Passport Country (ISO 3166-1 alpha-2)",
"label": "Passport Country"
},
"PassportNumber": {
"type": "string",
"description": "Passport number of the individual to be verified",
"label": "Passport Number"
}
},
"required": [
"PassportCountry",
"PassportNumber"
]
}
}
}
}
]
response_obj = Response()
response_obj.__setattr__("status_code", 200)
response_obj.__setattr__("_content", json.dumps(
mock_response_json).encode("utf-8"))
mock_requests.return_value = response_obj
configuration_name = "Identity Verification"
country_code = "AU"
response = VerificationService().get_fields(configuration_name, country_code)
assert(response == [{'PersonInfo': {'title': 'PersonInfo', 'type': 'object', 'properties': {'FirstGivenName': {'type': 'string', 'description': 'First name of the individual to be verified', 'label': 'First Name'}, 'MiddleName': {'type': 'string', 'description': 'Second given name of the individual to be verified', 'label': 'Middle Name'}, 'FirstSurName': {'type': 'string', 'description': 'Last name of the individual to be verified', 'label': 'Last Name'}, 'DayOfBirth': {'type': 'int', 'description': 'Day of birth date (e.g. 23 for a date of birth of 23/11/1975)', 'label': 'Day Of Birth'}, 'MonthOfBirth': {'type': 'int', 'description': 'Month of birth date (e.g. 11 for a date of birth of 23/11/1975)', 'label': 'Month Of Birth'}, 'YearOfBirth': {'type': 'int', 'description': 'Year of birth date (e.g. 1975 for a date of birth of 23/11/1975)', 'label': 'Year Of Birth'}}, 'required': ['DayOfBirth', 'FirstGivenName', 'FirstSurName', 'MonthOfBirth', 'YearOfBirth']}, 'Location': {'title': 'Location', 'type': 'object', 'properties': {'BuildingNumber': {'type': 'string', 'description': 'Street number of primary residence', 'label': 'Street Number'}, 'UnitNumber': {'type': 'string', 'description': 'Flat/Unit/Apartment number of primary residence', 'label': 'Unit Number'}, 'StreetName': {'type': 'string', 'description': 'Street name of primary residence', 'label': 'Street Name'}, 'StreetType': {'type': 'string', 'description': 'Street type of primary residence (e.g. St, Rd, etc.)',
'label': 'Street Type'}, 'Suburb': {'type': 'string', 'description': 'City or Suburb of primary residence', 'label': 'Suburb'}, 'StateProvinceCode': {'type': 'string', 'description': 'State of primary residence. US sources expect 2 characters. Australian sources expect 2 or 3 characters.', 'label': 'State'}, 'PostalCode': {'type': 'string', 'description': 'ZIP Code or Postal Code of primary residence', 'label': 'Postal Code'}}, 'required': ['PostalCode', 'StreetName']}, 'Communication': {'title': 'Communication', 'type': 'object', 'properties': {'MobileNumber': {'type': 'string', 'description': 'Cellular phone number', 'label': 'Cell Number'}, 'Telephone': {'type': 'string', 'description': 'Telephone number of the individual to be verified', 'label': 'Telephone'}}, 'required': []}, 'Passport': {'title': 'Passport', 'type': 'object', 'properties': {'Number': {'type': 'string', 'description': 'Passport number of the individual to be verified', 'label': 'Passport Number'}}, 'required': ['Number']}, 'CountrySpecific': {'title': 'CountrySpecific', 'type': 'object', 'properties': {'AU': {'title': 'AU', 'type': 'object', 'properties': {'PassportCountry': {'type': 'string', 'description': 'Passport Country (ISO 3166-1 alpha-2)', 'label': 'Passport Country'}, 'PassportNumber': {'type': 'string', 'description': 'Passport number of the individual to be verified', 'label': 'Passport Number'}}, 'required': ['PassportCountry', 'PassportNumber']}}}}])
@patch("requests.get")
def test_get_document_types(self, mock_requests):
mock_response_json = {
"DE": [
"DrivingLicence",
"IdentityCard",
"Passport",
"ResidencePermit"
]
}
response_obj = Response()
response_obj.__setattr__("status_code", 200)
response_obj.__setattr__("_content", json.dumps(
mock_response_json).encode("utf-8"))
mock_requests.return_value = response_obj
country_code = "DE"
reponse = VerificationService().get_document_types(country_code)
assert(reponse == {
"DE": ["DrivingLicence", "IdentityCard", "Passport", "ResidencePermit"]
})
| 63.388889 | 2,875 | 0.39216 | 826 | 12,551 | 5.889831 | 0.154964 | 0.061665 | 0.129496 | 0.041932 | 0.913875 | 0.892909 | 0.869065 | 0.869065 | 0.869065 | 0.869065 | 0 | 0.01418 | 0.494303 | 12,551 | 197 | 2,876 | 63.71066 | 0.752324 | 0 | 0 | 0.26738 | 0 | 0.032086 | 0.359095 | 0 | 0 | 0 | 0 | 0 | 0.010695 | 1 | 0.010695 | false | 0.080214 | 0.026738 | 0 | 0.042781 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
c722a307bf0026b9faacd97ed30e5d7aee4cfdfb | 1,290 | py | Python | synopsis.py | IloveKanade/k3modutil | fddf9b27325acdc994e6f66c7a3a6e88006f37b8 | [
"MIT"
] | null | null | null | synopsis.py | IloveKanade/k3modutil | fddf9b27325acdc994e6f66c7a3a6e88006f37b8 | [
"MIT"
] | 1 | 2022-03-23T07:01:00.000Z | 2022-03-23T07:01:00.000Z | synopsis.py | IloveKanade/k3modutil | fddf9b27325acdc994e6f66c7a3a6e88006f37b8 | [
"MIT"
] | 1 | 2021-09-02T08:58:00.000Z | 2021-09-02T08:58:00.000Z | import k3modutil
import pykit
k3modutil.submodules(pykit)
# {
# 'modutil': <module> pykit.modutil,
# ... ...
# }
k3modutil.submodule_tree(pykit)
# {
# 'modutil': {'module': <module> pykit.modutil,
# 'children': {
# 'modutil': {
# 'module': <module> pykit.modutil.modutil,
# 'children': None,
# },
# 'test': {
# 'module': <module> pykit.modutil.test,
# 'children': {
# 'test_modutil': {
# 'module': <module> pykit.modutil.test.test_modutil,
# 'children': None,
# },
# },
# }
# },
# }
# ... ...
# }
k3modutil.submodule_leaf_tree(pykit)
# {
# 'modutil': {
# 'modutil': <module> pykit.modutil.modutil,
# 'test': {'test_modutil': <module> pykit.modutil.test.test_modutil},
# }
# ... ...
# }
| 32.25 | 97 | 0.324031 | 67 | 1,290 | 6.134328 | 0.179104 | 0.291971 | 0.306569 | 0.233577 | 0.411192 | 0.160584 | 0 | 0 | 0 | 0 | 0 | 0.00678 | 0.542636 | 1,290 | 39 | 98 | 33.076923 | 0.689831 | 0.820155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.4 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
c728232b64e40754d5323143e6a293df7b9b431c | 9,705 | py | Python | tests/test_skel.py | vltr/middle-schema | 8913ea7c5b0952265db7e8b0a78489fa3bc96fad | [
"MIT"
] | 2 | 2018-09-01T08:45:21.000Z | 2019-02-06T15:35:22.000Z | tests/test_skel.py | vltr/middle-schema | 8913ea7c5b0952265db7e8b0a78489fa3bc96fad | [
"MIT"
] | 2 | 2018-07-27T20:33:15.000Z | 2018-09-07T15:43:16.000Z | tests/test_skel.py | vltr/middle-schema | 8913ea7c5b0952265db7e8b0a78489fa3bc96fad | [
"MIT"
] | null | null | null | import enum
import typing as t
import middle
import pytest
from middle.exceptions import InvalidType
from middle_schema.skel import Skeleton
from middle_schema.skel import translate
def test_simple_model():
class TestModel(middle.Model):
name = middle.field(type=str, description="The name", min_length=5)
skel = translate(TestModel)
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == TestModel
assert not skel.has_default_value
assert skel.name == "TestModel"
assert skel.description is None
assert not skel.nullable
assert skel.validator_data is None
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert skel.children is None
assert skel.type == str
assert not skel.has_default_value
assert skel.name == "name"
assert skel.description == "The name"
assert not skel.nullable
assert skel.validator_data.rules == {"min_length": 5}
assert skel.validator_data.type_check == str
def test_simple_model_with_typing():
class TestModel(middle.Model):
__description__ = "Test model for unit tests"
name = middle.field(
type=t.List[str], description="List of names", default=[]
)
skel = translate(TestModel)
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == TestModel
assert not skel.has_default_value
assert skel.name == "TestModel"
assert skel.description == "Test model for unit tests"
assert not skel.nullable
assert skel.validator_data is None
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == t.List[str]
assert skel.has_default_value
assert skel.default_value == []
assert skel.name == "name"
assert skel.description == "List of names"
assert not skel.nullable
assert skel.validator_data.rules is None
assert skel.validator_data.type_check == list
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert skel.children is None
assert skel.type == str
assert not skel.has_default_value
assert skel.validator_data is None
assert skel.name is None
assert skel.type_specific is None
assert skel.description is None
assert skel.nullable is False
def test_enum_choices():
@enum.unique
class TestIntEnum(enum.IntEnum):
TEST_1 = 1
TEST_2 = 2
TEST_3 = 3
class TestModel(middle.Model):
some_enum = middle.field(type=TestIntEnum)
skel = translate(TestModel)
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == TestModel
assert skel.name == "TestModel"
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == TestIntEnum
assert skel.name == "some_enum"
assert skel.description is None
assert not skel.nullable
assert skel.validator_data.rules is None
assert skel.validator_data.type_check == TestIntEnum
assert skel.type_specific is not None
assert skel.type_specific == {"choices": [1, 2, 3]}
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert skel.children is None
assert skel.type == int
assert not skel.has_default_value
assert skel.validator_data is None
assert skel.name is None
assert skel.type_specific is None
assert skel.description is None
assert skel.nullable is False
def test_dict_type():
class TestModel(middle.Model):
options = middle.field(
type=t.Dict[str, str],
description="Options for TestModel",
min_properties=1,
)
skel = translate(TestModel)
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == TestModel
assert skel.name == "TestModel"
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == t.Dict[str, str]
assert not skel.has_default_value
assert skel.name == "options"
assert skel.description == "Options for TestModel"
assert not skel.nullable
assert skel.validator_data.type_check == dict
assert skel.validator_data.rules == {"min_properties": 1}
assert skel.type_specific is None
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert skel.children is None
assert skel.type == str
assert not skel.has_default_value
assert skel.validator_data is None
assert skel.name is None
assert skel.type_specific is None
assert skel.description is None
assert skel.nullable is False
def test_invalid_dict_type():
class TestModel(middle.Model):
options = middle.field(type=t.Dict[float, str])
with pytest.raises(TypeError):
translate(TestModel)
def test_invalid_type_for_schema():
class TestModel(middle.Model):
name = middle.field(type=t.Tuple[str, int])
with pytest.raises(InvalidType):
translate(TestModel)
def test_optional_type():
class TestModel(middle.Model):
maybe_name = middle.field(type=t.Optional[str])
skel = translate(TestModel)
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == TestModel
assert skel.name == "TestModel"
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == t.Union[str, middle.compat.NoneType]
assert not skel.has_default_value
assert skel.name == "maybe_name"
assert skel.description is None
assert skel.nullable
assert skel.validator_data.type_check == (str, middle.compat.NoneType)
assert skel.validator_data.rules is None
assert skel.type_specific is None
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert skel.children is None
assert skel.type == str
assert not skel.has_default_value
assert skel.validator_data is None
assert skel.name is None
assert skel.type_specific is None
assert skel.description is None
assert skel.nullable is False
def test_union_type_nullable():
class TestModel(middle.Model):
lots_of_values = middle.field(type=t.Union[None, str, int])
skel = translate(TestModel)
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == TestModel
assert skel.name == "TestModel"
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert len(skel.children) == 2
assert skel.type == t.Union[middle.compat.NoneType, str, int]
assert not skel.has_default_value
assert skel.name == "lots_of_values"
assert skel.description is None
assert skel.nullable
assert skel.validator_data.type_check == (middle.compat.NoneType, str, int)
assert skel.validator_data.rules is None
assert skel.type_specific == {"any_of": True}
skel_str = skel.children[0]
assert isinstance(skel_str, Skeleton)
assert skel_str.children is None
assert skel_str.type == str
assert not skel_str.has_default_value
assert skel_str.validator_data is None
assert skel_str.name is None
assert skel_str.type_specific is None
assert skel_str.description is None
assert skel_str.nullable is False
skel_int = skel.children[1]
assert isinstance(skel_int, Skeleton)
assert skel_int.children is None
assert skel_int.type == int
assert not skel_int.has_default_value
assert skel_int.validator_data is None
assert skel_int.name is None
assert skel_int.type_specific is None
assert skel_int.description is None
assert skel_int.nullable is False
def test_union_type_not_nullable():
class TestModel(middle.Model):
lots_of_values = middle.field(type=t.Union[str, int, float])
skel = translate(TestModel)
assert isinstance(skel, Skeleton)
assert len(skel.children) == 1
assert skel.type == TestModel
assert skel.name == "TestModel"
skel = skel.children[0]
assert isinstance(skel, Skeleton)
assert len(skel.children) == 3
assert skel.type == t.Union[str, int, float]
assert not skel.has_default_value
assert skel.name == "lots_of_values"
assert skel.description is None
assert not skel.nullable
assert skel.validator_data.type_check == (str, int, float)
assert skel.validator_data.rules is None
assert skel.type_specific == {"any_of": True}
skel_str = skel.children[0]
assert isinstance(skel_str, Skeleton)
assert skel_str.children is None
assert skel_str.type == str
assert not skel_str.has_default_value
assert skel_str.validator_data is None
assert skel_str.name is None
assert skel_str.type_specific is None
assert skel_str.description is None
assert skel_str.nullable is False
skel_int = skel.children[1]
assert isinstance(skel_int, Skeleton)
assert skel_int.children is None
assert skel_int.type == int
assert not skel_int.has_default_value
assert skel_int.validator_data is None
assert skel_int.name is None
assert skel_int.type_specific is None
assert skel_int.description is None
assert skel_int.nullable is False
skel_float = skel.children[2]
assert isinstance(skel_float, Skeleton)
assert skel_float.children is None
assert skel_float.type == float
assert not skel_float.has_default_value
assert skel_float.validator_data is None
assert skel_float.name is None
assert skel_float.type_specific is None
assert skel_float.description is None
assert skel_float.nullable is False
| 29.49848 | 79 | 0.706955 | 1,340 | 9,705 | 4.98209 | 0.059701 | 0.190234 | 0.100659 | 0.127022 | 0.866986 | 0.818454 | 0.770821 | 0.761833 | 0.748652 | 0.721839 | 0 | 0.005495 | 0.212468 | 9,705 | 328 | 80 | 29.588415 | 0.867984 | 0 | 0 | 0.698842 | 0 | 0 | 0.031118 | 0 | 0 | 0 | 0 | 0 | 0.718147 | 1 | 0.034749 | false | 0 | 0.027027 | 0 | 0.150579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
c78a4048174967f54dcebcf0bf5b27c6ef739cbe | 11,582 | py | Python | project/tests/disable_test_api_as_admin.py | bhs-contests/barberscore-api | 7bd06b074c99903f031220f41b15a22474724044 | [
"BSD-2-Clause"
] | null | null | null | project/tests/disable_test_api_as_admin.py | bhs-contests/barberscore-api | 7bd06b074c99903f031220f41b15a22474724044 | [
"BSD-2-Clause"
] | 9 | 2020-06-05T22:17:17.000Z | 2022-03-12T00:04:00.000Z | project/tests/disable_test_api_as_admin.py | bhs-contests/barberscore-api | 7bd06b074c99903f031220f41b15a22474724044 | [
"BSD-2-Clause"
] | null | null | null |
# Third-Party
import pytest
from rest_framework import status
# Django
from django.urls import reverse
pytestmark = pytest.mark.django_db
def test_api_endpoint(admin_api_client, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('api-root')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_appearance_endpoint(admin_api_client, appearance, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('appearance-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('appearance-detail', args=(str(appearance.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_assignment_endpoint(admin_api_client, assignment, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('assignment-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('assignment-detail', args=(str(assignment.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_award_endpoint(admin_api_client, award, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('award-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('award-detail', args=(str(award.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_chart_endpoint(admin_api_client, chart, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('chart-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('chart-detail', args=(str(chart.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_contest_endpoint(admin_api_client, contest, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('contest-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('contest-detail', args=(str(contest.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_contestant_endpoint(admin_api_client, contestant, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('contestant-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('contestant-detail', args=(str(contestant.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_contender_endpoint(admin_api_client, contender, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('contender-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('contender-detail', args=(str(contender.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_convention_endpoint(admin_api_client, convention, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('convention-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('convention-detail', args=(str(convention.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_entry_endpoint(admin_api_client, entry, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('entry-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('entry-detail', args=(str(entry.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_grid_endpoint(admin_api_client, grid, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('grid-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('grid-detail', args=(str(grid.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_group_endpoint(admin_api_client, group, django_assert_max_num_queries):
with django_assert_max_num_queries(14):
path = reverse('group-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(14):
path = reverse('group-detail', args=(str(group.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_member_endpoint(admin_api_client, member, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('member-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('member-detail', args=(str(member.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_officer_endpoint(admin_api_client, officer, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('officer-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('officer-detail', args=(str(officer.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_outcome_endpoint(admin_api_client, outcome, django_assert_max_num_queries):
with django_assert_max_num_queries(10):
path = reverse('outcome-list')
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
with django_assert_max_num_queries(10):
path = reverse('outcome-detail', args=(str(outcome.id),))
response = admin_api_client.get(path)
assert response.status_code == status.HTTP_200_OK
def test_panelist_endpoint(admin_api_client, panelist, django_assert_max_num_queries):
    """List and detail panelist endpoints respond 200 within the query budget."""
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('panelist-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('panelist-detail', args=(str(panelist.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
def test_person_endpoint(admin_api_client, person, django_assert_max_num_queries):
    """List and detail person endpoints respond 200 within the query budget."""
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('person-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('person-detail', args=(str(person.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
def test_repertory_endpoint(admin_api_client, repertory, django_assert_max_num_queries):
    """List and detail repertory endpoints respond 200 within the query budget."""
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('repertory-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('repertory-detail', args=(str(repertory.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
def test_round_endpoint(admin_api_client, round, django_assert_max_num_queries):
    """List and detail round endpoints respond 200 within the query budget.

    NOTE(review): the ``round`` fixture name shadows the builtin; it is the
    fixture's registered name, so it cannot be renamed here.
    """
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('round-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('round-detail', args=(str(round.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
def test_score_endpoint(admin_api_client, score, django_assert_max_num_queries):
    """List and detail score endpoints respond 200 within the query budget."""
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('score-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('score-detail', args=(str(score.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
def test_session_endpoint(admin_api_client, session, django_assert_max_num_queries):
    """List and detail session endpoints respond 200 within the query budget."""
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('session-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('session-detail', args=(str(session.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
def test_song_endpoint(admin_api_client, song, django_assert_max_num_queries):
    """List and detail song endpoints respond 200 within the query budget."""
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('song-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('song-detail', args=(str(song.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
def test_venue_endpoint(admin_api_client, venue, django_assert_max_num_queries):
    """List and detail venue endpoints respond 200 within the query budget."""
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('venue-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('venue-detail', args=(str(venue.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
def test_user_endpoint(admin_api_client, user, django_assert_max_num_queries):
    """List and detail user endpoints respond 200 within the query budget."""
    with django_assert_max_num_queries(10):
        response = admin_api_client.get(reverse('user-list'))
        assert response.status_code == status.HTTP_200_OK
    with django_assert_max_num_queries(10):
        detail_path = reverse('user-detail', args=(str(user.id),))
        response = admin_api_client.get(detail_path)
        assert response.status_code == status.HTTP_200_OK
| 42.896296 | 90 | 0.725868 | 1,598 | 11,582 | 4.874844 | 0.039424 | 0.072914 | 0.127599 | 0.164056 | 0.818357 | 0.818357 | 0.818357 | 0.818357 | 0.818357 | 0.818357 | 0 | 0.024695 | 0.17838 | 11,582 | 269 | 91 | 43.055762 | 0.793926 | 0.001554 | 0 | 0.652778 | 0 | 0 | 0.051038 | 0 | 0 | 0 | 0 | 0 | 0.546296 | 1 | 0.111111 | false | 0 | 0.013889 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
c7abfd8d3bedfccdd773a9884869f2557ec8374b | 39,261 | py | Python | neurora/corr_cal.py | neurora/neurora.io | eff6b715c89daae499aeb75450a26657d8cd3e4c | [
"MIT"
] | 50 | 2019-08-29T06:09:30.000Z | 2022-03-20T02:24:36.000Z | neurora/corr_cal.py | neurora/neurora.io | eff6b715c89daae499aeb75450a26657d8cd3e4c | [
"MIT"
] | 3 | 2020-11-24T22:01:58.000Z | 2021-11-26T02:09:52.000Z | neurora/corr_cal.py | neurora/neurora.io | eff6b715c89daae499aeb75450a26657d8cd3e4c | [
"MIT"
] | 14 | 2019-09-11T08:50:57.000Z | 2022-01-04T09:19:47.000Z | # -*- coding: utf-8 -*-
' a module for calculating the similarity between two different modes\' data '
__author__ = 'Zitong Lu'
import numpy as np
from neurora.rdm_cal import bhvRDM, eegRDM, fmriRDM
from neurora.rdm_corr import rdm_correlation_spearman, rdm_correlation_pearson, rdm_correlation_kendall, \
rdm_similarity, rdm_distance
from neurora.stuff import show_progressbar
np.seterr(divide='ignore', invalid='ignore')
' a function for calculating the similarity between behavioral data and EEG-like data'
def bhvANDeeg_corr(bhv_data, eeg_data, sub_opt=1, chl_opt=0, time_opt=0, time_win=5, time_step=5, method="spearman",
                   rescale=False, permutation=False, iter=5000):

    """
    Calculate the similarities between behavioral data and EEG/MEG/fNIRS data.

    Parameters
    ----------
    bhv_data : array
        The behavioral data, shape [n_cons, n_subs, n_trials].
        n_cons, n_subs & n_trials represent the number of conditions, subjects
        and trials, respectively.
    eeg_data : array
        The EEG/MEG/fNIRS data, shape [n_cons, n_subs, n_trials, n_chls, n_ts].
        n_chls & n_ts represent the number of channels and time-points.
    sub_opt : int 0 or 1. Default is 1.
        If sub_opt=1, calculate per subject; if 0, over all subjects' data.
    chl_opt : int 0 or 1. Default is 0.
        If chl_opt=1, calculate per channel; if 0, over all channels' data.
    time_opt : int 0 or 1. Default is 0.
        If time_opt=1, calculate per time-window; if 0, over all time-points.
    time_win : int. Default is 5.
        Time-window width in time-points. Only used when time_opt=1.
    time_step : int. Default is 5.
        Step between successive time-windows. Only used when time_opt=1.
    method : string. Default is 'spearman'.
        'spearman', 'pearson' or 'kendall' correlations, 'similarity'
        (cosine similarity) or 'distance' (Euclidean distance).
    rescale : bool True or False.
        Rescale the RDM values (max-min, excluding the diagonal) or not.
    permutation : bool True or False. Default is False.
        Use a permutation test for the correlation methods or not.
    iter : int. Default is 5000.
        The number of permutation iterations.

    Returns
    -------
    corrs : array
        Every result is a [r, p] pair (p is 0 for 'similarity'/'distance').
        The leading axes follow the enabled options, in the order
        [n_subs][n_chls][n_time_windows], where
        n_time_windows = int((n_ts-time_win)/time_step)+1.
        With sub_opt=chl_opt=time_opt=0 a single pair of shape [2] is
        returned.
    """

    if len(np.shape(bhv_data)) != 3 or len(np.shape(eeg_data)) != 5:
        return "Invalid input!"

    def one_corr(rdm_x, rdm_y):
        # Compute one [r, p] pair between two RDMs according to `method`.
        # p stays 0 for the non-correlation metrics.
        if method == "spearman":
            return rdm_correlation_spearman(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        if method == "pearson":
            return rdm_correlation_pearson(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        if method == "kendall":
            return rdm_correlation_kendall(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        rp = np.zeros([2], dtype=np.float64)
        if method == "similarity":
            rp[0] = rdm_similarity(rdm_x, rdm_y, rescale=rescale)
        elif method == "distance":
            rp[0] = rdm_distance(rdm_x, rdm_y, rescale=rescale)
        return rp

    # get the number of subjects and channels
    subs = np.shape(bhv_data)[1]
    chls = np.shape(eeg_data)[3]
    # number of time-windows when time_opt=1
    ts = int((np.shape(eeg_data)[4] - time_win) / time_step) + 1

    print("\nComputing similarities")

    if sub_opt == 1:
        # per-subject behavioral RDMs and EEG RDMs
        bhv_rdms = bhvRDM(bhv_data, sub_opt=sub_opt)
        eeg_rdms = eegRDM(eeg_data, sub_opt=sub_opt, chl_opt=chl_opt, time_opt=time_opt, time_win=time_win,
                          time_step=time_step)
        if chl_opt == 0 and time_opt == 0:
            # sub_opt=1 & chl_opt=0 & time_opt=0
            corrs = np.zeros([subs, 2], dtype=np.float64)
            for i in range(subs):
                corrs[i] = one_corr(bhv_rdms[i], eeg_rdms[i])
        elif chl_opt == 0 and time_opt == 1:
            # sub_opt=1 & chl_opt=0 & time_opt=1
            corrs = np.zeros([subs, ts, 2], dtype=np.float64)
            total = subs * ts
            for i in range(subs):
                for j in range(ts):
                    show_progressbar("Calculating", (i * ts + j) / total * 100)
                    corrs[i, j] = one_corr(bhv_rdms[i], eeg_rdms[i, j])
        elif chl_opt == 1 and time_opt == 1:
            # sub_opt=1 & chl_opt=1 & time_opt=1
            corrs = np.zeros([subs, chls, ts, 2], dtype=np.float64)
            total = subs * chls * ts
            for i in range(subs):
                for j in range(chls):
                    for k in range(ts):
                        show_progressbar("Calculating", (i * chls * ts + j * ts + k) / total * 100)
                        corrs[i, j, k] = one_corr(bhv_rdms[i], eeg_rdms[i, j, k])
        else:
            # sub_opt=1 & chl_opt=1 & time_opt=0
            corrs = np.zeros([subs, chls, 2], dtype=np.float64)
            for i in range(subs):
                for j in range(chls):
                    corrs[i, j] = one_corr(bhv_rdms[i], eeg_rdms[i, j])
        print("\nComputing finished!")
        return corrs

    # sub_opt=0: one behavioral RDM over all subjects
    bhv_rdm = bhvRDM(bhv_data, sub_opt=sub_opt)
    eeg_rdms = eegRDM(eeg_data, sub_opt=sub_opt, chl_opt=chl_opt, time_opt=time_opt, time_win=time_win,
                      time_step=time_step)
    if chl_opt == 1 and time_opt == 1:
        # sub_opt=0 & chl_opt=1 & time_opt=1
        corrs = np.zeros([chls, ts, 2], dtype=np.float64)
        total = chls * ts
        for i in range(chls):
            for j in range(ts):
                show_progressbar("Calculating", (i * ts + j) / total * 100)
                corrs[i, j] = one_corr(bhv_rdm, eeg_rdms[i, j])
    elif chl_opt == 1:
        # sub_opt=0 & chl_opt=1 & time_opt=0
        corrs = np.zeros([chls, 2], dtype=np.float64)
        for i in range(chls):
            corrs[i] = one_corr(bhv_rdm, eeg_rdms[i])
    elif time_opt == 1:
        # sub_opt=0 & chl_opt=0 & time_opt=1
        corrs = np.zeros([ts, 2], dtype=np.float64)
        for i in range(ts):
            corrs[i] = one_corr(bhv_rdm, eeg_rdms[i])
    else:
        # sub_opt=0 & chl_opt=0 & time_opt=0: a single [r, p] pair.
        # (Bug fix: the original tested method == "spearson" here, so
        # method='spearman' silently returned all-zeros in this branch.)
        corrs = one_corr(bhv_rdm, eeg_rdms)
    print("\nComputing finished!")
    return corrs
' a function for calculating the similarity between behavioral data and fMRI data (searchlight) '
def bhvANDfmri_corr(bhv_data, fmri_data, ksize=[3, 3, 3], strides=[1, 1, 1], sub_opt=1, method="spearman",
                    rescale=False, permutation=False, iter=5000):

    """
    Calculate the similarities between behavioral data and fMRI data for
    searchlight.

    Parameters
    ----------
    bhv_data : array
        The behavioral data, shape [n_cons, n_subs, n_trials].
        n_cons, n_subs & n_trials represent the number of conditions, subjects
        and trials, respectively.
    fmri_data : array
        The fMRI data, shape [n_cons, n_subs, nx, ny, nz]. nx, ny, nz
        represent the size of the fMRI image.
    ksize : array or list [kx, ky, kz]. Default is [3, 3, 3].
        The size of the searchlight calculation unit (voxels along x, y, z;
        each should be odd).
    strides : array or list [sx, sy, sz]. Default is [1, 1, 1].
        The searchlight strides along the x, y, z axis.
    sub_opt : int 0 or 1. Default is 1.
        If sub_opt=1, return per-subject results; if 0, the average result.
    method : string. Default is 'spearman'.
        'spearman', 'pearson' or 'kendall' correlations, 'similarity'
        (cosine similarity) or 'distance' (Euclidean distance).
    rescale : bool True or False.
        Rescale the RDM values (max-min, excluding the diagonal) or not.
    permutation : bool True or False. Default is False.
        Use a permutation test for the correlation methods or not.
    iter : int. Default is 5000.
        The number of permutation iterations.

    Returns
    -------
    corrs : array
        If sub_opt=0, shape [n_x, n_y, n_z, 2]; if sub_opt=1, shape
        [n_subs, n_x, n_y, n_z, 2]. n_x, n_y, n_z are the numbers of
        searchlight units along each axis; the last axis holds a [r, p]
        pair (p is 0 for 'similarity'/'distance').
    """

    if len(np.shape(bhv_data)) != 3 or len(np.shape(fmri_data)) != 5:
        return "Invalid input!"

    def one_corr(rdm_x, rdm_y):
        # Compute one [r, p] pair between two RDMs according to `method`.
        # p stays 0 for the non-correlation metrics.
        if method == "spearman":
            return rdm_correlation_spearman(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        if method == "pearson":
            return rdm_correlation_pearson(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        if method == "kendall":
            return rdm_correlation_kendall(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        rp = np.zeros([2], dtype=np.float64)
        if method == "similarity":
            rp[0] = rdm_similarity(rdm_x, rdm_y, rescale=rescale)
        elif method == "distance":
            rp[0] = rdm_distance(rdm_x, rdm_y, rescale=rescale)
        return rp

    # one behavioral RDM over all subjects
    bhv_rdm = bhvRDM(bhv_data, sub_opt=0)
    # per-voxel-unit fMRI RDMs for searchlight
    fmri_rdms = fmriRDM(fmri_data, ksize=ksize, strides=strides, sub_opt=sub_opt)

    subs = np.shape(fmri_data)[1]
    # number of searchlight units along each axis
    n_x = int((np.shape(fmri_data)[2] - ksize[0]) / strides[0]) + 1
    n_y = int((np.shape(fmri_data)[3] - ksize[1]) / strides[1]) + 1
    n_z = int((np.shape(fmri_data)[4] - ksize[2]) / strides[2]) + 1

    print("\nComputing similarities")

    if sub_opt == 0:
        corrs = np.full([n_x, n_y, n_z, 2], np.nan)
        total = n_x * n_y * n_z
        for i in range(n_x):
            for j in range(n_y):
                for k in range(n_z):
                    show_progressbar("Calculating", (i * n_y * n_z + j * n_z + k) / total * 100)
                    # (leftover per-voxel debug print removed)
                    corrs[i, j, k] = one_corr(bhv_rdm, fmri_rdms[i, j, k])
        print("\nComputing finished!")
        return corrs

    # sub_opt=1
    corrs = np.full([subs, n_x, n_y, n_z, 2], np.nan)
    total = subs * n_x * n_y * n_z
    for sub in range(subs):
        for i in range(n_x):
            for j in range(n_y):
                for k in range(n_z):
                    show_progressbar("Calculating",
                                     (sub * n_x * n_y * n_z + i * n_y * n_z + j * n_z + k) / total * 100)
                    corrs[sub, i, j, k] = one_corr(bhv_rdm, fmri_rdms[sub, i, j, k])
    print("\nComputing finished!")
    return corrs
' a function for calculating the similarity EEG-like data and fMRI data (searchlight) '
def eegANDfmri_corr(eeg_data, fmri_data, chl_opt=0, ksize=[3, 3, 3], strides=[1, 1, 1], sub_opt=1, method="spearman",
                    rescale=False, permutation=False, iter=1000):

    """
    Calculate the similarities between EEG/MEG/fNIRS data and fMRI data for
    searchlight.

    Parameters
    ----------
    eeg_data : array
        The EEG/MEG/fNIRS data, shape [n_cons, n_subs, n_trials, n_chls, n_ts].
    fmri_data : array
        The fMRI data, shape [n_cons, n_subs, nx, ny, nz]. nx, ny, nz
        represent the size of the fMRI image.
    chl_opt : int 0 or 1. Default is 0.
        If chl_opt=1, calculate per channel; if 0, over all channels' data.
    ksize : array or list [kx, ky, kz]. Default is [3, 3, 3].
        The size of the searchlight calculation unit (voxels along x, y, z;
        each should be odd).
    strides : array or list [sx, sy, sz]. Default is [1, 1, 1].
        The searchlight strides along the x, y, z axis.
    sub_opt : int 0 or 1. Default is 1.
        If sub_opt=1, return per-subject results; if 0, the average result.
    method : string. Default is 'spearman'.
        'spearman', 'pearson' or 'kendall' correlations, 'similarity'
        (cosine similarity) or 'distance' (Euclidean distance).
    rescale : bool True or False.
        Rescale the RDM values (max-min, excluding the diagonal) or not.
    permutation : bool True or False. Default is False.
        Use a permutation test for the correlation methods or not.
    iter : int. Default is 1000.
        The number of permutation iterations.

    Returns
    -------
    corrs : array
        The last axis always holds a [r, p] pair (p is 0 for
        'similarity'/'distance'); n_x, n_y, n_z are the numbers of
        searchlight units per axis.
        chl_opt=1 & sub_opt=1 -> [n_subs, n_chls, n_x, n_y, n_z, 2]
        chl_opt=1 & sub_opt=0 -> [n_chls, n_x, n_y, n_z, 2]
        chl_opt=0 & sub_opt=1 -> [n_subs, n_x, n_y, n_z, 2]
        chl_opt=0 & sub_opt=0 -> [n_x, n_y, n_z, 2]
    """

    if len(np.shape(eeg_data)) != 5 or len(np.shape(fmri_data)) != 5:
        return "Invalid input!"

    def one_corr(rdm_x, rdm_y):
        # Compute one [r, p] pair between two RDMs according to `method`.
        # p stays 0 for the non-correlation metrics.
        if method == "spearman":
            return rdm_correlation_spearman(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        if method == "pearson":
            return rdm_correlation_pearson(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        if method == "kendall":
            return rdm_correlation_kendall(rdm_x, rdm_y, rescale=rescale, permutation=permutation, iter=iter)
        rp = np.zeros([2], dtype=np.float64)
        if method == "similarity":
            rp[0] = rdm_similarity(rdm_x, rdm_y, rescale=rescale)
        elif method == "distance":
            rp[0] = rdm_distance(rdm_x, rdm_y, rescale=rescale)
        return rp

    subs = np.shape(fmri_data)[1]
    chls = np.shape(eeg_data)[3]
    # number of searchlight units along each axis
    # (bug fix: the z-stride was previously read from strides[0])
    n_x = int((np.shape(fmri_data)[2] - ksize[0]) / strides[0]) + 1
    n_y = int((np.shape(fmri_data)[3] - ksize[1]) / strides[1]) + 1
    n_z = int((np.shape(fmri_data)[4] - ksize[2]) / strides[2]) + 1

    # RDMs for both modalities
    fmri_rdms = fmriRDM(fmri_data, sub_opt=sub_opt, ksize=ksize, strides=strides)
    eeg_rdms = eegRDM(eeg_data, sub_opt=sub_opt, chl_opt=chl_opt)

    print("\nComputing similarities")

    if chl_opt == 1 and sub_opt == 1:
        corrs = np.full([subs, chls, n_x, n_y, n_z, 2], np.nan)
        total = subs * chls * n_x * n_y * n_z
        done = 0
        for sub in range(subs):
            for i in range(chls):
                for j in range(n_x):
                    for k in range(n_y):
                        for l in range(n_z):
                            show_progressbar("Calculating", done / total * 100)
                            done = done + 1
                            # (bug fix: the 'distance' branch used to index
                            # fmri_rdms[sub, i, j, k] instead of [sub, j, k, l])
                            corrs[sub, i, j, k, l] = one_corr(eeg_rdms[sub, i], fmri_rdms[sub, j, k, l])
    elif chl_opt == 1:
        corrs = np.full([chls, n_x, n_y, n_z, 2], np.nan)
        total = chls * n_x * n_y * n_z
        done = 0
        for i in range(chls):
            for j in range(n_x):
                for k in range(n_y):
                    for l in range(n_z):
                        show_progressbar("Calculating", done / total * 100)
                        done = done + 1
                        # (bug fix: the 'distance' branch used to index
                        # fmri_rdms[i, j, k] instead of [j, k, l])
                        corrs[i, j, k, l] = one_corr(eeg_rdms[i], fmri_rdms[j, k, l])
    elif sub_opt == 1:
        corrs = np.full([subs, n_x, n_y, n_z, 2], np.nan)
        total = subs * n_x * n_y * n_z
        for i in range(subs):
            for j in range(n_x):
                for k in range(n_y):
                    for l in range(n_z):
                        show_progressbar("Calculating",
                                         (i * n_x * n_y * n_z + j * n_y * n_z + k * n_z + l) / total * 100)
                        corrs[i, j, k, l] = one_corr(eeg_rdms[i], fmri_rdms[i, j, k, l])
    else:
        corrs = np.full([n_x, n_y, n_z, 2], np.nan)
        total = n_x * n_y * n_z
        for i in range(n_x):
            for j in range(n_y):
                for k in range(n_z):
                    show_progressbar("Calculating", (i * n_y * n_z + j * n_z + k) / total * 100)
                    corrs[i, j, k] = one_corr(eeg_rdms, fmri_rdms[i, j, k])
    print("\nComputing finished!")
    return corrs
c7ba85c5bc69027abdf89241a8ee3cc4e8874003 | 11,349 | py | Python | src/asdp_solver.py | ORresearcher/A-New-Fast-and-Accurate-Heuristic-for-the-Automatic-Scene-Detection-Problem | a9aa202607c1a1d60f2cd9a551e786b527efdffd | [
"MIT"
] | 1 | 2021-08-01T16:52:05.000Z | 2021-08-01T16:52:05.000Z | src/asdp_solver.py | ORresearcher/A-New-Fast-and-Accurate-Heuristic-for-the-Automatic-Scene-Detection-Problem | a9aa202607c1a1d60f2cd9a551e786b527efdffd | [
"MIT"
] | null | null | null | src/asdp_solver.py | ORresearcher/A-New-Fast-and-Accurate-Heuristic-for-the-Automatic-Scene-Detection-Problem | a9aa202607c1a1d60f2cd9a551e786b527efdffd | [
"MIT"
] | null | null | null | from pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x07\x00\x42\x0d\x0d\x0a\x06\x29\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xbd\x0a\x00\x00\x00\x00\x00\x18\xff\xd8\xd6\xde\x31\x7d\x29\x1f\xac\x12\x5e\x86\x53\x49\xa6\xd8\x00\x00\x00\x00\x00\x00\x00\x00\xb9\x70\xba\xf4\xc1\x3e\xa2\xa2\x66\x6f\xba\xcd\x98\xa2\xd3\xbe\x0f\x4d\xbe\xc7\x12\xf2\xd5\x83\xdf\xce\x53\xea\x59\x96\xfd\xd1\x18\xf9\x61\xe0\x68\x73\xd4\x4a\x8f\xdc\xa7\xbc\x47\x34\xb8\x3c\x10\x4f\xbc\xca\x58\x8d\x6a\xb2\x82\x2c\xe2\xb1\x27\x52\x4d\x96\x2a\x0e\x24\x59\x83\x61\x72\x63\x92\x0b\x7e\xf6\xf2\x25\x48\xa6\x96\x5e\xea\x97\xbc\x5e\x41\x18\x29\x5d\x73\xb0\x94\xff\x37\x4b\xa2\x99\x4d\x02\xed\x2d\x5e\x1a\xc9\xb3\x71\xc5\xd0\x37\xda\x04\xec\x61\xbd\xb7\xf2\x7e\x14\xe9\x17\xed\x0a\x35\xa9\xcc\xcc\x81\xcd\x38\x96\x10\x15\x82\x7c\xa2\x58\x45\x8b\x04\x20\x0b\x53\xf0\x20\x5d\x2f\x2f\x53\x8c\xfc\xb5\x56\x6e\xf2\x06\x61\xaf\x02\x89\x91\xf6\x40\xe3\x93\x8f\xa2\x6c\x21\xb4\xd8\xc8\x6a\xfe\x6b\x46\x49\xad\xe6\xbc\x6e\x53\x0c\xe2\xbd\x42\x28\x50\xe5\xda\x7d\x6f\x5c\xb6\x62\x1f\xed\x2a\xc5\xdd\x1f\x9c\xcb\x47\xba\xda\x49\x5d\x8f\xd8\xdc\x69\xb8\x9a\x9e\x2d\xd5\x61\xcf\xbe\xcc\xe5\x1b\x97\x74\x10\x0e\x2d\xe5\x25\xa8\xc2\xb1\x8f\x2e\xf4\x17\xcf\x45\xac\x8e\x97\xd9\x0b\x42\xbb\x56\x75\x15\x22\x23\x89\xdc\x01\x86\x05\x07\x8a\xa6\x2d\x44\xba\x4e\x5f\xe2\xce\x63\x5d\x0c\xce\xc8\xea\xa6\x7f\x81\xb3\xe2\x9c\x16\x33\x19\x7c\x6d\xc3\xc7\xb9\x82\xc8\xb7\x05\x71\xe0\x71\xa9\xa9\xb4\xb4\x4b\x52\x13\x74\xe0\xf9\x02\x52\x55\x27\x49\xce\x91\x9c\xda\xbd\x35\xdc\x55\xfc\x59\x2e\xa0\x78\xc7\x61\x62\xff\x0f\x34\xed\x1d\xd6\x11\x6e\x85\xb0\x35\x8b\x86\x68\x6f\x97\xaa\x4f\xae\xd2\x17\xa6\xbc\x27\x6a\x79\xd3\xb6\x4a\xb4\x2a\x77\x56\xbe\xc3\xe5\xde\xfe\x0c\x8b\xef\x29\xbc\xe0\xc3\x3d\xb6\xcd\xb7\x0d\x1b\x2e\x9e\xd9\xc5\x16\x65\xb6\xc7\x2d\xbd\xa0\x72\x4d\xf8\x91\x6f\x33\x89\xd9\x02\xe2\xfa\xcd\x4b\x83\xa7\xa4\x2c\x72\x26\x85\xb9\xa4\x0a\x8f\x7b\x28\xfc\x24\x22\x0d\xb7\x38\x77\xdd\x44\x97\x17\x9a\xba\xf7\xb4\x
13\x1c\x53\x70\x17\xbb\xe0\x25\x29\x20\x6e\xc2\x78\x94\x76\x70\xb7\xe9\xb0\xc4\x0b\xbd\x02\x12\x23\xaa\x88\x69\xad\x01\xb5\x26\x15\xb2\x05\xc9\x69\xf2\x1e\xd9\x2b\xef\xae\xf4\x0e\x74\xc0\xc3\xdc\x9f\xb9\xbb\x19\xc8\x88\x01\x03\xcc\x35\xe7\xe4\xe6\x6a\xfc\xde\xc7\xe9\xeb\xe5\x15\x36\x58\x94\xaa\x41\xf8\x2d\x1a\x29\xbe\xe4\x84\x3c\x46\x19\x7a\x75\x59\x66\x5d\x22\x24\x93\x3e\x6e\x17\x99\xf3\x4e\x46\x9b\x78\xac\x23\xc9\xf7\xa4\x3f\xef\xbe\xe4\x60\x62\xe8\x72\x9c\x35\x3b\x6d\xc6\x8d\x6b\x25\x29\x36\xbd\x90\x7e\xf6\x30\x12\x59\x2d\x00\xa2\x85\x53\x82\xe3\x59\xc1\x09\x53\x29\xdb\x10\xe7\x2d\x71\x45\xf9\xa7\xe1\x55\xc7\x61\xa5\xb2\xfd\xe9\xff\xe1\x59\xb6\xfa\x75\xc1\x1e\x6f\xc2\xd5\xbe\x52\xf0\x8b\x6e\x56\x80\x75\xb3\x28\x0f\x5e\xdd\xc0\x4c\xe2\x2b\x16\xb8\x79\x12\xb9\xfc\xa0\x6d\xc9\x89\xea\xec\xf1\x21\x38\xfe\x6f\x5d\x3d\x2a\x45\xd9\x8c\x51\xab\x35\xef\x79\x1d\x5e\xfc\x78\xaa\x13\x28\x37\xbe\x8b\xe1\x26\xb1\x1f\x59\x83\x16\x2d\x24\xa6\x85\xf9\xc3\xa4\x23\x12\x04\xf1\xf6\xea\xca\xc3\x20\xd2\xaf\x88\xb7\x92\xb4\xc1\xc5\x89\x18\x83\x68\xca\xec\xd2\x8c\xc4\x9a\x2e\x48\x97\xff\x07\x0f\x65\x2a\xfa\x3b\x2e\xc9\xae\x8e\xf6\xc4\x64\xca\x5a\xfe\x4f\x0d\x79\x5d\x58\xb8\x2d\x7e\x9f\xb2\x34\xaf\x23\x98\x13\x19\xd7\x73\x1a\x1c\xc6\xd2\x3b\xc1\x21\xb1\x63\xe9\x2a\xf6\x20\xbb\x56\x27\x21\xb2\x3f\xd2\x26\x54\xab\x52\xb3\x8b\x7e\x6d\x61\x85\xfa\x17\xa5\xef\x86\x85\x70\x1a\xe7\x74\x47\xf6\xb4\x09\x70\xad\x21\xb6\x10\x9a\x3c\x9d\x16\x3a\xfd\xf2\x2d\xaa\x7e\x4a\x00\x93\x49\x80\x79\x92\xd3\xbc\x23\x12\x35\x6c\x86\xed\xe5\x8c\x6b\x13\x3c\x37\x19\x26\xe0\x72\x2b\xb6\x90\x33\xfd\xab\x81\x2b\xf2\xf8\x79\x41\x60\x96\xde\xf6\x89\x37\xbb\x33\x01\x6c\x84\x13\x22\x6e\x1c\x6c\x86\x07\x1c\xd2\x3f\x25\xe9\x21\x62\x36\xd1\xff\xaa\xca\x70\x56\x80\xec\x6c\x66\xa3\x11\xfb\x5d\xf2\x35\xd5\x75\xcb\x08\xde\xb3\x61\x5e\xe1\xc3\x80\x60\xfe\xce\x80\x29\x31\x44\xe6\x22\x80\xcc\xce\xa0\xe1\x3a\xf5\xc4\x75\xac\x1c\xe4\x50\x41\x64\xf2\x1d\x59\xf2\xbe\xde\xbe\x0e\x8f\x96\x18\x9d\x05\xf9\xea\x33\xdf\xb8\x59\x38\x46\xe8\x
12\x4b\xbc\xdb\xf2\x99\x28\x21\x1f\x8f\x0b\x95\x2d\xd1\xd5\xa3\x64\x30\x9a\xc1\x77\x26\xbd\xbd\xb7\xcb\x5c\xd4\x1d\xf5\x39\xe7\xc6\xed\x26\xb1\x3b\x1d\xc5\x89\xc1\xf7\x8b\x00\x84\x79\x0b\x56\x4b\x84\xe5\xd4\x13\xfd\xd0\x36\x11\xde\xe8\xdf\x1a\x17\x99\x8f\x27\xcb\x3d\x5d\xb9\xd0\x7b\xac\x80\x83\x32\x60\x75\xee\xe8\xe9\x20\x2a\x1f\xf6\x9a\x0e\xa9\x27\x0e\x98\x85\x3a\xc8\x23\x5c\x76\x21\x01\xff\xaf\xda\x2e\x0a\x19\xdd\x80\x2a\xbc\xb4\x9e\xbb\x1e\xcb\x2c\x6c\xd6\xa3\x30\xfa\xc6\x14\x48\xbe\x0a\x6c\x97\x3e\x94\x99\x54\x5d\x0c\x0e\x6f\x8a\x0a\x96\x04\x78\x85\xfc\x9c\x76\xde\xde\x5b\xed\x92\x03\xb9\x4a\x8f\x01\xce\x33\x70\x5e\x65\x93\x1e\x14\x4f\x1e\xe8\xaa\x50\xd0\x47\xbf\xf8\xc0\xf0\x4d\x7b\xaa\x31\x88\x6d\xc9\x82\x76\x19\x03\x49\xa1\x0c\x0b\x90\x21\x26\x76\xd3\x89\xa2\xd4\x6c\xb9\xcf\x8f\x2a\x37\xa1\xd8\x44\x3b\xfa\xbb\x92\xe3\x8d\x64\xf3\x1e\x0d\xec\x7c\x66\x54\x68\xa4\x1b\x4e\x44\x41\x68\x09\x08\xb0\xed\x42\x57\xa0\x83\x40\x64\x8c\xb0\x0b\xd2\x3e\xc2\x7a\x34\x15\x6b\x31\x4b\xa8\x3c\x81\xe1\x58\xb3\xb5\x93\xc0\xc3\x24\x4f\x76\x14\xd9\x0c\x95\x9b\x3d\xd1\xa4\x0a\x82\xd1\x58\xc2\x8c\xc4\x37\x14\xe8\x36\x8b\xa9\x72\x61\x33\x3d\x56\x2c\xaf\xc8\xa8\x95\x26\x15\x3e\xaf\xb1\xcc\x5d\x24\x05\x17\x05\x85\xb4\xde\x8d\xc4\x68\x38\x9b\xaa\x42\x5e\xef\x28\x0d\xac\xa1\x93\x6c\x5e\x62\x54\x27\x72\xd8\x3b\x57\x35\xfb\x64\x14\x81\xd4\xf1\x57\xad\xa7\x6d\xc7\x6d\x52\xe6\x8d\xfd\x53\xaf\x61\x54\x70\x58\xe0\x99\xa5\x5c\xc5\xd9\xcc\xad\x8a\xf6\xe4\x8c\xd7\x59\x6d\x28\x3c\x62\xe9\x22\xfa\x7e\x6a\x46\x77\xb9\x65\xf7\x55\xd9\x84\x42\x4c\x32\x5e\x30\x8f\x0d\x8d\xe4\xce\xbc\x50\xab\xb4\x33\x40\xb0\xd7\x1d\xa5\xb7\x9f\x13\x3d\x30\xbe\x84\x66\xb9\x88\x17\x1d\x11\x57\x1b\x76\xc1\xb8\xf3\xab\xd7\x9a\xfd\xb8\x87\xce\x7c\x00\x9f\x54\x9f\x95\x99\x1e\x77\x65\x3a\x0b\xfb\xcc\xb1\x7d\x07\x1e\x0e\xcb\xce\x48\x41\xe0\xd6\x06\x88\x5c\xd3\xf5\xbe\xb5\xf1\x29\x41\x85\x51\x00\xe9\x92\x1f\x97\xac\x71\x01\xb8\x64\xcf\xcc\x50\x20\xda\x84\x1b\xf4\x69\x84\x52\x54\xa1\x01\xe6\x72\x2a\xbd\x3e\x38\xcc\x87\xba\x51\x5f\x
d8\x1e\x32\xbd\x3e\x3d\xa9\x04\x15\x9a\xd4\x33\x50\xef\xb0\x83\x6e\x3c\xdd\x06\xb1\x44\x32\x55\xc0\x2d\x82\xfb\xb2\xe1\x0d\x53\xa2\x20\xed\x91\xd0\xf4\xfe\x31\x24\xf3\x6a\xd2\x2f\x10\xf1\x36\xab\xd2\x77\xaf\x08\x18\x99\xeb\xb9\x01\x06\x6f\x3a\x94\xc6\xd5\xcf\x84\x93\x89\x19\x95\xc5\x28\x0f\x46\x09\x31\xbd\xfc\x88\xe2\x8b\xb7\xd1\xab\x20\xb4\xa3\xa0\xbd\xe2\xcf\x57\x2d\x04\x69\xe9\x9d\xf3\x99\xf2\x85\x04\x6a\xb1\x8d\xb6\x06\x90\x70\x60\xfd\x2f\x35\x84\x57\x44\x43\x29\x60\x74\x83\xd0\x76\xd0\x15\x71\xa0\x0e\x46\xca\x4e\x52\xa4\x1f\x33\x4c\x11\xbe\x54\xe6\xe4\x39\x2b\x43\xe1\x36\x06\xe1\xa6\xda\xfb\x36\xf3\x60\x3d\xc7\x00\xc5\x90\xcb\x69\x98\x90\xb9\x8e\xa3\xa1\xda\x99\x74\x07\x03\x3e\x7c\x89\x4d\x74\xb2\xe5\x4d\xfb\xaf\x45\x38\x80\xec\x40\x24\x00\x2a\xce\x37\xe5\x4a\x28\x35\x01\xda\x25\xe4\xe0\xfc\x8e\xca\x94\x90\xb5\x9c\xbd\x84\xb0\x52\x29\x9a\xb1\xa9\xc8\xd1\x7d\x96\x7b\x20\xcd\xe4\xcc\x4e\xbd\xbf\x46\x05\x8b\xc4\x74\x5a\x68\x17\xed\x6a\x87\xe7\xf8\xa9\x8b\x33\x94\x28\x33\x5d\x20\xdf\x2c\xb0\x08\x5b\x18\xb9\xf0\x39\xf9\xe3\xa1\x5b\xf0\xa0\xa7\xb1\x44\x9d\xb5\xaa\x57\x4b\xf3\x8d\x6b\xf4\xa8\xe6\xee\x68\xb8\x74\x4a\xb7\x1b\x51\x5f\x86\x37\x50\x16\x73\x1a\x2b\x09\xdc\x7d\xa1\xd2\xdf\x95\xe7\x91\xa9\x67\x56\x22\x79\x77\x84\x88\x15\xce\xf7\x44\x9b\x42\xb9\xa9\x00\x69\x32\x04\x2c\x16\x19\xdd\x58\x36\x55\x14\x37\xfa\xb4\xfa\xea\xa0\xbd\x43\xb0\x31\x8d\x22\x63\x88\x20\x16\x18\x20\x0e\x1c\xf7\xe2\xf0\x3a\x4f\x7e\xd8\xb1\x52\x43\xe3\x09\x50\xff\xdd\x07\xff\x84\x2f\x90\x68\x5b\x3b\x4a\x84\xdb\x05\xfd\x63\x3d\x51\xd8\x39\x77\xd8\xea\xf9\x4b\x4b\x5c\xb8\x81\x1f\x27\x1a\x45\x06\x34\xb4\x5c\x02\x7d\x8a\x57\x98\x2a\xf0\xec\xf1\x80\x6e\x3b\x60\x0c\xc0\xc3\x73\x96\x9b\xd2\x07\x1a\x4e\x4c\xcc\xc3\x45\xee\x20\x50\x7c\xee\x51\x8f\xd7\xbb\x2b\x6d\x02\x9d\x78\x89\x3b\xf1\xf0\x3e\x69\x06\x66\x6f\x82\xbc\x71\x42\xb4\x38\x47\x66\xe7\x87\x98\x85\x4a\x0f\x2d\x8e\x6f\xb5\x4f\xcf\x05\xaf\x90\x3e\x41\x1e\x46\xff\xd3\xa8\xae\xce\x43\x54\xda\xe7\x16\x47\x83\x61\x52\x34\xc9\x7c\x52\x0d\xd5\xc9\x9b\x
88\xf2\xab\x77\x6a\xf1\xcd\xe9\xc1\x74\x34\xb4\x92\xfe\x10\xc6\x43\xf2\xcc\xb6\x02\xb2\x5c\x11\xf4\xff\xf4\x8e\x19\x97\xf2\x4c\x0a\x38\xe4\x8c\xa8\x08\xf5\x47\x9d\x50\x7a\x10\x66\xad\x9d\x71\xfe\x40\xa4\xe4\xc0\xdc\x71\xa2\x67\x30\xd4\x8f\xd7\x26\x80\x45\x45\x74\x12\x49\x2f\xf5\x4a\x16\xef\xbf\xed\xe9\xd7\xfe\x1b\x14\xeb\x30\x26\x80\x07\x19\xf7\x78\xe9\x15\x13\xd1\xe6\xe5\x07\xa7\x6b\xee\x83\x2f\xb5\x44\xb9\x4c\x4a\xdf\xf2\x43\x18\xcf\x1d\x35\x97\xb7\x47\xee\x7a\xeb\xe8\xe2\x92\x82\x46\x7d\x55\xbc\x10\xbd\x23\x15\xb2\x38\xe6\xd0\x43\x7a\x02\x41\xbf\x1a\x14\xab\x25\x38\x99\x6b\x5d\xef\x68\xa3\x1c\xea\x76\x87\x94\xd5\x36\xc4\xf3\x87\x4b\x7d\xd9\x5b\x14\xbd\x56\xc9\xea\x6f\xad\x27\x17\xc5\xcd\x8b\x80\x8d\xcc\x0b\xfc\x77\x85\x50\xf6\x9e\x47\x5f\x88\x27\xd2\x7f\xd9\x40\x4a\x01\xaf\x6b\x8d\xf5\xd5\x08\x0b\x14\x2c\xa5\xe2\x7f\x55\x84\xca\x72\xc8\x2a\x2b\x7c\xb9\x1b\x71\xd6\xb6\xd9\xce\x4c\x2d\x1a\xfe\x94\xc0\xa6\x0a\xe2\x2d\xfb\xa5\x86\x7c\x14\xea\xd7\x12\x6f\xa2\x9e\xa7\xff\x94\x02\xe5\xab\x03\x4d\x86\x35\x26\x6a\xdf\xeb\xb2\x0f\x56\xee\xd0\x8e\xb2\x9f\x56\x6f\x0c\x9f\x75\xbd\x8a\x74\xf8\xc5\x7f\x9c\xea\xa1\x8d\xe2\x2e\x28\x7a\xf2\xd8\x08\xf6\x2b\x1c\xc6\x49\xaa\x21\x61\xf5\xac\xd1\xa7\xc5\x88\x97\x58\x06\x9d\xa6\x9f\xde\x91\x5d\x97\x67\x68\x16\x53\x8e\x6b\xc5\xc6\x9e\xb8\x48\xa1\xc3\xb9\xaa\xf5\x2d\xa5\x96\x57\x6f\x16\x2d\xec\xb7\xe9\xc4\x00\x58\xed\xc8\xaf\xb8\xe1\x8e\xfb\x83\x84\xf5\xd1\x13\xfc\xc4\x48\xc7\xc0\x8d\x43\x45\xc3\x19\x43\x27\xed\xf9\x61\x33\x55\x61\xf2\xd7\xc7\x0b\xb9\x24\x69\x4c\xb3\x81\xff\xe0\x60\xe4\x49\xc9\x9f\x78\x95\x12\x34\xae\xb3\x20\x04\x44\x4b\x35\xfa\x84\xf9\x3f\xa6\x85\x58\xab\x44\x02\xf5\x87\x95\x72\x31\x44\xbc\x2d\xe9\xbd\x51\x22\x50\x61\xb2\x33\xf3\xb9\xd4\x72\x24\x2e\x49\xe6\x19\x51\x7d\x02\x7c\x0d\xaa\xc4\x3f\x15\xbe\x32\x06\xd5\x98\x28\x6b\xdc\xc6\xdc\xd8\x56\xdd\xfc\x23\x45\x55\x53\xc7\x4d\x4f\xbd\xe4\x30\xe8\x07\x6f\xe2\x0a\xcc\x30\x7d\xdd\xc7\x98\x87\x1b\xff\xad\x2f\x51\xc2\x12\xa9\xbb\x1a\x59\xb3\x43\xeb\x4d\xfc\x3c\xa4\x4e\xdd\xb2\x
58\x5f\x7a\x2c\xcd\xad\xf0\x96\x1e\x11\x8a\x85\x46\x8d\x85\x51\x62\xc5\x15\xe0\x91\x62\x9c\x50\x10\xe9\xf7\x92\x2d\x21\x7e\xc7\x1d\xbe\x46\x61\xb9\x6d\x22\xe1\x49\x1c\xf9\x34\x15\xe7\xe1\x83\xb4\x85\xdc\x12\x74\xc3\x7d\x64\x15\x97\x61\x65\x67\x8d\x66\x16\x31\x25\x5d\x5d\x20\xa9\x94\xe6\x15\x9e\x85\x10\xef\x82\xf7\xd4\x50\xdd\x66\x1c\xfe\x48\xf5\x23\xff\x3d\xb7\x9e\x2c\x59\xc2\xf0\x8c\x40\xab\x5b\x16\xab\x35\x13\xab\x18\xa5\x38\x57\x9b\x0e\x1a\xe9\x63\x19\xc6\x04\xd2\x26\x90\xb9\x90\xef\xe9\x96\x44\x4e\xf4\x90\x1d\x38\x98\xb4\xea\xd2\xc0\xc5\x45\x0b\xe4\x7d\xb7\x0a\xaf\xb0\xbc\x1c\xaf\x4c\x35\xa7\xe5\x35\x35\xe2\xfc\xe0\xc4\xb0\xc9\x1b\x3c\x38\x54\x91\x3b\xec\x75\x86\x9c\x8b\xbe\xde\x25\x4d\x0e\xe5\x47\xb5\x13\x76\x50\xe5\xf6\x93\xbd\x68\x9f\x89\x85\xf7\x93\xfb\x0b\x8f\xb2\x05\xf0\x53\x13\x78\xff\xcb\xcc\x1b\xc9\xf9\xc3\x0c\x44\x84\x80\xa2\xe7\x32\xeb\x83\x2b\x32\x98\x1c\x90\xd6\xcc\x49\xac\x61\xe3\x65\xb0\x90\xb3\x68\x06\xab\xcf\x54\x50\x76\x2f\x99\xac\xf9\x82\x45\x55\xe2\x28\x29\x3f\x71\x53\xf2\x17\x90\xdb\x83\xca\x42\xad\xa5\xca\x48\x7f\xf4\xa6\x2d\xce\xf7\xa0\x7f\x4e\x46\xd9\xbd\x7a\xfe\x58\x38\x30\x4a\x1c\xb3\x5a\x9c\x31\xbe\x09\x3a\x3e\xbb\x83\x38\x8c\x20\xd9\xc2\x9a\xf2\xcc\x79\x7b\xf0\x98\xbf\xd9\xed\xf7\x3e\xd5\xc9\x7f\x2b\x5a\xe5\xd4\xe0\xf4\x5c\xd2\x22\x8d', 2) | 3,783 | 11,291 | 0.750639 | 2,825 | 11,349 | 3.010619 | 0.093805 | 0.013404 | 0.013757 | 0.011287 | 0.004586 | 0.002822 | 0.002822 | 0 | 0 | 0 | 0 | 0.3132 | 0.000705 | 11,349 | 3 | 11,291 | 3,783 | 0.436734 | 0 | 0 | 0 | 0 | 0.333333 | 0.991366 | 0.991366 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 11 |
c7bc571a29c45a40917dc7c904d5dbf68bfd7174 | 32 | py | Python | contrib/fypp-3.0-7895a7e-20200112/test/include/inimod2.py | dvuckovic/fckit | a83cfe6d4ed22c954548dd7c31e1fbad3cd2f908 | [
"Apache-2.0"
] | 125 | 2016-05-13T20:30:35.000Z | 2022-02-07T13:40:57.000Z | contrib/fypp-3.0-7895a7e-20200112/test/include/inimod2.py | dvuckovic/fckit | a83cfe6d4ed22c954548dd7c31e1fbad3cd2f908 | [
"Apache-2.0"
] | 20 | 2019-01-29T16:21:54.000Z | 2020-06-22T19:09:38.000Z | contrib/fypp-3.0-7895a7e-20200112/test/include/inimod2.py | dvuckovic/fckit | a83cfe6d4ed22c954548dd7c31e1fbad3cd2f908 | [
"Apache-2.0"
] | 19 | 2017-12-01T11:36:28.000Z | 2022-03-02T18:28:53.000Z | def get_version():
return 2
| 10.666667 | 18 | 0.65625 | 5 | 32 | 4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 0.25 | 32 | 2 | 19 | 16 | 0.791667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
c7c128e73f5301a13f7e969862d25e35699a9a3a | 69,013 | py | Python | sdk/python/pulumi_aws/cognito/user_pool_client.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-10T16:33:40.000Z | 2021-11-10T16:33:40.000Z | sdk/python/pulumi_aws/cognito/user_pool_client.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cognito/user_pool_client.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['UserPoolClientArgs', 'UserPoolClient']
@pulumi.input_type
class UserPoolClientArgs:
def __init__(__self__, *,
user_pool_id: pulumi.Input[str],
access_token_validity: Optional[pulumi.Input[int]] = None,
allowed_oauth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_oauth_flows_user_pool_client: Optional[pulumi.Input[bool]] = None,
allowed_oauth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
analytics_configuration: Optional[pulumi.Input['UserPoolClientAnalyticsConfigurationArgs']] = None,
callback_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
default_redirect_uri: Optional[pulumi.Input[str]] = None,
enable_token_revocation: Optional[pulumi.Input[bool]] = None,
explicit_auth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
generate_secret: Optional[pulumi.Input[bool]] = None,
id_token_validity: Optional[pulumi.Input[int]] = None,
logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
prevent_user_existence_errors: Optional[pulumi.Input[str]] = None,
read_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
refresh_token_validity: Optional[pulumi.Input[int]] = None,
supported_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
token_validity_units: Optional[pulumi.Input['UserPoolClientTokenValidityUnitsArgs']] = None,
write_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a UserPoolClient resource.
:param pulumi.Input[str] user_pool_id: User pool the client belongs to.
:param pulumi.Input[int] access_token_validity: Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_oauth_flows: List of allowed OAuth flows (code, implicit, client_credentials).
:param pulumi.Input[bool] allowed_oauth_flows_user_pool_client: Whether the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_oauth_scopes: List of allowed OAuth scopes (phone, email, openid, profile, and aws.cognito.signin.user.admin).
:param pulumi.Input['UserPoolClientAnalyticsConfigurationArgs'] analytics_configuration: Configuration block for Amazon Pinpoint analytics for collecting metrics for this user pool. Detailed below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] callback_urls: List of allowed callback URLs for the identity providers.
:param pulumi.Input[str] default_redirect_uri: Default redirect URI. Must be in the list of callback URLs.
:param pulumi.Input[bool] enable_token_revocation: Enables or disables token revocation.
:param pulumi.Input[Sequence[pulumi.Input[str]]] explicit_auth_flows: List of authentication flows (ADMIN_NO_SRP_AUTH, CUSTOM_AUTH_FLOW_ONLY, USER_PASSWORD_AUTH, ALLOW_ADMIN_USER_PASSWORD_AUTH, ALLOW_CUSTOM_AUTH, ALLOW_USER_PASSWORD_AUTH, ALLOW_USER_SRP_AUTH, ALLOW_REFRESH_TOKEN_AUTH).
:param pulumi.Input[bool] generate_secret: Should an application secret be generated.
:param pulumi.Input[int] id_token_validity: Time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] logout_urls: List of allowed logout URLs for the identity providers.
:param pulumi.Input[str] name: Name of the application client.
:param pulumi.Input[str] prevent_user_existence_errors: Choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY`, those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] read_attributes: List of user pool attributes the application client can read from.
:param pulumi.Input[int] refresh_token_validity: Time limit in days refresh tokens are valid for.
:param pulumi.Input[Sequence[pulumi.Input[str]]] supported_identity_providers: List of provider names for the identity providers that are supported on this client. Uses the `provider_name` attribute of `cognito.IdentityProvider` resource(s), or the equivalent string(s).
:param pulumi.Input['UserPoolClientTokenValidityUnitsArgs'] token_validity_units: Configuration block for units in which the validity times are represented in. Detailed below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] write_attributes: List of user pool attributes the application client can write to.
"""
pulumi.set(__self__, "user_pool_id", user_pool_id)
if access_token_validity is not None:
pulumi.set(__self__, "access_token_validity", access_token_validity)
if allowed_oauth_flows is not None:
pulumi.set(__self__, "allowed_oauth_flows", allowed_oauth_flows)
if allowed_oauth_flows_user_pool_client is not None:
pulumi.set(__self__, "allowed_oauth_flows_user_pool_client", allowed_oauth_flows_user_pool_client)
if allowed_oauth_scopes is not None:
pulumi.set(__self__, "allowed_oauth_scopes", allowed_oauth_scopes)
if analytics_configuration is not None:
pulumi.set(__self__, "analytics_configuration", analytics_configuration)
if callback_urls is not None:
pulumi.set(__self__, "callback_urls", callback_urls)
if default_redirect_uri is not None:
pulumi.set(__self__, "default_redirect_uri", default_redirect_uri)
if enable_token_revocation is not None:
pulumi.set(__self__, "enable_token_revocation", enable_token_revocation)
if explicit_auth_flows is not None:
pulumi.set(__self__, "explicit_auth_flows", explicit_auth_flows)
if generate_secret is not None:
pulumi.set(__self__, "generate_secret", generate_secret)
if id_token_validity is not None:
pulumi.set(__self__, "id_token_validity", id_token_validity)
if logout_urls is not None:
pulumi.set(__self__, "logout_urls", logout_urls)
if name is not None:
pulumi.set(__self__, "name", name)
if prevent_user_existence_errors is not None:
pulumi.set(__self__, "prevent_user_existence_errors", prevent_user_existence_errors)
if read_attributes is not None:
pulumi.set(__self__, "read_attributes", read_attributes)
if refresh_token_validity is not None:
pulumi.set(__self__, "refresh_token_validity", refresh_token_validity)
if supported_identity_providers is not None:
pulumi.set(__self__, "supported_identity_providers", supported_identity_providers)
if token_validity_units is not None:
pulumi.set(__self__, "token_validity_units", token_validity_units)
if write_attributes is not None:
pulumi.set(__self__, "write_attributes", write_attributes)
@property
@pulumi.getter(name="userPoolId")
def user_pool_id(self) -> pulumi.Input[str]:
"""
User pool the client belongs to.
"""
return pulumi.get(self, "user_pool_id")
@user_pool_id.setter
def user_pool_id(self, value: pulumi.Input[str]):
pulumi.set(self, "user_pool_id", value)
@property
@pulumi.getter(name="accessTokenValidity")
def access_token_validity(self) -> Optional[pulumi.Input[int]]:
"""
Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
"""
return pulumi.get(self, "access_token_validity")
@access_token_validity.setter
def access_token_validity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "access_token_validity", value)
@property
@pulumi.getter(name="allowedOauthFlows")
def allowed_oauth_flows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of allowed OAuth flows (code, implicit, client_credentials).
"""
return pulumi.get(self, "allowed_oauth_flows")
@allowed_oauth_flows.setter
def allowed_oauth_flows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_oauth_flows", value)
@property
@pulumi.getter(name="allowedOauthFlowsUserPoolClient")
def allowed_oauth_flows_user_pool_client(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
"""
return pulumi.get(self, "allowed_oauth_flows_user_pool_client")
@allowed_oauth_flows_user_pool_client.setter
def allowed_oauth_flows_user_pool_client(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allowed_oauth_flows_user_pool_client", value)
@property
@pulumi.getter(name="allowedOauthScopes")
def allowed_oauth_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of allowed OAuth scopes (phone, email, openid, profile, and aws.cognito.signin.user.admin).
"""
return pulumi.get(self, "allowed_oauth_scopes")
@allowed_oauth_scopes.setter
def allowed_oauth_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_oauth_scopes", value)
@property
@pulumi.getter(name="analyticsConfiguration")
def analytics_configuration(self) -> Optional[pulumi.Input['UserPoolClientAnalyticsConfigurationArgs']]:
"""
Configuration block for Amazon Pinpoint analytics for collecting metrics for this user pool. Detailed below.
"""
return pulumi.get(self, "analytics_configuration")
@analytics_configuration.setter
def analytics_configuration(self, value: Optional[pulumi.Input['UserPoolClientAnalyticsConfigurationArgs']]):
pulumi.set(self, "analytics_configuration", value)
@property
@pulumi.getter(name="callbackUrls")
def callback_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of allowed callback URLs for the identity providers.
"""
return pulumi.get(self, "callback_urls")
@callback_urls.setter
def callback_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "callback_urls", value)
@property
@pulumi.getter(name="defaultRedirectUri")
def default_redirect_uri(self) -> Optional[pulumi.Input[str]]:
"""
Default redirect URI. Must be in the list of callback URLs.
"""
return pulumi.get(self, "default_redirect_uri")
@default_redirect_uri.setter
def default_redirect_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_redirect_uri", value)
@property
@pulumi.getter(name="enableTokenRevocation")
def enable_token_revocation(self) -> Optional[pulumi.Input[bool]]:
"""
Enables or disables token revocation.
"""
return pulumi.get(self, "enable_token_revocation")
@enable_token_revocation.setter
def enable_token_revocation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_token_revocation", value)
@property
@pulumi.getter(name="explicitAuthFlows")
def explicit_auth_flows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of authentication flows (ADMIN_NO_SRP_AUTH, CUSTOM_AUTH_FLOW_ONLY, USER_PASSWORD_AUTH, ALLOW_ADMIN_USER_PASSWORD_AUTH, ALLOW_CUSTOM_AUTH, ALLOW_USER_PASSWORD_AUTH, ALLOW_USER_SRP_AUTH, ALLOW_REFRESH_TOKEN_AUTH).
"""
return pulumi.get(self, "explicit_auth_flows")
@explicit_auth_flows.setter
def explicit_auth_flows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "explicit_auth_flows", value)
@property
@pulumi.getter(name="generateSecret")
def generate_secret(self) -> Optional[pulumi.Input[bool]]:
"""
Should an application secret be generated.
"""
return pulumi.get(self, "generate_secret")
@generate_secret.setter
def generate_secret(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "generate_secret", value)
@property
@pulumi.getter(name="idTokenValidity")
def id_token_validity(self) -> Optional[pulumi.Input[int]]:
"""
Time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
"""
return pulumi.get(self, "id_token_validity")
@id_token_validity.setter
def id_token_validity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "id_token_validity", value)
@property
@pulumi.getter(name="logoutUrls")
def logout_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of allowed logout URLs for the identity providers.
"""
return pulumi.get(self, "logout_urls")
@logout_urls.setter
def logout_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "logout_urls", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the application client.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="preventUserExistenceErrors")
def prevent_user_existence_errors(self) -> Optional[pulumi.Input[str]]:
"""
Choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY`, those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.
"""
return pulumi.get(self, "prevent_user_existence_errors")
@prevent_user_existence_errors.setter
def prevent_user_existence_errors(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prevent_user_existence_errors", value)
@property
@pulumi.getter(name="readAttributes")
def read_attributes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of user pool attributes the application client can read from.
"""
return pulumi.get(self, "read_attributes")
@read_attributes.setter
def read_attributes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "read_attributes", value)
@property
@pulumi.getter(name="refreshTokenValidity")
def refresh_token_validity(self) -> Optional[pulumi.Input[int]]:
"""
Time limit in days refresh tokens are valid for.
"""
return pulumi.get(self, "refresh_token_validity")
@refresh_token_validity.setter
def refresh_token_validity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "refresh_token_validity", value)
@property
@pulumi.getter(name="supportedIdentityProviders")
def supported_identity_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of provider names for the identity providers that are supported on this client. Uses the `provider_name` attribute of `cognito.IdentityProvider` resource(s), or the equivalent string(s).
"""
return pulumi.get(self, "supported_identity_providers")
@supported_identity_providers.setter
def supported_identity_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "supported_identity_providers", value)
@property
@pulumi.getter(name="tokenValidityUnits")
def token_validity_units(self) -> Optional[pulumi.Input['UserPoolClientTokenValidityUnitsArgs']]:
"""
Configuration block for units in which the validity times are represented in. Detailed below.
"""
return pulumi.get(self, "token_validity_units")
@token_validity_units.setter
def token_validity_units(self, value: Optional[pulumi.Input['UserPoolClientTokenValidityUnitsArgs']]):
pulumi.set(self, "token_validity_units", value)
@property
@pulumi.getter(name="writeAttributes")
def write_attributes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    List of user pool attributes the application client can write to.
    """
    return pulumi.get(self, "write_attributes")

@write_attributes.setter
def write_attributes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    # Setter counterpart of the getter above.
    pulumi.set(self, "write_attributes", value)
@pulumi.input_type
class _UserPoolClientState:
    """
    Input-state container for `UserPoolClient.get()` lookups.

    Every field is optional; only the fields that were explicitly passed are
    stored (via `pulumi.set`), so absent values remain unset rather than None.
    NOTE: the `@pulumi.input_type` decorator introspects this class's
    `__init__` signature and property/setter pairs — names and `pulumi.get`/
    `pulumi.set` keys must stay in sync.
    """
    def __init__(__self__, *,
                 access_token_validity: Optional[pulumi.Input[int]] = None,
                 allowed_oauth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 allowed_oauth_flows_user_pool_client: Optional[pulumi.Input[bool]] = None,
                 allowed_oauth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 analytics_configuration: Optional[pulumi.Input['UserPoolClientAnalyticsConfigurationArgs']] = None,
                 callback_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 client_secret: Optional[pulumi.Input[str]] = None,
                 default_redirect_uri: Optional[pulumi.Input[str]] = None,
                 enable_token_revocation: Optional[pulumi.Input[bool]] = None,
                 explicit_auth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 generate_secret: Optional[pulumi.Input[bool]] = None,
                 id_token_validity: Optional[pulumi.Input[int]] = None,
                 logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 prevent_user_existence_errors: Optional[pulumi.Input[str]] = None,
                 read_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 refresh_token_validity: Optional[pulumi.Input[int]] = None,
                 supported_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 token_validity_units: Optional[pulumi.Input['UserPoolClientTokenValidityUnitsArgs']] = None,
                 user_pool_id: Optional[pulumi.Input[str]] = None,
                 write_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering UserPoolClient resources.
        :param pulumi.Input[int] access_token_validity: Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_oauth_flows: List of allowed OAuth flows (code, implicit, client_credentials).
        :param pulumi.Input[bool] allowed_oauth_flows_user_pool_client: Whether the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_oauth_scopes: List of allowed OAuth scopes (phone, email, openid, profile, and aws.cognito.signin.user.admin).
        :param pulumi.Input['UserPoolClientAnalyticsConfigurationArgs'] analytics_configuration: Configuration block for Amazon Pinpoint analytics for collecting metrics for this user pool. Detailed below.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] callback_urls: List of allowed callback URLs for the identity providers.
        :param pulumi.Input[str] client_secret: Client secret of the user pool client.
        :param pulumi.Input[str] default_redirect_uri: Default redirect URI. Must be in the list of callback URLs.
        :param pulumi.Input[bool] enable_token_revocation: Enables or disables token revocation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] explicit_auth_flows: List of authentication flows (ADMIN_NO_SRP_AUTH, CUSTOM_AUTH_FLOW_ONLY, USER_PASSWORD_AUTH, ALLOW_ADMIN_USER_PASSWORD_AUTH, ALLOW_CUSTOM_AUTH, ALLOW_USER_PASSWORD_AUTH, ALLOW_USER_SRP_AUTH, ALLOW_REFRESH_TOKEN_AUTH).
        :param pulumi.Input[bool] generate_secret: Should an application secret be generated.
        :param pulumi.Input[int] id_token_validity: Time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] logout_urls: List of allowed logout URLs for the identity providers.
        :param pulumi.Input[str] name: Name of the application client.
        :param pulumi.Input[str] prevent_user_existence_errors: Choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY`, those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] read_attributes: List of user pool attributes the application client can read from.
        :param pulumi.Input[int] refresh_token_validity: Time limit in days refresh tokens are valid for.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] supported_identity_providers: List of provider names for the identity providers that are supported on this client. Uses the `provider_name` attribute of `cognito.IdentityProvider` resource(s), or the equivalent string(s).
        :param pulumi.Input['UserPoolClientTokenValidityUnitsArgs'] token_validity_units: Configuration block for units in which the validity times are represented in. Detailed below.
        :param pulumi.Input[str] user_pool_id: User pool the client belongs to.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] write_attributes: List of user pool attributes the application client can write to.
        """
        # Only record properties that were explicitly provided; unset fields
        # stay absent so they do not participate in state filtering.
        if access_token_validity is not None:
            pulumi.set(__self__, "access_token_validity", access_token_validity)
        if allowed_oauth_flows is not None:
            pulumi.set(__self__, "allowed_oauth_flows", allowed_oauth_flows)
        if allowed_oauth_flows_user_pool_client is not None:
            pulumi.set(__self__, "allowed_oauth_flows_user_pool_client", allowed_oauth_flows_user_pool_client)
        if allowed_oauth_scopes is not None:
            pulumi.set(__self__, "allowed_oauth_scopes", allowed_oauth_scopes)
        if analytics_configuration is not None:
            pulumi.set(__self__, "analytics_configuration", analytics_configuration)
        if callback_urls is not None:
            pulumi.set(__self__, "callback_urls", callback_urls)
        if client_secret is not None:
            pulumi.set(__self__, "client_secret", client_secret)
        if default_redirect_uri is not None:
            pulumi.set(__self__, "default_redirect_uri", default_redirect_uri)
        if enable_token_revocation is not None:
            pulumi.set(__self__, "enable_token_revocation", enable_token_revocation)
        if explicit_auth_flows is not None:
            pulumi.set(__self__, "explicit_auth_flows", explicit_auth_flows)
        if generate_secret is not None:
            pulumi.set(__self__, "generate_secret", generate_secret)
        if id_token_validity is not None:
            pulumi.set(__self__, "id_token_validity", id_token_validity)
        if logout_urls is not None:
            pulumi.set(__self__, "logout_urls", logout_urls)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if prevent_user_existence_errors is not None:
            pulumi.set(__self__, "prevent_user_existence_errors", prevent_user_existence_errors)
        if read_attributes is not None:
            pulumi.set(__self__, "read_attributes", read_attributes)
        if refresh_token_validity is not None:
            pulumi.set(__self__, "refresh_token_validity", refresh_token_validity)
        if supported_identity_providers is not None:
            pulumi.set(__self__, "supported_identity_providers", supported_identity_providers)
        if token_validity_units is not None:
            pulumi.set(__self__, "token_validity_units", token_validity_units)
        if user_pool_id is not None:
            pulumi.set(__self__, "user_pool_id", user_pool_id)
        if write_attributes is not None:
            pulumi.set(__self__, "write_attributes", write_attributes)

    # One property/setter pair per state field. The `name=` argument on each
    # `@pulumi.getter` is the provider-facing camelCase name; the string key
    # passed to `pulumi.get`/`pulumi.set` is the Python-side snake_case key.
    @property
    @pulumi.getter(name="accessTokenValidity")
    def access_token_validity(self) -> Optional[pulumi.Input[int]]:
        """
        Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
        """
        return pulumi.get(self, "access_token_validity")

    @access_token_validity.setter
    def access_token_validity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "access_token_validity", value)

    @property
    @pulumi.getter(name="allowedOauthFlows")
    def allowed_oauth_flows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of allowed OAuth flows (code, implicit, client_credentials).
        """
        return pulumi.get(self, "allowed_oauth_flows")

    @allowed_oauth_flows.setter
    def allowed_oauth_flows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_oauth_flows", value)

    @property
    @pulumi.getter(name="allowedOauthFlowsUserPoolClient")
    def allowed_oauth_flows_user_pool_client(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
        """
        return pulumi.get(self, "allowed_oauth_flows_user_pool_client")

    @allowed_oauth_flows_user_pool_client.setter
    def allowed_oauth_flows_user_pool_client(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allowed_oauth_flows_user_pool_client", value)

    @property
    @pulumi.getter(name="allowedOauthScopes")
    def allowed_oauth_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of allowed OAuth scopes (phone, email, openid, profile, and aws.cognito.signin.user.admin).
        """
        return pulumi.get(self, "allowed_oauth_scopes")

    @allowed_oauth_scopes.setter
    def allowed_oauth_scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_oauth_scopes", value)

    @property
    @pulumi.getter(name="analyticsConfiguration")
    def analytics_configuration(self) -> Optional[pulumi.Input['UserPoolClientAnalyticsConfigurationArgs']]:
        """
        Configuration block for Amazon Pinpoint analytics for collecting metrics for this user pool. Detailed below.
        """
        return pulumi.get(self, "analytics_configuration")

    @analytics_configuration.setter
    def analytics_configuration(self, value: Optional[pulumi.Input['UserPoolClientAnalyticsConfigurationArgs']]):
        pulumi.set(self, "analytics_configuration", value)

    @property
    @pulumi.getter(name="callbackUrls")
    def callback_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of allowed callback URLs for the identity providers.
        """
        return pulumi.get(self, "callback_urls")

    @callback_urls.setter
    def callback_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "callback_urls", value)

    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> Optional[pulumi.Input[str]]:
        """
        Client secret of the user pool client.
        """
        return pulumi.get(self, "client_secret")

    @client_secret.setter
    def client_secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_secret", value)

    @property
    @pulumi.getter(name="defaultRedirectUri")
    def default_redirect_uri(self) -> Optional[pulumi.Input[str]]:
        """
        Default redirect URI. Must be in the list of callback URLs.
        """
        return pulumi.get(self, "default_redirect_uri")

    @default_redirect_uri.setter
    def default_redirect_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_redirect_uri", value)

    @property
    @pulumi.getter(name="enableTokenRevocation")
    def enable_token_revocation(self) -> Optional[pulumi.Input[bool]]:
        """
        Enables or disables token revocation.
        """
        return pulumi.get(self, "enable_token_revocation")

    @enable_token_revocation.setter
    def enable_token_revocation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_token_revocation", value)

    @property
    @pulumi.getter(name="explicitAuthFlows")
    def explicit_auth_flows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of authentication flows (ADMIN_NO_SRP_AUTH, CUSTOM_AUTH_FLOW_ONLY, USER_PASSWORD_AUTH, ALLOW_ADMIN_USER_PASSWORD_AUTH, ALLOW_CUSTOM_AUTH, ALLOW_USER_PASSWORD_AUTH, ALLOW_USER_SRP_AUTH, ALLOW_REFRESH_TOKEN_AUTH).
        """
        return pulumi.get(self, "explicit_auth_flows")

    @explicit_auth_flows.setter
    def explicit_auth_flows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "explicit_auth_flows", value)

    @property
    @pulumi.getter(name="generateSecret")
    def generate_secret(self) -> Optional[pulumi.Input[bool]]:
        """
        Should an application secret be generated.
        """
        return pulumi.get(self, "generate_secret")

    @generate_secret.setter
    def generate_secret(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "generate_secret", value)

    @property
    @pulumi.getter(name="idTokenValidity")
    def id_token_validity(self) -> Optional[pulumi.Input[int]]:
        """
        Time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
        """
        return pulumi.get(self, "id_token_validity")

    @id_token_validity.setter
    def id_token_validity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "id_token_validity", value)

    @property
    @pulumi.getter(name="logoutUrls")
    def logout_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of allowed logout URLs for the identity providers.
        """
        return pulumi.get(self, "logout_urls")

    @logout_urls.setter
    def logout_urls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "logout_urls", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the application client.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="preventUserExistenceErrors")
    def prevent_user_existence_errors(self) -> Optional[pulumi.Input[str]]:
        """
        Choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY`, those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.
        """
        return pulumi.get(self, "prevent_user_existence_errors")

    @prevent_user_existence_errors.setter
    def prevent_user_existence_errors(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "prevent_user_existence_errors", value)

    @property
    @pulumi.getter(name="readAttributes")
    def read_attributes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of user pool attributes the application client can read from.
        """
        return pulumi.get(self, "read_attributes")

    @read_attributes.setter
    def read_attributes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "read_attributes", value)

    @property
    @pulumi.getter(name="refreshTokenValidity")
    def refresh_token_validity(self) -> Optional[pulumi.Input[int]]:
        """
        Time limit in days refresh tokens are valid for.
        """
        return pulumi.get(self, "refresh_token_validity")

    @refresh_token_validity.setter
    def refresh_token_validity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "refresh_token_validity", value)

    @property
    @pulumi.getter(name="supportedIdentityProviders")
    def supported_identity_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of provider names for the identity providers that are supported on this client. Uses the `provider_name` attribute of `cognito.IdentityProvider` resource(s), or the equivalent string(s).
        """
        return pulumi.get(self, "supported_identity_providers")

    @supported_identity_providers.setter
    def supported_identity_providers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "supported_identity_providers", value)

    @property
    @pulumi.getter(name="tokenValidityUnits")
    def token_validity_units(self) -> Optional[pulumi.Input['UserPoolClientTokenValidityUnitsArgs']]:
        """
        Configuration block for units in which the validity times are represented in. Detailed below.
        """
        return pulumi.get(self, "token_validity_units")

    @token_validity_units.setter
    def token_validity_units(self, value: Optional[pulumi.Input['UserPoolClientTokenValidityUnitsArgs']]):
        pulumi.set(self, "token_validity_units", value)

    @property
    @pulumi.getter(name="userPoolId")
    def user_pool_id(self) -> Optional[pulumi.Input[str]]:
        """
        User pool the client belongs to.
        """
        return pulumi.get(self, "user_pool_id")

    @user_pool_id.setter
    def user_pool_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_pool_id", value)

    @property
    @pulumi.getter(name="writeAttributes")
    def write_attributes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of user pool attributes the application client can write to.
        """
        return pulumi.get(self, "write_attributes")

    @write_attributes.setter
    def write_attributes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "write_attributes", value)
class UserPoolClient(pulumi.CustomResource):
# Typing-only overload: keyword-argument construction form. The real
# implementation is dispatched via the un-decorated __init__ below.
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             access_token_validity: Optional[pulumi.Input[int]] = None,
             allowed_oauth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             allowed_oauth_flows_user_pool_client: Optional[pulumi.Input[bool]] = None,
             allowed_oauth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             analytics_configuration: Optional[pulumi.Input[pulumi.InputType['UserPoolClientAnalyticsConfigurationArgs']]] = None,
             callback_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             default_redirect_uri: Optional[pulumi.Input[str]] = None,
             enable_token_revocation: Optional[pulumi.Input[bool]] = None,
             explicit_auth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             generate_secret: Optional[pulumi.Input[bool]] = None,
             id_token_validity: Optional[pulumi.Input[int]] = None,
             logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             name: Optional[pulumi.Input[str]] = None,
             prevent_user_existence_errors: Optional[pulumi.Input[str]] = None,
             read_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             refresh_token_validity: Optional[pulumi.Input[int]] = None,
             supported_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             token_validity_units: Optional[pulumi.Input[pulumi.InputType['UserPoolClientTokenValidityUnitsArgs']]] = None,
             user_pool_id: Optional[pulumi.Input[str]] = None,
             write_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             __props__=None):
    """
    Provides a Cognito User Pool Client resource.
    ## Example Usage
    ### Create a basic user pool client
    ```python
    import pulumi
    import pulumi_aws as aws
    pool = aws.cognito.UserPool("pool")
    client = aws.cognito.UserPoolClient("client", user_pool_id=pool.id)
    ```
    ### Create a user pool client with no SRP authentication
    ```python
    import pulumi
    import pulumi_aws as aws
    pool = aws.cognito.UserPool("pool")
    client = aws.cognito.UserPoolClient("client",
        user_pool_id=pool.id,
        generate_secret=True,
        explicit_auth_flows=["ADMIN_NO_SRP_AUTH"])
    ```
    ### Create a user pool client with pinpoint analytics
    ```python
    import pulumi
    import pulumi_aws as aws
    current = aws.get_caller_identity()
    test_user_pool = aws.cognito.UserPool("testUserPool")
    test_app = aws.pinpoint.App("testApp")
    test_role = aws.iam.Role("testRole", assume_role_policy=\"\"\"{
      "Version": "2012-10-17",
      "Statement": [
        {
          "Action": "sts:AssumeRole",
          "Principal": {
            "Service": "cognito-idp.amazonaws.com"
          },
          "Effect": "Allow",
          "Sid": ""
        }
      ]
    }
    \"\"\")
    test_role_policy = aws.iam.RolePolicy("testRolePolicy",
        role=test_role.id,
        policy=test_app.application_id.apply(lambda application_id: f\"\"\"{{
      "Version": "2012-10-17",
      "Statement": [
        {{
          "Action": [
            "mobiletargeting:UpdateEndpoint",
            "mobiletargeting:PutItems"
          ],
          "Effect": "Allow",
          "Resource": "arn:aws:mobiletargeting:*:{current.account_id}:apps/{application_id}*"
        }}
      ]
    }}
    \"\"\"))
    test_user_pool_client = aws.cognito.UserPoolClient("testUserPoolClient",
        user_pool_id=test_user_pool.id,
        analytics_configuration=aws.cognito.UserPoolClientAnalyticsConfigurationArgs(
            application_id=test_app.application_id,
            external_id="some_id",
            role_arn=test_role.arn,
            user_data_shared=True,
        ))
    ```
    ## Import
    Cognito User Pool Clients can be imported using the `id` of the Cognito User Pool, and the `id` of the Cognito User Pool Client, e.g.,
    ```sh
    $ pulumi import aws:cognito/userPoolClient:UserPoolClient client <user_pool_id>/<user_pool_client_id>
    ```
    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[int] access_token_validity: Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_oauth_flows: List of allowed OAuth flows (code, implicit, client_credentials).
    :param pulumi.Input[bool] allowed_oauth_flows_user_pool_client: Whether the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_oauth_scopes: List of allowed OAuth scopes (phone, email, openid, profile, and aws.cognito.signin.user.admin).
    :param pulumi.Input[pulumi.InputType['UserPoolClientAnalyticsConfigurationArgs']] analytics_configuration: Configuration block for Amazon Pinpoint analytics for collecting metrics for this user pool. Detailed below.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] callback_urls: List of allowed callback URLs for the identity providers.
    :param pulumi.Input[str] default_redirect_uri: Default redirect URI. Must be in the list of callback URLs.
    :param pulumi.Input[bool] enable_token_revocation: Enables or disables token revocation.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] explicit_auth_flows: List of authentication flows (ADMIN_NO_SRP_AUTH, CUSTOM_AUTH_FLOW_ONLY, USER_PASSWORD_AUTH, ALLOW_ADMIN_USER_PASSWORD_AUTH, ALLOW_CUSTOM_AUTH, ALLOW_USER_PASSWORD_AUTH, ALLOW_USER_SRP_AUTH, ALLOW_REFRESH_TOKEN_AUTH).
    :param pulumi.Input[bool] generate_secret: Should an application secret be generated.
    :param pulumi.Input[int] id_token_validity: Time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] logout_urls: List of allowed logout URLs for the identity providers.
    :param pulumi.Input[str] name: Name of the application client.
    :param pulumi.Input[str] prevent_user_existence_errors: Choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY`, those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] read_attributes: List of user pool attributes the application client can read from.
    :param pulumi.Input[int] refresh_token_validity: Time limit in days refresh tokens are valid for.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] supported_identity_providers: List of provider names for the identity providers that are supported on this client. Uses the `provider_name` attribute of `cognito.IdentityProvider` resource(s), or the equivalent string(s).
    :param pulumi.Input[pulumi.InputType['UserPoolClientTokenValidityUnitsArgs']] token_validity_units: Configuration block for units in which the validity times are represented in. Detailed below.
    :param pulumi.Input[str] user_pool_id: User pool the client belongs to.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] write_attributes: List of user pool attributes the application client can write to.
    """
    ...
# Typing-only overload: args-object construction form (UserPoolClientArgs).
# The real implementation is dispatched via the un-decorated __init__ below.
@overload
def __init__(__self__,
             resource_name: str,
             args: UserPoolClientArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Provides a Cognito User Pool Client resource.
    ## Example Usage
    ### Create a basic user pool client
    ```python
    import pulumi
    import pulumi_aws as aws
    pool = aws.cognito.UserPool("pool")
    client = aws.cognito.UserPoolClient("client", user_pool_id=pool.id)
    ```
    ### Create a user pool client with no SRP authentication
    ```python
    import pulumi
    import pulumi_aws as aws
    pool = aws.cognito.UserPool("pool")
    client = aws.cognito.UserPoolClient("client",
        user_pool_id=pool.id,
        generate_secret=True,
        explicit_auth_flows=["ADMIN_NO_SRP_AUTH"])
    ```
    ### Create a user pool client with pinpoint analytics
    ```python
    import pulumi
    import pulumi_aws as aws
    current = aws.get_caller_identity()
    test_user_pool = aws.cognito.UserPool("testUserPool")
    test_app = aws.pinpoint.App("testApp")
    test_role = aws.iam.Role("testRole", assume_role_policy=\"\"\"{
      "Version": "2012-10-17",
      "Statement": [
        {
          "Action": "sts:AssumeRole",
          "Principal": {
            "Service": "cognito-idp.amazonaws.com"
          },
          "Effect": "Allow",
          "Sid": ""
        }
      ]
    }
    \"\"\")
    test_role_policy = aws.iam.RolePolicy("testRolePolicy",
        role=test_role.id,
        policy=test_app.application_id.apply(lambda application_id: f\"\"\"{{
      "Version": "2012-10-17",
      "Statement": [
        {{
          "Action": [
            "mobiletargeting:UpdateEndpoint",
            "mobiletargeting:PutItems"
          ],
          "Effect": "Allow",
          "Resource": "arn:aws:mobiletargeting:*:{current.account_id}:apps/{application_id}*"
        }}
      ]
    }}
    \"\"\"))
    test_user_pool_client = aws.cognito.UserPoolClient("testUserPoolClient",
        user_pool_id=test_user_pool.id,
        analytics_configuration=aws.cognito.UserPoolClientAnalyticsConfigurationArgs(
            application_id=test_app.application_id,
            external_id="some_id",
            role_arn=test_role.arn,
            user_data_shared=True,
        ))
    ```
    ## Import
    Cognito User Pool Clients can be imported using the `id` of the Cognito User Pool, and the `id` of the Cognito User Pool Client, e.g.,
    ```sh
    $ pulumi import aws:cognito/userPoolClient:UserPoolClient client <user_pool_id>/<user_pool_client_id>
    ```
    :param str resource_name: The name of the resource.
    :param UserPoolClientArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """
    Dispatch between the two overloaded construction forms.

    `get_resource_args_opts` inspects *args/**kwargs and returns a
    `UserPoolClientArgs` instance (args-object form) or None (keyword form),
    plus any resource options it found; both paths funnel into
    `_internal_init`.
    """
    resource_args, opts = _utilities.get_resource_args_opts(UserPoolClientArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword form: pass the caller's arguments through untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: expand its fields into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   access_token_validity: Optional[pulumi.Input[int]] = None,
                   allowed_oauth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   allowed_oauth_flows_user_pool_client: Optional[pulumi.Input[bool]] = None,
                   allowed_oauth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   analytics_configuration: Optional[pulumi.Input[pulumi.InputType['UserPoolClientAnalyticsConfigurationArgs']]] = None,
                   callback_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   default_redirect_uri: Optional[pulumi.Input[str]] = None,
                   enable_token_revocation: Optional[pulumi.Input[bool]] = None,
                   explicit_auth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   generate_secret: Optional[pulumi.Input[bool]] = None,
                   id_token_validity: Optional[pulumi.Input[int]] = None,
                   logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   prevent_user_existence_errors: Optional[pulumi.Input[str]] = None,
                   read_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   refresh_token_validity: Optional[pulumi.Input[int]] = None,
                   supported_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   token_validity_units: Optional[pulumi.Input[pulumi.InputType['UserPoolClientTokenValidityUnitsArgs']]] = None,
                   user_pool_id: Optional[pulumi.Input[str]] = None,
                   write_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   __props__=None):
    # Shared constructor body: validates options, assembles the property bag,
    # and registers the resource with the Pulumi engine.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    # opts.id set means "adopt an existing resource"; in that mode the engine
    # supplies the properties, so an explicit __props__ is a caller error.
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = UserPoolClientArgs.__new__(UserPoolClientArgs)
        __props__.__dict__["access_token_validity"] = access_token_validity
        __props__.__dict__["allowed_oauth_flows"] = allowed_oauth_flows
        __props__.__dict__["allowed_oauth_flows_user_pool_client"] = allowed_oauth_flows_user_pool_client
        __props__.__dict__["allowed_oauth_scopes"] = allowed_oauth_scopes
        __props__.__dict__["analytics_configuration"] = analytics_configuration
        __props__.__dict__["callback_urls"] = callback_urls
        __props__.__dict__["default_redirect_uri"] = default_redirect_uri
        __props__.__dict__["enable_token_revocation"] = enable_token_revocation
        __props__.__dict__["explicit_auth_flows"] = explicit_auth_flows
        __props__.__dict__["generate_secret"] = generate_secret
        __props__.__dict__["id_token_validity"] = id_token_validity
        __props__.__dict__["logout_urls"] = logout_urls
        __props__.__dict__["name"] = name
        __props__.__dict__["prevent_user_existence_errors"] = prevent_user_existence_errors
        __props__.__dict__["read_attributes"] = read_attributes
        __props__.__dict__["refresh_token_validity"] = refresh_token_validity
        __props__.__dict__["supported_identity_providers"] = supported_identity_providers
        __props__.__dict__["token_validity_units"] = token_validity_units
        # user_pool_id is the one required input (unless resurrecting via urn).
        if user_pool_id is None and not opts.urn:
            raise TypeError("Missing required property 'user_pool_id'")
        __props__.__dict__["user_pool_id"] = user_pool_id
        __props__.__dict__["write_attributes"] = write_attributes
        # client_secret is an output-only property: always provider-computed.
        __props__.__dict__["client_secret"] = None
    super(UserPoolClient, __self__).__init__(
        'aws:cognito/userPoolClient:UserPoolClient',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_token_validity: Optional[pulumi.Input[int]] = None,
allowed_oauth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
allowed_oauth_flows_user_pool_client: Optional[pulumi.Input[bool]] = None,
allowed_oauth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
analytics_configuration: Optional[pulumi.Input[pulumi.InputType['UserPoolClientAnalyticsConfigurationArgs']]] = None,
callback_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
default_redirect_uri: Optional[pulumi.Input[str]] = None,
enable_token_revocation: Optional[pulumi.Input[bool]] = None,
explicit_auth_flows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
generate_secret: Optional[pulumi.Input[bool]] = None,
id_token_validity: Optional[pulumi.Input[int]] = None,
logout_urls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
prevent_user_existence_errors: Optional[pulumi.Input[str]] = None,
read_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
refresh_token_validity: Optional[pulumi.Input[int]] = None,
supported_identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
token_validity_units: Optional[pulumi.Input[pulumi.InputType['UserPoolClientTokenValidityUnitsArgs']]] = None,
user_pool_id: Optional[pulumi.Input[str]] = None,
write_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'UserPoolClient':
"""
Get an existing UserPoolClient resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] access_token_validity: Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_oauth_flows: List of allowed OAuth flows (code, implicit, client_credentials).
:param pulumi.Input[bool] allowed_oauth_flows_user_pool_client: Whether the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_oauth_scopes: List of allowed OAuth scopes (phone, email, openid, profile, and aws.cognito.signin.user.admin).
:param pulumi.Input[pulumi.InputType['UserPoolClientAnalyticsConfigurationArgs']] analytics_configuration: Configuration block for Amazon Pinpoint analytics for collecting metrics for this user pool. Detailed below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] callback_urls: List of allowed callback URLs for the identity providers.
:param pulumi.Input[str] client_secret: Client secret of the user pool client.
:param pulumi.Input[str] default_redirect_uri: Default redirect URI. Must be in the list of callback URLs.
:param pulumi.Input[bool] enable_token_revocation: Enables or disables token revocation.
:param pulumi.Input[Sequence[pulumi.Input[str]]] explicit_auth_flows: List of authentication flows (ADMIN_NO_SRP_AUTH, CUSTOM_AUTH_FLOW_ONLY, USER_PASSWORD_AUTH, ALLOW_ADMIN_USER_PASSWORD_AUTH, ALLOW_CUSTOM_AUTH, ALLOW_USER_PASSWORD_AUTH, ALLOW_USER_SRP_AUTH, ALLOW_REFRESH_TOKEN_AUTH).
:param pulumi.Input[bool] generate_secret: Should an application secret be generated.
:param pulumi.Input[int] id_token_validity: Time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] logout_urls: List of allowed logout URLs for the identity providers.
:param pulumi.Input[str] name: Name of the application client.
:param pulumi.Input[str] prevent_user_existence_errors: Choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY`, those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] read_attributes: List of user pool attributes the application client can read from.
:param pulumi.Input[int] refresh_token_validity: Time limit in days refresh tokens are valid for.
:param pulumi.Input[Sequence[pulumi.Input[str]]] supported_identity_providers: List of provider names for the identity providers that are supported on this client. Uses the `provider_name` attribute of `cognito.IdentityProvider` resource(s), or the equivalent string(s).
:param pulumi.Input[pulumi.InputType['UserPoolClientTokenValidityUnitsArgs']] token_validity_units: Configuration block for units in which the validity times are represented in. Detailed below.
:param pulumi.Input[str] user_pool_id: User pool the client belongs to.
:param pulumi.Input[Sequence[pulumi.Input[str]]] write_attributes: List of user pool attributes the application client can write to.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _UserPoolClientState.__new__(_UserPoolClientState)
__props__.__dict__["access_token_validity"] = access_token_validity
__props__.__dict__["allowed_oauth_flows"] = allowed_oauth_flows
__props__.__dict__["allowed_oauth_flows_user_pool_client"] = allowed_oauth_flows_user_pool_client
__props__.__dict__["allowed_oauth_scopes"] = allowed_oauth_scopes
__props__.__dict__["analytics_configuration"] = analytics_configuration
__props__.__dict__["callback_urls"] = callback_urls
__props__.__dict__["client_secret"] = client_secret
__props__.__dict__["default_redirect_uri"] = default_redirect_uri
__props__.__dict__["enable_token_revocation"] = enable_token_revocation
__props__.__dict__["explicit_auth_flows"] = explicit_auth_flows
__props__.__dict__["generate_secret"] = generate_secret
__props__.__dict__["id_token_validity"] = id_token_validity
__props__.__dict__["logout_urls"] = logout_urls
__props__.__dict__["name"] = name
__props__.__dict__["prevent_user_existence_errors"] = prevent_user_existence_errors
__props__.__dict__["read_attributes"] = read_attributes
__props__.__dict__["refresh_token_validity"] = refresh_token_validity
__props__.__dict__["supported_identity_providers"] = supported_identity_providers
__props__.__dict__["token_validity_units"] = token_validity_units
__props__.__dict__["user_pool_id"] = user_pool_id
__props__.__dict__["write_attributes"] = write_attributes
return UserPoolClient(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessTokenValidity")
def access_token_validity(self) -> pulumi.Output[Optional[int]]:
"""
Time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
"""
return pulumi.get(self, "access_token_validity")
@property
@pulumi.getter(name="allowedOauthFlows")
def allowed_oauth_flows(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of allowed OAuth flows (code, implicit, client_credentials).
"""
return pulumi.get(self, "allowed_oauth_flows")
@property
@pulumi.getter(name="allowedOauthFlowsUserPoolClient")
def allowed_oauth_flows_user_pool_client(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the client is allowed to follow the OAuth protocol when interacting with Cognito user pools.
"""
return pulumi.get(self, "allowed_oauth_flows_user_pool_client")
@property
@pulumi.getter(name="allowedOauthScopes")
def allowed_oauth_scopes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of allowed OAuth scopes (phone, email, openid, profile, and aws.cognito.signin.user.admin).
"""
return pulumi.get(self, "allowed_oauth_scopes")
@property
@pulumi.getter(name="analyticsConfiguration")
def analytics_configuration(self) -> pulumi.Output[Optional['outputs.UserPoolClientAnalyticsConfiguration']]:
"""
Configuration block for Amazon Pinpoint analytics for collecting metrics for this user pool. Detailed below.
"""
return pulumi.get(self, "analytics_configuration")
@property
@pulumi.getter(name="callbackUrls")
def callback_urls(self) -> pulumi.Output[Sequence[str]]:
"""
List of allowed callback URLs for the identity providers.
"""
return pulumi.get(self, "callback_urls")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Output[str]:
"""
Client secret of the user pool client.
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="defaultRedirectUri")
def default_redirect_uri(self) -> pulumi.Output[Optional[str]]:
"""
Default redirect URI. Must be in the list of callback URLs.
"""
return pulumi.get(self, "default_redirect_uri")
@property
@pulumi.getter(name="enableTokenRevocation")
def enable_token_revocation(self) -> pulumi.Output[bool]:
"""
Enables or disables token revocation.
"""
return pulumi.get(self, "enable_token_revocation")
@property
@pulumi.getter(name="explicitAuthFlows")
def explicit_auth_flows(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of authentication flows (ADMIN_NO_SRP_AUTH, CUSTOM_AUTH_FLOW_ONLY, USER_PASSWORD_AUTH, ALLOW_ADMIN_USER_PASSWORD_AUTH, ALLOW_CUSTOM_AUTH, ALLOW_USER_PASSWORD_AUTH, ALLOW_USER_SRP_AUTH, ALLOW_REFRESH_TOKEN_AUTH).
"""
return pulumi.get(self, "explicit_auth_flows")
@property
@pulumi.getter(name="generateSecret")
def generate_secret(self) -> pulumi.Output[Optional[bool]]:
"""
Should an application secret be generated.
"""
return pulumi.get(self, "generate_secret")
@property
@pulumi.getter(name="idTokenValidity")
def id_token_validity(self) -> pulumi.Output[Optional[int]]:
"""
Time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if you have entered a value in `token_validity_units`.
"""
return pulumi.get(self, "id_token_validity")
@property
@pulumi.getter(name="logoutUrls")
def logout_urls(self) -> pulumi.Output[Sequence[str]]:
"""
List of allowed logout URLs for the identity providers.
"""
return pulumi.get(self, "logout_urls")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the application client.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="preventUserExistenceErrors")
def prevent_user_existence_errors(self) -> pulumi.Output[str]:
"""
Choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY`, those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.
"""
return pulumi.get(self, "prevent_user_existence_errors")
@property
@pulumi.getter(name="readAttributes")
def read_attributes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of user pool attributes the application client can read from.
"""
return pulumi.get(self, "read_attributes")
@property
@pulumi.getter(name="refreshTokenValidity")
def refresh_token_validity(self) -> pulumi.Output[Optional[int]]:
"""
Time limit in days refresh tokens are valid for.
"""
return pulumi.get(self, "refresh_token_validity")
@property
@pulumi.getter(name="supportedIdentityProviders")
def supported_identity_providers(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of provider names for the identity providers that are supported on this client. Uses the `provider_name` attribute of `cognito.IdentityProvider` resource(s), or the equivalent string(s).
"""
return pulumi.get(self, "supported_identity_providers")
@property
@pulumi.getter(name="tokenValidityUnits")
def token_validity_units(self) -> pulumi.Output[Optional['outputs.UserPoolClientTokenValidityUnits']]:
"""
Configuration block for units in which the validity times are represented in. Detailed below.
"""
return pulumi.get(self, "token_validity_units")
@property
@pulumi.getter(name="userPoolId")
def user_pool_id(self) -> pulumi.Output[str]:
"""
User pool the client belongs to.
"""
return pulumi.get(self, "user_pool_id")
@property
@pulumi.getter(name="writeAttributes")
def write_attributes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of user pool attributes the application client can write to.
"""
return pulumi.get(self, "write_attributes")
| 56.10813 | 627 | 0.697101 | 8,319 | 69,013 | 5.532276 | 0.039909 | 0.08939 | 0.074724 | 0.056493 | 0.957782 | 0.950829 | 0.949743 | 0.947418 | 0.945701 | 0.933577 | 0 | 0.001117 | 0.208584 | 69,013 | 1,229 | 628 | 56.153784 | 0.841517 | 0.384927 | 0 | 0.879756 | 1 | 0 | 0.144185 | 0.072349 | 0 | 0 | 0 | 0 | 0 | 1 | 0.167428 | false | 0.001522 | 0.010654 | 0 | 0.278539 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
c7de307ac0b393af0590d190cacf1869f1238a25 | 15,405 | py | Python | quest/wishing_well.py | onefforts/dfk | d5c6e621c51be7978cfbb26685c394f6b3e52664 | [
"MIT"
] | null | null | null | quest/wishing_well.py | onefforts/dfk | d5c6e621c51be7978cfbb26685c394f6b3e52664 | [
"MIT"
] | null | null | null | quest/wishing_well.py | onefforts/dfk | d5c6e621c51be7978cfbb26685c394f6b3e52664 | [
"MIT"
] | null | null | null | from web3 import Web3
CONTRACT_ADDRESS = '0xf5ff69f4ac4a851730668b93fc408bc1c49ef4ce'
ABI = """
[
{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Paused","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"questId","type":"uint256"},{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":false,"internalType":"uint256","name":"heroId","type":"uint256"}],"name":"QuestCanceled","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"questId","type":"uint256"},{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":false,"internalType":"uint256","name":"heroId","type":"uint256"}],"name":"QuestCompleted","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"questId","type":"uint256"},{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":false,"internalType":"uint256","name":"heroId","type":"uint256"},{"indexed":false,"internalType":"address","name":"rewardItem","type":"address"},{"indexed":false,"internalType":"uint256","name":"itemQuantity","type":"uint256"}],"name":"QuestReward","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"questId","type":"uint256"},{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":false,"internalType":"uint256","name":"heroId","type":"uint256"},{"indexed":false,"internalType":"uint8","name":"profession","type":"uint8"},{"indexed":false,"internalType":"uint64","name":"skillUp","type":"uint64"}],"name":"QuestSkillUp","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"questId","type":"uint256"},{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":false,"internalType":"uint256","name":"heroId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"staminaFullAt","type":"uint256"},{"indexed":false,"internalType":"uint16","name":"staminaSpent","type":"uint16"}],"name":"QuestStaminaSpent","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"questId","type":"uint256"},{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":false,"internalType":"uint256","name":"heroId","type":"uint256"}],"name":"QuestStarted","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"questId","type":"uint256"},{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":false,"internalType":"uint256","name":"heroId","type":"uint256"},{"indexed":false,"internalType":"uint64","name":"xpEarned","type":"uint64"}],"name":"QuestXP","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"previousAdminRole","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"newAdminRole","type":"bytes32"}],"name":"RoleAdminChanged","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleGranted","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleRevoked","type":"event"},
{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Unpaused","type":"event"},
{"inputs":[],"name":"DEFAULT_ADMIN_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"MODERATOR_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_heroId","type":"uint256"}],"name":"cancelQuest","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_heroId","type":"uint256"}],"name":"completeQuest","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"randomNumber","type":"uint256"},{"internalType":"uint256","name":"digits","type":"uint256"},{"internalType":"uint256","name":"offset","type":"uint256"}],"name":"extractNumber","outputs":[{"internalType":"uint256","name":"result","type":"uint256"}],"stateMutability":"pure","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_heroId","type":"uint256"}],"name":"getCurrentStamina","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"name":"getRoleAdmin","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"grantRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"hasRole","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"heroToQuest","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"address","name":"_heroCoreAddress","type":"address"},{"internalType":"address","name":"_statScienceAddress","type":"address"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[],"name":"lastRewardIndex","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"pause","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[],"name":"paused","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"questLevel","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"renounceRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"revokeRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"rewardItems","outputs":[{"internalType":"contract IInventoryItem","name":"item","type":"address"},{"internalType":"int64","name":"expBonus","type":"int64"},{"internalType":"int64","name":"skillUpChance","type":"int64"},{"internalType":"int64","name":"smallSkillUpMod","type":"int64"},{"internalType":"int64","name":"mediumSkillUpMod","type":"int64"},{"internalType":"int64","name":"largeSkillUpMod","type":"int64"},{"internalType":"int64","name":"baseChance","type":"int64"},{"internalType":"int64","name":"skillMod","type":"int64"},{"internalType":"int64","name":"statMod","type":"int64"},{"internalType":"int64","name":"luckMod","type":"int64"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"runeRate","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_id","type":"uint256"},{"components":[{"internalType":"contract IInventoryItem","name":"item","type":"address"},{"internalType":"int64","name":"expBonus","type":"int64"},{"internalType":"int64","name":"skillUpChance","type":"int64"},{"internalType":"int64","name":"smallSkillUpMod","type":"int64"},{"internalType":"int64","name":"mediumSkillUpMod","type":"int64"},{"internalType":"int64","name":"largeSkillUpMod","type":"int64"},{"internalType":"int64","name":"baseChance","type":"int64"},{"internalType":"int64","name":"skillMod","type":"int64"},{"internalType":"int64","name":"statMod","type":"int64"},{"internalType":"int64","name":"luckMod","type":"int64"}],"internalType":"struct ProfessionQuest.RewardItem","name":"_item","type":"tuple"}],"name":"setRewardItem","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_rate","type":"uint256"}],"name":"setTearRate","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_timePerStamina","type":"uint256"}],"name":"setTimePerStamina","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_heroId","type":"uint256"},{"internalType":"uint8","name":"_attempts","type":"uint8"}],"name":"startQuest","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"tearRate","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"timePerStamina","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"unpause","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"blockNumber","type":"uint256"}],"name":"vrf","outputs":[{"internalType":"bytes32","name":"result","type":"bytes32"}],"stateMutability":"view","type":"function"}
]
"""
def block_explorer_link(txid):
    """Build a Harmony block-explorer URL for the given transaction hash.

    :param txid: transaction hash (str, bytes, or HexBytes); rendered via str()
    :return: full explorer URL as a string
    """
    return f'https://explorer.harmony.one/tx/{txid}'
def start_quest(hero_id, attempts, private_key, nonce, gas_price_gwei, tx_timeout_seconds, rpc_address, logger):
    """Sign and submit a ``startQuest`` transaction, blocking until it is mined.

    :param hero_id: on-chain id of the hero to send on the quest
    :param attempts: number of quest attempts (uint8 on the contract)
    :param private_key: private key used to sign the transaction
    :param nonce: account nonce to use for the transaction
    :param gas_price_gwei: gas price, in gwei
    :param tx_timeout_seconds: how long to wait for the receipt before timing out
    :param rpc_address: URL of the Harmony JSON-RPC endpoint
    :param logger: logger used for progress/debug messages
    :return: the mined transaction receipt (consistent with ``complete_quest``)
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    account = w3.eth.account.privateKeyToAccount(private_key)
    w3.eth.default_account = account.address

    contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
    contract = w3.eth.contract(contract_address, abi=ABI)

    logger.info("Starting quest with hero id " + str(hero_id))
    tx = contract.functions.startQuest(hero_id, attempts).buildTransaction(
        {'gasPrice': w3.toWei(gas_price_gwei, 'gwei'), 'nonce': nonce})
    logger.debug("Signing transaction")
    signed_tx = w3.eth.account.sign_transaction(tx, private_key=private_key)
    logger.debug("Sending transaction " + str(tx))
    # The returned tx hash is not needed; signed_tx.hash is used to poll below.
    w3.eth.send_raw_transaction(signed_tx.rawTransaction)
    logger.debug("Transaction successfully sent !")
    logger.info(
        "Waiting for transaction " + block_explorer_link(signed_tx.hash.hex()) + " to be mined")
    tx_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash=signed_tx.hash, timeout=tx_timeout_seconds,
                                                     poll_latency=3)
    logger.info("Transaction mined !")
    logger.info(str(tx_receipt))
    return tx_receipt
def complete_quest(hero_id, private_key, nonce, gas_price_gwei, tx_timeout_seconds, rpc_address, logger):
    """Sign and submit a ``completeQuest`` transaction, blocking until it is mined.

    :param hero_id: on-chain id of the hero whose quest is being completed
    :param private_key: private key used to sign the transaction
    :param nonce: account nonce to use for the transaction
    :param gas_price_gwei: gas price, in gwei
    :param tx_timeout_seconds: how long to wait for the receipt before timing out
    :param rpc_address: URL of the Harmony JSON-RPC endpoint
    :param logger: logger used for progress/debug messages
    :return: the mined transaction receipt (feed to ``parse_complete_quest_receipt``)
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    account = w3.eth.account.privateKeyToAccount(private_key)
    w3.eth.default_account = account.address

    contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
    contract = w3.eth.contract(contract_address, abi=ABI)

    logger.debug("Completing quest with hero id " + str(hero_id))
    tx = contract.functions.completeQuest(hero_id).buildTransaction(
        {'gasPrice': w3.toWei(gas_price_gwei, 'gwei'), 'nonce': nonce})
    logger.debug("Signing transaction")
    signed_tx = w3.eth.account.sign_transaction(tx, private_key=private_key)
    logger.debug("Sending transaction " + str(tx))
    # The returned tx hash is not needed; signed_tx.hash is used to poll below.
    w3.eth.send_raw_transaction(signed_tx.rawTransaction)
    logger.debug("Transaction successfully sent !")
    logger.info(
        "Waiting for transaction " + block_explorer_link(signed_tx.hash.hex()) + " to be mined")
    tx_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash=signed_tx.hash, timeout=tx_timeout_seconds,
                                                     poll_latency=3)
    logger.info("Transaction mined !")
    logger.info(str(tx_receipt))
    return tx_receipt
def parse_complete_quest_receipt(tx_receipt, rpc_address):
    """Decode quest rewards from a mined ``completeQuest`` receipt.

    Sums the ``QuestReward`` item quantities (key ``'tear'``) and the
    ``QuestXP`` amounts (key ``'xp'``) emitted in the receipt's logs.

    :param tx_receipt: receipt returned by ``complete_quest``
    :param rpc_address: URL of the Harmony JSON-RPC endpoint
    :return: dict with integer totals under ``'tear'`` and ``'xp'``
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    contract = w3.eth.contract(Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)

    reward_events = contract.events.QuestReward().processReceipt(tx_receipt)
    xp_events = contract.events.QuestXP().processReceipt(tx_receipt)
    return {
        'tear': sum(event.args.itemQuantity for event in reward_events),
        'xp': sum(event.args.xpEarned for event in xp_events),
    }
def quest_level(rpc_address):
    """Read the ``questLevel`` value from the Wishing Well contract.

    :param rpc_address: URL of the Harmony JSON-RPC endpoint
    :return: quest level as returned by the contract call
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    contract = w3.eth.contract(Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return contract.functions.questLevel().call()
def rewards(quest_id, rpc_address):
    """Read the ``rewardItems`` entry for a quest from the Wishing Well contract.

    :param quest_id: index into the contract's reward-item table
    :param rpc_address: URL of the Harmony JSON-RPC endpoint
    :return: reward-item tuple (item address plus bonus/chance modifiers)
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    contract = w3.eth.contract(Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return contract.functions.rewardItems(quest_id).call()
def last_reward_index(rpc_address):
    """Read ``lastRewardIndex`` from the Wishing Well contract.

    :param rpc_address: URL of the Harmony JSON-RPC endpoint
    :return: last reward index as returned by the contract call
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    contract = w3.eth.contract(Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return contract.functions.lastRewardIndex().call()
def hero_to_quest(hero_id, rpc_address):
    """Read the quest id a hero is currently assigned to (``heroToQuest``).

    :param hero_id: on-chain id of the hero
    :param rpc_address: URL of the Harmony JSON-RPC endpoint
    :return: quest id as returned by the contract call
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    contract = w3.eth.contract(Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return contract.functions.heroToQuest(hero_id).call()
def get_current_stamina(hero_id, rpc_address):
    """Read a hero's current stamina from the contract (``getCurrentStamina``).

    :param hero_id: on-chain id of the hero
    :param rpc_address: URL of the Harmony JSON-RPC endpoint
    :return: current stamina as returned by the contract call
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    contract = w3.eth.contract(Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return contract.functions.getCurrentStamina(hero_id).call()
| 90.087719 | 888 | 0.666861 | 1,571 | 15,405 | 6.44303 | 0.120942 | 0.054337 | 0.084074 | 0.053349 | 0.820984 | 0.790852 | 0.781367 | 0.754495 | 0.753408 | 0.712112 | 0 | 0.031773 | 0.084713 | 15,405 | 170 | 889 | 90.617647 | 0.686099 | 0 | 0 | 0.44186 | 0 | 0.294574 | 0.719766 | 0.659266 | 0 | 0 | 0.002726 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.007752 | 0.007752 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
400ba0e7e3f7f11a11da49ae853438e8faef2e3c | 19,963 | py | Python | ptvapi/api/departures_api.py | richardjkendall/ptv-api-client | 9b1a3882ebee8cef363e688a56b90e2643799a88 | [
"MIT"
] | null | null | null | ptvapi/api/departures_api.py | richardjkendall/ptv-api-client | 9b1a3882ebee8cef363e688a56b90e2643799a88 | [
"MIT"
] | null | null | null | ptvapi/api/departures_api.py | richardjkendall/ptv-api-client | 9b1a3882ebee8cef363e688a56b90e2643799a88 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
PTV Timetable API - Version 3
The PTV Timetable API provides direct access to Public Transport Victoria’s public transport timetable data. The API returns scheduled timetable, route and stop data for all metropolitan and regional train, tram and bus services in Victoria, including Night Network(Night Train and Night Tram data are included in metropolitan train and tram services data, respectively, whereas Night Bus is a separate route type). The API also returns real-time data for metropolitan train, tram and bus services (where this data is made available to PTV), as well as disruption information, stop facility information, and access to myki ticket outlet data. This Swagger is for Version 3 of the PTV Timetable API. By using this documentation you agree to comply with the licence and terms of service. Train timetable data is updated daily, while the remaining data is updated weekly, taking into account any planned timetable changes (for example, due to holidays or planned disruptions). The PTV timetable API is the same API used by PTV for its apps. To access the most up to date data PTV has (including real-time data) you must use the API dynamically. You can access the PTV Timetable API through a HTTP or HTTPS interface, as follows: base URL / version number / API name / query string The base URL is either: * http://timetableapi.ptv.vic.gov.au or * https://timetableapi.ptv.vic.gov.au The Swagger JSON file is available at http://timetableapi.ptv.vic.gov.au/swagger/docs/v3 Frequently asked questions are available on the PTV website at http://ptv.vic.gov.au/apifaq Links to the following information are also provided on the PTV website at http://ptv.vic.gov.au/ptv-timetable-api/ * How to register for an API key and calculate a signature * PTV Timetable API V2 to V3 Migration Guide * Documentation for Version 2 of the PTV Timetable API * PTV Timetable API Data Quality Statement All information about how to use the API is in this documentation. PTV cannot provide technical support for the API. 
Credits: This page has been based on Steve Bennett's http://opentransportdata.org/, used with permission. # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ptvapi.api_client import ApiClient
class DeparturesApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def departures_get_for_stop(self, route_type, stop_id, **kwargs):  # noqa: E501
        """View departures for all routes from a stop  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.departures_get_for_stop(route_type, stop_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int route_type: Number identifying transport mode; values returned via RouteTypes API (required)
        :param int stop_id: Identifier of stop; values returned by Stops API (required)
        :param list[int] platform_numbers: Filter by platform number at stop
        :param int direction_id: Filter by identifier of direction of travel; values returned by Directions API - /v3/directions/route/{route_id}
        :param bool look_backwards: Indicates if filtering runs (and their departures) to those that arrive at destination before date_utc (default = false). Requires max_results > 0.
        :param bool gtfs: Indicates that stop_id parameter will accept \"GTFS stop_id\" data
        :param datetime date_utc: Filter by the date and time of the request (ISO 8601 UTC format) (default = current date and time)
        :param int max_results: Maximum number of results returned
        :param bool include_cancelled: Indicates if cancelled services (if they exist) are returned (default = false) - metropolitan train only
        :param list[str] expand: List objects to be returned in full (i.e. expanded) - options include: all, stop, route, run, direction, disruption
        :param str token: Please ignore
        :param str devid: Your developer id
        :param str signature: Authentication signature for request
        :return: V3DeparturesResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # Convenience wrapper: always returns just the response body; the
        # *_with_http_info variant carries status and headers as well.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.departures_get_for_stop_with_http_info(route_type, stop_id, **kwargs)  # noqa: E501
        else:
            (data) = self.departures_get_for_stop_with_http_info(route_type, stop_id, **kwargs)  # noqa: E501
            return data

    def departures_get_for_stop_with_http_info(self, route_type, stop_id, **kwargs):  # noqa: E501
        """View departures for all routes from a stop  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.departures_get_for_stop_with_http_info(route_type, stop_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int route_type: Number identifying transport mode; values returned via RouteTypes API (required)
        :param int stop_id: Identifier of stop; values returned by Stops API (required)
        :param list[int] platform_numbers: Filter by platform number at stop
        :param int direction_id: Filter by identifier of direction of travel; values returned by Directions API - /v3/directions/route/{route_id}
        :param bool look_backwards: Indicates if filtering runs (and their departures) to those that arrive at destination before date_utc (default = false). Requires max_results > 0.
        :param bool gtfs: Indicates that stop_id parameter will accept \"GTFS stop_id\" data
        :param datetime date_utc: Filter by the date and time of the request (ISO 8601 UTC format) (default = current date and time)
        :param int max_results: Maximum number of results returned
        :param bool include_cancelled: Indicates if cancelled services (if they exist) are returned (default = false) - metropolitan train only
        :param list[str] expand: List objects to be returned in full (i.e. expanded) - options include: all, stop, route, run, direction, disruption
        :param str token: Please ignore
        :param str devid: Your developer id
        :param str signature: Authentication signature for request
        :return: V3DeparturesResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['route_type', 'stop_id', 'platform_numbers', 'direction_id', 'look_backwards', 'gtfs', 'date_utc', 'max_results', 'include_cancelled', 'expand', 'token', 'devid', 'signature']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge the explicit arguments and kwargs into one dict, rejecting any
        # keyword that is not a known parameter of this operation.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method departures_get_for_stop" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'route_type' is set
        if ('route_type' not in params or
                params['route_type'] is None):
            raise ValueError("Missing the required parameter `route_type` when calling `departures_get_for_stop`")  # noqa: E501
        # verify the required parameter 'stop_id' is set
        if ('stop_id' not in params or
                params['stop_id'] is None):
            raise ValueError("Missing the required parameter `stop_id` when calling `departures_get_for_stop`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template below.
        path_params = {}
        if 'route_type' in params:
            path_params['route_type'] = params['route_type']  # noqa: E501
        if 'stop_id' in params:
            path_params['stop_id'] = params['stop_id']  # noqa: E501

        # Optional filters become query-string parameters; 'multi' means the
        # list is serialized as repeated key=value pairs.
        query_params = []
        if 'platform_numbers' in params:
            query_params.append(('platform_numbers', params['platform_numbers']))  # noqa: E501
            collection_formats['platform_numbers'] = 'multi'  # noqa: E501
        if 'direction_id' in params:
            query_params.append(('direction_id', params['direction_id']))  # noqa: E501
        if 'look_backwards' in params:
            query_params.append(('look_backwards', params['look_backwards']))  # noqa: E501
        if 'gtfs' in params:
            query_params.append(('gtfs', params['gtfs']))  # noqa: E501
        if 'date_utc' in params:
            query_params.append(('date_utc', params['date_utc']))  # noqa: E501
        if 'max_results' in params:
            query_params.append(('max_results', params['max_results']))  # noqa: E501
        if 'include_cancelled' in params:
            query_params.append(('include_cancelled', params['include_cancelled']))  # noqa: E501
        if 'expand' in params:
            query_params.append(('expand', params['expand']))  # noqa: E501
            collection_formats['expand'] = 'multi'  # noqa: E501
        if 'token' in params:
            query_params.append(('token', params['token']))  # noqa: E501
        if 'devid' in params:
            query_params.append(('devid', params['devid']))  # noqa: E501
        if 'signature' in params:
            query_params.append(('signature', params['signature']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'text/html'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/v3/departures/route_type/{route_type}/stop/{stop_id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V3DeparturesResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def departures_get_for_stop_and_route(self, route_type, stop_id, route_id, **kwargs):  # noqa: E501
        """View departures for a specific route from a stop  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.departures_get_for_stop_and_route(route_type, stop_id, route_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int route_type: Number identifying transport mode; values returned via RouteTypes API (required)
        :param int stop_id: Identifier of stop; values returned by Stops API (required)
        :param str route_id: Identifier of route; values returned by Routes API - v3/routes (required)
        :param int direction_id: Filter by identifier of direction of travel; values returned by Directions API - /v3/directions/route/{route_id}
        :param bool look_backwards: Indicates if filtering runs (and their departures) to those that arrive at destination before date_utc (default = false). Requires max_results > 0.
        :param bool gtfs: Indicates that stop_id parameter will accept \"GTFS stop_id\" data
        :param datetime date_utc: Filter by the date and time of the request (ISO 8601 UTC format) (default = current date and time)
        :param int max_results: Maximum number of results returned
        :param bool include_cancelled: Indicates if cancelled services (if they exist) are returned (default = false) - metropolitan train only
        :param list[str] expand: List objects to be returned in full (i.e. expanded) - options include: all, stop, route, run, direction, disruption
        :param str token: Please ignore
        :param str devid: Your developer id
        :param str signature: Authentication signature for request
        :return: V3DeparturesResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # Convenience wrapper over the *_with_http_info variant (body only).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.departures_get_for_stop_and_route_with_http_info(route_type, stop_id, route_id, **kwargs)  # noqa: E501
        else:
            (data) = self.departures_get_for_stop_and_route_with_http_info(route_type, stop_id, route_id, **kwargs)  # noqa: E501
            return data

    def departures_get_for_stop_and_route_with_http_info(self, route_type, stop_id, route_id, **kwargs):  # noqa: E501
        """View departures for a specific route from a stop  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.departures_get_for_stop_and_route_with_http_info(route_type, stop_id, route_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int route_type: Number identifying transport mode; values returned via RouteTypes API (required)
        :param int stop_id: Identifier of stop; values returned by Stops API (required)
        :param str route_id: Identifier of route; values returned by Routes API - v3/routes (required)
        :param int direction_id: Filter by identifier of direction of travel; values returned by Directions API - /v3/directions/route/{route_id}
        :param bool look_backwards: Indicates if filtering runs (and their departures) to those that arrive at destination before date_utc (default = false). Requires max_results > 0.
        :param bool gtfs: Indicates that stop_id parameter will accept \"GTFS stop_id\" data
        :param datetime date_utc: Filter by the date and time of the request (ISO 8601 UTC format) (default = current date and time)
        :param int max_results: Maximum number of results returned
        :param bool include_cancelled: Indicates if cancelled services (if they exist) are returned (default = false) - metropolitan train only
        :param list[str] expand: List objects to be returned in full (i.e. expanded) - options include: all, stop, route, run, direction, disruption
        :param str token: Please ignore
        :param str devid: Your developer id
        :param str signature: Authentication signature for request
        :return: V3DeparturesResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['route_type', 'stop_id', 'route_id', 'direction_id', 'look_backwards', 'gtfs', 'date_utc', 'max_results', 'include_cancelled', 'expand', 'token', 'devid', 'signature']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge the explicit arguments and kwargs into one dict, rejecting any
        # keyword that is not a known parameter of this operation.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method departures_get_for_stop_and_route" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'route_type' is set
        if ('route_type' not in params or
                params['route_type'] is None):
            raise ValueError("Missing the required parameter `route_type` when calling `departures_get_for_stop_and_route`")  # noqa: E501
        # verify the required parameter 'stop_id' is set
        if ('stop_id' not in params or
                params['stop_id'] is None):
            raise ValueError("Missing the required parameter `stop_id` when calling `departures_get_for_stop_and_route`")  # noqa: E501
        # verify the required parameter 'route_id' is set
        if ('route_id' not in params or
                params['route_id'] is None):
            raise ValueError("Missing the required parameter `route_id` when calling `departures_get_for_stop_and_route`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template below.
        path_params = {}
        if 'route_type' in params:
            path_params['route_type'] = params['route_type']  # noqa: E501
        if 'stop_id' in params:
            path_params['stop_id'] = params['stop_id']  # noqa: E501
        if 'route_id' in params:
            path_params['route_id'] = params['route_id']  # noqa: E501

        # Optional filters become query-string parameters; 'multi' means the
        # list is serialized as repeated key=value pairs.
        query_params = []
        if 'direction_id' in params:
            query_params.append(('direction_id', params['direction_id']))  # noqa: E501
        if 'look_backwards' in params:
            query_params.append(('look_backwards', params['look_backwards']))  # noqa: E501
        if 'gtfs' in params:
            query_params.append(('gtfs', params['gtfs']))  # noqa: E501
        if 'date_utc' in params:
            query_params.append(('date_utc', params['date_utc']))  # noqa: E501
        if 'max_results' in params:
            query_params.append(('max_results', params['max_results']))  # noqa: E501
        if 'include_cancelled' in params:
            query_params.append(('include_cancelled', params['include_cancelled']))  # noqa: E501
        if 'expand' in params:
            query_params.append(('expand', params['expand']))  # noqa: E501
            collection_formats['expand'] = 'multi'  # noqa: E501
        if 'token' in params:
            query_params.append(('token', params['token']))  # noqa: E501
        if 'devid' in params:
            query_params.append(('devid', params['devid']))  # noqa: E501
        if 'signature' in params:
            query_params.append(('signature', params['signature']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'text/html'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/v3/departures/route_type/{route_type}/stop/{stop_id}/route/{route_id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V3DeparturesResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 60.129518 | 2,172 | 0.669839 | 2,610 | 19,963 | 4.945977 | 0.126437 | 0.034085 | 0.030289 | 0.030909 | 0.844295 | 0.829654 | 0.819661 | 0.811372 | 0.810442 | 0.804632 | 0 | 0.014055 | 0.244402 | 19,963 | 331 | 2,173 | 60.311178 | 0.841753 | 0.510444 | 0 | 0.786127 | 0 | 0 | 0.253303 | 0.052184 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028902 | false | 0 | 0.023121 | 0 | 0.092486 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
405299304a10a05ab8a4943c6d5b77abc167e9a0 | 2,127 | py | Python | tests/test_provider_techBeck03_infoblox.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | tests/test_provider_techBeck03_infoblox.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | tests/test_provider_techBeck03_infoblox.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # tests/test_provider_techBeck03_infoblox.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:19:08 UTC)
def test_provider_import():
    """The techBeck03/infoblox provider module must be importable."""
    import importlib

    importlib.import_module("terrascript.provider.techBeck03.infoblox")
def test_resource_import():
    """Every generated infoblox resource class must be importable."""
    from terrascript.resource.techBeck03.infoblox import (  # noqa: F401
        infoblox_a_record,
        infoblox_alias_record,
        infoblox_cname_record,
        infoblox_fixed_address,
        infoblox_host_record,
        infoblox_network,
        infoblox_ptr_record,
        infoblox_range,
    )
def test_datasource_import():
    """Every generated infoblox data-source class must be importable."""
    from terrascript.data.techBeck03.infoblox import (  # noqa: F401
        infoblox_a_record,
        infoblox_alias_record,
        infoblox_cname_record,
        infoblox_fixed_address,
        infoblox_grid,
        infoblox_grid_member,
        infoblox_host_record,
        infoblox_network,
        infoblox_ptr_record,
        infoblox_range,
        infoblox_sequential_address_block,
    )
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.techBeck03.infoblox
#
# t = terrascript.provider.techBeck03.infoblox.infoblox()
# s = str(t)
#
# assert 'https://github.com/techBeck03/terraform-provider-infoblox' in s
# assert '2.0.7' in s
| 33.761905 | 86 | 0.811942 | 258 | 2,127 | 6.515504 | 0.271318 | 0.246282 | 0.271267 | 0.361689 | 0.737061 | 0.679952 | 0.679952 | 0.575253 | 0 | 0 | 0 | 0.033999 | 0.12882 | 2,127 | 62 | 87 | 34.306452 | 0.873179 | 0.240715 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016129 | 0 | 1 | 0.130435 | true | 0 | 1 | 0 | 1.130435 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
dc1269cff4ed4dca8112afd54c5f2c2d3e25816a | 149 | py | Python | geomstats/backend/pytorch_testing.py | tolgabirdal/geomstats | 27667a9fbd8d3b8fa7a5da0e34d880ce0ad39d51 | [
"MIT"
] | null | null | null | geomstats/backend/pytorch_testing.py | tolgabirdal/geomstats | 27667a9fbd8d3b8fa7a5da0e34d880ce0ad39d51 | [
"MIT"
] | null | null | null | geomstats/backend/pytorch_testing.py | tolgabirdal/geomstats | 27667a9fbd8d3b8fa7a5da0e34d880ce0ad39d51 | [
"MIT"
] | null | null | null | """Pytorch based testing backend."""
import torch
def assert_allclose(*args, **kwargs):
    """Assert element-wise closeness via ``torch.testing.assert_allclose``.

    All positional and keyword arguments are forwarded unchanged, so the
    tolerance semantics are exactly those of the torch testing backend.
    """
    outcome = torch.testing.assert_allclose(*args, **kwargs)
    return outcome
| 18.625 | 57 | 0.724832 | 18 | 149 | 5.888889 | 0.666667 | 0.264151 | 0.339623 | 0.45283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.127517 | 149 | 7 | 58 | 21.285714 | 0.815385 | 0.201342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.666667 | 1 | 0.333333 | true | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 10 |
905e15ca76daafd48e6d18c8d8bb2a589b0d5e25 | 5,213 | py | Python | torchdistill/models/custom/bottleneck/detection/rcnn.py | wx-b/torchdistill | bdd6e5376f578d0c217aaf27aaf1c303aef005b1 | [
"MIT"
] | 1 | 2022-03-25T05:05:55.000Z | 2022-03-25T05:05:55.000Z | torchdistill/models/custom/bottleneck/detection/rcnn.py | wx-b/torchdistill | bdd6e5376f578d0c217aaf27aaf1c303aef005b1 | [
"MIT"
] | null | null | null | torchdistill/models/custom/bottleneck/detection/rcnn.py | wx-b/torchdistill | bdd6e5376f578d0c217aaf27aaf1c303aef005b1 | [
"MIT"
] | null | null | null | from torch.hub import load_state_dict_from_url
from torchvision.models.detection.faster_rcnn import FasterRCNN, model_urls as fasterrcnn_model_urls
from torchvision.models.detection.keypoint_rcnn import KeypointRCNN, model_urls as keypointrcnn_model_urls
from torchvision.models.detection.mask_rcnn import MaskRCNN, model_urls as maskrcnn_model_urls
from torchvision.ops import MultiScaleRoIAlign
from torchdistill.models.custom.bottleneck.detection.resnet_backbone import custom_resnet_fpn_backbone
from torchdistill.models.registry import register_model_func
@register_model_func
def custom_fasterrcnn_resnet_fpn(backbone, pretrained=True, progress=True,
                                 num_classes=91, pretrained_backbone=True, trainable_backbone_layers=3, **kwargs):
    """Build a Faster R-CNN detector on a custom bottleneck ResNet-FPN backbone.

    ``backbone`` is a dict with keys ``'name'`` (resnet variant) and
    ``'params'`` (backbone configuration, mutated in place here).  When
    ``pretrained`` is True and the backbone is a resnet50, COCO-pretrained
    detector weights are loaded non-strictly.
    """
    backbone_name = backbone['name']
    backbone_config = backbone['params']
    assert 0 <= trainable_backbone_layers <= 5
    if not pretrained and not pretrained_backbone:
        # Nothing pretrained anywhere, so leave every backbone layer trainable.
        backbone_config['trainable_backbone_layers'] = 5
    if pretrained:
        # Detector weights are loaded below; skip downloading backbone weights.
        backbone_config['pretrained'] = False
    backbone_model = custom_resnet_fpn_backbone(backbone_name, backbone_config)
    feature_map_count = len(backbone_model.body.return_layers)
    if feature_map_count == 4:
        # torchvision's default pooler already matches a standard 4-level FPN.
        box_roi_pool = None
    else:
        featmap_names = [str(idx) for idx in range(feature_map_count)]
        box_roi_pool = MultiScaleRoIAlign(featmap_names=featmap_names,
                                          output_size=7, sampling_ratio=2)
    model = FasterRCNN(backbone_model, num_classes, box_roi_pool=box_roi_pool, **kwargs)
    if pretrained and backbone_name.endswith('resnet50'):
        weights_url = fasterrcnn_model_urls['fasterrcnn_resnet50_fpn_coco']
        state_dict = load_state_dict_from_url(weights_url, progress=progress)
        model.load_state_dict(state_dict, strict=False)
    return model
@register_model_func
def custom_maskrcnn_resnet_fpn(backbone, pretrained=True, progress=True,
                               num_classes=91, pretrained_backbone=True, trainable_backbone_layers=3, **kwargs):
    """Build a Mask R-CNN on a custom bottleneck ResNet-FPN backbone.

    :param backbone: dict with 'name' (resnet variant) and 'params' (backbone config, mutated in place).
    :param pretrained: load COCO-pretrained detector weights (resnet50 backbone only).
    :param progress: show a download progress bar for pretrained weights.
    :param num_classes: number of output classes (including background).
    :param pretrained_backbone: load pretrained backbone weights.
    :param trainable_backbone_layers: number of unfrozen backbone layers, in [0, 5].
    :return: configured ``MaskRCNN`` model.
    """
    backbone_name = backbone['name']
    backbone_params_config = backbone['params']
    assert 0 <= trainable_backbone_layers <= 5
    # dont freeze any layers if pretrained model or backbone is not used
    if not (pretrained or pretrained_backbone):
        backbone_params_config['trainable_backbone_layers'] = 5
    if pretrained:
        # no need to download the backbone if pretrained is set
        backbone_params_config['pretrained'] = False
    backbone_model = custom_resnet_fpn_backbone(backbone_name, backbone_params_config)
    num_feature_maps = len(backbone_model.body.return_layers)
    # torchvision's default poolers assume a 4-level FPN; otherwise build custom ones.
    box_roi_pool = None if num_feature_maps == 4 \
        else MultiScaleRoIAlign(featmap_names=[str(i) for i in range(num_feature_maps)],
                                output_size=7, sampling_ratio=2)
    mask_roi_pool = None if num_feature_maps == 4 \
        else MultiScaleRoIAlign(featmap_names=[str(i) for i in range(num_feature_maps)],
                                output_size=14, sampling_ratio=2)
    # Bug fix: the original call was missing the comma before **kwargs, so Python
    # parsed `mask_roi_pool=mask_roi_pool ** kwargs` (exponentiation) and raised
    # a TypeError at runtime.
    model = MaskRCNN(backbone_model, num_classes, box_roi_pool=box_roi_pool,
                     mask_roi_pool=mask_roi_pool, **kwargs)
    if pretrained and backbone_name.endswith('resnet50'):
        state_dict = load_state_dict_from_url(maskrcnn_model_urls['maskrcnn_resnet50_fpn_coco'], progress=progress)
        model.load_state_dict(state_dict, strict=False)
    return model
@register_model_func
def custom_keypointrcnn_resnet_fpn(backbone, pretrained=True, progress=True, num_classes=2, num_keypoints=17,
                                   pretrained_backbone=True, trainable_backbone_layers=3, **kwargs):
    """Build a Keypoint R-CNN on a custom bottleneck ResNet-FPN backbone.

    ``backbone`` is a dict with keys ``'name'`` and ``'params'`` (the latter
    is mutated in place).  With ``pretrained`` and a resnet50 backbone, the
    COCO keypoint-detection weights are loaded non-strictly.
    """
    backbone_name = backbone['name']
    backbone_config = backbone['params']
    assert 0 <= trainable_backbone_layers <= 5
    if not pretrained and not pretrained_backbone:
        # Nothing pretrained anywhere, so leave every backbone layer trainable.
        backbone_config['trainable_backbone_layers'] = 5
    if pretrained:
        # Detector weights are loaded below; skip downloading backbone weights.
        backbone_config['pretrained'] = False
    backbone_model = custom_resnet_fpn_backbone(backbone_name, backbone_config)
    feature_map_count = len(backbone_model.body.return_layers)
    if feature_map_count == 4:
        # torchvision's built-in poolers already match a standard 4-level FPN.
        box_roi_pool = None
        keypoint_roi_pool = None
    else:
        featmap_names = [str(idx) for idx in range(feature_map_count)]
        box_roi_pool = MultiScaleRoIAlign(featmap_names=featmap_names,
                                          output_size=7, sampling_ratio=2)
        keypoint_roi_pool = MultiScaleRoIAlign(featmap_names=featmap_names,
                                               output_size=14, sampling_ratio=2)
    model = KeypointRCNN(backbone_model, num_classes, num_keypoints=num_keypoints,
                         box_roi_pool=box_roi_pool, keypoint_roi_pool=keypoint_roi_pool, **kwargs)
    if pretrained and backbone_name.endswith('resnet50'):
        weights_url = keypointrcnn_model_urls['keypointrcnn_resnet50_fpn_coco']
        state_dict = load_state_dict_from_url(weights_url, progress=progress)
        model.load_state_dict(state_dict, strict=False)
    return model
| 56.663043 | 119 | 0.739497 | 677 | 5,213 | 5.348597 | 0.141802 | 0.057995 | 0.050262 | 0.043082 | 0.831814 | 0.808616 | 0.781552 | 0.781552 | 0.781552 | 0.766915 | 0 | 0.011364 | 0.189718 | 5,213 | 91 | 120 | 57.285714 | 0.845881 | 0.069442 | 0 | 0.72 | 0 | 0 | 0.050165 | 0.032824 | 0 | 0 | 0 | 0 | 0.04 | 1 | 0.04 | false | 0 | 0.093333 | 0 | 0.173333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
907de9a5b1b4885cdbca6327b6442ad30ccf603b | 244 | py | Python | app_root/settings.py | fangMint/django_web | cf50df6c1a2358996620ac83ffa99b31472d3c07 | [
"AFL-3.0"
] | null | null | null | app_root/settings.py | fangMint/django_web | cf50df6c1a2358996620ac83ffa99b31472d3c07 | [
"AFL-3.0"
] | null | null | null | app_root/settings.py | fangMint/django_web | cf50df6c1a2358996620ac83ffa99b31472d3c07 | [
"AFL-3.0"
] | 1 | 2021-11-22T10:15:50.000Z | 2021-11-22T10:15:50.000Z | from .setting_diff.env_specify import *
if get_env == "dev":
from .setting_diff.setting_dev import *
elif get_env == "test":
from .setting_diff.setting_test import *
elif get_env == "prod":
from .setting_diff.setting_prod import *
| 27.111111 | 44 | 0.721311 | 36 | 244 | 4.583333 | 0.333333 | 0.266667 | 0.363636 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.168033 | 244 | 8 | 45 | 30.5 | 0.812808 | 0 | 0 | 0 | 0 | 0 | 0.045082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.571429 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
908dca1e8b9804cb62642e5176f370a86f3ce48d | 7,828 | py | Python | torch_sparse/cat.py | KonstantinKlepikov/pytorch_sparse | 468aea5b8a94a037659adb632900ad90f680be34 | [
"MIT"
] | 1 | 2020-06-29T19:15:59.000Z | 2020-06-29T19:15:59.000Z | torch_sparse/cat.py | KonstantinKlepikov/pytorch_sparse | 468aea5b8a94a037659adb632900ad90f680be34 | [
"MIT"
] | null | null | null | torch_sparse/cat.py | KonstantinKlepikov/pytorch_sparse | 468aea5b8a94a037659adb632900ad90f680be34 | [
"MIT"
] | null | null | null | from typing import Optional, List
import torch
from torch_sparse.storage import SparseStorage
from torch_sparse.tensor import SparseTensor
def cat(tensors: List[SparseTensor], dim: int) -> SparseTensor:
    """Concatenate a list of :class:`SparseTensor` along dimension ``dim``.

    ``dim == 0`` stacks row-wise, ``dim == 1`` stacks column-wise, and
    ``dim > 1`` concatenates along a dense value dimension.  Cached layout
    tensors (rowptr/rowcount for dim 0, colptr/colcount for dim 1) are
    concatenated too, but only when *every* input provides them.
    """
    assert len(tensors) > 0
    if dim < 0:
        # Normalize negative dims relative to the first tensor's rank.
        dim = tensors[0].dim() + dim

    if dim == 0:
        rows: List[torch.Tensor] = []
        rowptrs: List[torch.Tensor] = []
        cols: List[torch.Tensor] = []
        values: List[torch.Tensor] = []
        sparse_sizes: List[int] = [0, 0]
        rowcounts: List[torch.Tensor] = []
        nnz: int = 0  # running non-zero count, used to shift rowptr offsets
        for tensor in tensors:
            row = tensor.storage._row
            if row is not None:
                # Shift row indices by the number of rows already consumed.
                rows.append(row + sparse_sizes[0])

            rowptr = tensor.storage._rowptr
            if rowptr is not None:
                if len(rowptrs) > 0:
                    # Drop the leading 0 entry of every rowptr but the first,
                    # so the concatenated pointer array stays well-formed.
                    rowptr = rowptr[1:]
                rowptrs.append(rowptr + nnz)

            # Column indices are unaffected by a row-wise concat.
            cols.append(tensor.storage._col)

            value = tensor.storage._value
            if value is not None:
                values.append(value)

            rowcount = tensor.storage._rowcount
            if rowcount is not None:
                rowcounts.append(rowcount)

            # Rows accumulate; the column count is the max over all inputs.
            sparse_sizes[0] += tensor.sparse_size(0)
            sparse_sizes[1] = max(sparse_sizes[1], tensor.sparse_size(1))
            nnz += tensor.nnz()

        # Only keep a concatenated cache when every tensor supplied it.
        row: Optional[torch.Tensor] = None
        if len(rows) == len(tensors):
            row = torch.cat(rows, dim=0)

        rowptr: Optional[torch.Tensor] = None
        if len(rowptrs) == len(tensors):
            rowptr = torch.cat(rowptrs, dim=0)

        col = torch.cat(cols, dim=0)

        value: Optional[torch.Tensor] = None
        if len(values) == len(tensors):
            value = torch.cat(values, dim=0)

        rowcount: Optional[torch.Tensor] = None
        if len(rowcounts) == len(tensors):
            rowcount = torch.cat(rowcounts, dim=0)

        # Row-major order is preserved by a row-wise concat, so is_sorted=True.
        storage = SparseStorage(row=row, rowptr=rowptr, col=col, value=value,
                                sparse_sizes=sparse_sizes, rowcount=rowcount,
                                colptr=None, colcount=None, csr2csc=None,
                                csc2csr=None, is_sorted=True)
        return tensors[0].from_storage(storage)

    elif dim == 1:
        rows: List[torch.Tensor] = []
        cols: List[torch.Tensor] = []
        values: List[torch.Tensor] = []
        sparse_sizes: List[int] = [0, 0]
        colptrs: List[torch.Tensor] = []
        colcounts: List[torch.Tensor] = []
        nnz: int = 0  # running non-zero count, used to shift colptr offsets
        for tensor in tensors:
            # coo() materializes the row vector, which is always needed here.
            row, col, value = tensor.coo()
            rows.append(row)
            # Shift column indices by the number of columns already consumed.
            cols.append(tensor.storage._col + sparse_sizes[1])
            if value is not None:
                values.append(value)

            colptr = tensor.storage._colptr
            if colptr is not None:
                if len(colptrs) > 0:
                    # Drop the leading 0 entry of every colptr but the first.
                    colptr = colptr[1:]
                colptrs.append(colptr + nnz)

            colcount = tensor.storage._colcount
            if colcount is not None:
                colcounts.append(colcount)

            # Columns accumulate; the row count is the max over all inputs.
            sparse_sizes[0] = max(sparse_sizes[0], tensor.sparse_size(0))
            sparse_sizes[1] += tensor.sparse_size(1)
            nnz += tensor.nnz()

        row = torch.cat(rows, dim=0)
        col = torch.cat(cols, dim=0)

        # Only keep a concatenated cache when every tensor supplied it.
        value: Optional[torch.Tensor] = None
        if len(values) == len(tensors):
            value = torch.cat(values, dim=0)

        colptr: Optional[torch.Tensor] = None
        if len(colptrs) == len(tensors):
            colptr = torch.cat(colptrs, dim=0)

        colcount: Optional[torch.Tensor] = None
        if len(colcounts) == len(tensors):
            colcount = torch.cat(colcounts, dim=0)

        # Row indices from different tensors interleave, so the result is
        # not row-major sorted (is_sorted=False).
        storage = SparseStorage(row=row, rowptr=None, col=col, value=value,
                                sparse_sizes=sparse_sizes, rowcount=None,
                                colptr=colptr, colcount=colcount, csr2csc=None,
                                csc2csr=None, is_sorted=False)
        return tensors[0].from_storage(storage)

    elif dim > 1 and dim < tensors[0].dim():
        # Dense-dimension concat: only the value tensors change; the sparse
        # layout is shared.  Value dim k corresponds to tensor dim k + 1.
        values: List[torch.Tensor] = []
        for tensor in tensors:
            value = tensor.storage.value()
            if value is not None:
                values.append(value)

        value: Optional[torch.Tensor] = None
        if len(values) == len(tensors):
            value = torch.cat(values, dim=dim - 1)

        return tensors[0].set_value(value, layout='coo')

    else:
        raise IndexError(
            (f'Dimension out of range: Expected to be in range of '
             f'[{-tensors[0].dim()}, {tensors[0].dim() - 1}], but got {dim}.'))
def cat_diag(tensors: List[SparseTensor]) -> SparseTensor:
    """Concatenate sparse tensors block-diagonally.

    Each input occupies its own disjoint row *and* column range of the
    result.  All cached layout tensors (rowptr, rowcount, colptr, colcount,
    csr2csc, csc2csr) are concatenated with the appropriate offsets, but a
    given cache is only kept when *every* input provides it.
    """
    assert len(tensors) > 0

    rows: List[torch.Tensor] = []
    rowptrs: List[torch.Tensor] = []
    cols: List[torch.Tensor] = []
    values: List[torch.Tensor] = []
    sparse_sizes: List[int] = [0, 0]
    rowcounts: List[torch.Tensor] = []
    colptrs: List[torch.Tensor] = []
    colcounts: List[torch.Tensor] = []
    csr2cscs: List[torch.Tensor] = []
    csc2csrs: List[torch.Tensor] = []
    nnz: int = 0  # running non-zero count, shifts ptr/permutation offsets

    for tensor in tensors:
        row = tensor.storage._row
        if row is not None:
            # Shift row indices into this block's row range.
            rows.append(row + sparse_sizes[0])

        rowptr = tensor.storage._rowptr
        if rowptr is not None:
            if len(rowptrs) > 0:
                # Drop the leading 0 entry of every rowptr but the first.
                rowptr = rowptr[1:]
            rowptrs.append(rowptr + nnz)

        # Shift column indices into this block's column range.
        cols.append(tensor.storage._col + sparse_sizes[1])

        value = tensor.storage._value
        if value is not None:
            values.append(value)

        rowcount = tensor.storage._rowcount
        if rowcount is not None:
            rowcounts.append(rowcount)

        colptr = tensor.storage._colptr
        if colptr is not None:
            if len(colptrs) > 0:
                # Drop the leading 0 entry of every colptr but the first.
                colptr = colptr[1:]
            colptrs.append(colptr + nnz)

        colcount = tensor.storage._colcount
        if colcount is not None:
            colcounts.append(colcount)

        # CSR<->CSC permutations index into the nnz axis, so shift by nnz.
        csr2csc = tensor.storage._csr2csc
        if csr2csc is not None:
            csr2cscs.append(csr2csc + nnz)

        csc2csr = tensor.storage._csc2csr
        if csc2csr is not None:
            csc2csrs.append(csc2csr + nnz)

        # Block-diagonal layout: both row and column counts accumulate.
        sparse_sizes[0] += tensor.sparse_size(0)
        sparse_sizes[1] += tensor.sparse_size(1)
        nnz += tensor.nnz()

    # Only keep a concatenated cache when every tensor supplied it.
    row: Optional[torch.Tensor] = None
    if len(rows) == len(tensors):
        row = torch.cat(rows, dim=0)

    rowptr: Optional[torch.Tensor] = None
    if len(rowptrs) == len(tensors):
        rowptr = torch.cat(rowptrs, dim=0)

    col = torch.cat(cols, dim=0)

    value: Optional[torch.Tensor] = None
    if len(values) == len(tensors):
        value = torch.cat(values, dim=0)

    rowcount: Optional[torch.Tensor] = None
    if len(rowcounts) == len(tensors):
        rowcount = torch.cat(rowcounts, dim=0)

    colptr: Optional[torch.Tensor] = None
    if len(colptrs) == len(tensors):
        colptr = torch.cat(colptrs, dim=0)

    colcount: Optional[torch.Tensor] = None
    if len(colcounts) == len(tensors):
        colcount = torch.cat(colcounts, dim=0)

    csr2csc: Optional[torch.Tensor] = None
    if len(csr2cscs) == len(tensors):
        csr2csc = torch.cat(csr2cscs, dim=0)

    csc2csr: Optional[torch.Tensor] = None
    if len(csc2csrs) == len(tensors):
        csc2csr = torch.cat(csc2csrs, dim=0)

    # Blocks occupy increasing row ranges, so row-major order is preserved.
    storage = SparseStorage(row=row, rowptr=rowptr, col=col, value=value,
                            sparse_sizes=sparse_sizes, rowcount=rowcount,
                            colptr=colptr, colcount=colcount, csr2csc=csr2csc,
                            csc2csr=csc2csr, is_sorted=True)
    return tensors[0].from_storage(storage)
| 32.481328 | 79 | 0.561191 | 916 | 7,828 | 4.7369 | 0.078603 | 0.091265 | 0.06914 | 0.084812 | 0.828301 | 0.795114 | 0.767458 | 0.760083 | 0.723208 | 0.681263 | 0 | 0.01847 | 0.322177 | 7,828 | 240 | 80 | 32.616667 | 0.799284 | 0 | 0 | 0.755435 | 0 | 0.005435 | 0.014691 | 0.002683 | 0 | 0 | 0 | 0 | 0.01087 | 1 | 0.01087 | false | 0 | 0.021739 | 0 | 0.054348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
908e094eb79a1ead6c8aac3d9048eec008fd584a | 340 | py | Python | lightning_transformers/task/nlp/text_classification/__init__.py | techthiyanes/lightning-transformers | 4832f00ee26ad8d3d39e77d0d89f220bcc8de944 | [
"Apache-2.0"
] | null | null | null | lightning_transformers/task/nlp/text_classification/__init__.py | techthiyanes/lightning-transformers | 4832f00ee26ad8d3d39e77d0d89f220bcc8de944 | [
"Apache-2.0"
] | null | null | null | lightning_transformers/task/nlp/text_classification/__init__.py | techthiyanes/lightning-transformers | 4832f00ee26ad8d3d39e77d0d89f220bcc8de944 | [
"Apache-2.0"
] | null | null | null | from lightning_transformers.task.nlp.text_classification.config import TextClassificationDataConfig # noqa: F401
from lightning_transformers.task.nlp.text_classification.data import TextClassificationDataModule # noqa: F401
from lightning_transformers.task.nlp.text_classification.model import TextClassificationTransformer # noqa: F401
| 85 | 113 | 0.876471 | 36 | 340 | 8.111111 | 0.444444 | 0.133562 | 0.256849 | 0.297945 | 0.568493 | 0.568493 | 0.568493 | 0.39726 | 0.39726 | 0 | 0 | 0.028481 | 0.070588 | 340 | 3 | 114 | 113.333333 | 0.89557 | 0.094118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
90b5ca6fbfa9e584072081928e14054373e0aeda | 18,815 | py | Python | tests/test_dynamodb2/test_dynamodb_validation.py | oakbramble/moto | 6350d8ec4c59eaf12b83385b6acd386e5c2f5593 | [
"Apache-2.0"
] | null | null | null | tests/test_dynamodb2/test_dynamodb_validation.py | oakbramble/moto | 6350d8ec4c59eaf12b83385b6acd386e5c2f5593 | [
"Apache-2.0"
] | 4 | 2017-09-30T07:52:52.000Z | 2021-12-13T06:56:55.000Z | tests/test_dynamodb2/test_dynamodb_validation.py | oakbramble/moto | 6350d8ec4c59eaf12b83385b6acd386e5c2f5593 | [
"Apache-2.0"
] | 2 | 2021-11-24T08:05:43.000Z | 2021-11-25T16:18:48.000Z | import pytest
from moto.dynamodb2.exceptions import (
AttributeIsReservedKeyword,
ExpressionAttributeValueNotDefined,
AttributeDoesNotExist,
ExpressionAttributeNameNotDefined,
IncorrectOperandType,
InvalidUpdateExpressionInvalidDocumentPath,
EmptyKeyAttributeException,
)
from moto.dynamodb2.models import Item, DynamoType
from moto.dynamodb2.parsing.ast_nodes import (
NodeDepthLeftTypeFetcher,
UpdateExpressionSetAction,
DDBTypedValue,
)
from moto.dynamodb2.parsing.expressions import UpdateExpressionParser
from moto.dynamodb2.parsing.validators import UpdateExpressionValidator
def test_valid_update_expression(table):
    """A well-formed SET expression over defined attribute values validates cleanly."""
    expression = "set forum_name=:NewName, forum_type=:NewType"
    expression_values = {
        ":NewName": {"S": "AmazingForum"},
        ":NewType": {"S": "BASIC"},
    }
    target_item = Item(
        hash_key=DynamoType({"S": "forum_name"}),
        hash_key_type="TYPE",
        range_key=DynamoType({"S": "forum_type"}),
        range_key_type="TYPE",
        attrs={"forum_name": {"S": "hello"}},
    )
    ast = UpdateExpressionParser.make(expression)
    validator = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values=expression_values,
        item=target_item,
        table=table,
    )
    # Must not raise: every placeholder is defined and no reserved word is used.
    validator.validate()
def test_validation_of_empty_string_key_val(table):
    """Assigning an empty string to a key attribute must raise EmptyKeyAttributeException."""
    with pytest.raises(EmptyKeyAttributeException):
        expression = "set forum_name=:NewName"
        expression_values = {":NewName": {"S": ""}}
        ast = UpdateExpressionParser.make(expression)
        target_item = Item(
            hash_key=DynamoType({"S": "forum_name"}),
            hash_key_type="TYPE",
            range_key=None,
            range_key_type=None,
            attrs={"forum_name": {"S": "hello"}},
        )
        validator = UpdateExpressionValidator(
            ast,
            expression_attribute_names=None,
            expression_attribute_values=expression_values,
            item=target_item,
            table=table,
        )
        validator.validate()
def test_validation_of_update_expression_with_keyword(table):
    """Using the reserved keyword "path" as a bare attribute name must be rejected.

    Reserved words may only be referenced through expression attribute names
    (e.g. "#path"), so the validator raises AttributeIsReservedKeyword.
    """
    update_expression = "SET myNum = path + :val"
    update_expression_values = {":val": {"N": "3"}}
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "path": {"N": "3"}},
    )
    # pytest.raises fails the test automatically when nothing is raised,
    # replacing the try / "assert False" / except pattern; consistent with
    # test_validation_of_empty_string_key_val.
    with pytest.raises(AttributeIsReservedKeyword) as exc_info:
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names=None,
            expression_attribute_values=update_expression_values,
            item=item,
            table=table,
        ).validate()
    assert exc_info.value.keyword == "path"
@pytest.mark.parametrize(
    "update_expression", ["SET a = #b + :val2", "SET a = :val2 + #b"],
)
def test_validation_of_a_set_statement_with_incorrect_passed_value(
    update_expression, table
):
    """
    By running permutations it shows that values are replaced prior to resolving attributes.

    An error occurred (ValidationException) when calling the UpdateItem operation: Invalid UpdateExpression:
    An expression attribute value used in expression is not defined; attribute value: :val2
    """
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "b": {"N": "3"}},
    )
    # BUG FIX: the original try/except had no "assert False" fallthrough, so
    # the test silently passed when no exception was raised at all.
    # pytest.raises guarantees the exception actually occurred.
    with pytest.raises(ExpressionAttributeValueNotDefined) as exc_info:
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names={"#b": "ok"},
            expression_attribute_values={":val": {"N": "3"}},
            item=item,
            table=table,
        ).validate()
    assert exc_info.value.attribute_value == ":val2"
def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_item(
    table,
):
    """
    When an update expression tries to get an attribute that does not exist it must throw the appropriate exception.

    An error occurred (ValidationException) when calling the UpdateItem operation:
    The provided expression refers to an attribute that does not exist in the item
    """
    update_expression = "SET a = nonexistent"
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "path": {"N": "3"}},
    )
    # pytest.raises replaces the try / "assert False" / except idiom and is
    # consistent with test_validation_of_empty_string_key_val.
    with pytest.raises(AttributeDoesNotExist):
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names=None,
            expression_attribute_values=None,
            item=item,
            table=table,
        ).validate()
@pytest.mark.parametrize(
    "update_expression", ["SET a = #c", "SET a = #c + #d"],
)
def test_validation_of_update_expression_with_attribute_name_that_is_not_defined(
    update_expression, table,
):
    """
    When an update expression tries to get an attribute name that is not provided it must throw an exception.

    An error occurred (ValidationException) when calling the UpdateItem operation: Invalid UpdateExpression:
    An expression attribute name used in the document path is not defined; attribute name: #c
    """
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "path": {"N": "3"}},
    )
    # Only "#b" is provided, so "#c" (the first undefined name in both
    # parametrized expressions) must be reported.  pytest.raises replaces the
    # try / "assert False" / except idiom.
    with pytest.raises(ExpressionAttributeNameNotDefined) as exc_info:
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names={"#b": "ok"},
            expression_attribute_values=None,
            item=item,
            table=table,
        ).validate()
    assert exc_info.value.not_defined_attribute_name == "#c"
def test_validation_of_if_not_exists_not_existing_invalid_replace_value(table):
    """if_not_exists with a fallback path ("a.c") that cannot be resolved must raise.

    "b" does not exist on the item, so the fallback is evaluated; "a" is a
    string, so "a.c" resolves to a non-existent attribute.
    """
    update_expression = "SET a = if_not_exists(b, a.c)"
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "a": {"S": "A"}},
    )
    # pytest.raises replaces the try / "assert False" / except idiom.
    with pytest.raises(AttributeDoesNotExist):
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names=None,
            expression_attribute_values=None,
            item=item,
            table=table,
        ).validate()
def get_first_node_of_type(ast, node_type):
    """Return the first node of *node_type* yielded by a depth-left traversal of *ast*.

    Raises StopIteration if no such node exists.
    """
    fetcher = NodeDepthLeftTypeFetcher(node_type, ast)
    return next(fetcher)
def get_set_action_value(ast):
    """
    Helper that takes an AST and gets the first UpdateExpressionSetAction and retrieves the value of that action.
    This should only be called on validated expressions.
    Args:
        ast(Node):
    Returns:
        DynamoType: The DynamoType object representing the Dynamo value.
    """
    first_set_action = get_first_node_of_type(ast, UpdateExpressionSetAction)
    # children[1] of a SET action holds the (already resolved) typed value.
    value_node = first_set_action.children[1]
    assert isinstance(value_node, DDBTypedValue)
    resolved_value = value_node.children[0]
    assert isinstance(resolved_value, DynamoType)
    return resolved_value
def test_validation_of_if_not_exists_not_existing_value(table):
    """if_not_exists falls back to its second argument when the first is missing."""
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "a": {"S": "A"}},
    )
    ast = UpdateExpressionParser.make("SET a = if_not_exists(b, a)")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values=None,
        item=target_item,
        table=table,
    ).validate()
    # "b" is absent, so the value of "a" is used.
    assert get_set_action_value(validated) == DynamoType({"S": "A"})
def test_validation_of_if_not_exists_with_existing_attribute_should_return_attribute(
    table,
):
    """if_not_exists resolves to its first argument when that attribute exists."""
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "a": {"S": "A"}, "b": {"S": "B"}},
    )
    ast = UpdateExpressionParser.make("SET a = if_not_exists(b, a)")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values=None,
        item=target_item,
        table=table,
    ).validate()
    # "b" exists, so the fallback "a" is ignored.
    assert get_set_action_value(validated) == DynamoType({"S": "B"})
def test_validation_of_if_not_exists_with_existing_attribute_should_return_value(table):
    """if_not_exists ignores the literal fallback when the attribute exists."""
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "b": {"N": "3"}},
    )
    ast = UpdateExpressionParser.make("SET a = if_not_exists(b, :val)")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values={":val": {"N": "4"}},
        item=target_item,
        table=table,
    ).validate()
    # "b" exists with value 3, so :val (4) is not used.
    assert get_set_action_value(validated) == DynamoType({"N": "3"})
def test_validation_of_if_not_exists_with_non_existing_attribute_should_return_value(
    table,
):
    """if_not_exists uses the literal fallback when the attribute is absent."""
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}},
    )
    ast = UpdateExpressionParser.make("SET a = if_not_exists(b, :val)")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values={":val": {"N": "4"}},
        item=target_item,
        table=table,
    ).validate()
    # "b" is absent, so :val (4) is used.
    assert get_set_action_value(validated) == DynamoType({"N": "4"})
def test_validation_of_sum_operation(table):
    """Validation resolves "a + b" of two numeric attributes to their sum."""
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}},
    )
    ast = UpdateExpressionParser.make("SET a = a + b")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values=None,
        item=target_item,
        table=table,
    ).validate()
    # 3 + 4 == 7
    assert get_set_action_value(validated) == DynamoType({"N": "7"})
def test_validation_homogeneous_list_append_function(table):
    """list_append concatenates two lists of the same element type."""
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
    )
    ast = UpdateExpressionParser.make("SET ri = list_append(ri, :vals)")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values={":vals": {"L": [{"S": "i3"}, {"S": "i4"}]}},
        item=target_item,
        table=table,
    ).validate()
    expected = DynamoType(
        {"L": [{"S": "i1"}, {"S": "i2"}, {"S": "i3"}, {"S": "i4"}]}
    )
    assert get_set_action_value(validated) == expected
def test_validation_hetereogenous_list_append_function(table):
    """list_append also concatenates lists of differing element types.

    (Name kept as-is for history, despite the "hetereogenous" typo.)
    """
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
    )
    ast = UpdateExpressionParser.make("SET ri = list_append(ri, :vals)")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values={":vals": {"L": [{"N": "3"}]}},
        item=target_item,
        table=table,
    ).validate()
    expected = DynamoType({"L": [{"S": "i1"}, {"S": "i2"}, {"N": "3"}]})
    assert get_set_action_value(validated) == expected
def test_validation_list_append_function_with_non_list_arg(table):
    """
    Must error out:
    Invalid UpdateExpression: Incorrect operand type for operator or function;
    operator or function: list_append, operand type: S'
    Returns:
    """
    update_expression = "SET ri = list_append(ri, :vals)"
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
    )
    # BUG FIX: the original try/except had no "assert False" fallthrough, so
    # the test silently passed when no exception was raised.  pytest.raises
    # guarantees IncorrectOperandType actually occurred.
    with pytest.raises(IncorrectOperandType) as exc_info:
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names=None,
            expression_attribute_values={":vals": {"S": "N"}},
            item=item,
            table=table,
        ).validate()
    assert exc_info.value.operand_type == "S"
    assert exc_info.value.operator_or_function == "list_append"
def test_sum_with_incompatible_types(table):
    """
    Must error out:
    Invalid UpdateExpression: Incorrect operand type for operator or function; operator or function: +, operand type: S'
    Returns:
    """
    update_expression = "SET ri = :val + :val2"
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}},
    )
    # BUG FIX: the original try/except had no "assert False" fallthrough, so
    # the test silently passed when no exception was raised.  pytest.raises
    # guarantees IncorrectOperandType actually occurred.
    with pytest.raises(IncorrectOperandType) as exc_info:
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names=None,
            expression_attribute_values={":val": {"S": "N"}, ":val2": {"N": "3"}},
            item=item,
            table=table,
        ).validate()
    assert exc_info.value.operand_type == "S"
    assert exc_info.value.operator_or_function == "+"
def test_validation_of_subraction_operation(table):
    """Validation resolves ":val - :val2" of two numeric values to their difference.

    (Name kept as-is for history, despite the "subraction" typo.)
    """
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}},
    )
    ast = UpdateExpressionParser.make("SET ri = :val - :val2")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values={":val": {"N": "1"}, ":val2": {"N": "3"}},
        item=target_item,
        table=table,
    ).validate()
    # 1 - 3 == -2
    assert get_set_action_value(validated) == DynamoType({"N": "-2"})
def test_cannot_index_into_a_string(table):
    """
    Must error out:
    The document path provided in the update expression is invalid for update'
    """
    update_expression = "set itemstr[1]=:Item"
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "foo2"}, "itemstr": {"S": "somestring"}},
    )
    # pytest.raises replaces the try / "assert False" / except idiom.
    with pytest.raises(InvalidUpdateExpressionInvalidDocumentPath):
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names=None,
            expression_attribute_values={":Item": {"S": "string_update"}},
            item=item,
            table=table,
        ).validate()
def test_validation_set_path_does_not_need_to_be_resolvable_when_setting_a_new_attribute(
    table,
):
    """If this step just passes we are happy enough"""
    target_item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "foo2"}, "a": {"N": "3"}},
    )
    ast = UpdateExpressionParser.make("set d=a")
    validated = UpdateExpressionValidator(
        ast,
        expression_attribute_names=None,
        expression_attribute_values=None,
        item=target_item,
        table=table,
    ).validate()
    # The new attribute "d" takes the value of "a".
    assert get_set_action_value(validated) == DynamoType({"N": "3"})
def test_validation_set_path_does_not_need_to_be_resolvable_but_must_be_creatable_when_setting_a_new_attribute(
    table,
):
    """Setting a nested path ("d.e") whose parent ("d") does not exist must raise.

    Unlike a top-level attribute, a nested attribute cannot be created when
    its parent map is missing from the item.
    """
    update_expression = "set d.e=a"
    update_expression_ast = UpdateExpressionParser.make(update_expression)
    item = Item(
        hash_key=DynamoType({"S": "id"}),
        hash_key_type="TYPE",
        range_key=None,
        range_key_type=None,
        attrs={"id": {"S": "foo2"}, "a": {"N": "3"}},
    )
    # pytest.raises replaces the try / "assert False" / except idiom.
    with pytest.raises(InvalidUpdateExpressionInvalidDocumentPath):
        UpdateExpressionValidator(
            update_expression_ast,
            expression_attribute_names=None,
            expression_attribute_values=None,
            item=item,
            table=table,
        ).validate()
| 34.842593 | 120 | 0.634494 | 2,052 | 18,815 | 5.512183 | 0.098441 | 0.140041 | 0.067191 | 0.072496 | 0.808151 | 0.77933 | 0.756167 | 0.747149 | 0.733269 | 0.719211 | 0 | 0.005281 | 0.245124 | 18,815 | 539 | 121 | 34.907236 | 0.7911 | 0.082381 | 0 | 0.732739 | 0 | 0 | 0.073109 | 0 | 0 | 0 | 0 | 0 | 0.062361 | 1 | 0.048998 | false | 0.002227 | 0.013363 | 0.002227 | 0.066815 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
291cb6a95f93b82906fee5a0165985aa04e8fd08 | 17,163 | py | Python | router/move_calculator_test.py | awesome-archive/city_visit_planner | 20befca3d70db61bc83356eedd490a298b27f96f | [
"MIT"
] | 1 | 2019-11-14T22:08:59.000Z | 2019-11-14T22:08:59.000Z | router/move_calculator_test.py | sandoche/city_visit_planner | 20befca3d70db61bc83356eedd490a298b27f96f | [
"MIT"
] | null | null | null | router/move_calculator_test.py | sandoche/city_visit_planner | 20befca3d70db61bc83356eedd490a298b27f96f | [
"MIT"
] | null | null | null | import unittest
from data import point
from data import city_visit
from router import move_calculator as move_calculator_
class MoveCalculatorTest(unittest.TestCase):
    """Shared fixture for move-calculator tests.

    Defines three San Francisco locations and precomputes the expected travel
    time in hours between them for each transport mode.  Naming scheme used by
    the derived attributes: fb = Ferry Building, p39 = Pier 39,
    np39 = a point near Pier 39.
    """

    def setUp(self):
        # Ferry Building, San Francisco.
        self.ferry_building_coordinates = point.Coordinates(37.7955, -122.3937)
        # Pier 39, San Francisco.
        self.pier_39_coordinates = point.Coordinates(37.8100, -122.4104)
        # Place near Pier 39, San Francisco.
        self.near_pier_39_coordinates = point.Coordinates(37.8097, -122.4104)
        self.walking_speed = 2.  # Walking speed in mph.
        self.driving_speed = 20.  # Speed of car in traffic jams in mph.
        # 10 minutes to find and then park a car and 10 minutes to find a parking
        # spot when arrived.
        self.pause_before_driving = 0.30
        self.ptt_speed = 15.  # Speed of Public Transportation or Taxi in mph.
        # 15 minutes to buy a ticket and wait in case of public transportation or
        # call a taxi.
        self.pause_before_ptt = 0.25
        # Expected pure travel times: miles / mph == hours.  The mile figures
        # (1.916, 1.894, 0.029) are presumably the distances the calculators
        # compute between the coordinates above -- confirm against the
        # distance implementation if they drift.
        self.fb_to_p39_walking = 1.916 / self.walking_speed
        self.fb_to_np39_walking = 1.894 / self.walking_speed
        self.p39_to_np39_walking = 0.029 / self.walking_speed
        self.fb_to_p39_driving = 1.916 / self.driving_speed
        self.fb_to_np39_driving = 1.894 / self.driving_speed
        self.p39_to_np39_driving = 0.029 / self.driving_speed
        self.fb_to_p39_ptt = 1.916 / self.ptt_speed
        self.fb_to_np39_ptt = 1.894 / self.ptt_speed
        self.p39_to_np39_ptt = 0.029 / self.ptt_speed
        # Expected travel times including the fixed pause before the move.
        self.fb_to_p39_pause_and_driving = (
            self.fb_to_p39_driving + self.pause_before_driving)
        self.fb_to_np39_pause_and_driving = (
            self.fb_to_np39_driving + self.pause_before_driving)
        self.p39_to_np39_pause_and_driving = (
            self.p39_to_np39_driving + self.pause_before_driving)
        self.fb_to_p39_pause_and_ptt = self.fb_to_p39_ptt + self.pause_before_ptt
        self.fb_to_np39_pause_and_ptt = self.fb_to_np39_ptt + self.pause_before_ptt
        self.p39_to_np39_pause_and_ptt = (
            self.p39_to_np39_ptt + self.pause_before_ptt)
        super(MoveCalculatorTest, self).setUp()
class SimpleMoveCalculatorTest(MoveCalculatorTest):
    """Tests for SimpleMoveCalculator: one move type at a fixed speed,
    optionally preceded by a constant pause."""

    def _assert_move(self, move_calculator, from_coordinates, to_coordinates,
                     expected_move_hours, expected_move_type):
        """Calculate one move and check hours (3 places), type and endpoints.

        Factors out the 4-assertion group that was copy-pasted nine times in
        the original test methods.
        """
        move_description = move_calculator.CalculateMoveDescription(
            from_coordinates, to_coordinates)
        self.assertAlmostEqual(
            expected_move_hours, move_description.move_hours, places=3)
        self.assertEqual(expected_move_type, move_description.move_type)
        self.assertEqual(from_coordinates, move_description.from_coordinates)
        self.assertEqual(to_coordinates, move_description.to_coordinates)

    def testCalculateMoveDescriptionWalking(self):
        move_calculator = move_calculator_.SimpleMoveCalculator(
            self.walking_speed, city_visit.MoveType.walking)
        self._assert_move(
            move_calculator, self.ferry_building_coordinates,
            self.pier_39_coordinates, self.fb_to_p39_walking,
            city_visit.MoveType.walking)
        self._assert_move(
            move_calculator, self.ferry_building_coordinates,
            self.near_pier_39_coordinates, self.fb_to_np39_walking,
            city_visit.MoveType.walking)
        self._assert_move(
            move_calculator, self.pier_39_coordinates,
            self.near_pier_39_coordinates, self.p39_to_np39_walking,
            city_visit.MoveType.walking)

    def testCalculateMoveDescriptionDriving(self):
        move_calculator = move_calculator_.SimpleMoveCalculator(
            self.driving_speed, city_visit.MoveType.driving)
        self._assert_move(
            move_calculator, self.ferry_building_coordinates,
            self.pier_39_coordinates, self.fb_to_p39_driving,
            city_visit.MoveType.driving)
        self._assert_move(
            move_calculator, self.ferry_building_coordinates,
            self.near_pier_39_coordinates, self.fb_to_np39_driving,
            city_visit.MoveType.driving)
        self._assert_move(
            move_calculator, self.pier_39_coordinates,
            self.near_pier_39_coordinates, self.p39_to_np39_driving,
            city_visit.MoveType.driving)

    def testCalculateMoveDescriptionPauseBeforeDriving(self):
        # With a pause, the expected hours include pause_before_driving.
        move_calculator = move_calculator_.SimpleMoveCalculator(
            self.driving_speed, city_visit.MoveType.driving,
            pause=self.pause_before_driving)
        self._assert_move(
            move_calculator, self.ferry_building_coordinates,
            self.pier_39_coordinates, self.fb_to_p39_pause_and_driving,
            city_visit.MoveType.driving)
        self._assert_move(
            move_calculator, self.ferry_building_coordinates,
            self.near_pier_39_coordinates, self.fb_to_np39_pause_and_driving,
            city_visit.MoveType.driving)
        self._assert_move(
            move_calculator, self.pier_39_coordinates,
            self.near_pier_39_coordinates, self.p39_to_np39_pause_and_driving,
            city_visit.MoveType.driving)
class MultiMoveCalculatorCalculatorTest(MoveCalculatorTest):
def testCalculateMoveDescriptionGeneral(self):
move_calculator = move_calculator_.MultiMoveCalculator(
[0.5],
[move_calculator_.SimpleMoveCalculator(self.walking_speed, city_visit.MoveType.walking),
move_calculator_.SimpleMoveCalculator(
self.ptt_speed, city_visit.MoveType.ptt, pause=self.pause_before_ptt)])
move_description = move_calculator.CalculateMoveDescription(
self.ferry_building_coordinates, self.pier_39_coordinates)
self.assertAlmostEqual(
self.fb_to_p39_pause_and_ptt, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.ptt, move_description.move_type)
self.assertEqual(self.ferry_building_coordinates,
move_description.from_coordinates)
self.assertEqual(self.pier_39_coordinates, move_description.to_coordinates)
move_description = move_calculator.CalculateMoveDescription(
self.ferry_building_coordinates, self.near_pier_39_coordinates)
self.assertAlmostEqual(
self.fb_to_np39_pause_and_ptt, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.ptt, move_description.move_type)
self.assertEqual(self.ferry_building_coordinates,
move_description.from_coordinates)
self.assertEqual(self.near_pier_39_coordinates,
move_description.to_coordinates)
move_description = move_calculator.CalculateMoveDescription(
self.pier_39_coordinates, self.near_pier_39_coordinates)
self.assertAlmostEqual(
self.p39_to_np39_walking, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.walking, move_description.move_type)
self.assertEqual(self.pier_39_coordinates,
move_description.from_coordinates)
self.assertEqual(self.near_pier_39_coordinates,
move_description.to_coordinates)
def testCalculateMoveDescriptionWalkingOnly(self):
move_calculator = move_calculator_.MultiMoveCalculator(
[2.0],
[move_calculator_.SimpleMoveCalculator(self.walking_speed, city_visit.MoveType.walking),
move_calculator_.SimpleMoveCalculator(
self.ptt_speed, city_visit.MoveType.ptt, pause=self.pause_before_ptt)])
move_description = move_calculator.CalculateMoveDescription(
self.ferry_building_coordinates, self.pier_39_coordinates)
self.assertAlmostEqual(
self.fb_to_p39_walking, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.walking, move_description.move_type)
self.assertEqual(self.ferry_building_coordinates,
move_description.from_coordinates)
self.assertEqual(self.pier_39_coordinates, move_description.to_coordinates)
move_description = move_calculator.CalculateMoveDescription(
self.ferry_building_coordinates, self.near_pier_39_coordinates)
self.assertAlmostEqual(
self.fb_to_np39_walking, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.walking, move_description.move_type)
self.assertEqual(self.ferry_building_coordinates,
move_description.from_coordinates)
self.assertEqual(self.near_pier_39_coordinates,
move_description.to_coordinates)
move_description = move_calculator.CalculateMoveDescription(
self.pier_39_coordinates, self.near_pier_39_coordinates)
self.assertAlmostEqual(
self.p39_to_np39_walking, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.walking, move_description.move_type)
self.assertEqual(self.pier_39_coordinates,
move_description.from_coordinates)
self.assertEqual(self.near_pier_39_coordinates,
move_description.to_coordinates)
def testCalculateMoveDescriptionPTTOnly(self):
move_calculator = move_calculator_.MultiMoveCalculator(
[0.02],
[move_calculator_.SimpleMoveCalculator(self.walking_speed, city_visit.MoveType.walking),
move_calculator_.SimpleMoveCalculator(
self.ptt_speed, city_visit.MoveType.ptt, pause=self.pause_before_ptt)])
move_description = move_calculator.CalculateMoveDescription(
self.ferry_building_coordinates, self.pier_39_coordinates)
self.assertAlmostEqual(
self.fb_to_p39_pause_and_ptt,
move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.ptt, move_description.move_type)
self.assertEqual(self.ferry_building_coordinates,
move_description.from_coordinates)
self.assertEqual(self.pier_39_coordinates, move_description.to_coordinates)
move_description = move_calculator.CalculateMoveDescription(
self.ferry_building_coordinates, self.near_pier_39_coordinates)
self.assertAlmostEqual(
self.fb_to_np39_pause_and_ptt,
move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.ptt, move_description.move_type)
self.assertEqual(self.ferry_building_coordinates,
move_description.from_coordinates)
self.assertEqual(self.near_pier_39_coordinates,
move_description.to_coordinates)
move_description = move_calculator.CalculateMoveDescription(
self.pier_39_coordinates, self.near_pier_39_coordinates)
self.assertAlmostEqual(
self.p39_to_np39_pause_and_ptt, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.ptt, move_description.move_type)
self.assertEqual(self.pier_39_coordinates,
move_description.from_coordinates)
self.assertEqual(self.near_pier_39_coordinates,
move_description.to_coordinates)
def testCalculateMoveDescriptionWalkingPTTDriving(self):
move_calculator = move_calculator_.MultiMoveCalculator(
[1.0, 1.9],
[move_calculator_.SimpleMoveCalculator(self.walking_speed, city_visit.MoveType.walking),
move_calculator_.SimpleMoveCalculator(self.ptt_speed, city_visit.MoveType.ptt),
move_calculator_.SimpleMoveCalculator(self.driving_speed, city_visit.MoveType.driving)])
move_description = move_calculator.CalculateMoveDescription(
self.ferry_building_coordinates, self.pier_39_coordinates)
self.assertAlmostEqual(
self.fb_to_p39_driving, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.driving, move_description.move_type)
self.assertEqual(self.ferry_building_coordinates,
move_description.from_coordinates)
self.assertEqual(self.pier_39_coordinates, move_description.to_coordinates)
move_description = move_calculator.CalculateMoveDescription(
self.ferry_building_coordinates, self.near_pier_39_coordinates)
self.assertAlmostEqual(
self.fb_to_np39_ptt, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.ptt, move_description.move_type)
self.assertEqual(self.ferry_building_coordinates,
move_description.from_coordinates)
self.assertEqual(self.near_pier_39_coordinates,
move_description.to_coordinates)
move_description = move_calculator.CalculateMoveDescription(
self.pier_39_coordinates, self.near_pier_39_coordinates)
self.assertAlmostEqual(
self.p39_to_np39_walking, move_description.move_hours, places=3)
self.assertEqual(city_visit.MoveType.walking, move_description.move_type)
self.assertEqual(self.pier_39_coordinates,
move_description.from_coordinates)
self.assertEqual(self.near_pier_39_coordinates,
move_description.to_coordinates)
def testInconsistentArguments(self):
    """MultiMoveCalculator asserts on malformed thresholds/calculators."""
    def make_calculators():
        # Fresh walking/ptt/driving calculators for each construction attempt.
        return [
            move_calculator_.SimpleMoveCalculator(self.walking_speed, city_visit.MoveType.walking),
            move_calculator_.SimpleMoveCalculator(self.ptt_speed, city_visit.MoveType.ptt),
            move_calculator_.SimpleMoveCalculator(self.driving_speed, city_visit.MoveType.driving)]
    # Consistent arguments must construct without raising.
    move_calculator_.MultiMoveCalculator([2.0, 4.0], make_calculators())
    # A non-numeric threshold is rejected.
    self.assertRaises(AssertionError, move_calculator_.MultiMoveCalculator,
                      [2.0, 'four'], make_calculators())
    # A calculator entry that is not a SimpleMoveCalculator is rejected.
    calculators = make_calculators()
    calculators[2] = self.driving_speed
    self.assertRaises(AssertionError, move_calculator_.MultiMoveCalculator,
                      [2.0, 4.0], calculators)
    # Unsorted thresholds are rejected.
    self.assertRaises(AssertionError, move_calculator_.MultiMoveCalculator,
                      [4.0, 2.0], make_calculators())
    # Too many thresholds for the number of calculators is rejected.
    self.assertRaises(AssertionError, move_calculator_.MultiMoveCalculator,
                      [2.0, 4.0, 6.0], make_calculators())
if __name__ == '__main__':
    # Run this module's test cases when executed as a script.
    unittest.main()
| 49.604046 | 108 | 0.759133 | 1,963 | 17,163 | 6.247071 | 0.057056 | 0.128435 | 0.097611 | 0.066215 | 0.905488 | 0.889668 | 0.855419 | 0.830466 | 0.820762 | 0.820762 | 0 | 0.026358 | 0.168852 | 17,163 | 345 | 109 | 49.747826 | 0.833298 | 0.021675 | 0 | 0.757785 | 0 | 0 | 0.000715 | 0 | 0 | 0 | 0 | 0 | 0.318339 | 1 | 0.031142 | false | 0 | 0.013841 | 0 | 0.055363 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
293657d92d2a980fc81ff948754266d889ed121f | 8,992 | py | Python | tests/utils/zipfile/test_unzip.py | robcxyz/tackle-box | a7a1403d4f7549cdacb32e5b11c1f9043bdd5762 | [
"BSD-3-Clause"
] | 5 | 2021-01-05T04:21:37.000Z | 2022-01-01T22:12:32.000Z | tests/utils/zipfile/test_unzip.py | robcxyz/tackle-box | a7a1403d4f7549cdacb32e5b11c1f9043bdd5762 | [
"BSD-3-Clause"
] | 51 | 2021-01-03T00:41:59.000Z | 2022-03-27T00:13:51.000Z | tests/utils/zipfile/test_unzip.py | robcxyz/tackle-box | a7a1403d4f7549cdacb32e5b11c1f9043bdd5762 | [
"BSD-3-Clause"
] | 1 | 2022-01-03T11:46:02.000Z | 2022-01-03T11:46:02.000Z | """Tests for function unzip() from zipfile module."""
import tempfile
import pytest
from tackle.utils import zipfile
from tackle.exceptions import InvalidZipRepository
def mock_download():
    """Fake download function."""
    with open('files/fake-repo-tmpl.zip', 'rb') as zf:
        while True:
            chunk = zf.read(1024)
            if not chunk:
                break
            yield chunk
def test_unzip_local_file(mocker, tmpdir, change_dir_main_fixtures):
    """A local file reference unzips without any prompt."""
    prompt_mock = mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    clone_dir = tmpdir.mkdir('clone')
    result_dir = zipfile.unzip(
        'files/fake-repo-tmpl.zip',
        clone_to_dir=str(clone_dir),
        is_url=False,
    )
    assert result_dir.startswith(tempfile.gettempdir())
    assert not prompt_mock.called
def test_unzip_protected_local_file_environment_password(
    mocker, tmpdir, change_dir_main_fixtures
):
    """The environment can supply the repo password to `unzip()`."""
    prompt_mock = mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    clone_dir = tmpdir.mkdir('clone')
    result_dir = zipfile.unzip(
        'files/protected-fake-repo-tmpl.zip',
        clone_to_dir=str(clone_dir),
        is_url=False,
        password='sekrit',
    )
    assert result_dir.startswith(tempfile.gettempdir())
    assert not prompt_mock.called
def test_unzip_protected_local_file_bad_environment_password(
    mocker, tmpdir, change_dir_main_fixtures
):
    """`unzip()` fails when the environment supplies a wrong password."""
    mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    clone_dir = tmpdir.mkdir('clone')
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'files/protected-fake-repo-tmpl.zip',
            clone_to_dir=str(clone_dir),
            is_url=False,
            password='not-the-right-password',
        )
def test_unzip_protected_local_file_user_password_with_noinput(
    mocker, tmpdir, change_dir_main_fixtures
):
    """A password-protected repo cannot be unpacked in no_input mode."""
    mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    clone_dir = tmpdir.mkdir('clone')
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'files/protected-fake-repo-tmpl.zip',
            clone_to_dir=str(clone_dir),
            is_url=False,
            no_input=True,
        )
def test_unzip_protected_local_file_user_password(
    mocker, tmpdir, change_dir_main_fixtures
):
    """A user-supplied password unpacks a protected local file."""
    prompt_mock = mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    mocker.patch('tackle.utils.zipfile.read_repo_password', return_value='sekrit')
    clone_dir = tmpdir.mkdir('clone')
    result_dir = zipfile.unzip(
        'files/protected-fake-repo-tmpl.zip',
        clone_to_dir=str(clone_dir),
        is_url=False,
    )
    assert result_dir.startswith(tempfile.gettempdir())
    assert not prompt_mock.called
def test_unzip_protected_local_file_user_bad_password(
    mocker, tmpdir, change_dir_main_fixtures
):
    """`unzip()` errors out when the user cannot provide a valid password."""
    mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    mocker.patch(
        'tackle.utils.zipfile.read_repo_password',
        return_value='not-the-right-password')
    clone_dir = tmpdir.mkdir('clone')
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'files/protected-fake-repo-tmpl.zip',
            clone_to_dir=str(clone_dir),
            is_url=False,
        )
def test_empty_zip_file(mocker, tmpdir, change_dir_main_fixtures):
    """An empty zip file makes `unzip()` raise."""
    mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    clone_dir = tmpdir.mkdir('clone')
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'files/empty.zip',
            clone_to_dir=str(clone_dir),
            is_url=False,
        )
def test_non_repo_zip_file(mocker, tmpdir, change_dir_main_fixtures):
    """A zip without a top level directory is rejected as a repository."""
    mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    clone_dir = tmpdir.mkdir('clone')
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'files/not-a-repo.zip',
            clone_to_dir=str(clone_dir),
            is_url=False,
        )
def test_bad_zip_file(mocker, tmpdir, change_dir_main_fixtures):
    """A corrupted zip file makes `unzip()` raise."""
    mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    clone_dir = tmpdir.mkdir('clone')
    with pytest.raises(InvalidZipRepository):
        zipfile.unzip(
            'files/bad-zip-file.zip',
            clone_to_dir=str(clone_dir),
            is_url=False,
        )
def test_unzip_url(mocker, tmpdir, change_dir_main_fixtures):
    """A url is downloaded and unzipped by `unzip()`."""
    prompt_mock = mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    # Serve the local fixture archive through a mocked requests.get.
    request = mocker.MagicMock()
    request.iter_content.return_value = mock_download()
    mocker.patch(
        'tackle.utils.zipfile.requests.get', autospec=True, return_value=request)
    clone_dir = tmpdir.mkdir('clone')
    result_dir = zipfile.unzip(
        'https://example.com/path/to/fake-repo-tmpl.zip',
        clone_to_dir=str(clone_dir),
        is_url=True,
    )
    assert result_dir.startswith(tempfile.gettempdir())
    assert not prompt_mock.called
def test_unzip_url_existing_cache(mocker, tmpdir, change_dir_main_fixtures):
    """A cached zip is prompted for removal before the url is re-downloaded."""
    prompt_mock = mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, return_value=True)
    # Serve the local fixture archive through a mocked requests.get.
    request = mocker.MagicMock()
    request.iter_content.return_value = mock_download()
    mocker.patch(
        'tackle.utils.zipfile.requests.get', autospec=True, return_value=request)
    clone_dir = tmpdir.mkdir('clone')
    # Seed the clone dir with a pre-existing cached zipfile.
    clone_dir.join('fake-repo-tmpl.zip').write('This is an existing zipfile')
    result_dir = zipfile.unzip(
        'https://example.com/path/to/fake-repo-tmpl.zip',
        clone_to_dir=str(clone_dir),
        is_url=True,
    )
    assert result_dir.startswith(tempfile.gettempdir())
    assert prompt_mock.call_count == 1
def test_unzip_url_existing_cache_no_input(mocker, tmpdir, change_dir_main_fixtures):
    """With no_input the cached zip is removed without prompting."""
    # Serve the local fixture archive through a mocked requests.get.
    request = mocker.MagicMock()
    request.iter_content.return_value = mock_download()
    mocker.patch(
        'tackle.utils.zipfile.requests.get', autospec=True, return_value=request)
    clone_dir = tmpdir.mkdir('clone')
    # Seed the clone dir with a pre-existing cached zipfile.
    clone_dir.join('fake-repo-tmpl.zip').write('This is an existing zipfile')
    result_dir = zipfile.unzip(
        'https://example.com/path/to/fake-repo-tmpl.zip',
        clone_to_dir=str(clone_dir),
        is_url=True,
        no_input=True,
    )
    assert result_dir.startswith(tempfile.gettempdir())
def test_unzip_should_abort_if_no_redownload(mocker, tmpdir, change_dir_main_fixtures):
    """If the user refuses the re-download, nothing is fetched and we exit."""
    mocker.patch(
        'tackle.utils.zipfile.prompt_and_delete', autospec=True, side_effect=SystemExit)
    requests_get_mock = mocker.patch(
        'tackle.utils.zipfile.requests.get', autospec=True)
    clone_dir = tmpdir.mkdir('clone')
    # Seed the clone dir with a pre-existing cached zipfile.
    clone_dir.join('fake-repo-tmpl.zip').write('This is an existing zipfile')
    zipfile_url = 'https://example.com/path/to/fake-repo-tmpl.zip'
    with pytest.raises(SystemExit):
        zipfile.unzip(zipfile_url, is_url=True, clone_to_dir=str(clone_dir))
    assert not requests_get_mock.called
| 28.188088 | 87 | 0.668372 | 1,153 | 8,992 | 4.939289 | 0.117086 | 0.051624 | 0.073749 | 0.069535 | 0.8295 | 0.825988 | 0.787533 | 0.760316 | 0.73784 | 0.723617 | 0 | 0.001291 | 0.224755 | 8,992 | 318 | 88 | 28.27673 | 0.815665 | 0.104649 | 0 | 0.709251 | 0 | 0 | 0.17346 | 0.119152 | 0 | 0 | 0 | 0 | 0.052863 | 1 | 0.061674 | false | 0.044053 | 0.017621 | 0 | 0.079295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2962394f875d161bb7c8bb0f867d6155a1b3b0f6 | 140 | py | Python | kerasy/search/__init__.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | 4 | 2020-04-25T08:50:36.000Z | 2020-04-26T04:49:16.000Z | kerasy/search/__init__.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | null | null | null | kerasy/search/__init__.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from . import Astar
from . import itemset
from . import smart_pay
from .smart_pay import smart_pay
| 17.5 | 38 | 0.814286 | 21 | 140 | 5.047619 | 0.380952 | 0.283019 | 0.264151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157143 | 140 | 7 | 39 | 20 | 0.898305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
464dd43caca90175262262dfb56977cfbb1870c1 | 10,834 | py | Python | python/gpmp_utils/generateArm.py | kalyanvasudev/gpmp2 | 1ee99c743d978ab20dc804c8cd9cfa7813084957 | [
"BSD-3-Clause"
] | null | null | null | python/gpmp_utils/generateArm.py | kalyanvasudev/gpmp2 | 1ee99c743d978ab20dc804c8cd9cfa7813084957 | [
"BSD-3-Clause"
] | null | null | null | python/gpmp_utils/generateArm.py | kalyanvasudev/gpmp2 | 1ee99c743d978ab20dc804c8cd9cfa7813084957 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from gtsam import *
from gpmp2 import *
import math
def _build_arm_model(arm, spheres_data):
    """Attach body-sphere collision data to an abstract arm.

    Args:
        arm: an Arm object describing the kinematic chain.
        spheres_data: iterable of rows [link_id, x, y, z, radius], one per
            collision-check sphere attached to the given link.

    Returns:
        An ArmModel wrapping the arm and its BodySphereVector.
    """
    spheres_data = np.asarray(spheres_data)
    sphere_vec = BodySphereVector()
    for row in spheres_data:
        # row = [link_id, x, y, z, radius]
        sphere_vec.push_back(BodySphere(row[0], row[4], Point3(row[1:4])))
    return ArmModel(arm, sphere_vec)


def generateArm(arm_str, base_pose=None):
    """Generate an arm model.

    Args:
        arm_str: dataset string; existing datasets:
            'SimpleTwoLinksArm', 'SimpleThreeLinksArm', 'WAMArm',
            'SAWYERArm', 'PR2Arm', 'JACO2Arm'.
        base_pose: the arm's base pose; defaults to the origin with no
            rotation.

    Returns:
        An ArmModel object containing kinematics and collision-model
        information.

    Raises:
        NameError: if arm_str does not name a known arm.
    """
    if base_pose is None:
        base_pose = Pose3(Rot3(np.identity(3)), Point3(np.asarray([0, 0, 0])))

    # NOTE: the original code compared strings with 'is', which relies on
    # CPython string interning; '==' is the correct comparison.
    if arm_str == 'SimpleTwoLinksArm':
        # Abstract 2-link planar arm (DH parameters).
        a = np.asarray([0.5, 0.5])
        d = np.asarray([0, 0])
        alpha = np.asarray([0, 0])
        arm = Arm(2, a, alpha, d)
        # Sphere data: [link_id, x, y, z, radius].
        spheres_data = [
            [0, -0.5, 0.0, 0.0, 0.01],
            [0, -0.4, 0.0, 0.0, 0.01],
            [0, -0.3, 0.0, 0.0, 0.01],
            [0, -0.2, 0.0, 0.0, 0.01],
            [0, -0.1, 0.0, 0.0, 0.01],
            [1, -0.5, 0.0, 0.0, 0.01],
            [1, -0.4, 0.0, 0.0, 0.01],
            [1, -0.3, 0.0, 0.0, 0.01],
            [1, -0.2, 0.0, 0.0, 0.01],
            [1, -0.1, 0.0, 0.0, 0.01],
            [1, 0.0, 0.0, 0.0, 0.01]]
        return _build_arm_model(arm, spheres_data)

    elif arm_str == 'SimpleThreeLinksArm':
        # Abstract 3-link planar arm (DH parameters).
        a = np.asarray([0.5, 0.5, 0.5])
        d = np.asarray([0, 0, 0])
        alpha = np.asarray([0, 0, 0])
        arm = Arm(3, a, alpha, d)
        spheres_data = [
            [0, -0.5, 0.0, 0.0, 0.01],
            [0, -0.4, 0.0, 0.0, 0.01],
            [0, -0.3, 0.0, 0.0, 0.01],
            [0, -0.2, 0.0, 0.0, 0.01],
            [0, -0.1, 0.0, 0.0, 0.01],
            [1, -0.5, 0.0, 0.0, 0.01],
            [1, -0.4, 0.0, 0.0, 0.01],
            [1, -0.3, 0.0, 0.0, 0.01],
            [1, -0.2, 0.0, 0.0, 0.01],
            [1, -0.1, 0.0, 0.0, 0.01],
            [2, -0.5, 0.0, 0.0, 0.01],
            [2, -0.4, 0.0, 0.0, 0.01],
            [2, -0.3, 0.0, 0.0, 0.01],
            [2, -0.2, 0.0, 0.0, 0.01],
            [2, -0.1, 0.0, 0.0, 0.01],
            [2, 0.0, 0.0, 0.0, 0.01]]
        return _build_arm_model(arm, spheres_data)

    elif arm_str == 'WAMArm':
        # 7-DOF Barrett WAM arm (DH parameters).
        alpha = np.asarray([-np.pi/2, np.pi/2, -np.pi/2, np.pi/2, -np.pi/2, np.pi/2, 0])
        a = np.asarray([0, 0, 0.045, -0.045, 0, 0, 0])
        d = np.asarray([0, 0, 0.55, 0, 0.3, 0, 0.06])
        theta = np.asarray([0, 0, 0, 0, 0, 0, 0])
        arm = Arm(7, a, alpha, d, base_pose, theta)
        spheres_data = [
            [0, 0.0, 0.0, 0.0, 0.15],
            [1, 0.0, 0.0, 0.2, 0.06],
            [1, 0.0, 0.0, 0.3, 0.06],
            [1, 0.0, 0.0, 0.4, 0.06],
            [1, 0.0, 0.0, 0.5, 0.06],
            [2, 0.0, 0.0, 0.0, 0.06],
            [3, 0.0, 0.0, 0.1, 0.06],
            [3, 0.0, 0.0, 0.2, 0.06],
            [3, 0.0, 0.0, 0.3, 0.06],
            [5, 0.0, 0.0, 0.1, 0.06],
            [6, 0.1, -0.025, 0.08, 0.04],
            [6, 0.1, 0.025, 0.08, 0.04],
            [6, -0.1, 0, 0.08, 0.04],
            [6, 0.15, -0.025, 0.13, 0.04],
            [6, 0.15, 0.025, 0.13, 0.04],
            [6, -0.15, 0, 0.13, 0.04]]
        return _build_arm_model(arm, spheres_data)

    elif arm_str == 'SAWYERArm':
        # 7-DOF Rethink Sawyer arm (DH parameters, converted from mm).
        alpha = np.asarray([-np.pi/2, -np.pi/2, np.pi/2, np.pi/2, -np.pi/2, -np.pi/2, 0.0])
        a = 0.001*np.asarray([81.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        d = 0.001*np.asarray([317.0, 192.5, 400.0, -168.5, 400.0, 136.3, 133.75])
        theta = np.asarray([0.0, 3*np.pi/2, 0.0, np.pi, 0.0, np.pi, 3*np.pi/2])
        arm = Arm(7, a, alpha, d, base_pose, theta)
        spheres_data = [
            [0, 0.0, 0.0, 0.0, 0.07],
            [0, -0.06, 0.1, -0.02, 0.07],
            [0, -0.08, 0.23, -0.01, 0.08],
            [0, 0.0, 0.0, 0.1, 0.07],
            [1, 0.0, 0.0, 0.0, 0.06],
            [1, 0.0, 0.0, 0.11, 0.06],
            [1, 0.0, 0.0, 0.22, 0.06],
            [1, 0.0, 0.0, 0.31, 0.05],
            [2, 0.0, 0.0, -0.02, 0.05],
            [2, 0.0, 0.0, -0.08, 0.055],
            [3, 0.0, 0.0, 0.0, 0.056],
            [3, 0.0, 0.0, 0.1, 0.054],
            [3, 0.0, 0.0, 0.2, 0.054],
            [3, 0.0, 0.0, 0.3, 0.054],
            [4, 0.0, 0.0, 0.0, 0.05],
            [4, 0.0, 0.0, 0.075, 0.045],
            [5, 0.0, 0.0, 0.0, 0.045],
            [5, 0.0, 0.0, 0.08, 0.045],
            [6, -0.04, 0.0, -0.02, 0.04],
            [6, 0.0, 0.0, 0.0, 0.05],
            [6, 0.0, 0.0, 0.07, 0.04]]
        return _build_arm_model(arm, spheres_data)

    elif arm_str == 'PR2Arm':
        # 7-DOF PR2 right arm (DH parameters).
        alpha = np.asarray([-1.5708, 1.5708, -1.5708, 1.5708, -1.5708, 1.5708, 0])
        a = np.asarray([0.1, 0, 0, 0, 0, 0, 0])
        d = np.asarray([0, 0, 0.4, 0, 0.321, 0, 0])
        theta = np.asarray([0, 1.5708, 0, 0, 0, 0, 0])
        arm = Arm(7, a, alpha, d, base_pose, theta)
        spheres_data = [
            [0, -0.010000, 0.000000, 0.000000, 0.180000],
            [2, 0.015000, 0.220000, -0.000000, 0.110000],
            [2, 0.035000, 0.140000, -0.000000, 0.080000],
            [2, 0.035000, 0.072500, -0.000000, 0.080000],
            [2, 0.000000, 0.000000, -0.000000, 0.105000],
            [4, -0.005000, 0.321-0.130000, -0.000000, 0.075000],
            [4, 0.010000, 0.321-0.200000, -0.025000, 0.055000],
            [4, 0.010000, 0.321-0.200000, 0.025000, 0.055000],
            [4, 0.015000, 0.321-0.265000, -0.027500, 0.050000],
            [4, 0.015000, 0.321-0.265000, 0.027500, 0.050000],
            [4, 0.005000, 0.321-0.320000, -0.022500, 0.050000],
            [4, 0.005000, 0.321-0.320000, 0.022500, 0.050000],
            [6, 0, -0.017500, 0.072500, 0.040000],
            [6, 0, 0.017500, 0.072500, 0.040000],
            [6, 0, 0, 0.092500, 0.040000],
            [6, 0, 0.03600, 0.11, 0.040000],
            [6, 0, 0.027000, 0.155, 0.035000],
            [6, 0, 0.00900, 0.18, 0.030000],
            [6, 0, 0.00950, 0.205, 0.020000],
            [6, 0, -0.03600, 0.11, 0.040000],
            [6, 0, -0.027000, 0.155, 0.035000],
            [6, 0, -0.00900, 0.18, 0.030000],
            [6, 0, -0.00950, 0.205, 0.020000]]
        return _build_arm_model(arm, spheres_data)

    elif arm_str == 'JACO2Arm':
        # 6-DOF Kinova JACO2 arm (DH parameters).
        alpha = np.asarray([np.pi/2, np.pi, np.pi/2, 1.0472, 1.0472, np.pi])
        a = np.asarray([0, 0.41, 0, 0, 0, 0])
        d = np.asarray([0.2755, 0, -0.0098, -0.2501, -0.0856, -0.2228])
        theta = np.asarray([0, 0, 0, 0, 0, 0])
        arm = Arm(6, a, alpha, d, base_pose, theta)
        spheres_data = [
            [0, 0.0, 0.0, 0.0, 0.053],
            [0, 0.0, -0.08, 0.0, 0.053],
            [0, 0.0, -0.155, 0.0, 0.053],
            [0, 0.0, -0.23, 0.0, 0.053],
            [1, 0.0, 0.0, 0.0, 0.053],
            [1, -0.06, 0.0, 0.03, 0.04],
            [1, -0.12, 0.0, 0.03, 0.04],
            [1, -0.18, 0.0, 0.03, 0.04],
            [1, -0.24, 0.0, 0.03, 0.04],
            [1, -0.30, 0.0, 0.03, 0.04],
            [1, -0.36, 0.0, 0.03, 0.04],
            [2, 0.0, -0.01, -0.05, 0.035],
            [2, 0.0, -0.01, -0.10, 0.03],
            [2, 0.0, 0.0, -0.15, 0.035],
            [2, 0.0, 0.0, -0.2, 0.035],
            [3, 0.0, 0.0, 0.0, 0.04],
            [3, 0.0, 0.0, -0.045, 0.04],
            [4, 0.0, 0.0, 0.0, 0.04],
            [4, 0.0, -0.008, -0.075, 0.05],
            [5, 0.0, 0.05, -0.01, 0.013],
            [5, 0.0, 0.05, 0.01, 0.013],
            [5, 0.0, 0.06, -0.039, 0.018],
            [5, 0.0, 0.06, -0.067, 0.018],
            [5, 0.0, 0.035, -0.042, 0.018],
            [5, 0.0, -0.05, -0.01, 0.013],
            [5, 0.0, -0.05, 0.01, 0.013],
            [5, 0.0, -0.06, -0.039, 0.018],
            [5, 0.0, -0.06, -0.067, 0.018],
            [5, 0.0, -0.035, -0.042, 0.018],
            [5, 0.0, 0.015, -0.055, 0.02],
            [5, 0.0, 0.025, -0.08, 0.02],
            [5, 0.0, 0.0, -0.08, 0.02],
            [5, 0.0, -0.025, -0.08, 0.02],
            [5, 0.0, -0.015, -0.055, 0.02],
        ]
        return _build_arm_model(arm, spheres_data)

    else:
        raise NameError('No such arm exists')
4662195519206b75f156ce77cf56f90d81bfc536 | 24,558 | py | Python | myems-api/core/distributioncircuit.py | 18600575648/myems | 38ab7d509b5ab275a4df0333e6256c586abdfbf9 | [
"MIT"
] | null | null | null | myems-api/core/distributioncircuit.py | 18600575648/myems | 38ab7d509b5ab275a4df0333e6256c586abdfbf9 | [
"MIT"
] | null | null | null | myems-api/core/distributioncircuit.py | 18600575648/myems | 38ab7d509b5ab275a4df0333e6256c586abdfbf9 | [
"MIT"
] | null | null | null | import falcon
import simplejson as json
import mysql.connector
import config
import uuid
from core.useractivity import user_logger, access_control
class DistributionCircuitCollection:
    """Falcon resource for the distribution-circuit collection endpoint.

    GET lists all distribution circuits (each joined with its parent
    distribution system); POST creates a new circuit.
    """
    @staticmethod
    def __init__():
        """Initializes DistributionCircuitCollection"""
        pass

    @staticmethod
    def on_options(req, resp):
        # CORS preflight handler: respond 200 with no body.
        resp.status = falcon.HTTP_200

    @staticmethod
    def on_get(req, resp):
        """Handles GET requests: return all distribution circuits as JSON."""
        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()

        # Load all distribution systems first so each circuit row can be
        # joined (in Python) with its parent system's metadata.
        query = (" SELECT id, name, uuid "
                 " FROM tbl_distribution_systems ")
        cursor.execute(query)
        rows_distribution_systems = cursor.fetchall()

        distribution_system_dict = dict()
        if rows_distribution_systems is not None and len(rows_distribution_systems) > 0:
            for row in rows_distribution_systems:
                distribution_system_dict[row[0]] = {"id": row[0],
                                                    "name": row[1],
                                                    "uuid": row[2]}

        query = (" SELECT id, name, uuid, distribution_system_id, "
                 " distribution_room, switchgear, peak_load, peak_current, customers, meters "
                 " FROM tbl_distribution_circuits "
                 " ORDER BY id ")
        cursor.execute(query)
        rows_distribution_circuits = cursor.fetchall()

        result = list()
        if rows_distribution_circuits is not None and len(rows_distribution_circuits) > 0:
            for row in rows_distribution_circuits:
                # May be None if the referenced system row was not found.
                distribution_system = distribution_system_dict.get(row[3])
                meta_result = {"id": row[0],
                               "name": row[1],
                               "uuid": row[2],
                               "distribution_system": distribution_system,
                               "distribution_room": row[4],
                               "switchgear": row[5],
                               "peak_load": row[6],
                               "peak_current": row[7],
                               "customers": row[8],
                               "meters": row[9]}
                result.append(meta_result)

        cursor.close()
        cnx.close()
        resp.text = json.dumps(result)

    @staticmethod
    @user_logger
    def on_post(req, resp):
        """Handles POST requests"""
        access_control(req)
        try:
            raw_json = req.stream.read().decode('utf-8')
        except Exception as ex:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR', description=ex)

        new_values = json.loads(raw_json)

        # name: required non-empty string.
        if 'name' not in new_values['data'].keys() or \
                not isinstance(new_values['data']['name'], str) or \
                len(str.strip(new_values['data']['name'])) == 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_DISTRIBUTION_CIRCUIT_NAME')
        name = str.strip(new_values['data']['name'])

        # distribution_system_id: required positive integer.
        if 'distribution_system_id' not in new_values['data'].keys() or \
                not isinstance(new_values['data']['distribution_system_id'], int) or \
                new_values['data']['distribution_system_id'] <= 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_DISTRIBUTION_SYSTEM_ID')
        distribution_system_id = new_values['data']['distribution_system_id']

        # distribution_room: required non-empty string.
        if 'distribution_room' not in new_values['data'].keys() or \
                not isinstance(new_values['data']['distribution_room'], str) or \
                len(str.strip(new_values['data']['distribution_room'])) == 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_DISTRIBUTION_ROOM')
        distribution_room = str.strip(new_values['data']['distribution_room'])

        # switchgear: required non-empty string.
        if 'switchgear' not in new_values['data'].keys() or \
                not isinstance(new_values['data']['switchgear'], str) or \
                len(str.strip(new_values['data']['switchgear'])) == 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_SWITCHGEAR')
        switchgear = str.strip(new_values['data']['switchgear'])

        # peak_load: required numeric (int or float), stored as float.
        if 'peak_load' not in new_values['data'].keys() or \
                not (isinstance(new_values['data']['peak_load'], float) or
                     isinstance(new_values['data']['peak_load'], int)):
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_PEAK_LOAD')
        peak_load = float(new_values['data']['peak_load'])

        # peak_current: required numeric (int or float), stored as float.
        if 'peak_current' not in new_values['data'].keys() or \
                not (isinstance(new_values['data']['peak_current'], float) or
                     isinstance(new_values['data']['peak_current'], int)):
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_PEAK_CURRENT')
        peak_current = float(new_values['data']['peak_current'])

        # customers: optional free text; None when absent or empty.
        if 'customers' in new_values['data'].keys() and \
                new_values['data']['customers'] is not None and \
                len(str(new_values['data']['customers'])) > 0:
            customers = str.strip(new_values['data']['customers'])
        else:
            customers = None

        # meters: optional free text; None when absent or empty.
        if 'meters' in new_values['data'].keys() and \
                new_values['data']['meters'] is not None and \
                len(str(new_values['data']['meters'])) > 0:
            meters = str.strip(new_values['data']['meters'])
        else:
            meters = None

        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()

        # The parent distribution system must exist.
        cursor.execute(" SELECT name "
                       " FROM tbl_distribution_systems "
                       " WHERE id = %s ",
                       (distribution_system_id,))
        if cursor.fetchone() is None:
            cursor.close()
            cnx.close()
            raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                                   description='API.DISTRIBUTION_SYSTEM_NOT_FOUND')

        # The circuit name must be unique within its distribution system.
        cursor.execute(" SELECT name "
                       " FROM tbl_distribution_circuits "
                       " WHERE distribution_system_id = %s AND name = %s ",
                       (distribution_system_id, name,))
        if cursor.fetchone() is not None:
            cursor.close()
            cnx.close()
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.DISTRIBUTION_CIRCUIT_NAME_IS_ALREADY_IN_USE')

        add_values = (" INSERT INTO tbl_distribution_circuits "
                      "    (name, uuid, distribution_system_id,"
                      "     distribution_room, switchgear, peak_load, peak_current, customers, meters) "
                      " VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) ")
        cursor.execute(add_values, (name,
                                    str(uuid.uuid4()),
                                    distribution_system_id,
                                    distribution_room,
                                    switchgear,
                                    peak_load,
                                    peak_current,
                                    customers,
                                    meters))
        new_id = cursor.lastrowid
        cnx.commit()
        cursor.close()
        cnx.close()

        # 201 Created with the new resource's location.
        resp.status = falcon.HTTP_201
        resp.location = '/distributioncircuits/' + str(new_id)
class DistributionCircuitItem:
@staticmethod
def __init__():
    """Initializes DistributionCircuitItem"""
    pass
@staticmethod
def on_options(req, resp, id_):
    # CORS preflight handler: respond 200 with no body.
    resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
    """Handles GET requests: return one distribution circuit by id."""
    if not id_.isdigit() or int(id_) <= 0:
        # Fix: this endpoint validates a distribution circuit id, not a
        # meter id; the original description 'API.INVALID_METER_ID' was a
        # copy-paste error, inconsistent with on_delete/on_put.
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_DISTRIBUTION_CIRCUIT_ID')

    cnx = mysql.connector.connect(**config.myems_system_db)
    cursor = cnx.cursor()

    # Build an id -> metadata map of all distribution systems so the
    # circuit row can be joined (in Python) with its parent system.
    query = (" SELECT id, name, uuid "
             " FROM tbl_distribution_systems ")
    cursor.execute(query)
    rows_distribution_systems = cursor.fetchall()

    distribution_system_dict = dict()
    if rows_distribution_systems is not None and len(rows_distribution_systems) > 0:
        for row in rows_distribution_systems:
            distribution_system_dict[row[0]] = {"id": row[0],
                                                "name": row[1],
                                                "uuid": row[2]}

    query = (" SELECT id, name, uuid, distribution_system_id, "
             " distribution_room, switchgear, peak_load, peak_current, customers, meters "
             " FROM tbl_distribution_circuits "
             " WHERE id = %s ")
    cursor.execute(query, (id_,))
    row = cursor.fetchone()
    cursor.close()
    cnx.close()

    if row is None:
        raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                               description='API.DISTRIBUTION_CIRCUIT_NOT_FOUND')
    else:
        # May be None if the referenced system row was not found.
        distribution_system = distribution_system_dict.get(row[3])
        meta_result = {"id": row[0],
                       "name": row[1],
                       "uuid": row[2],
                       "distribution_system": distribution_system,
                       "distribution_room": row[4],
                       "switchgear": row[5],
                       "peak_load": row[6],
                       "peak_current": row[7],
                       "customers": row[8],
                       "meters": row[9]}
        resp.text = json.dumps(meta_result)
@staticmethod
@user_logger
def on_delete(req, resp, id_):
    """Handles DELETE requests: remove one distribution circuit by id."""
    access_control(req)
    # id_ arrives as a URL path segment (string); must be a positive integer.
    if not id_.isdigit() or int(id_) <= 0:
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_DISTRIBUTION_CIRCUIT_ID')

    cnx = mysql.connector.connect(**config.myems_system_db)
    cursor = cnx.cursor()

    # The circuit must exist before anything is deleted.
    cursor.execute(" SELECT name "
                   " FROM tbl_distribution_circuits "
                   " WHERE id = %s ", (id_,))
    if cursor.fetchone() is None:
        cursor.close()
        cnx.close()
        raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                               description='API.DISTRIBUTION_CIRCUIT_NOT_FOUND')

    # delete relation with points
    cursor.execute(" DELETE FROM tbl_distribution_circuits_points "
                   " WHERE distribution_circuit_id = %s ", (id_,))

    # delete distribution circuit itself
    cursor.execute(" DELETE FROM tbl_distribution_circuits "
                   " WHERE id = %s ", (id_,))
    cnx.commit()

    cursor.close()
    cnx.close()

    # 204 No Content on successful deletion.
    resp.status = falcon.HTTP_204
@staticmethod
@user_logger
def on_put(req, resp, id_):
    """Handles PUT requests

    Update the distribution circuit identified by ``id_``.  The JSON body
    must provide name, distribution_system_id, distribution_room,
    switchgear, peak_load and peak_current; customers and meters are
    optional.  Raises falcon.HTTPError 400 on validation failures and 404
    when the referenced distribution system does not exist.
    """
    access_control(req)
    try:
        raw_json = req.stream.read().decode('utf-8')
    except Exception as ex:
        # BUGFIX: falcon expects the description to be a string; previously
        # the exception object itself was passed.
        raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=str(ex))
    if not id_.isdigit() or int(id_) <= 0:
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_DISTRIBUTION_CIRCUIT_ID')
    new_values = json.loads(raw_json)
    if 'name' not in new_values['data'].keys() or \
            not isinstance(new_values['data']['name'], str) or \
            len(str.strip(new_values['data']['name'])) == 0:
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_DISTRIBUTION_CIRCUIT_NAME')
    name = str.strip(new_values['data']['name'])
    if 'distribution_system_id' not in new_values['data'].keys() or \
            not isinstance(new_values['data']['distribution_system_id'], int) or \
            new_values['data']['distribution_system_id'] <= 0:
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_DISTRIBUTION_SYSTEM_ID')
    distribution_system_id = new_values['data']['distribution_system_id']
    if 'distribution_room' not in new_values['data'].keys() or \
            not isinstance(new_values['data']['distribution_room'], str) or \
            len(str.strip(new_values['data']['distribution_room'])) == 0:
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_DISTRIBUTION_ROOM')
    distribution_room = str.strip(new_values['data']['distribution_room'])
    if 'switchgear' not in new_values['data'].keys() or \
            not isinstance(new_values['data']['switchgear'], str) or \
            len(str.strip(new_values['data']['switchgear'])) == 0:
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_SWITCHGEAR')
    switchgear = str.strip(new_values['data']['switchgear'])
    if 'peak_load' not in new_values['data'].keys() or \
            not (isinstance(new_values['data']['peak_load'], float) or
                 isinstance(new_values['data']['peak_load'], int)):
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_PEAK_LOAD')
    peak_load = float(new_values['data']['peak_load'])
    if 'peak_current' not in new_values['data'].keys() or \
            not (isinstance(new_values['data']['peak_current'], float) or
                 isinstance(new_values['data']['peak_current'], int)):
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.INVALID_PEAK_CURRENT')
    peak_current = float(new_values['data']['peak_current'])
    if 'customers' in new_values['data'].keys() and \
            new_values['data']['customers'] is not None and \
            len(str(new_values['data']['customers'])) > 0:
        # BUGFIX: coerce to str before stripping -- the presence check above
        # accepts any non-empty value (e.g. a number), which str.strip alone
        # would reject with a TypeError (HTTP 500).
        customers = str.strip(str(new_values['data']['customers']))
    else:
        customers = None
    if 'meters' in new_values['data'].keys() and \
            new_values['data']['meters'] is not None and \
            len(str(new_values['data']['meters'])) > 0:
        # BUGFIX: same string coercion as for customers above.
        meters = str.strip(str(new_values['data']['meters']))
    else:
        meters = None
    cnx = mysql.connector.connect(**config.myems_system_db)
    cursor = cnx.cursor()
    # The referenced distribution system must exist.
    cursor.execute(" SELECT name "
                   " FROM tbl_distribution_systems "
                   " WHERE id = %s ",
                   (distribution_system_id,))
    if cursor.fetchone() is None:
        cursor.close()
        cnx.close()
        raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                               description='API.DISTRIBUTION_SYSTEM_NOT_FOUND')
    # The new name must be unique within the distribution system,
    # excluding the circuit being updated.
    cursor.execute(" SELECT name "
                   " FROM tbl_distribution_circuits "
                   " WHERE distribution_system_id = %s AND name = %s AND id != %s ",
                   (distribution_system_id, name, id_))
    if cursor.fetchone() is not None:
        cursor.close()
        cnx.close()
        raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                               description='API.DISTRIBUTION_CIRCUIT_NAME_IS_ALREADY_IN_USE')
    update_row = (" UPDATE tbl_distribution_circuits "
                  " SET name = %s, distribution_system_id = %s, distribution_room = %s, switchgear = %s, "
                  " peak_load = %s, peak_current = %s, customers = %s, meters = %s "
                  " WHERE id = %s ")
    cursor.execute(update_row, (name,
                                distribution_system_id,
                                distribution_room,
                                switchgear,
                                peak_load,
                                peak_current,
                                customers,
                                meters,
                                id_))
    cnx.commit()
    cursor.close()
    cnx.close()
    resp.status = falcon.HTTP_200
class DistributionCircuitPointCollection:
    """Collection resource for the points bound to a distribution circuit."""

    @staticmethod
    def __init__():
        """Initializes DistributionCircuitPointCollection"""
        pass

    @staticmethod
    def on_options(req, resp, id_):
        resp.status = falcon.HTTP_200

    @staticmethod
    def on_get(req, resp, id_):
        """List all points associated with distribution circuit ``id_``.

        Responds with a JSON array of {id, name, address,
        distribution_circuit} objects; 400 for a malformed id, 404 when the
        circuit does not exist.
        """
        if not id_.isdigit() or int(id_) <= 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_DISTRIBUTION_CIRCUIT_ID')
        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()
        # BUGFIX: removed the dead pre-fetch of all distribution systems.
        # It keyed the dict with row['uuid'] on a tuple cursor (TypeError at
        # request time) and the dict was never used by this handler anyway.
        cursor.execute(" SELECT name "
                       " FROM tbl_distribution_circuits "
                       " WHERE id = %s ", (id_,))
        if cursor.fetchone() is None:
            cursor.close()
            cnx.close()
            raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                                   description='API.DISTRIBUTION_CIRCUIT_NOT_FOUND')
        query = (" SELECT p.id AS point_id, p.name AS point_name, p.address AS point_address, "
                 " dc.id AS distribution_circuit_id, dc.name AS distribution_circuit_name, "
                 " dc.uuid AS distribution_circuit_uuid "
                 " FROM tbl_points p, tbl_distribution_circuits_points dcp, tbl_distribution_circuits dc "
                 " WHERE dcp.distribution_circuit_id = %s AND p.id = dcp.point_id "
                 " AND dcp.distribution_circuit_id = dc.id "
                 " ORDER BY p.name ")
        cursor.execute(query, (id_,))
        rows = cursor.fetchall()
        # BUGFIX: close the cursor/connection on the success path too
        # (previously only the 404 branch released them -- connection leak).
        cursor.close()
        cnx.close()
        result = list()
        if rows is not None and len(rows) > 0:
            for row in rows:
                meta_result = {"id": row[0], "name": row[1], "address": row[2],
                               "distribution_circuit": {"id": row[3],
                                                        "name": row[4],
                                                        "uuid": row[5]}}
                result.append(meta_result)
        resp.text = json.dumps(result)

    @staticmethod
    @user_logger
    def on_post(req, resp, id_):
        """Handles POST requests

        Bind an existing point to distribution circuit ``id_``.  Responds
        201 with a Location header on success; 400 for bad input or an
        already-existing relation, 404 when circuit or point is missing.
        """
        access_control(req)
        try:
            raw_json = req.stream.read().decode('utf-8')
        except Exception as ex:
            # BUGFIX: falcon expects a string description, not the exception
            # object itself.
            raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=str(ex))
        if not id_.isdigit() or int(id_) <= 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_DISTRIBUTION_CIRCUIT_ID')
        new_values = json.loads(raw_json)
        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()
        cursor.execute(" SELECT name "
                       " from tbl_distribution_circuits "
                       " WHERE id = %s ", (id_,))
        if cursor.fetchone() is None:
            cursor.close()
            cnx.close()
            raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                                   description='API.DISTRIBUTION_CIRCUIT_NOT_FOUND')
        cursor.execute(" SELECT name "
                       " FROM tbl_points "
                       " WHERE id = %s ", (new_values['data']['point_id'],))
        if cursor.fetchone() is None:
            cursor.close()
            cnx.close()
            raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                                   description='API.POINT_NOT_FOUND')
        # Reject duplicate relations.
        query = (" SELECT id "
                 " FROM tbl_distribution_circuits_points "
                 " WHERE distribution_circuit_id = %s AND point_id = %s")
        cursor.execute(query, (id_, new_values['data']['point_id'],))
        if cursor.fetchone() is not None:
            cursor.close()
            cnx.close()
            raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR',
                                   description='API.DISTRIBUTION_CIRCUIT_POINT_RELATION_EXISTS')
        add_row = (" INSERT INTO tbl_distribution_circuits_points (distribution_circuit_id, point_id) "
                   " VALUES (%s, %s) ")
        cursor.execute(add_row, (id_, new_values['data']['point_id'],))
        # (removed unused ``new_id = cursor.lastrowid`` -- the location is
        # built from the circuit and point ids, not the relation id)
        cnx.commit()
        cursor.close()
        cnx.close()
        resp.status = falcon.HTTP_201
        resp.location = '/distributioncircuits/' + str(id_) + '/points/' + str(new_values['data']['point_id'])
class DistributionCircuitPointItem:
    """Item resource for a single circuit-to-point association."""

    @staticmethod
    def __init__():
        """Initializes DistributionCircuitPointItem"""
        pass

    @staticmethod
    def on_options(req, resp, id_, pid):
        resp.status = falcon.HTTP_200

    @staticmethod
    @user_logger
    def on_delete(req, resp, id_, pid):
        """Unbind point ``pid`` from distribution circuit ``id_``.

        Responds 204 on success; 400 for malformed ids, 404 when the
        circuit, the point, or the relation does not exist.
        """
        access_control(req)
        # Both identifiers arrive as URL path segments, hence the text
        # checks; circuit id is validated before point id.
        for value, message in ((id_, 'API.INVALID_DISTRIBUTION_CIRCUIT_ID'),
                               (pid, 'API.INVALID_POINT_ID')):
            if not value.isdigit() or int(value) <= 0:
                raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                       description=message)
        connection = mysql.connector.connect(**config.myems_system_db)
        cursor = connection.cursor()

        def _abort_not_found(description):
            # Release resources before surfacing a 404.
            cursor.close()
            connection.close()
            raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                                   description=description)

        cursor.execute(" SELECT name "
                       " FROM tbl_distribution_circuits "
                       " WHERE id = %s ", (id_,))
        if cursor.fetchone() is None:
            _abort_not_found('API.DISTRIBUTION_CIRCUIT_NOT_FOUND')
        cursor.execute(" SELECT name "
                       " FROM tbl_points "
                       " WHERE id = %s ", (pid,))
        if cursor.fetchone() is None:
            _abort_not_found('API.POINT_NOT_FOUND')
        cursor.execute(" SELECT id "
                       " FROM tbl_distribution_circuits_points "
                       " WHERE distribution_circuit_id = %s AND point_id = %s ", (id_, pid))
        if cursor.fetchone() is None:
            _abort_not_found('API.DISTRIBUTION_CIRCUIT_POINT_RELATION_NOT_FOUND')
        cursor.execute(" DELETE FROM tbl_distribution_circuits_points "
                       " WHERE distribution_circuit_id = %s AND point_id = %s ", (id_, pid))
        connection.commit()
        cursor.close()
        connection.close()
        resp.status = falcon.HTTP_204
| 43.697509 | 110 | 0.53734 | 2,522 | 24,558 | 4.998414 | 0.061856 | 0.05069 | 0.070125 | 0.072188 | 0.889418 | 0.861574 | 0.842297 | 0.83492 | 0.826591 | 0.815326 | 0 | 0.012393 | 0.352716 | 24,558 | 561 | 111 | 43.775401 | 0.780637 | 0.011972 | 0 | 0.831897 | 0 | 0.00431 | 0.221787 | 0.088624 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0.008621 | 0.012931 | 0 | 0.056034 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
466508ca8e29b457c04616ee05eb17866d952f9d | 21,188 | py | Python | tests/test_ratchet.py | robertatakenaka/Logger | 42e791423560b06f159fb2da3750ced4ee8f8eef | [
"BSD-2-Clause"
] | 1 | 2019-03-16T04:11:23.000Z | 2019-03-16T04:11:23.000Z | tests/test_ratchet.py | robertatakenaka/Logger | 42e791423560b06f159fb2da3750ced4ee8f8eef | [
"BSD-2-Clause"
] | null | null | null | tests/test_ratchet.py | robertatakenaka/Logger | 42e791423560b06f159fb2da3750ced4ee8f8eef | [
"BSD-2-Clause"
] | null | null | null | import unittest
from logger import ratchet
class RatchetBulkTests(unittest.TestCase):
    """Unit tests for ``ratchet.Local`` bulk accumulation.

    Every access type is registered twice (on two consecutive days) and the
    accumulated counters in ``bulk_data`` are then compared against the
    expected key/value pairs.  The expected counter set has an identical
    shape for every access type -- five bare counters plus the same five
    prefixed with the access metric, a ``code``, a ``type`` and optional
    parent links -- so it is produced by the ``_expected`` helper instead of
    being copy-pasted into each of the ~25 tests.
    """

    def setUp(self):
        self.rb = ratchet.Local('fakeapiuri', 'scl')

    # ------------------------------------------------------------------
    # helpers
    # ------------------------------------------------------------------
    def _register_twice(self, register, *args):
        """Invoke *register* once for 2013-05-29 and once for 2013-05-30."""
        register(*(args + ('2013-05-29',)))
        register(*(args + ('2013-05-30',)))

    def _items(self, key):
        """Return ``bulk_data[key]`` flattened to sorted 'key:value' strings."""
        return sorted('%s:%s' % (k, v) for k, v in self.rb.bulk_data[key].items())

    def _expected(self, code, type_, metric, links=()):
        """Build the expected sorted 'key:value' list for one bulk entry.

        Two registrations on 2013-05-29/30 produce five counters, present
        once bare and once prefixed with *metric* (e.g. 'pdf', 'toc',
        'other.pdfsite'), plus the ``code`` and ``type`` attributes and any
        parent *links* given as (key, value) pairs.
        """
        counters = ['total:2',
                    'y2013.total:2',
                    'y2013.m05.total:2',
                    'y2013.m05.d29:1',
                    'y2013.m05.d30:1']
        items = ['code:%s' % code, 'type:%s' % type_]
        items.extend(counters)
        items.extend('%s.%s' % (metric, counter) for counter in counters)
        items.extend('%s:%s' % (key, value) for key, value in links)
        return sorted(items)

    # ------------------------------------------------------------------
    # download accesses (three bulk entries: pdf path, journal, website)
    # ------------------------------------------------------------------
    def test_register_download_access_keys(self):
        self._register_twice(self.rb.register_download_access,
                             '/pdf/bjmbr/v14n4/03.pdf', '1414-431X')
        self.assertEqual(sorted(self.rb.bulk_data.keys()),
                         ['/PDF/BJMBR/V14N4/03.PDF', '1414-431X', 'scl'])

    def test_register_download_access_total(self):
        self._register_twice(self.rb.register_download_access,
                             '/pdf/bjmbr/v14n4/03.pdf', '1414-431X')
        self.assertEqual(self.rb.bulk_data['/PDF/BJMBR/V14N4/03.PDF']['total'], 2)
        self.assertEqual(self.rb.bulk_data['scl']['total'], 2)
        self.assertEqual(self.rb.bulk_data['1414-431X']['total'], 2)

    def test_register_download_access_keys_values_journal(self):
        self._register_twice(self.rb.register_download_access,
                             '/pdf/bjmbr/v14n4/03.pdf', '1414-431X')
        self.assertEqual(self._items('1414-431X'),
                         self._expected('1414-431X', 'journal', 'pdf'))

    def test_register_download_access_keys_values_website(self):
        self._register_twice(self.rb.register_download_access,
                             '/pdf/bjmbr/v14n4/03.pdf', '1414-431X')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'pdf'))

    def test_register_download_access_keys_values_pdf(self):
        self._register_twice(self.rb.register_download_access,
                             '/pdf/bjmbr/v14n4/03.pdf', '1414-431X')
        self.assertEqual(self._items('/PDF/BJMBR/V14N4/03.PDF'),
                         self._expected('/PDF/BJMBR/V14N4/03.PDF', 'pdf', 'pdf'))

    # ------------------------------------------------------------------
    # alpha / issues / home / journal accesses
    # ------------------------------------------------------------------
    def test_register_alpha_access_keys_values_website(self):
        self._register_twice(self.rb.register_alpha_access, 'scl')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'alpha'))

    def test_register_issues_access_key_values(self):
        self._register_twice(self.rb.register_issues_access, '1414-431X')
        self.assertEqual(self._items('1414-431X'),
                         self._expected('1414-431X', 'journal', 'issues'))

    def test_register_issues_access_keys_values(self):
        self._register_twice(self.rb.register_issues_access, '1414-431X')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'issues'))

    def test_register_home_access_keys_values(self):
        self._register_twice(self.rb.register_home_access, 'scl')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'home'))

    def test_register_journal_access_keys_values(self):
        self._register_twice(self.rb.register_journal_access, '1414-431X')
        self.assertEqual(self._items('1414-431X'),
                         self._expected('1414-431X', 'journal', 'journal'))

    def test_register_journal_access_keys_values_website(self):
        self._register_twice(self.rb.register_journal_access, '1414-431X')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'journal'))

    # ------------------------------------------------------------------
    # toc accesses (issue entry carries a journal link)
    # ------------------------------------------------------------------
    def test_register_toc_access_keys_values_website(self):
        self._register_twice(self.rb.register_toc_access, '1414-431X20140001')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'toc'))

    def test_register_toc_access_keys_values_journal(self):
        self._register_twice(self.rb.register_toc_access, '1414-431X20140001')
        self.assertEqual(self._items('1414-431X'),
                         self._expected('1414-431X', 'journal', 'toc'))

    def test_register_toc_access_keys_values(self):
        self._register_twice(self.rb.register_toc_access, '1414-431X20140001')
        self.assertEqual(self._items('1414-431X20140001'),
                         self._expected('1414-431X20140001', 'issue', 'toc',
                                        links=(('journal', '1414-431X'),)))

    # ------------------------------------------------------------------
    # article (html) accesses
    # ------------------------------------------------------------------
    def test_register_article_access_keys_values_website(self):
        self._register_twice(self.rb.register_article_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'html'))

    def test_register_article_access_keys_values_journal(self):
        self._register_twice(self.rb.register_article_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('1414-431X'),
                         self._expected('1414-431X', 'journal', 'html'))

    def test_register_article_access_keys_values_toc(self):
        self._register_twice(self.rb.register_article_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('1414-431X20140001'),
                         self._expected('1414-431X20140001', 'issue', 'html'))

    def test_register_article_access_keys_values(self):
        self._register_twice(self.rb.register_article_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('S1414-431X2014000100005'),
                         self._expected('S1414-431X2014000100005', 'article', 'html',
                                        links=(('issue', '1414-431X20140001'),
                                               ('journal', '1414-431X'))))

    # ------------------------------------------------------------------
    # abstract accesses
    # ------------------------------------------------------------------
    def test_register_abstract_access_keys_values_website(self):
        self._register_twice(self.rb.register_abstract_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'abstract'))

    def test_register_abstract_access_keys_values_journal(self):
        self._register_twice(self.rb.register_abstract_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('1414-431X'),
                         self._expected('1414-431X', 'journal', 'abstract'))

    def test_register_abstract_access_keys_values_toc(self):
        self._register_twice(self.rb.register_abstract_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('1414-431X20140001'),
                         self._expected('1414-431X20140001', 'issue', 'abstract'))

    def test_register_abstract_access_keys_values(self):
        self._register_twice(self.rb.register_abstract_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('S1414-431X2014000100005'),
                         self._expected('S1414-431X2014000100005', 'article', 'abstract',
                                        links=(('issue', '1414-431X20140001'),
                                               ('journal', '1414-431X'))))

    # ------------------------------------------------------------------
    # "other pdf" (pdfsite) accesses
    # ------------------------------------------------------------------
    def test_register_opdf_access_keys_values_website(self):
        self._register_twice(self.rb.register_pdf_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('scl'),
                         self._expected('scl', 'website', 'other.pdfsite'))

    def test_register_opdf_access_keys_values_journal(self):
        self._register_twice(self.rb.register_pdf_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('1414-431X'),
                         self._expected('1414-431X', 'journal', 'other.pdfsite'))

    def test_register_opdf_access_keys_values_toc(self):
        self._register_twice(self.rb.register_pdf_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('1414-431X20140001'),
                         self._expected('1414-431X20140001', 'issue', 'other.pdfsite'))

    def test_register_opdf_access_keys_values(self):
        self._register_twice(self.rb.register_pdf_access, 'S1414-431X2014000100005')
        self.assertEqual(self._items('S1414-431X2014000100005'),
                         self._expected('S1414-431X2014000100005', 'article', 'other.pdfsite',
                                        links=(('issue', '1414-431X20140001'),
                                               ('journal', '1414-431X'))))
| 27.915679 | 113 | 0.493487 | 2,431 | 21,188 | 4.187577 | 0.027972 | 0.08664 | 0.071513 | 0.056582 | 0.969352 | 0.963163 | 0.952554 | 0.944401 | 0.911002 | 0.866306 | 0 | 0.22633 | 0.351473 | 21,188 | 758 | 114 | 27.952507 | 0.514519 | 0 | 0 | 0.752427 | 0 | 0 | 0.32877 | 0.130517 | 0 | 0 | 0 | 0 | 0.045307 | 1 | 0.043689 | false | 0 | 0.003236 | 0 | 0.048544 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
46a5d58ae4a8804cf3d8a776950517b331f84fd1 | 159,807 | py | Python | boto3_type_annotations_with_docs/boto3_type_annotations/machinelearning/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 119 | 2018-12-01T18:20:57.000Z | 2022-02-02T10:31:29.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/machinelearning/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 15 | 2018-11-16T00:16:44.000Z | 2021-11-13T03:44:18.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/machinelearning/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 11 | 2019-05-06T05:26:51.000Z | 2021-09-28T15:27:59.000Z | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def add_tags(self, Tags: List, ResourceId: str, ResourceType: str) -> Dict:
    """
    Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, ``AddTags`` updates the tag's value.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/AddTags>`_
    **Request Syntax**
    ::
      response = client.add_tags(
          Tags=[
              {
                  'Key': 'string',
                  'Value': 'string'
              },
          ],
          ResourceId='string',
          ResourceType='BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
      )
    **Response Syntax**
    ::
      {
          'ResourceId': 'string',
          'ResourceType': 'BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
      }
    **Response Structure**
    - *(dict) --*
      Amazon ML returns the following elements.
      - **ResourceId** *(string) --*
        The ID of the ML object that was tagged.
      - **ResourceType** *(string) --*
        The type of the ML object that was tagged.
    :type Tags: list
    :param Tags: **[REQUIRED]**
      The key-value pairs to use to create tags. If you specify a key without specifying a value, Amazon ML creates a tag with the specified key and a value of null.
      - *(dict) --*
        A custom key-value pair associated with an ML object, such as an ML model.
        - **Key** *(string) --*
          A unique identifier for the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.
        - **Value** *(string) --*
          An optional string, typically used to describe or define the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.
    :type ResourceId: string
    :param ResourceId: **[REQUIRED]**
      The ID of the ML object to tag. For example, ``exampleModelId`` .
    :type ResourceType: string
    :param ResourceType: **[REQUIRED]**
      The type of the ML object to tag.
    :rtype: dict
    :returns:
    """
    # NOTE(review): annotation-only stub -- the body is just this docstring
    # plus ``pass``; presumably the real operation is dispatched at runtime
    # by botocore's BaseClient. Confirm before relying on it directly.
    pass
def can_paginate(self, operation_name: str = None):
    """
    Check if an operation can be paginated.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
      as the method name on the client. For example, if the
      method name is ``create_foo``, and you'd normally invoke the
      operation as ``client.create_foo(**kwargs)``, if the
      ``create_foo`` operation can be paginated, you can use the
      call ``client.get_paginator("create_foo")``.
    :return: ``True`` if the operation can be paginated,
      ``False`` otherwise.
    """
    # NOTE(review): annotation-only stub (docstring + pass); the runtime
    # implementation presumably lives in botocore's BaseClient -- confirm.
    pass
def create_batch_prediction(self, BatchPredictionId: str, MLModelId: str, BatchPredictionDataSourceId: str, OutputUri: str, BatchPredictionName: str = None) -> Dict:
    """
    Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a ``DataSource`` . This operation creates a new ``BatchPrediction`` , and uses an ``MLModel`` and the data files referenced by the ``DataSource`` as information sources.
    ``CreateBatchPrediction`` is an asynchronous operation. In response to ``CreateBatchPrediction`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the ``BatchPrediction`` status to ``PENDING`` . After the ``BatchPrediction`` completes, Amazon ML sets the status to ``COMPLETED`` .
    You can poll for status updates by using the GetBatchPrediction operation and checking the ``Status`` parameter of the result. After the ``COMPLETED`` status appears, the results are available in the location specified by the ``OutputUri`` parameter.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateBatchPrediction>`_
    **Request Syntax**
    ::
      response = client.create_batch_prediction(
          BatchPredictionId='string',
          BatchPredictionName='string',
          MLModelId='string',
          BatchPredictionDataSourceId='string',
          OutputUri='string'
      )
    **Response Syntax**
    ::
      {
          'BatchPredictionId': 'string'
      }
    **Response Structure**
    - *(dict) --*
      Represents the output of a ``CreateBatchPrediction`` operation, and is an acknowledgement that Amazon ML received the request.
      The ``CreateBatchPrediction`` operation is asynchronous. You can poll for status updates by using the ``>GetBatchPrediction`` operation and checking the ``Status`` parameter of the result.
      - **BatchPredictionId** *(string) --*
        A user-supplied ID that uniquely identifies the ``BatchPrediction`` . This value is identical to the value of the ``BatchPredictionId`` in the request.
    :type BatchPredictionId: string
    :param BatchPredictionId: **[REQUIRED]**
      A user-supplied ID that uniquely identifies the ``BatchPrediction`` .
    :type BatchPredictionName: string
    :param BatchPredictionName:
      A user-supplied name or description of the ``BatchPrediction`` . ``BatchPredictionName`` can only use the UTF-8 character set.
    :type MLModelId: string
    :param MLModelId: **[REQUIRED]**
      The ID of the ``MLModel`` that will generate predictions for the group of observations.
    :type BatchPredictionDataSourceId: string
    :param BatchPredictionDataSourceId: **[REQUIRED]**
      The ID of the ``DataSource`` that points to the group of observations to predict.
    :type OutputUri: string
    :param OutputUri: **[REQUIRED]**
      The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory to store the batch prediction results. The following substrings are not allowed in the ``s3 key`` portion of the ``outputURI`` field: ':', '//', '/./', '/../'.
      Amazon ML needs permissions to store and retrieve the logs on your behalf. For information about how to set permissions, see the `Amazon Machine Learning Developer Guide <http://docs.aws.amazon.com/machine-learning/latest/dg>`__ .
    :rtype: dict
    :returns:
    """
    # NOTE(review): annotation-only stub (docstring + pass); the runtime
    # implementation presumably lives in botocore's BaseClient -- confirm.
    pass
def create_data_source_from_rds(self, DataSourceId: str, RDSData: Dict, RoleARN: str, DataSourceName: str = None, ComputeStatistics: bool = None) -> Dict:
    """
    Creates a ``DataSource`` object from an `Amazon Relational Database Service <http://aws.amazon.com/rds/>`__ (Amazon RDS). A ``DataSource`` references data that can be used to perform ``CreateMLModel`` , ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
    ``CreateDataSourceFromRDS`` is an asynchronous operation. In response to ``CreateDataSourceFromRDS`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the ``DataSource`` status to ``PENDING`` . After the ``DataSource`` is created and ready for use, Amazon ML sets the ``Status`` parameter to ``COMPLETED`` . ``DataSource`` in the ``COMPLETED`` or ``PENDING`` state can be used only to perform ``CreateMLModel`` , ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
    If Amazon ML cannot accept the input source, it sets the ``Status`` parameter to ``FAILED`` and includes an error message in the ``Message`` attribute of the ``GetDataSource`` operation response.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateDataSourceFromRDS>`_
    **Request Syntax**
    ::
      response = client.create_data_source_from_rds(
          DataSourceId='string',
          DataSourceName='string',
          RDSData={
              'DatabaseInformation': {
                  'InstanceIdentifier': 'string',
                  'DatabaseName': 'string'
              },
              'SelectSqlQuery': 'string',
              'DatabaseCredentials': {
                  'Username': 'string',
                  'Password': 'string'
              },
              'S3StagingLocation': 'string',
              'DataRearrangement': 'string',
              'DataSchema': 'string',
              'DataSchemaUri': 'string',
              'ResourceRole': 'string',
              'ServiceRole': 'string',
              'SubnetId': 'string',
              'SecurityGroupIds': [
                  'string',
              ]
          },
          RoleARN='string',
          ComputeStatistics=True|False
      )
    **Response Syntax**
    ::
      {
          'DataSourceId': 'string'
      }
    **Response Structure**
    - *(dict) --*
      Represents the output of a ``CreateDataSourceFromRDS`` operation, and is an acknowledgement that Amazon ML received the request.
      The ``CreateDataSourceFromRDS`` operation is asynchronous. You can poll for updates by using the ``GetBatchPrediction`` operation and checking the ``Status`` parameter. You can inspect the ``Message`` when ``Status`` shows up as ``FAILED`` . You can also check the progress of the copy operation by going to the ``DataPipeline`` console and looking up the pipeline using the ``pipelineId`` from the describe call.
      - **DataSourceId** *(string) --*
        A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the ``DataSourceID`` in the request.
    :type DataSourceId: string
    :param DataSourceId: **[REQUIRED]**
      A user-supplied ID that uniquely identifies the ``DataSource`` . Typically, an Amazon Resource Number (ARN) becomes the ID for a ``DataSource`` .
    :type DataSourceName: string
    :param DataSourceName:
      A user-supplied name or description of the ``DataSource`` .
    :type RDSData: dict
    :param RDSData: **[REQUIRED]**
      The data specification of an Amazon RDS ``DataSource`` :
      * DatabaseInformation -
        * ``DatabaseName`` - The name of the Amazon RDS database.
        * ``InstanceIdentifier`` - A unique identifier for the Amazon RDS database instance.
      * DatabaseCredentials - AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.
      * ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by an EC2 instance to carry out the copy task from Amazon RDS to Amazon Simple Storage Service (Amazon S3). For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
      * ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
      * SecurityInfo - The security information to use to access an RDS DB instance. You need to set up appropriate ingress rules for the security entity IDs provided to allow access to the Amazon RDS instance. Specify a [``SubnetId`` , ``SecurityGroupIds`` ] pair for a VPC-based RDS DB instance.
      * SelectSqlQuery - A query that is used to retrieve the observation data for the ``Datasource`` .
      * S3StagingLocation - The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using ``SelectSqlQuery`` is stored in this location.
      * DataSchemaUri - The Amazon S3 location of the ``DataSchema`` .
      * DataSchema - A JSON string representing the schema. This is not required if ``DataSchemaUri`` is specified.
      * DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the ``Datasource`` . Sample - ``\"{\\"splitting\\":{\\"percentBegin\\":10,\\"percentEnd\\":60}}\"``
      - **DatabaseInformation** *(dict) --* **[REQUIRED]**
        Describes the ``DatabaseName`` and ``InstanceIdentifier`` of an Amazon RDS database.
        - **InstanceIdentifier** *(string) --* **[REQUIRED]**
          The ID of an RDS DB instance.
        - **DatabaseName** *(string) --* **[REQUIRED]**
          The name of a database hosted on an RDS DB instance.
      - **SelectSqlQuery** *(string) --* **[REQUIRED]**
        The query that is used to retrieve the observation data for the ``DataSource`` .
      - **DatabaseCredentials** *(dict) --* **[REQUIRED]**
        The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.
        - **Username** *(string) --* **[REQUIRED]**
          The username to be used by Amazon ML to connect to database on an Amazon RDS instance. The username should have sufficient permissions to execute an ``RDSSelectSqlQuery`` query.
        - **Password** *(string) --* **[REQUIRED]**
          The password to be used by Amazon ML to connect to a database on an RDS DB instance. The password should have sufficient permissions to execute the ``RDSSelectQuery`` query.
      - **S3StagingLocation** *(string) --* **[REQUIRED]**
        The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using ``SelectSqlQuery`` is stored in this location.
      - **DataRearrangement** *(string) --*
        A JSON string that represents the splitting and rearrangement processing to be applied to a ``DataSource`` . If the ``DataRearrangement`` parameter is not provided, all of the input data is used to create the ``Datasource`` .
        There are multiple parameters that control what data is used to create a datasource:
        * **``percentBegin``** Use ``percentBegin`` to indicate the beginning of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
        * **``percentEnd``** Use ``percentEnd`` to indicate the end of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
        * **``complement``** The ``complement`` parameter instructs Amazon ML to use the data that is not included in the range of ``percentBegin`` to ``percentEnd`` to create a datasource. The ``complement`` parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for ``percentBegin`` and ``percentEnd`` , along with the ``complement`` parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data. Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}``
        * **``strategy``** To change how Amazon ML splits the data for a datasource, use the ``strategy`` parameter. The default value for the ``strategy`` parameter is ``sequential`` , meaning that Amazon ML takes all of the data records between the ``percentBegin`` and ``percentEnd`` parameters for the datasource, in the order that the records appear in the input data. The following two ``DataRearrangement`` lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\", \"complement\":\"true\"}}`` To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the ``strategy`` parameter to ``random`` and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between ``percentBegin`` and ``percentEnd`` . Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.
        The following two ``DataRearrangement`` lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\", \"complement\":\"true\"}}``
      - **DataSchema** *(string) --*
        A JSON string that represents the schema for an Amazon RDS ``DataSource`` . The ``DataSchema`` defines the structure of the observation data in the data file(s) referenced in the ``DataSource`` .
        A ``DataSchema`` is not required if you specify a ``DataSchemaUri``
        Define your ``DataSchema`` as a series of key-value pairs. ``attributes`` and ``excludedVariableNames`` have an array of key-value pairs for their value. Use the following format to define your ``DataSchema`` .
        { \"version\": \"1.0\",
        \"recordAnnotationFieldName\": \"F1\",
        \"recordWeightFieldName\": \"F2\",
        \"targetFieldName\": \"F3\",
        \"dataFormat\": \"CSV\",
        \"dataFileContainsHeader\": true,
        \"attributes\": [
        { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],
        \"excludedVariableNames\": [ \"F6\" ] }
      - **DataSchemaUri** *(string) --*
        The Amazon S3 location of the ``DataSchema`` .
      - **ResourceRole** *(string) --* **[REQUIRED]**
        The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS to an Amazon S3 task. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
      - **ServiceRole** *(string) --* **[REQUIRED]**
        The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
      - **SubnetId** *(string) --* **[REQUIRED]**
        The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.
      - **SecurityGroupIds** *(list) --* **[REQUIRED]**
        The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to an Amazon S3 task.
        - *(string) --*
    :type RoleARN: string
    :param RoleARN: **[REQUIRED]**
      The role that Amazon ML assumes on behalf of the user to create and activate a data pipeline in the user\'s account and copy data using the ``SelectSqlQuery`` query from Amazon RDS to Amazon S3.
    :type ComputeStatistics: boolean
    :param ComputeStatistics:
      The compute statistics for a ``DataSource`` . The statistics are generated from the observation data referenced by a ``DataSource`` . Amazon ML uses the statistics internally during ``MLModel`` training. This parameter must be set to ``true`` if the ``DataSource`` needs to be used for ``MLModel`` training.
    :rtype: dict
    :returns:
    """
    pass
def create_data_source_from_redshift(self, DataSourceId: str, DataSpec: Dict, RoleARN: str, DataSourceName: str = None, ComputeStatistics: bool = None) -> Dict:
    """
    Creates a ``DataSource`` from a database hosted on an Amazon Redshift cluster. A ``DataSource`` references data that can be used to perform either ``CreateMLModel`` , ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
    ``CreateDataSourceFromRedshift`` is an asynchronous operation. In response to ``CreateDataSourceFromRedshift`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the ``DataSource`` status to ``PENDING`` . After the ``DataSource`` is created and ready for use, Amazon ML sets the ``Status`` parameter to ``COMPLETED`` . ``DataSource`` in ``COMPLETED`` or ``PENDING`` states can be used to perform only ``CreateMLModel`` , ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
    If Amazon ML can't accept the input source, it sets the ``Status`` parameter to ``FAILED`` and includes an error message in the ``Message`` attribute of the ``GetDataSource`` operation response.
    The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a ``SelectSqlQuery`` query. Amazon ML executes an ``Unload`` command in Amazon Redshift to transfer the result set of the ``SelectSqlQuery`` query to ``S3StagingLocation`` .
    After the ``DataSource`` has been created, it's ready for use in evaluations and batch predictions. If you plan to use the ``DataSource`` to train an ``MLModel`` , the ``DataSource`` also requires a recipe. A recipe describes how each input variable will be used in training an ``MLModel`` . Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.
    You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call ``GetDataSource`` for an existing datasource and copy the values to a ``CreateDataSource`` call. Change the settings that you want to change and make sure that all required fields have the appropriate values.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateDataSourceFromRedshift>`_
    **Request Syntax**
    ::
      response = client.create_data_source_from_redshift(
          DataSourceId='string',
          DataSourceName='string',
          DataSpec={
              'DatabaseInformation': {
                  'DatabaseName': 'string',
                  'ClusterIdentifier': 'string'
              },
              'SelectSqlQuery': 'string',
              'DatabaseCredentials': {
                  'Username': 'string',
                  'Password': 'string'
              },
              'S3StagingLocation': 'string',
              'DataRearrangement': 'string',
              'DataSchema': 'string',
              'DataSchemaUri': 'string'
          },
          RoleARN='string',
          ComputeStatistics=True|False
      )
    **Response Syntax**
    ::
      {
          'DataSourceId': 'string'
      }
    **Response Structure**
    - *(dict) --*
      Represents the output of a ``CreateDataSourceFromRedshift`` operation, and is an acknowledgement that Amazon ML received the request.
      The ``CreateDataSourceFromRedshift`` operation is asynchronous. You can poll for updates by using the ``GetBatchPrediction`` operation and checking the ``Status`` parameter.
      - **DataSourceId** *(string) --*
        A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the ``DataSourceID`` in the request.
    :type DataSourceId: string
    :param DataSourceId: **[REQUIRED]**
      A user-supplied ID that uniquely identifies the ``DataSource`` .
    :type DataSourceName: string
    :param DataSourceName:
      A user-supplied name or description of the ``DataSource`` .
    :type DataSpec: dict
    :param DataSpec: **[REQUIRED]**
      The data specification of an Amazon Redshift ``DataSource`` :
      * DatabaseInformation -
        * ``DatabaseName`` - The name of the Amazon Redshift database.
        * ``ClusterIdentifier`` - The unique ID for the Amazon Redshift cluster.
      * DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.
      * SelectSqlQuery - The query that is used to retrieve the observation data for the ``Datasource`` .
      * S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location for staging Amazon Redshift data. The data retrieved from Amazon Redshift using the ``SelectSqlQuery`` query is stored in this location.
      * DataSchemaUri - The Amazon S3 location of the ``DataSchema`` .
      * DataSchema - A JSON string representing the schema. This is not required if ``DataSchemaUri`` is specified.
      * DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the ``DataSource`` . Sample - ``\"{\\"splitting\\":{\\"percentBegin\\":10,\\"percentEnd\\":60}}\"``
      - **DatabaseInformation** *(dict) --* **[REQUIRED]**
        Describes the ``DatabaseName`` and ``ClusterIdentifier`` for an Amazon Redshift ``DataSource`` .
        - **DatabaseName** *(string) --* **[REQUIRED]**
          The name of a database hosted on an Amazon Redshift cluster.
        - **ClusterIdentifier** *(string) --* **[REQUIRED]**
          The ID of an Amazon Redshift cluster.
      - **SelectSqlQuery** *(string) --* **[REQUIRED]**
        Describes the SQL Query to execute on an Amazon Redshift database for an Amazon Redshift ``DataSource`` .
      - **DatabaseCredentials** *(dict) --* **[REQUIRED]**
        Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.
        - **Username** *(string) --* **[REQUIRED]**
          A username to be used by Amazon Machine Learning (Amazon ML) to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the ``RedshiftSelectSqlQuery`` query. The username should be valid for an Amazon Redshift `USER <http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html>`__ .
        - **Password** *(string) --* **[REQUIRED]**
          A password to be used by Amazon ML to connect to a database on an Amazon Redshift cluster. The password should have sufficient permissions to execute a ``RedshiftSelectSqlQuery`` query. The password should be valid for an Amazon Redshift `USER <http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html>`__ .
      - **S3StagingLocation** *(string) --* **[REQUIRED]**
        Describes an Amazon S3 location to store the result set of the ``SelectSqlQuery`` query.
      - **DataRearrangement** *(string) --*
        A JSON string that represents the splitting and rearrangement processing to be applied to a ``DataSource`` . If the ``DataRearrangement`` parameter is not provided, all of the input data is used to create the ``Datasource`` .
        There are multiple parameters that control what data is used to create a datasource:
        * **``percentBegin``** Use ``percentBegin`` to indicate the beginning of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
        * **``percentEnd``** Use ``percentEnd`` to indicate the end of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
        * **``complement``** The ``complement`` parameter instructs Amazon ML to use the data that is not included in the range of ``percentBegin`` to ``percentEnd`` to create a datasource. The ``complement`` parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for ``percentBegin`` and ``percentEnd`` , along with the ``complement`` parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data. Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}``
        * **``strategy``** To change how Amazon ML splits the data for a datasource, use the ``strategy`` parameter. The default value for the ``strategy`` parameter is ``sequential`` , meaning that Amazon ML takes all of the data records between the ``percentBegin`` and ``percentEnd`` parameters for the datasource, in the order that the records appear in the input data. The following two ``DataRearrangement`` lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\", \"complement\":\"true\"}}`` To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the ``strategy`` parameter to ``random`` and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between ``percentBegin`` and ``percentEnd`` . Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.
        The following two ``DataRearrangement`` lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\", \"complement\":\"true\"}}``
      - **DataSchema** *(string) --*
        A JSON string that represents the schema for an Amazon Redshift ``DataSource`` . The ``DataSchema`` defines the structure of the observation data in the data file(s) referenced in the ``DataSource`` .
        A ``DataSchema`` is not required if you specify a ``DataSchemaUri`` .
        Define your ``DataSchema`` as a series of key-value pairs. ``attributes`` and ``excludedVariableNames`` have an array of key-value pairs for their value. Use the following format to define your ``DataSchema`` .
        { \"version\": \"1.0\",
        \"recordAnnotationFieldName\": \"F1\",
        \"recordWeightFieldName\": \"F2\",
        \"targetFieldName\": \"F3\",
        \"dataFormat\": \"CSV\",
        \"dataFileContainsHeader\": true,
        \"attributes\": [
        { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],
        \"excludedVariableNames\": [ \"F6\" ] }
      - **DataSchemaUri** *(string) --*
        Describes the schema location for an Amazon Redshift ``DataSource`` .
    :type RoleARN: string
    :param RoleARN: **[REQUIRED]**
      A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on behalf of the user to create the following:
      * A security group to allow Amazon ML to execute the ``SelectSqlQuery`` query on an Amazon Redshift cluster
      * An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the ``S3StagingLocation``
    :type ComputeStatistics: boolean
    :param ComputeStatistics:
      The compute statistics for a ``DataSource`` . The statistics are generated from the observation data referenced by a ``DataSource`` . Amazon ML uses the statistics internally during ``MLModel`` training. This parameter must be set to ``true`` if the ``DataSource`` needs to be used for ``MLModel`` training.
    :rtype: dict
    :returns:
    """
    pass
def create_data_source_from_s3(self, DataSourceId: str, DataSpec: Dict, DataSourceName: str = None, ComputeStatistics: bool = None) -> Dict:
    """
    Creates a ``DataSource`` object. A ``DataSource`` references data that can be used to perform ``CreateMLModel`` , ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
    ``CreateDataSourceFromS3`` is an asynchronous operation. In response to ``CreateDataSourceFromS3`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the ``DataSource`` status to ``PENDING`` . After the ``DataSource`` has been created and is ready for use, Amazon ML sets the ``Status`` parameter to ``COMPLETED`` . ``DataSource`` in the ``COMPLETED`` or ``PENDING`` state can be used to perform only ``CreateMLModel`` , ``CreateEvaluation`` or ``CreateBatchPrediction`` operations.
    If Amazon ML can't accept the input source, it sets the ``Status`` parameter to ``FAILED`` and includes an error message in the ``Message`` attribute of the ``GetDataSource`` operation response.
    The observation data used in a ``DataSource`` should be ready to use; that is, it should have a consistent structure, and missing data values should be kept to a minimum. The observation data must reside in one or more .csv files in an Amazon Simple Storage Service (Amazon S3) location, along with a schema that describes the data items by name and type. The same schema must be used for all of the data files referenced by the ``DataSource`` .
    After the ``DataSource`` has been created, it's ready to use in evaluations and batch predictions. If you plan to use the ``DataSource`` to train an ``MLModel`` , the ``DataSource`` also needs a recipe. A recipe describes how each input variable will be used in training an ``MLModel`` . Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateDataSourceFromS3>`_
    **Request Syntax**
    ::
      response = client.create_data_source_from_s3(
          DataSourceId='string',
          DataSourceName='string',
          DataSpec={
              'DataLocationS3': 'string',
              'DataRearrangement': 'string',
              'DataSchema': 'string',
              'DataSchemaLocationS3': 'string'
          },
          ComputeStatistics=True|False
      )
    **Response Syntax**
    ::
      {
          'DataSourceId': 'string'
      }
    **Response Structure**
    - *(dict) --*
      Represents the output of a ``CreateDataSourceFromS3`` operation, and is an acknowledgement that Amazon ML received the request.
      The ``CreateDataSourceFromS3`` operation is asynchronous. You can poll for updates by using the ``GetBatchPrediction`` operation and checking the ``Status`` parameter.
      - **DataSourceId** *(string) --*
        A user-supplied ID that uniquely identifies the ``DataSource`` . This value should be identical to the value of the ``DataSourceID`` in the request.
    :type DataSourceId: string
    :param DataSourceId: **[REQUIRED]**
      A user-supplied identifier that uniquely identifies the ``DataSource`` .
    :type DataSourceName: string
    :param DataSourceName:
      A user-supplied name or description of the ``DataSource`` .
    :type DataSpec: dict
    :param DataSpec: **[REQUIRED]**
      The data specification of a ``DataSource`` :
      * DataLocationS3 - The Amazon S3 location of the observation data.
      * DataSchemaLocationS3 - The Amazon S3 location of the ``DataSchema`` .
      * DataSchema - A JSON string representing the schema. This is not required if ``DataSchemaLocationS3`` is specified.
      * DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the ``Datasource`` . Sample - ``\"{\\"splitting\\":{\\"percentBegin\\":10,\\"percentEnd\\":60}}\"``
      - **DataLocationS3** *(string) --* **[REQUIRED]**
        The location of the data file(s) used by a ``DataSource`` . The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
      - **DataRearrangement** *(string) --*
        A JSON string that represents the splitting and rearrangement processing to be applied to a ``DataSource`` . If the ``DataRearrangement`` parameter is not provided, all of the input data is used to create the ``Datasource`` .
        There are multiple parameters that control what data is used to create a datasource:
        * **``percentBegin``** Use ``percentBegin`` to indicate the beginning of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
        * **``percentEnd``** Use ``percentEnd`` to indicate the end of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
        * **``complement``** The ``complement`` parameter instructs Amazon ML to use the data that is not included in the range of ``percentBegin`` to ``percentEnd`` to create a datasource. The ``complement`` parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for ``percentBegin`` and ``percentEnd`` , along with the ``complement`` parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data. Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}``
        * **``strategy``** To change how Amazon ML splits the data for a datasource, use the ``strategy`` parameter. The default value for the ``strategy`` parameter is ``sequential`` , meaning that Amazon ML takes all of the data records between the ``percentBegin`` and ``percentEnd`` parameters for the datasource, in the order that the records appear in the input data. The following two ``DataRearrangement`` lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\", \"complement\":\"true\"}}`` To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the ``strategy`` parameter to ``random`` and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between ``percentBegin`` and ``percentEnd`` . Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.
        The following two ``DataRearrangement`` lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\", \"complement\":\"true\"}}``
      - **DataSchema** *(string) --*
        A JSON string that represents the schema for an Amazon S3 ``DataSource`` . The ``DataSchema`` defines the structure of the observation data in the data file(s) referenced in the ``DataSource`` .
        You must provide either the ``DataSchema`` or the ``DataSchemaLocationS3`` .
        Define your ``DataSchema`` as a series of key-value pairs. ``attributes`` and ``excludedVariableNames`` have an array of key-value pairs for their value. Use the following format to define your ``DataSchema`` .
        { \"version\": \"1.0\",
        \"recordAnnotationFieldName\": \"F1\",
        \"recordWeightFieldName\": \"F2\",
        \"targetFieldName\": \"F3\",
        \"dataFormat\": \"CSV\",
        \"dataFileContainsHeader\": true,
        \"attributes\": [
        { \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],
        \"excludedVariableNames\": [ \"F6\" ] }
      - **DataSchemaLocationS3** *(string) --*
        Describes the schema location in Amazon S3. You must provide either the ``DataSchema`` or the ``DataSchemaLocationS3`` .
    :type ComputeStatistics: boolean
    :param ComputeStatistics:
      The compute statistics for a ``DataSource`` . The statistics are generated from the observation data referenced by a ``DataSource`` . Amazon ML uses the statistics internally during ``MLModel`` training. This parameter must be set to ``true`` if the ``DataSource`` needs to be used for ``MLModel`` training.
    :rtype: dict
    :returns:
    """
    pass
def create_evaluation(self, EvaluationId: str, MLModelId: str, EvaluationDataSourceId: str, EvaluationName: str = None) -> Dict:
"""
Creates a new ``Evaluation`` of an ``MLModel`` . An ``MLModel`` is evaluated on a set of observations associated to a ``DataSource`` . Like a ``DataSource`` for an ``MLModel`` , the ``DataSource`` for an ``Evaluation`` contains values for the ``Target Variable`` . The ``Evaluation`` compares the predicted result for each observation to the actual outcome and provides a summary so that you know how effective the ``MLModel`` functions on the test data. Evaluation generates a relevant performance metric, such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding ``MLModelType`` : ``BINARY`` , ``REGRESSION`` or ``MULTICLASS`` .
``CreateEvaluation`` is an asynchronous operation. In response to ``CreateEvaluation`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the evaluation status to ``PENDING`` . After the ``Evaluation`` is created and ready for use, Amazon ML sets the status to ``COMPLETED`` .
You can use the ``GetEvaluation`` operation to check progress of the evaluation during the creation operation.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateEvaluation>`_
**Request Syntax**
::
response = client.create_evaluation(
EvaluationId='string',
EvaluationName='string',
MLModelId='string',
EvaluationDataSourceId='string'
)
**Response Syntax**
::
{
'EvaluationId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``CreateEvaluation`` operation, and is an acknowledgement that Amazon ML received the request.
``CreateEvaluation`` operation is asynchronous. You can poll for status updates by using the ``GetEvaluation`` operation and checking the ``Status`` parameter.
- **EvaluationId** *(string) --*
The user-supplied ID that uniquely identifies the ``Evaluation`` . This value should be identical to the value of the ``EvaluationId`` in the request.
:type EvaluationId: string
:param EvaluationId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``Evaluation`` .
:type EvaluationName: string
:param EvaluationName:
A user-supplied name or description of the ``Evaluation`` .
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
The ID of the ``MLModel`` to evaluate.
The schema used in creating the ``MLModel`` must match the schema of the ``DataSource`` used in the ``Evaluation`` .
:type EvaluationDataSourceId: string
:param EvaluationDataSourceId: **[REQUIRED]**
The ID of the ``DataSource`` for the evaluation. The schema of the ``DataSource`` must match the schema used to create the ``MLModel`` .
:rtype: dict
:returns:
"""
pass
def create_ml_model(self, MLModelId: str, MLModelType: str, TrainingDataSourceId: str, MLModelName: str = None, Parameters: Dict = None, Recipe: str = None, RecipeUri: str = None) -> Dict:
"""
Creates a new ``MLModel`` using the ``DataSource`` and the recipe as information sources.
An ``MLModel`` is nearly immutable. Users can update only the ``MLModelName`` and the ``ScoreThreshold`` in an ``MLModel`` without creating a new ``MLModel`` .
``CreateMLModel`` is an asynchronous operation. In response to ``CreateMLModel`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the ``MLModel`` status to ``PENDING`` . After the ``MLModel`` has been created and is ready for use, Amazon ML sets the status to ``COMPLETED`` .
You can use the ``GetMLModel`` operation to check the progress of the ``MLModel`` during the creation operation.
``CreateMLModel`` requires a ``DataSource`` with computed statistics, which can be created by setting ``ComputeStatistics`` to ``true`` in ``CreateDataSourceFromRDS`` , ``CreateDataSourceFromS3`` , or ``CreateDataSourceFromRedshift`` operations.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateMLModel>`_
**Request Syntax**
::
response = client.create_ml_model(
MLModelId='string',
MLModelName='string',
MLModelType='REGRESSION'|'BINARY'|'MULTICLASS',
Parameters={
'string': 'string'
},
TrainingDataSourceId='string',
Recipe='string',
RecipeUri='string'
)
**Response Syntax**
::
{
'MLModelId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``CreateMLModel`` operation, and is an acknowledgement that Amazon ML received the request.
The ``CreateMLModel`` operation is asynchronous. You can poll for status updates by using the ``GetMLModel`` operation and checking the ``Status`` parameter.
- **MLModelId** *(string) --*
A user-supplied ID that uniquely identifies the ``MLModel`` . This value should be identical to the value of the ``MLModelId`` in the request.
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``MLModel`` .
:type MLModelName: string
:param MLModelName:
A user-supplied name or description of the ``MLModel`` .
:type MLModelType: string
:param MLModelType: **[REQUIRED]**
The category of supervised learning that this ``MLModel`` will address. Choose from the following types:
* Choose ``REGRESSION`` if the ``MLModel`` will be used to predict a numeric value.
* Choose ``BINARY`` if the ``MLModel`` result has two possible values.
* Choose ``MULTICLASS`` if the ``MLModel`` result has a limited number of values.
For more information, see the `Amazon Machine Learning Developer Guide <http://docs.aws.amazon.com/machine-learning/latest/dg>`__ .
:type Parameters: dict
:param Parameters:
A list of the training parameters in the ``MLModel`` . The list is implemented as a map of key-value pairs.
The following is the current set of training parameters:
* ``sgd.maxMLModelSizeInBytes`` - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance. The value is an integer that ranges from ``100000`` to ``2147483648`` . The default value is ``33554432`` .
* ``sgd.maxPasses`` - The number of times that the training process traverses the observations to build the ``MLModel`` . The value is an integer that ranges from ``1`` to ``10000`` . The default value is ``10`` .
* ``sgd.shuffleType`` - Whether Amazon ML shuffles the training data. Shuffling the data improves a model\'s ability to find the optimal solution for a variety of data types. The valid values are ``auto`` and ``none`` . The default value is ``none`` . We strongly recommend that you shuffle your data.
* ``sgd.l1RegularizationAmount`` - The coefficient regularization L1 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to zero, resulting in a sparse feature set. If you use this parameter, start by specifying a small value, such as ``1.0E-08`` . The value is a double that ranges from ``0`` to ``MAX_DOUBLE`` . The default is to not use L1 normalization. This parameter can\'t be used when ``L2`` is specified. Use this parameter sparingly.
* ``sgd.l2RegularizationAmount`` - The coefficient regularization L2 norm. It controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as ``1.0E-08`` . The value is a double that ranges from ``0`` to ``MAX_DOUBLE`` . The default is to not use L2 normalization. This parameter can\'t be used when ``L1`` is specified. Use this parameter sparingly.
- *(string) --*
String type.
- *(string) --*
String type.
:type TrainingDataSourceId: string
:param TrainingDataSourceId: **[REQUIRED]**
The ``DataSource`` that points to the training data.
:type Recipe: string
:param Recipe:
The data recipe for creating the ``MLModel`` . You must specify either the recipe or its URI. If you don\'t specify a recipe or its URI, Amazon ML creates a default.
:type RecipeUri: string
:param RecipeUri:
The Amazon Simple Storage Service (Amazon S3) location and file name that contains the ``MLModel`` recipe. You must specify either the recipe or its URI. If you don\'t specify a recipe or its URI, Amazon ML creates a default.
:rtype: dict
:returns:
"""
pass
def create_realtime_endpoint(self, MLModelId: str) -> Dict:
"""
Creates a real-time endpoint for the ``MLModel`` . The endpoint contains the URI of the ``MLModel`` ; that is, the location to send real-time prediction requests for the specified ``MLModel`` .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateRealtimeEndpoint>`_
**Request Syntax**
::
response = client.create_realtime_endpoint(
MLModelId='string'
)
**Response Syntax**
::
{
'MLModelId': 'string',
'RealtimeEndpointInfo': {
'PeakRequestsPerSecond': 123,
'CreatedAt': datetime(2015, 1, 1),
'EndpointUrl': 'string',
'EndpointStatus': 'NONE'|'READY'|'UPDATING'|'FAILED'
}
}
**Response Structure**
- *(dict) --*
Represents the output of a ``CreateRealtimeEndpoint`` operation.
The result contains the ``MLModelId`` and the endpoint information for the ``MLModel`` .
.. note::
The endpoint information includes the URI of the ``MLModel`` ; that is, the location to send online prediction requests for the specified ``MLModel`` .
- **MLModelId** *(string) --*
A user-supplied ID that uniquely identifies the ``MLModel`` . This value should be identical to the value of the ``MLModelId`` in the request.
- **RealtimeEndpointInfo** *(dict) --*
The endpoint information of the ``MLModel``
- **PeakRequestsPerSecond** *(integer) --*
The maximum processing rate for the real-time endpoint for ``MLModel`` , measured in incoming requests per second.
- **CreatedAt** *(datetime) --*
The time that the request to create the real-time endpoint for the ``MLModel`` was received. The time is expressed in epoch time.
- **EndpointUrl** *(string) --*
The URI that specifies where to send real-time prediction requests for the ``MLModel`` .
.. note::
Note
The application must wait until the real-time endpoint is ready before using this URI.
- **EndpointStatus** *(string) --*
The current status of the real-time endpoint for the ``MLModel`` . This element can have one of the following values:
* ``NONE`` - Endpoint does not exist or was previously deleted.
* ``READY`` - Endpoint is ready to be used for real-time predictions.
* ``UPDATING`` - Updating/creating the endpoint.
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
The ID assigned to the ``MLModel`` during creation.
:rtype: dict
:returns:
"""
pass
def delete_batch_prediction(self, BatchPredictionId: str) -> Dict:
"""
Assigns the DELETED status to a ``BatchPrediction`` , rendering it unusable.
After using the ``DeleteBatchPrediction`` operation, you can use the GetBatchPrediction operation to verify that the status of the ``BatchPrediction`` changed to DELETED.
**Caution:** The result of the ``DeleteBatchPrediction`` operation is irreversible.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteBatchPrediction>`_
**Request Syntax**
::
response = client.delete_batch_prediction(
BatchPredictionId='string'
)
**Response Syntax**
::
{
'BatchPredictionId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``DeleteBatchPrediction`` operation.
You can use the ``GetBatchPrediction`` operation and check the value of the ``Status`` parameter to see whether a ``BatchPrediction`` is marked as ``DELETED`` .
- **BatchPredictionId** *(string) --*
A user-supplied ID that uniquely identifies the ``BatchPrediction`` . This value should be identical to the value of the ``BatchPredictionId`` in the request.
:type BatchPredictionId: string
:param BatchPredictionId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``BatchPrediction`` .
:rtype: dict
:returns:
"""
pass
def delete_data_source(self, DataSourceId: str) -> Dict:
"""
Assigns the DELETED status to a ``DataSource`` , rendering it unusable.
After using the ``DeleteDataSource`` operation, you can use the GetDataSource operation to verify that the status of the ``DataSource`` changed to DELETED.
**Caution:** The results of the ``DeleteDataSource`` operation are irreversible.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteDataSource>`_
**Request Syntax**
::
response = client.delete_data_source(
DataSourceId='string'
)
**Response Syntax**
::
{
'DataSourceId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``DeleteDataSource`` operation.
- **DataSourceId** *(string) --*
A user-supplied ID that uniquely identifies the ``DataSource`` . This value should be identical to the value of the ``DataSourceId`` in the request.
:type DataSourceId: string
:param DataSourceId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``DataSource`` .
:rtype: dict
:returns:
"""
pass
def delete_evaluation(self, EvaluationId: str) -> Dict:
"""
Assigns the ``DELETED`` status to an ``Evaluation`` , rendering it unusable.
After invoking the ``DeleteEvaluation`` operation, you can use the ``GetEvaluation`` operation to verify that the status of the ``Evaluation`` changed to ``DELETED`` .
**Caution:** The results of the ``DeleteEvaluation`` operation are irreversible.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteEvaluation>`_
**Request Syntax**
::
response = client.delete_evaluation(
EvaluationId='string'
)
**Response Syntax**
::
{
'EvaluationId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``DeleteEvaluation`` operation. The output indicates that Amazon Machine Learning (Amazon ML) received the request.
You can use the ``GetEvaluation`` operation and check the value of the ``Status`` parameter to see whether an ``Evaluation`` is marked as ``DELETED`` .
- **EvaluationId** *(string) --*
A user-supplied ID that uniquely identifies the ``Evaluation`` . This value should be identical to the value of the ``EvaluationId`` in the request.
:type EvaluationId: string
:param EvaluationId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``Evaluation`` to delete.
:rtype: dict
:returns:
"""
pass
def delete_ml_model(self, MLModelId: str) -> Dict:
"""
Assigns the ``DELETED`` status to an ``MLModel`` , rendering it unusable.
After using the ``DeleteMLModel`` operation, you can use the ``GetMLModel`` operation to verify that the status of the ``MLModel`` changed to DELETED.
**Caution:** The result of the ``DeleteMLModel`` operation is irreversible.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteMLModel>`_
**Request Syntax**
::
response = client.delete_ml_model(
MLModelId='string'
)
**Response Syntax**
::
{
'MLModelId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``DeleteMLModel`` operation.
You can use the ``GetMLModel`` operation and check the value of the ``Status`` parameter to see whether an ``MLModel`` is marked as ``DELETED`` .
- **MLModelId** *(string) --*
A user-supplied ID that uniquely identifies the ``MLModel`` . This value should be identical to the value of the ``MLModelId`` in the request.
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``MLModel`` .
:rtype: dict
:returns:
"""
pass
def delete_realtime_endpoint(self, MLModelId: str) -> Dict:
"""
Deletes a real time endpoint of an ``MLModel`` .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteRealtimeEndpoint>`_
**Request Syntax**
::
response = client.delete_realtime_endpoint(
MLModelId='string'
)
**Response Syntax**
::
{
'MLModelId': 'string',
'RealtimeEndpointInfo': {
'PeakRequestsPerSecond': 123,
'CreatedAt': datetime(2015, 1, 1),
'EndpointUrl': 'string',
'EndpointStatus': 'NONE'|'READY'|'UPDATING'|'FAILED'
}
}
**Response Structure**
- *(dict) --*
Represents the output of a ``DeleteRealtimeEndpoint`` operation.
The result contains the ``MLModelId`` and the endpoint information for the ``MLModel`` .
- **MLModelId** *(string) --*
A user-supplied ID that uniquely identifies the ``MLModel`` . This value should be identical to the value of the ``MLModelId`` in the request.
- **RealtimeEndpointInfo** *(dict) --*
The endpoint information of the ``MLModel``
- **PeakRequestsPerSecond** *(integer) --*
The maximum processing rate for the real-time endpoint for ``MLModel`` , measured in incoming requests per second.
- **CreatedAt** *(datetime) --*
The time that the request to create the real-time endpoint for the ``MLModel`` was received. The time is expressed in epoch time.
- **EndpointUrl** *(string) --*
The URI that specifies where to send real-time prediction requests for the ``MLModel`` .
.. note::
Note
The application must wait until the real-time endpoint is ready before using this URI.
- **EndpointStatus** *(string) --*
The current status of the real-time endpoint for the ``MLModel`` . This element can have one of the following values:
* ``NONE`` - Endpoint does not exist or was previously deleted.
* ``READY`` - Endpoint is ready to be used for real-time predictions.
* ``UPDATING`` - Updating/creating the endpoint.
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
The ID assigned to the ``MLModel`` during creation.
:rtype: dict
:returns:
"""
pass
def delete_tags(self, TagKeys: List, ResourceId: str, ResourceType: str) -> Dict:
"""
Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover deleted tags.
If you specify a tag that doesn't exist, Amazon ML ignores it.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteTags>`_
**Request Syntax**
::
response = client.delete_tags(
TagKeys=[
'string',
],
ResourceId='string',
ResourceType='BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
)
**Response Syntax**
::
{
'ResourceId': 'string',
'ResourceType': 'BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
}
**Response Structure**
- *(dict) --*
Amazon ML returns the following elements.
- **ResourceId** *(string) --*
The ID of the ML object from which tags were deleted.
- **ResourceType** *(string) --*
The type of the ML object from which tags were deleted.
:type TagKeys: list
:param TagKeys: **[REQUIRED]**
One or more tag keys to delete.
- *(string) --*
:type ResourceId: string
:param ResourceId: **[REQUIRED]**
The ID of the tagged ML object. For example, ``exampleModelId`` .
:type ResourceType: string
:param ResourceType: **[REQUIRED]**
The type of the tagged ML object.
:rtype: dict
:returns:
"""
pass
def describe_batch_predictions(self, FilterVariable: str = None, EQ: str = None, GT: str = None, LT: str = None, GE: str = None, LE: str = None, NE: str = None, Prefix: str = None, SortOrder: str = None, NextToken: str = None, Limit: int = None) -> Dict:
"""
Returns a list of ``BatchPrediction`` operations that match the search criteria in the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeBatchPredictions>`_
**Request Syntax**
::
response = client.describe_batch_predictions(
FilterVariable='CreatedAt'|'LastUpdatedAt'|'Status'|'Name'|'IAMUser'|'MLModelId'|'DataSourceId'|'DataURI',
EQ='string',
GT='string',
LT='string',
GE='string',
LE='string',
NE='string',
Prefix='string',
SortOrder='asc'|'dsc',
NextToken='string',
Limit=123
)
**Response Syntax**
::
{
'Results': [
{
'BatchPredictionId': 'string',
'MLModelId': 'string',
'BatchPredictionDataSourceId': 'string',
'InputDataLocationS3': 'string',
'CreatedByIamUser': 'string',
'CreatedAt': datetime(2015, 1, 1),
'LastUpdatedAt': datetime(2015, 1, 1),
'Name': 'string',
'Status': 'PENDING'|'INPROGRESS'|'FAILED'|'COMPLETED'|'DELETED',
'OutputUri': 'string',
'Message': 'string',
'ComputeTime': 123,
'FinishedAt': datetime(2015, 1, 1),
'StartedAt': datetime(2015, 1, 1),
'TotalRecordCount': 123,
'InvalidRecordCount': 123
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``DescribeBatchPredictions`` operation. The content is essentially a list of ``BatchPrediction`` s.
- **Results** *(list) --*
A list of ``BatchPrediction`` objects that meet the search criteria.
- *(dict) --*
Represents the output of a ``GetBatchPrediction`` operation.
The content consists of the detailed metadata, the status, and the data file information of a ``Batch Prediction`` .
- **BatchPredictionId** *(string) --*
The ID assigned to the ``BatchPrediction`` at creation. This value should be identical to the value of the ``BatchPredictionId`` in the request.
- **MLModelId** *(string) --*
The ID of the ``MLModel`` that generated predictions for the ``BatchPrediction`` request.
- **BatchPredictionDataSourceId** *(string) --*
The ID of the ``DataSource`` that points to the group of observations to predict.
- **InputDataLocationS3** *(string) --*
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
- **CreatedByIamUser** *(string) --*
The AWS user account that invoked the ``BatchPrediction`` . The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- **CreatedAt** *(datetime) --*
The time that the ``BatchPrediction`` was created. The time is expressed in epoch time.
- **LastUpdatedAt** *(datetime) --*
The time of the most recent edit to the ``BatchPrediction`` . The time is expressed in epoch time.
- **Name** *(string) --*
A user-supplied name or description of the ``BatchPrediction`` .
- **Status** *(string) --*
The status of the ``BatchPrediction`` . This element can have one of the following values:
* ``PENDING`` - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.
* ``INPROGRESS`` - The process is underway.
* ``FAILED`` - The request to perform a batch prediction did not run to completion. It is not usable.
* ``COMPLETED`` - The batch prediction process completed successfully.
* ``DELETED`` - The ``BatchPrediction`` is marked as deleted. It is not usable.
- **OutputUri** *(string) --*
The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the ``s3 key`` portion of the ``outputURI`` field: ':', '//', '/./', '/../'.
- **Message** *(string) --*
A description of the most recent details about processing the batch prediction request.
- **ComputeTime** *(integer) --*
Long integer type that is a 64-bit signed number.
- **FinishedAt** *(datetime) --*
A timestamp represented in epoch time.
- **StartedAt** *(datetime) --*
A timestamp represented in epoch time.
- **TotalRecordCount** *(integer) --*
Long integer type that is a 64-bit signed number.
- **InvalidRecordCount** *(integer) --*
Long integer type that is a 64-bit signed number.
- **NextToken** *(string) --*
The ID of the next page in the paginated results that indicates at least one more page follows.
:type FilterVariable: string
:param FilterVariable:
Use one of the following variables to filter a list of ``BatchPrediction`` :
* ``CreatedAt`` - Sets the search criteria to the ``BatchPrediction`` creation date.
* ``Status`` - Sets the search criteria to the ``BatchPrediction`` status.
* ``Name`` - Sets the search criteria to the contents of the ``BatchPrediction`` **** ``Name`` .
* ``IAMUser`` - Sets the search criteria to the user account that invoked the ``BatchPrediction`` creation.
* ``MLModelId`` - Sets the search criteria to the ``MLModel`` used in the ``BatchPrediction`` .
* ``DataSourceId`` - Sets the search criteria to the ``DataSource`` used in the ``BatchPrediction`` .
* ``DataURI`` - Sets the search criteria to the data file(s) used in the ``BatchPrediction`` . The URL can identify either a file or an Amazon Simple Storage Solution (Amazon S3) bucket or directory.
:type EQ: string
:param EQ:
The equal to operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that exactly match the value specified with ``EQ`` .
:type GT: string
:param GT:
The greater than operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that are greater than the value specified with ``GT`` .
:type LT: string
:param LT:
The less than operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that are less than the value specified with ``LT`` .
:type GE: string
:param GE:
The greater than or equal to operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that are greater than or equal to the value specified with ``GE`` .
:type LE: string
:param LE:
The less than or equal to operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that are less than or equal to the value specified with ``LE`` .
:type NE: string
:param NE:
The not equal to operator. The ``BatchPrediction`` results will have ``FilterVariable`` values not equal to the value specified with ``NE`` .
:type Prefix: string
:param Prefix:
A string that is found at the beginning of a variable, such as ``Name`` or ``Id`` .
For example, a ``Batch Prediction`` operation could have the ``Name`` ``2014-09-09-HolidayGiftMailer`` . To search for this ``BatchPrediction`` , select ``Name`` for the ``FilterVariable`` and any of the following strings for the ``Prefix`` :
* 2014-09
* 2014-09-09
* 2014-09-09-Holiday
:type SortOrder: string
:param SortOrder:
A two-value parameter that determines the sequence of the resulting list of ``BatchPrediction`` s.
* ``asc`` - Arranges the list in ascending order (A-Z, 0-9).
* ``dsc`` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by ``FilterVariable`` .
:type NextToken: string
:param NextToken:
An ID of the page in the paginated results.
:type Limit: integer
:param Limit:
The number of pages of information to include in the result. The range of acceptable values is ``1`` through ``100`` . The default value is ``100`` .
:rtype: dict
:returns:
"""
pass
def describe_data_sources(self, FilterVariable: str = None, EQ: str = None, GT: str = None, LT: str = None, GE: str = None, LE: str = None, NE: str = None, Prefix: str = None, SortOrder: str = None, NextToken: str = None, Limit: int = None) -> Dict:
"""
Returns a list of ``DataSource`` that match the search criteria in the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeDataSources>`_
**Request Syntax**
::
response = client.describe_data_sources(
FilterVariable='CreatedAt'|'LastUpdatedAt'|'Status'|'Name'|'DataLocationS3'|'IAMUser',
EQ='string',
GT='string',
LT='string',
GE='string',
LE='string',
NE='string',
Prefix='string',
SortOrder='asc'|'dsc',
NextToken='string',
Limit=123
)
**Response Syntax**
::
{
'Results': [
{
'DataSourceId': 'string',
'DataLocationS3': 'string',
'DataRearrangement': 'string',
'CreatedByIamUser': 'string',
'CreatedAt': datetime(2015, 1, 1),
'LastUpdatedAt': datetime(2015, 1, 1),
'DataSizeInBytes': 123,
'NumberOfFiles': 123,
'Name': 'string',
'Status': 'PENDING'|'INPROGRESS'|'FAILED'|'COMPLETED'|'DELETED',
'Message': 'string',
'RedshiftMetadata': {
'RedshiftDatabase': {
'DatabaseName': 'string',
'ClusterIdentifier': 'string'
},
'DatabaseUserName': 'string',
'SelectSqlQuery': 'string'
},
'RDSMetadata': {
'Database': {
'InstanceIdentifier': 'string',
'DatabaseName': 'string'
},
'DatabaseUserName': 'string',
'SelectSqlQuery': 'string',
'ResourceRole': 'string',
'ServiceRole': 'string',
'DataPipelineId': 'string'
},
'RoleARN': 'string',
'ComputeStatistics': True|False,
'ComputeTime': 123,
'FinishedAt': datetime(2015, 1, 1),
'StartedAt': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Represents the query results from a DescribeDataSources operation. The content is essentially a list of ``DataSource`` .
- **Results** *(list) --*
A list of ``DataSource`` that meet the search criteria.
- *(dict) --*
Represents the output of the ``GetDataSource`` operation.
The content consists of the detailed metadata and data file information and the current status of the ``DataSource`` .
- **DataSourceId** *(string) --*
The ID that is assigned to the ``DataSource`` during creation.
- **DataLocationS3** *(string) --*
The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a ``DataSource`` .
- **DataRearrangement** *(string) --*
A JSON string that represents the splitting and rearrangement requirement used when this ``DataSource`` was created.
- **CreatedByIamUser** *(string) --*
The AWS user account from which the ``DataSource`` was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- **CreatedAt** *(datetime) --*
The time that the ``DataSource`` was created. The time is expressed in epoch time.
- **LastUpdatedAt** *(datetime) --*
The time of the most recent edit to the ``DataSource`` . The time is expressed in epoch time.
- **DataSizeInBytes** *(integer) --*
The total number of observations contained in the data files that the ``DataSource`` references.
- **NumberOfFiles** *(integer) --*
The number of data files referenced by the ``DataSource`` .
- **Name** *(string) --*
A user-supplied name or description of the ``DataSource`` .
- **Status** *(string) --*
The current status of the ``DataSource`` . This element can have one of the following values:
* PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a ``DataSource`` .
* INPROGRESS - The creation process is underway.
* FAILED - The request to create a ``DataSource`` did not run to completion. It is not usable.
* COMPLETED - The creation process completed successfully.
* DELETED - The ``DataSource`` is marked as deleted. It is not usable.
- **Message** *(string) --*
A description of the most recent details about creating the ``DataSource`` .
- **RedshiftMetadata** *(dict) --*
Describes the ``DataSource`` details specific to Amazon Redshift.
- **RedshiftDatabase** *(dict) --*
Describes the database details required to connect to an Amazon Redshift database.
- **DatabaseName** *(string) --*
The name of a database hosted on an Amazon Redshift cluster.
- **ClusterIdentifier** *(string) --*
The ID of an Amazon Redshift cluster.
- **DatabaseUserName** *(string) --*
A username to be used by Amazon Machine Learning (Amazon ML)to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the ``RedshiftSelectSqlQuery`` query. The username should be valid for an Amazon Redshift `USER <http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html>`__ .
- **SelectSqlQuery** *(string) --*
The SQL query that is specified during CreateDataSourceFromRedshift . Returns only if ``Verbose`` is true in GetDataSourceInput.
- **RDSMetadata** *(dict) --*
The datasource details that are specific to Amazon RDS.
- **Database** *(dict) --*
The database details required to connect to an Amazon RDS.
- **InstanceIdentifier** *(string) --*
The ID of an RDS DB instance.
- **DatabaseName** *(string) --*
The name of a database hosted on an RDS DB instance.
- **DatabaseUserName** *(string) --*
The username to be used by Amazon ML to connect to database on an Amazon RDS instance. The username should have sufficient permissions to execute an ``RDSSelectSqlQuery`` query.
- **SelectSqlQuery** *(string) --*
The SQL query that is supplied during CreateDataSourceFromRDS . Returns only if ``Verbose`` is true in ``GetDataSourceInput`` .
- **ResourceRole** *(string) --*
The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
- **ServiceRole** *(string) --*
The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
- **DataPipelineId** *(string) --*
The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.
- **RoleARN** *(string) --*
The Amazon Resource Name (ARN) of an `AWS IAM Role <http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts>`__ , such as the following: arn:aws:iam::account:role/rolename.
- **ComputeStatistics** *(boolean) --*
The parameter is ``true`` if statistics need to be generated from the observation data.
- **ComputeTime** *(integer) --*
Long integer type that is a 64-bit signed number.
- **FinishedAt** *(datetime) --*
A timestamp represented in epoch time.
- **StartedAt** *(datetime) --*
A timestamp represented in epoch time.
- **NextToken** *(string) --*
An ID of the next page in the paginated results that indicates at least one more page follows.
:type FilterVariable: string
:param FilterVariable:
Use one of the following variables to filter a list of ``DataSource`` :
* ``CreatedAt`` - Sets the search criteria to ``DataSource`` creation dates.
* ``Status`` - Sets the search criteria to ``DataSource`` statuses.
* ``Name`` - Sets the search criteria to the contents of ``DataSource`` **** ``Name`` .
* ``DataUri`` - Sets the search criteria to the URI of data files used to create the ``DataSource`` . The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
* ``IAMUser`` - Sets the search criteria to the user account that invoked the ``DataSource`` creation.
:type EQ: string
:param EQ:
The equal to operator. The ``DataSource`` results will have ``FilterVariable`` values that exactly match the value specified with ``EQ`` .
:type GT: string
:param GT:
The greater than operator. The ``DataSource`` results will have ``FilterVariable`` values that are greater than the value specified with ``GT`` .
:type LT: string
:param LT:
The less than operator. The ``DataSource`` results will have ``FilterVariable`` values that are less than the value specified with ``LT`` .
:type GE: string
:param GE:
The greater than or equal to operator. The ``DataSource`` results will have ``FilterVariable`` values that are greater than or equal to the value specified with ``GE`` .
:type LE: string
:param LE:
The less than or equal to operator. The ``DataSource`` results will have ``FilterVariable`` values that are less than or equal to the value specified with ``LE`` .
:type NE: string
:param NE:
The not equal to operator. The ``DataSource`` results will have ``FilterVariable`` values not equal to the value specified with ``NE`` .
:type Prefix: string
:param Prefix:
A string that is found at the beginning of a variable, such as ``Name`` or ``Id`` .
For example, a ``DataSource`` could have the ``Name`` ``2014-09-09-HolidayGiftMailer`` . To search for this ``DataSource`` , select ``Name`` for the ``FilterVariable`` and any of the following strings for the ``Prefix`` :
* 2014-09
* 2014-09-09
* 2014-09-09-Holiday
:type SortOrder: string
:param SortOrder:
A two-value parameter that determines the sequence of the resulting list of ``DataSource`` .
* ``asc`` - Arranges the list in ascending order (A-Z, 0-9).
* ``dsc`` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by ``FilterVariable`` .
:type NextToken: string
:param NextToken:
The ID of the page in the paginated results.
:type Limit: integer
:param Limit:
The maximum number of ``DataSource`` to include in the result.
:rtype: dict
:returns:
"""
pass
def describe_evaluations(self, FilterVariable: str = None, EQ: str = None, GT: str = None, LT: str = None, GE: str = None, LE: str = None, NE: str = None, Prefix: str = None, SortOrder: str = None, NextToken: str = None, Limit: int = None) -> Dict:
    """
    Return a paginated list of ``Evaluation`` objects matching the search criteria.

    See also the AWS API documentation:
    `DescribeEvaluations <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeEvaluations>`_

    :param FilterVariable: Variable to filter on; one of ``CreatedAt``,
        ``LastUpdatedAt``, ``Status``, ``Name``, ``IAMUser``, ``MLModelId``,
        ``DataSourceId`` or ``DataURI``.
    :param EQ: Keep results whose ``FilterVariable`` value equals this value.
    :param GT: Keep results whose value is strictly greater than this value.
    :param LT: Keep results whose value is strictly less than this value.
    :param GE: Keep results whose value is greater than or equal to this value.
    :param LE: Keep results whose value is less than or equal to this value.
    :param NE: Keep results whose value is not equal to this value.
    :param Prefix: Keep results whose value starts with this string, e.g.
        ``2014-09`` or ``2014-09-09-Holiday``.
    :param SortOrder: ``asc`` (A-Z, 0-9) or ``dsc`` (Z-A, 9-0); results are
        sorted by ``FilterVariable``.
    :param NextToken: ID of the page in the paginated results.
    :param Limit: Maximum number of ``Evaluation`` entries to include.
    :return: A dict with ``Results`` — a list of ``Evaluation`` descriptions
        (``EvaluationId``, ``MLModelId``, ``EvaluationDataSourceId``,
        ``InputDataLocationS3``, ``CreatedByIamUser``, ``CreatedAt``,
        ``LastUpdatedAt``, ``Name``, ``Status`` of
        ``PENDING``/``INPROGRESS``/``FAILED``/``COMPLETED``/``DELETED``,
        ``PerformanceMetrics`` with a ``Properties`` string map, ``Message``,
        ``ComputeTime``, ``FinishedAt``, ``StartedAt``) — and ``NextToken``
        when at least one more page follows.
    """
    # Stub only: the real request/response handling is generated by botocore
    # at runtime from the service model.
    pass
def describe_ml_models(self, FilterVariable: str = None, EQ: str = None, GT: str = None, LT: str = None, GE: str = None, LE: str = None, NE: str = None, Prefix: str = None, SortOrder: str = None, NextToken: str = None, Limit: int = None) -> Dict:
    """
    Return a paginated list of ``MLModel`` objects matching the search criteria.

    See also the AWS API documentation:
    `DescribeMLModels <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeMLModels>`_

    :param FilterVariable: Variable to filter on; one of ``CreatedAt``,
        ``LastUpdatedAt``, ``Status``, ``Name``, ``IAMUser``,
        ``TrainingDataSourceId``, ``RealtimeEndpointStatus``, ``MLModelType``,
        ``Algorithm`` or ``TrainingDataURI``.
    :param EQ: Keep results whose ``FilterVariable`` value equals this value.
    :param GT: Keep results whose value is strictly greater than this value.
    :param LT: Keep results whose value is strictly less than this value.
    :param GE: Keep results whose value is greater than or equal to this value.
    :param LE: Keep results whose value is less than or equal to this value.
    :param NE: Keep results whose value is not equal to this value.
    :param Prefix: Keep results whose value starts with this string, e.g.
        ``2014-09`` or ``2014-09-09-Holiday``.
    :param SortOrder: ``asc`` (A-Z, 0-9) or ``dsc`` (Z-A, 9-0); results are
        sorted by ``FilterVariable``.
    :param NextToken: ID of the page in the paginated results.
    :param Limit: Number of pages of information to include; acceptable range
        is ``1`` through ``100``, default ``100``.
    :return: A dict with ``Results`` — a list of ``MLModel`` descriptions
        (``MLModelId``, ``TrainingDataSourceId``, ``CreatedByIamUser``,
        ``CreatedAt``, ``LastUpdatedAt``, ``Name``, ``Status``,
        ``SizeInBytes``, ``EndpointInfo`` with ``PeakRequestsPerSecond`` /
        ``CreatedAt`` / ``EndpointUrl`` / ``EndpointStatus``,
        ``TrainingParameters`` such as ``sgd.maxMLModelSizeInBytes``,
        ``sgd.maxPasses``, ``sgd.shuffleType``,
        ``sgd.l1RegularizationAmount`` and ``sgd.l2RegularizationAmount``,
        ``InputDataLocationS3``, ``Algorithm`` (``sgd``), ``MLModelType`` of
        ``REGRESSION``/``BINARY``/``MULTICLASS``, ``ScoreThreshold``,
        ``ScoreThresholdLastUpdatedAt``, ``Message``, ``ComputeTime``,
        ``FinishedAt``, ``StartedAt``) — and ``NextToken`` when at least one
        more page follows.
    """
    # Stub only: the real request/response handling is generated by botocore
    # at runtime from the service model.
    pass
def describe_tags(self, ResourceId: str, ResourceType: str) -> Dict:
    """
    Describe one or more of the tags attached to an Amazon ML object.

    See also the AWS API documentation:
    `DescribeTags <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeTags>`_

    :param ResourceId: **[REQUIRED]** ID of the ML object, for example
        ``exampleModelId``.
    :param ResourceType: **[REQUIRED]** Type of the ML object; one of
        ``BatchPrediction``, ``DataSource``, ``Evaluation`` or ``MLModel``.
    :return: A dict echoing ``ResourceId`` and ``ResourceType`` plus ``Tags``,
        a list of ``{'Key': ..., 'Value': ...}`` pairs. Valid tag characters
        include Unicode letters, digits, white space, _, ., /, =, +, -, %,
        and @.
    """
    # Stub only: the real request/response handling is generated by botocore
    # at runtime from the service model.
    pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """
    Generate a presigned URL for a client method and its arguments.

    :param ClientMethod: Name of the client method to presign for.
    :param Params: Parameters normally passed to ``ClientMethod``.
    :param ExpiresIn: Number of seconds the presigned URL stays valid;
        defaults to one hour (3600 seconds).
    :param HttpMethod: HTTP method to use on the generated URL; defaults to
        whatever the method's model uses.
    :return: The presigned URL.
    """
    # Stub only: the real implementation is provided by botocore at runtime.
    pass
def get_batch_prediction(self, BatchPredictionId: str) -> Dict:
    """
    Return a ``BatchPrediction`` with detailed metadata, status, and data file
    information for a ``Batch Prediction`` request.

    See also the AWS API documentation:
    `GetBatchPrediction <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/GetBatchPrediction>`_

    :param BatchPredictionId: **[REQUIRED]** ID assigned to the
        ``BatchPrediction`` at creation.
    :return: A dict describing the batch prediction: ``BatchPredictionId``
        (identical to the requested ID), ``MLModelId``,
        ``BatchPredictionDataSourceId``, ``InputDataLocationS3``,
        ``CreatedByIamUser`` (AWS root or IAM user account), ``CreatedAt`` and
        ``LastUpdatedAt`` epoch times, ``Name``, ``Status`` of
        ``PENDING``/``INPROGRESS``/``FAILED``/``COMPLETED``/``DELETED``,
        ``OutputUri`` (S3 destination for results), ``LogUri``, ``Message``,
        ``ComputeTime`` (only when ``COMPLETED``), ``FinishedAt`` (only when
        ``COMPLETED`` or ``FAILED``), ``StartedAt`` (absent while
        ``PENDING``), ``TotalRecordCount`` and ``InvalidRecordCount``.
    """
    # Stub only: the real request/response handling is generated by botocore
    # at runtime from the service model.
    pass
def get_data_source(self, DataSourceId: str, Verbose: bool = None) -> Dict:
    """
    Return a ``DataSource`` with metadata, data file information, and its
    current status.

    ``GetDataSource`` provides results in normal or verbose format; the
    verbose format additionally includes the schema description and the list
    of files pointed to by the ``DataSource``.

    See also the AWS API documentation:
    `GetDataSource <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/GetDataSource>`_

    :param DataSourceId: **[REQUIRED]** ID assigned to the ``DataSource`` at
        creation.
    :param Verbose: When true, ``DataSourceSchema`` is included in the
        response; when false it is omitted.
    :return: A dict describing the data source: ``DataSourceId`` (identical
        to the requested ID), ``DataLocationS3``, ``DataRearrangement`` (JSON
        splitting/rearrangement spec), ``CreatedByIamUser`` (AWS root or IAM
        user account), ``CreatedAt`` and ``LastUpdatedAt`` epoch times,
        ``DataSizeInBytes``, ``NumberOfFiles``, ``Name``, ``Status`` of
        ``PENDING``/``INPROGRESS``/``FAILED``/``COMPLETED``/``DELETED``,
        ``LogUri``, ``Message``, ``RedshiftMetadata`` (``RedshiftDatabase``
        with ``DatabaseName``/``ClusterIdentifier``, ``DatabaseUserName``,
        ``SelectSqlQuery`` — the query is returned only when ``Verbose`` is
        true), ``RDSMetadata`` (``Database`` with
        ``InstanceIdentifier``/``DatabaseName``, ``DatabaseUserName``,
        ``SelectSqlQuery``, ``ResourceRole``, ``ServiceRole``,
        ``DataPipelineId``), ``RoleARN``, ``ComputeStatistics``,
        ``ComputeTime`` (only when ``COMPLETED`` with statistics enabled),
        ``FinishedAt`` (only when ``COMPLETED`` or ``FAILED``), ``StartedAt``
        (absent while ``PENDING``), and ``DataSourceSchema`` (verbose format
        only).
    """
    # Stub only: the real request/response handling is generated by botocore
    # at runtime from the service model.
    pass
def get_evaluation(self, EvaluationId: str) -> Dict:
    """
    Retrieve a single ``Evaluation``, including its metadata and current status.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/GetEvaluation>`_

    **Request Syntax**
    ::
        response = client.get_evaluation(
            EvaluationId='string'
        )

    :type EvaluationId: string
    :param EvaluationId: **[REQUIRED]**
        The ID of the ``Evaluation`` to retrieve. The evaluation of each
        ``MLModel`` is recorded and cataloged; this ID provides the means to
        access that information.
    :rtype: dict
    :returns: A dict describing the evaluation, containing:

        * identifiers: ``EvaluationId`` (same as in the request),
          ``MLModelId``, ``EvaluationDataSourceId``, ``InputDataLocationS3``;
        * creation metadata: ``CreatedByIamUser``, ``CreatedAt``,
          ``LastUpdatedAt``, ``Name``;
        * ``Status`` — one of ``PENDING`` | ``INPROGRESS`` | ``FAILED`` |
          ``COMPLETED`` | ``DELETED``;
        * ``PerformanceMetrics`` — a dict with a ``Properties`` string map;
          one metric is returned based on the ``MLModel`` type: BinaryAUC
          (binary), RegressionRMSE (regression), or MulticlassAvgFScore
          (multiclass). See the `Amazon Machine Learning Developer Guide
          <http://docs.aws.amazon.com/machine-learning/latest/dg>`__ ;
        * diagnostics and timing: ``LogUri``, ``Message``, ``ComputeTime``
          (available only in the ``COMPLETED`` state), ``FinishedAt``
          (``COMPLETED``/``FAILED`` only) and ``StartedAt`` (not available
          while ``PENDING``), all times expressed in epoch time.
    """
    pass
def get_ml_model(self, MLModelId: str, Verbose: bool = None) -> Dict:
    """
    Retrieve an ``MLModel``, including detailed metadata, data source
    information, and its current status.

    ``GetMLModel`` provides results in normal or verbose format.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/GetMLModel>`_

    **Request Syntax**
    ::
        response = client.get_ml_model(
            MLModelId='string',
            Verbose=True|False
        )

    :type MLModelId: string
    :param MLModelId: **[REQUIRED]**
        The ID assigned to the ``MLModel`` at creation.
    :type Verbose: boolean
    :param Verbose:
        Specifies whether the operation should return ``Recipe``.
        If true, ``Recipe`` is returned; if false, it is not.
    :rtype: dict
    :returns: A dict describing the model, containing:

        * identifiers: ``MLModelId`` (same as in the request),
          ``TrainingDataSourceId``, ``InputDataLocationS3``;
        * creation metadata: ``CreatedByIamUser``, ``CreatedAt``,
          ``LastUpdatedAt``, ``Name``, ``SizeInBytes`` (a 64-bit signed long);
        * ``Status`` — one of ``PENDING`` | ``INPROGRESS`` | ``FAILED`` |
          ``COMPLETED`` | ``DELETED``;
        * ``EndpointInfo`` — the model's real-time endpoint:
          ``PeakRequestsPerSecond``, ``CreatedAt``, ``EndpointUrl`` (wait for
          the endpoint to be ready before using it) and ``EndpointStatus``
          (``NONE`` | ``READY`` | ``UPDATING`` | ``FAILED``);
        * ``TrainingParameters`` — a string map of the training parameters
          (``sgd.maxMLModelSizeInBytes``, ``sgd.maxPasses``,
          ``sgd.shuffleType``, ``sgd.l1RegularizationAmount``,
          ``sgd.l2RegularizationAmount``; L1 and L2 are mutually exclusive);
        * ``MLModelType`` — ``REGRESSION`` | ``BINARY`` | ``MULTICLASS``;
        * ``ScoreThreshold`` / ``ScoreThresholdLastUpdatedAt`` — the binary
          classification boundary: outputs >= threshold yield a positive
          result (e.g. ``true``), outputs below it a negative one;
        * diagnostics and timing: ``LogUri``, ``Message``, ``ComputeTime``
          (``COMPLETED`` state only), ``FinishedAt``
          (``COMPLETED``/``FAILED`` only), ``StartedAt`` (not while
          ``PENDING``), all times in epoch time;
        * verbose-only fields: ``Recipe`` (training recipe) and ``Schema``
          (schema used by the data files referenced by the ``DataSource``).
    """
    pass
def get_paginator(self, operation_name: str = None) -> Paginator:
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, identical to the client method
        name. For example, if the method name is ``create_foo`` and you would
        normally invoke the operation as ``client.create_foo(**kwargs)``, then
        — provided ``create_foo`` supports pagination — you can call
        ``client.get_paginator(\"create_foo\")``.
    :raise OperationNotPageableError: Raised when the operation cannot be
        paginated; ``client.can_paginate`` can be used to check pageability
        beforehand.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
    """
    Return an object that can wait for some condition to be reached.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. A list of available
        waiters can be found in the waiters section of the service docs.
    :returns: The specified waiter object.
    :rtype: botocore.waiter.Waiter
    """
    pass
def predict(self, MLModelId: str, Record: Dict, PredictEndpoint: str) -> Dict:
    """
    Generate a prediction for an observation using the specified ``ML Model``.

    .. note::
        Not all response parameters will be populated. Whether a response
        parameter is populated depends on the type of model requested.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/Predict>`_

    **Request Syntax**
    ::
        response = client.predict(
            MLModelId='string',
            Record={
                'string': 'string'
            },
            PredictEndpoint='string'
        )

    :type MLModelId: string
    :param MLModelId: **[REQUIRED]**
        A unique identifier of the ``MLModel``.
    :type Record: dict
    :param Record: **[REQUIRED]**
        A map of variable name-value pairs that represent an observation.
        Keys name variables (target value, label, weight, tags); values hold
        the corresponding variable values.
    :type PredictEndpoint: string
    :param PredictEndpoint: **[REQUIRED]**
    :rtype: dict
    :returns: A dict with a single ``Prediction`` key whose value contains:

        * ``predictedLabel`` *(string)* — present for ``BINARY`` or
          ``MULTICLASS`` ``MLModel`` requests;
        * ``predictedValue`` *(float)* — present for ``REGRESSION``
          ``MLModel`` requests;
        * ``predictedScores`` *(dict of string -> float)* — the raw
          classification score for each label;
        * ``details`` *(dict of string -> string)* — additional attributes,
          e.g. ``DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION |
          BINARY | MULTICLASS`` and ``DetailsAttributes.ALGORITHM - SGD``.
    """
    pass
def update_batch_prediction(self, BatchPredictionId: str, BatchPredictionName: str) -> Dict:
    """
    Update the ``BatchPredictionName`` of a ``BatchPrediction``.

    The updated data element can be inspected afterwards with the
    ``GetBatchPrediction`` operation.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/UpdateBatchPrediction>`_

    **Request Syntax**
    ::
        response = client.update_batch_prediction(
            BatchPredictionId='string',
            BatchPredictionName='string'
        )

    :type BatchPredictionId: string
    :param BatchPredictionId: **[REQUIRED]**
        The ID assigned to the ``BatchPrediction`` during creation.
    :type BatchPredictionName: string
    :param BatchPredictionName: **[REQUIRED]**
        A new user-supplied name or description of the ``BatchPrediction``.
    :rtype: dict
    :returns: A dict with a single key, ``BatchPredictionId`` *(string)* —
        the ID assigned to the ``BatchPrediction`` during creation; it should
        be identical to the ``BatchPredictionId`` in the request.
    """
    pass
def update_data_source(self, DataSourceId: str, DataSourceName: str) -> Dict:
    """
    Updates the ``DataSourceName`` of a ``DataSource`` .
    You can use the ``GetDataSource`` operation to view the contents of the updated data element.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/UpdateDataSource>`_
    **Request Syntax**
    ::
        response = client.update_data_source(
            DataSourceId='string',
            DataSourceName='string'
        )
    **Response Syntax**
    ::
        {
            'DataSourceId': 'string'
        }
    **Response Structure**
    - *(dict) --*
        Represents the output of an ``UpdateDataSource`` operation.
        You can see the updated content by using the ``GetDataSource`` operation.
        - **DataSourceId** *(string) --*
            The ID assigned to the ``DataSource`` during creation. This value should be identical to the value of the ``DataSourceId`` in the request.
    :type DataSourceId: string
    :param DataSourceId: **[REQUIRED]**
        The ID assigned to the ``DataSource`` during creation.
    :type DataSourceName: string
    :param DataSourceName: **[REQUIRED]**
        A new user-supplied name or description of the ``DataSource`` that will replace the current description.
    :rtype: dict
    :returns:
    """
    pass
def update_evaluation(self, EvaluationId: str, EvaluationName: str) -> Dict:
    """
    Updates the ``EvaluationName`` of an ``Evaluation`` .
    You can use the ``GetEvaluation`` operation to view the contents of the updated data element.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/UpdateEvaluation>`_
    **Request Syntax**
    ::
        response = client.update_evaluation(
            EvaluationId='string',
            EvaluationName='string'
        )
    **Response Syntax**
    ::
        {
            'EvaluationId': 'string'
        }
    **Response Structure**
    - *(dict) --*
        Represents the output of an ``UpdateEvaluation`` operation.
        You can see the updated content by using the ``GetEvaluation`` operation.
        - **EvaluationId** *(string) --*
            The ID assigned to the ``Evaluation`` during creation. This value should be identical to the value of the ``EvaluationId`` in the request.
    :type EvaluationId: string
    :param EvaluationId: **[REQUIRED]**
        The ID assigned to the ``Evaluation`` during creation.
    :type EvaluationName: string
    :param EvaluationName: **[REQUIRED]**
        A new user-supplied name or description of the ``Evaluation`` that will replace the current content.
    :rtype: dict
    :returns:
    """
    pass
def update_ml_model(self, MLModelId: str, MLModelName: str = None, ScoreThreshold: float = None) -> Dict:
    """
    Updates the ``MLModelName`` and the ``ScoreThreshold`` of an ``MLModel`` .
    You can use the ``GetMLModel`` operation to view the contents of the updated data element.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/UpdateMLModel>`_
    **Request Syntax**
    ::
        response = client.update_ml_model(
            MLModelId='string',
            MLModelName='string',
            ScoreThreshold=...
        )
    **Response Syntax**
    ::
        {
            'MLModelId': 'string'
        }
    **Response Structure**
    - *(dict) --*
        Represents the output of an ``UpdateMLModel`` operation.
        You can see the updated content by using the ``GetMLModel`` operation.
        - **MLModelId** *(string) --*
            The ID assigned to the ``MLModel`` during creation. This value should be identical to the value of the ``MLModelId`` in the request.
    :type MLModelId: string
    :param MLModelId: **[REQUIRED]**
        The ID assigned to the ``MLModel`` during creation.
    :type MLModelName: string
    :param MLModelName:
        A user-supplied name or description of the ``MLModel`` .
    :type ScoreThreshold: float
    :param ScoreThreshold:
        The ``ScoreThreshold`` used in binary classification ``MLModel`` that marks the boundary between a positive prediction and a negative prediction.
        Output values greater than or equal to the ``ScoreThreshold`` receive a positive result from the ``MLModel`` , such as ``true`` . Output values less than the ``ScoreThreshold`` receive a negative response from the ``MLModel`` , such as ``false`` .
    :rtype: dict
    :returns:
    """
    pass
| 69.180519 | 2,268 | 0.601651 | 17,345 | 159,807 | 5.52949 | 0.055405 | 0.013555 | 0.005881 | 0.007674 | 0.850765 | 0.813876 | 0.79246 | 0.7734 | 0.741171 | 0.72989 | 0 | 0.009855 | 0.291376 | 159,807 | 2,309 | 2,269 | 69.210481 | 0.837076 | 0.857547 | 0 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.444444 | false | 0.444444 | 0.097222 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 11 |
d3c53e6c2118d1a3006056831d6c9047bbeab6c8 | 104,940 | bzl | Python | dotnet/stdlib.net/net47/generated.bzl | marleypowell/rules_dotnet | b2a6c7583f7c2cf37ea62d0f1a703c7af4d333ef | [
"Apache-2.0"
] | null | null | null | dotnet/stdlib.net/net47/generated.bzl | marleypowell/rules_dotnet | b2a6c7583f7c2cf37ea62d0f1a703c7af4d333ef | [
"Apache-2.0"
] | null | null | null | dotnet/stdlib.net/net47/generated.bzl | marleypowell/rules_dotnet | b2a6c7583f7c2cf37ea62d0f1a703c7af4d333ef | [
"Apache-2.0"
] | null | null | null | load("@io_bazel_rules_dotnet//dotnet/private:rules/stdlib.bzl", "net_stdlib")
def define_stdlib(context_data):
net_stdlib(
name = "accessibility.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Accessibility.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Accessibility.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "custommarshalers.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/CustomMarshalers.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/CustomMarshalers.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "isymwrapper.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/ISymWrapper.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/ISymWrapper.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "microsoft.activities.build.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Activities.Build.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Activities.Build.dll",
deps = [
":mscorlib.dll",
":xamlbuildtask.dll",
":system.xaml.dll",
":system.dll",
":microsoft.build.utilities.v4.0.dll",
":microsoft.build.framework.dll",
":system.activities.dll",
":system.runtime.serialization.dll",
]
)
net_stdlib(
name = "microsoft.build.conversion.v4.0.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Conversion.v4.0.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Conversion.v4.0.dll",
deps = [
":mscorlib.dll",
":microsoft.build.dll",
":system.dll",
":microsoft.build.engine.dll",
":system.core.dll",
]
)
net_stdlib(
name = "microsoft.build.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.dll",
deps = [
":mscorlib.dll",
":system.dll",
":microsoft.build.framework.dll",
":system.core.dll",
":microsoft.build.engine.dll",
]
)
net_stdlib(
name = "microsoft.build.engine.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Engine.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Engine.dll",
deps = [
":mscorlib.dll",
":system.dll",
":microsoft.build.framework.dll",
]
)
net_stdlib(
name = "microsoft.build.framework.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Framework.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Framework.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":system.dll",
]
)
net_stdlib(
name = "microsoft.build.tasks.v4.0.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Tasks.v4.0.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Tasks.v4.0.dll",
deps = [
":mscorlib.dll",
":system.dll",
":microsoft.build.utilities.v4.0.dll",
":microsoft.build.framework.dll",
":system.core.dll",
":system.security.dll",
":system.xaml.dll",
]
)
net_stdlib(
name = "microsoft.build.utilities.v4.0.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Utilities.v4.0.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.Build.Utilities.v4.0.dll",
deps = [
":mscorlib.dll",
":microsoft.build.framework.dll",
":system.dll",
":system.core.dll",
]
)
net_stdlib(
name = "microsoft.csharp.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.CSharp.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.CSharp.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
":system.dynamic.dll",
]
)
net_stdlib(
name = "microsoft.jscript.dll",
version = "10.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.JScript.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.JScript.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "microsoft.visualbasic.compatibility.data.dll",
version = "10.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.VisualBasic.Compatibility.Data.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.VisualBasic.Compatibility.Data.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.drawing.dll",
":microsoft.visualbasic.dll",
":microsoft.visualbasic.compatibility.dll",
":system.security.dll",
]
)
net_stdlib(
name = "microsoft.visualbasic.compatibility.dll",
version = "10.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.VisualBasic.Compatibility.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.VisualBasic.Compatibility.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":microsoft.visualbasic.dll",
]
)
net_stdlib(
name = "microsoft.visualbasic.dll",
version = "10.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.VisualBasic.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.VisualBasic.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.deployment.dll",
":system.management.dll",
":system.core.dll",
":system.xml.linq.dll",
":system.drawing.dll",
]
)
net_stdlib(
name = "microsoft.visualc.dll",
version = "10.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.VisualC.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Microsoft.VisualC.dll",
deps = [
":mscorlib.dll",
]
)
# .NET Framework 4.7 reference-assembly targets, written as a data table.
# Each target name is the lower-cased assembly file name, and ref/stdlib_path
# always carry the identical label under build/.NETFramework/v4.7, so only
# (file, version, deps) varies per target.
[
    net_stdlib(
        name = _file.lower(),
        version = _version,
        dotnet_context_data = context_data,
        ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        deps = _deps,
    )
    for _file, _version, _deps in [
        ("Microsoft.VisualC.STLCLR.dll", "2.0.0.0", [":mscorlib.dll", ":system.dll"]),
        ("mscorlib.dll", "4.0.0.0", []),
        ("PresentationBuildTasks.dll", "4.0.0.0", [":mscorlib.dll", ":system.dll", ":microsoft.build.utilities.v4.0.dll", ":microsoft.build.framework.dll"]),
        ("PresentationCore.dll", "4.0.0.0", [":mscorlib.dll", ":system.dll", ":windowsbase.dll", ":system.xaml.dll", ":uiautomationtypes.dll", ":system.windows.input.manipulations.dll", ":uiautomationprovider.dll", ":system.deployment.dll"]),
        ("PresentationFramework.Aero.dll", "4.0.0.0", [":mscorlib.dll", ":windowsbase.dll", ":system.dll", ":presentationcore.dll", ":system.xaml.dll"]),
        ("PresentationFramework.Aero2.dll", "4.0.0.0", [":mscorlib.dll", ":windowsbase.dll", ":system.dll", ":presentationcore.dll", ":system.xaml.dll"]),
        ("PresentationFramework.AeroLite.dll", "4.0.0.0", [":mscorlib.dll", ":windowsbase.dll", ":system.dll", ":presentationcore.dll", ":system.xaml.dll"]),
        ("PresentationFramework.Classic.dll", "4.0.0.0", [":mscorlib.dll", ":windowsbase.dll", ":system.dll", ":presentationcore.dll", ":system.xaml.dll"]),
    ]
]
# .NET Framework 4.7 reference-assembly targets (all version 4.0.0.0),
# written as a data table: target name is the lower-cased assembly file name,
# and ref/stdlib_path share one label under build/.NETFramework/v4.7.
[
    net_stdlib(
        name = _file.lower(),
        version = "4.0.0.0",
        dotnet_context_data = context_data,
        ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        deps = _deps,
    )
    for _file, _deps in [
        ("PresentationFramework.dll", [":mscorlib.dll", ":system.xaml.dll", ":windowsbase.dll", ":system.dll", ":presentationcore.dll", ":system.core.dll", ":uiautomationprovider.dll", ":uiautomationtypes.dll", ":reachframework.dll", ":accessibility.dll", ":system.deployment.dll"]),
        ("PresentationFramework.Luna.dll", [":mscorlib.dll", ":windowsbase.dll", ":system.dll", ":presentationcore.dll", ":system.xaml.dll"]),
        ("PresentationFramework.Royale.dll", [":mscorlib.dll", ":windowsbase.dll", ":system.dll", ":presentationcore.dll", ":system.xaml.dll"]),
        ("ReachFramework.dll", [":mscorlib.dll", ":presentationcore.dll", ":windowsbase.dll", ":system.dll", ":system.drawing.dll", ":system.security.dll"]),
        ("sysglobl.dll", [":mscorlib.dll"]),
        ("System.Activities.Core.Presentation.dll", [":mscorlib.dll", ":system.dll", ":windowsbase.dll", ":system.activities.presentation.dll", ":system.xaml.dll", ":presentationcore.dll", ":system.activities.dll", ":system.servicemodel.activities.dll", ":system.xml.linq.dll", ":system.core.dll", ":system.runtime.serialization.dll", ":system.windows.presentation.dll"]),
    ]
]
# .NET Framework 4.7 reference-assembly targets (all version 4.0.0.0),
# written as a data table: target name is the lower-cased assembly file name,
# and ref/stdlib_path share one label under build/.NETFramework/v4.7.
[
    net_stdlib(
        name = _file.lower(),
        version = "4.0.0.0",
        dotnet_context_data = context_data,
        ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        deps = _deps,
    )
    for _file, _deps in [
        ("System.Activities.dll", [":mscorlib.dll", ":system.xaml.dll", ":system.core.dll", ":system.dll", ":system.xml.linq.dll", ":system.runtime.serialization.dll", ":system.runtime.durableinstancing.dll", ":microsoft.visualbasic.dll"]),
        ("System.Activities.DurableInstancing.dll", [":mscorlib.dll", ":system.runtime.durableinstancing.dll", ":system.xml.linq.dll", ":system.activities.dll", ":system.core.dll", ":system.runtime.serialization.dll", ":system.dll"]),
        ("System.Activities.Presentation.dll", [":mscorlib.dll", ":system.xaml.dll", ":system.dll", ":windowsbase.dll", ":presentationcore.dll", ":system.activities.dll", ":system.core.dll", ":system.xml.linq.dll", ":system.drawing.dll", ":windowsformsintegration.dll", ":uiautomationprovider.dll", ":uiautomationtypes.dll", ":reachframework.dll", ":system.servicemodel.activities.dll", ":system.componentmodel.composition.dll"]),
        ("System.AddIn.Contract.dll", [":mscorlib.dll"]),
        ("System.AddIn.dll", [":mscorlib.dll", ":system.dll", ":system.addin.contract.dll"]),
        ("System.ComponentModel.Composition.dll", [":mscorlib.dll", ":system.core.dll", ":system.dll"]),
        ("System.ComponentModel.Composition.Registration.dll", [":mscorlib.dll", ":system.dll", ":system.componentmodel.composition.dll", ":system.core.dll", ":system.reflection.context.dll"]),
    ]
]
# .NET Framework 4.7 reference-assembly targets (all version 4.0.0.0),
# written as a data table: target name is the lower-cased assembly file name,
# and ref/stdlib_path share one label under build/.NETFramework/v4.7.
[
    net_stdlib(
        name = _file.lower(),
        version = "4.0.0.0",
        dotnet_context_data = context_data,
        ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        deps = _deps,
    )
    for _file, _deps in [
        ("System.ComponentModel.DataAnnotations.dll", [":mscorlib.dll", ":system.dll", ":system.core.dll"]),
        ("System.Configuration.dll", [":mscorlib.dll", ":system.dll", ":system.security.dll", ":system.core.dll"]),
        ("System.Configuration.Install.dll", [":mscorlib.dll", ":system.dll", ":system.runtime.serialization.dll"]),
        ("System.Core.dll", [":mscorlib.dll", ":system.dll", ":system.numerics.dll", ":system.security.dll"]),
        ("System.Data.DataSetExtensions.dll", [":mscorlib.dll", ":system.dll", ":system.core.dll"]),
        ("System.Data.dll", [":mscorlib.dll", ":system.dll", ":system.numerics.dll", ":system.runtime.caching.dll", ":system.core.dll", ":system.enterpriseservices.dll"]),
        ("System.Data.Entity.Design.dll", [":mscorlib.dll", ":system.dll", ":system.data.entity.dll", ":system.core.dll", ":system.xml.linq.dll", ":system.data.datasetextensions.dll"]),
        ("System.Data.Entity.dll", [":mscorlib.dll", ":system.core.dll", ":system.dll", ":system.runtime.serialization.dll", ":system.componentmodel.dataannotations.dll", ":system.xml.linq.dll"]),
        ("System.Data.Linq.dll", [":mscorlib.dll", ":system.core.dll", ":system.dll", ":system.runtime.serialization.dll", ":system.xml.linq.dll"]),
    ]
]
# .NET Framework 4.7 reference-assembly targets (all version 4.0.0.0),
# written as a data table: target name is the lower-cased assembly file name,
# and ref/stdlib_path share one label under build/.NETFramework/v4.7.
[
    net_stdlib(
        name = _file.lower(),
        version = "4.0.0.0",
        dotnet_context_data = context_data,
        ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        deps = _deps,
    )
    for _file, _deps in [
        ("System.Data.OracleClient.dll", [":mscorlib.dll", ":system.dll", ":system.enterpriseservices.dll"]),
        ("System.Data.Services.Client.dll", [":mscorlib.dll", ":system.dll", ":system.core.dll", ":system.xml.linq.dll"]),
        ("System.Data.Services.Design.dll", [":mscorlib.dll", ":system.dll", ":system.core.dll", ":system.data.entity.dll", ":system.data.services.client.dll", ":system.xml.linq.dll", ":system.web.extensions.dll"]),
        ("System.Data.Services.dll", [":mscorlib.dll", ":system.core.dll", ":system.dll", ":system.data.services.client.dll", ":system.servicemodel.web.dll", ":system.servicemodel.activation.dll", ":system.runtime.serialization.dll", ":system.data.entity.dll", ":system.xml.linq.dll", ":system.data.linq.dll"]),
        ("System.Data.SqlXml.dll", [":mscorlib.dll", ":system.dll"]),
        ("System.Deployment.dll", [":mscorlib.dll", ":system.security.dll", ":system.dll", ":system.drawing.dll", ":system.core.dll"]),
        ("System.Design.dll", [":mscorlib.dll", ":system.drawing.dll", ":system.dll", ":system.data.oracleclient.dll", ":accessibility.dll", ":system.drawing.design.dll", ":system.web.regularexpressions.dll", ":system.runtime.serialization.formatters.soap.dll", ":system.core.dll"]),
        ("System.Device.dll", [":mscorlib.dll", ":system.dll"]),
    ]
]
# .NET Framework 4.7 reference-assembly targets (all version 4.0.0.0),
# written as a data table: target name is the lower-cased assembly file name,
# and ref/stdlib_path share one label under build/.NETFramework/v4.7.
[
    net_stdlib(
        name = _file.lower(),
        version = "4.0.0.0",
        dotnet_context_data = context_data,
        ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        deps = _deps,
    )
    for _file, _deps in [
        ("System.DirectoryServices.AccountManagement.dll", [":mscorlib.dll", ":system.dll", ":system.directoryservices.dll", ":system.directoryservices.protocols.dll"]),
        ("System.DirectoryServices.dll", [":mscorlib.dll", ":system.dll"]),
        ("System.DirectoryServices.Protocols.dll", [":mscorlib.dll", ":system.dll", ":system.directoryservices.dll"]),
        ("System.dll", [":mscorlib.dll"]),
        ("System.Drawing.Design.dll", [":mscorlib.dll", ":system.drawing.dll", ":system.dll"]),
        ("System.Drawing.dll", [":mscorlib.dll", ":system.dll"]),
        ("System.Dynamic.dll", [":mscorlib.dll", ":system.core.dll", ":system.dll"]),
        ("System.EnterpriseServices.dll", [":mscorlib.dll", ":system.dll", ":system.directoryservices.dll"]),
        ("System.IdentityModel.dll", [":mscorlib.dll", ":system.runtime.serialization.dll", ":system.dll", ":system.core.dll", ":system.web.applicationservices.dll", ":system.security.dll"]),
    ]
]
# .NET Framework 4.7 reference-assembly targets (all version 4.0.0.0),
# written as a data table: target name is the lower-cased assembly file name,
# and ref/stdlib_path share one label under build/.NETFramework/v4.7.
[
    net_stdlib(
        name = _file.lower(),
        version = "4.0.0.0",
        dotnet_context_data = context_data,
        ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        deps = _deps,
    )
    for _file, _deps in [
        ("System.IdentityModel.Selectors.dll", [":mscorlib.dll", ":system.dll", ":system.identitymodel.dll", ":system.runtime.serialization.dll"]),
        ("System.IdentityModel.Services.dll", [":mscorlib.dll", ":system.dll", ":system.identitymodel.dll", ":system.runtime.serialization.dll", ":system.web.applicationservices.dll"]),
        ("System.IO.Compression.dll", [":mscorlib.dll", ":system.dll"]),
        ("System.IO.Compression.FileSystem.dll", [":mscorlib.dll", ":system.io.compression.dll", ":system.dll"]),
        ("System.IO.Log.dll", [":mscorlib.dll", ":system.dll"]),
        ("System.Management.dll", [":mscorlib.dll", ":system.dll", ":system.configuration.install.dll", ":microsoft.jscript.dll"]),
        ("System.Management.Instrumentation.dll", [":mscorlib.dll", ":system.management.dll", ":system.dll", ":system.core.dll", ":system.configuration.install.dll"]),
        ("System.Messaging.dll", [":mscorlib.dll", ":system.dll", ":system.directoryservices.dll", ":system.configuration.install.dll", ":system.drawing.dll"]),
        ("System.Net.dll", [":mscorlib.dll", ":system.dll"]),
        ("System.Net.Http.dll", [":mscorlib.dll", ":system.dll", ":system.core.dll"]),
        ("System.Net.Http.WebRequest.dll", [":mscorlib.dll", ":system.dll", ":system.net.http.dll"]),
    ]
]
# .NET Framework 4.7 reference-assembly targets (all version 4.0.0.0),
# written as a data table: target name is the lower-cased assembly file name,
# and ref/stdlib_path share one label under build/.NETFramework/v4.7.
[
    net_stdlib(
        name = _file.lower(),
        version = "4.0.0.0",
        dotnet_context_data = context_data,
        ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/" + _file,
        deps = _deps,
    )
    for _file, _deps in [
        ("System.Numerics.dll", [":mscorlib.dll"]),
        ("System.Printing.dll", [":mscorlib.dll", ":system.dll", ":system.drawing.dll", ":system.xaml.dll", ":windowsbase.dll", ":reachframework.dll", ":presentationcore.dll"]),
        ("System.Reflection.Context.dll", [":mscorlib.dll"]),
        ("System.Runtime.Caching.dll", [":mscorlib.dll", ":system.dll"]),
        ("System.Runtime.DurableInstancing.dll", [":mscorlib.dll", ":system.xml.linq.dll", ":system.core.dll", ":system.runtime.serialization.dll", ":system.dll"]),
        ("System.Runtime.Remoting.dll", [":mscorlib.dll", ":system.dll", ":system.runtime.serialization.formatters.soap.dll", ":system.directoryservices.dll"]),
        ("System.Runtime.Serialization.dll", [":mscorlib.dll", ":system.dll"]),
        ("System.Runtime.Serialization.Formatters.Soap.dll", [":mscorlib.dll"]),
        ("System.Security.dll", [":mscorlib.dll", ":system.dll"]),
    ]
]
)
net_stdlib(
name = "system.servicemodel.activation.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Activation.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Activation.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.servicemodel.activities.dll",
":system.activities.dll",
":system.xaml.dll",
":system.xml.linq.dll",
":system.core.dll",
":system.net.http.dll",
":system.web.regularexpressions.dll",
":system.runtime.durableinstancing.dll",
]
)
net_stdlib(
name = "system.servicemodel.activities.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Activities.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Activities.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":system.xml.linq.dll",
":system.dll",
":system.identitymodel.dll",
":system.activities.dll",
":system.core.dll",
":system.runtime.durableinstancing.dll",
":system.runtime.serialization.dll",
":system.activities.durableinstancing.dll",
]
)
net_stdlib(
name = "system.servicemodel.channels.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Channels.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Channels.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":system.runtime.serialization.dll",
":system.dll",
":system.net.http.dll",
":system.web.services.dll",
]
)
net_stdlib(
name = "system.servicemodel.discovery.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Discovery.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Discovery.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.runtime.serialization.dll",
":system.servicemodel.channels.dll",
":system.xml.linq.dll",
]
)
net_stdlib(
name = "system.servicemodel.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":system.dll",
":system.core.dll",
":system.runtime.serialization.dll",
":system.identitymodel.dll",
":system.directoryservices.dll",
":system.web.services.dll",
":system.enterpriseservices.dll",
":system.identitymodel.selectors.dll",
":system.web.applicationservices.dll",
":system.messaging.dll",
":system.xml.linq.dll",
":system.runtime.durableinstancing.dll",
":system.serviceprocess.dll",
":system.net.http.dll",
":system.servicemodel.activation.dll",
":system.security.dll",
]
)
net_stdlib(
name = "system.servicemodel.routing.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Routing.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Routing.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
":system.dll",
":system.runtime.durableinstancing.dll",
":system.runtime.serialization.dll",
]
)
net_stdlib(
name = "system.servicemodel.web.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Web.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceModel.Web.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.runtime.serialization.dll",
":system.xml.linq.dll",
":system.web.extensions.dll",
":system.servicemodel.activation.dll",
":system.core.dll",
":system.servicemodel.channels.dll",
]
)
net_stdlib(
name = "system.serviceprocess.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceProcess.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.ServiceProcess.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.configuration.install.dll",
":system.drawing.dll",
]
)
net_stdlib(
name = "system.speech.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Speech.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Speech.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.transactions.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Transactions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Transactions.dll",
deps = [
":mscorlib.dll",
":system.enterpriseservices.dll",
":system.dll",
]
)
net_stdlib(
name = "system.web.abstractions.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Abstractions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Abstractions.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.web.applicationservices.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.ApplicationServices.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.ApplicationServices.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.web.datavisualization.design.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.DataVisualization.Design.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.DataVisualization.Design.dll",
deps = [
":mscorlib.dll",
":system.web.datavisualization.dll",
":system.drawing.dll",
":system.dll",
":system.drawing.design.dll",
]
)
net_stdlib(
name = "system.web.datavisualization.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.DataVisualization.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.DataVisualization.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.drawing.dll",
]
)
net_stdlib(
name = "system.web.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":system.core.dll",
":system.directoryservices.dll",
":system.enterpriseservices.dll",
":system.web.regularexpressions.dll",
":system.web.applicationservices.dll",
":system.componentmodel.dataannotations.dll",
":system.directoryservices.protocols.dll",
":system.security.dll",
":system.serviceprocess.dll",
":system.web.services.dll",
":microsoft.build.utilities.v4.0.dll",
":microsoft.build.framework.dll",
":microsoft.build.tasks.v4.0.dll",
":system.runtime.caching.dll",
]
)
net_stdlib(
name = "system.web.dynamicdata.design.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.DynamicData.Design.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.DynamicData.Design.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.web.dynamicdata.dll",
":system.core.dll",
":system.drawing.dll",
]
)
net_stdlib(
name = "system.web.dynamicdata.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.DynamicData.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.DynamicData.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":system.web.extensions.dll",
":system.core.dll",
":system.data.linq.dll",
":system.componentmodel.dataannotations.dll",
":system.web.entity.dll",
":system.data.entity.dll",
":system.xml.linq.dll",
]
)
net_stdlib(
name = "system.web.entity.design.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Entity.Design.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Entity.Design.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":system.web.entity.dll",
":system.data.entity.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.web.entity.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Entity.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Entity.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":system.web.extensions.dll",
":system.data.entity.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.web.extensions.design.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Extensions.Design.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Extensions.Design.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":system.web.extensions.dll",
":system.data.linq.dll",
]
)
net_stdlib(
name = "system.web.extensions.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Extensions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Extensions.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.dll",
":system.web.services.dll",
":system.core.dll",
":system.runtime.serialization.dll",
":system.data.linq.dll",
":system.web.applicationservices.dll",
":system.servicemodel.activation.dll",
":system.data.services.client.dll",
":system.data.entity.dll",
]
)
net_stdlib(
name = "system.web.mobile.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Mobile.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Mobile.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.drawing.dll",
":system.drawing.design.dll",
":system.web.regularexpressions.dll",
]
)
net_stdlib(
name = "system.web.regularexpressions.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.RegularExpressions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.RegularExpressions.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.web.routing.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Routing.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Routing.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.web.services.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Services.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Web.Services.dll",
deps = [
":mscorlib.dll",
":system.enterpriseservices.dll",
":system.dll",
":system.directoryservices.dll",
]
)
net_stdlib(
name = "system.windows.controls.ribbon.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Controls.Ribbon.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Controls.Ribbon.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":windowsbase.dll",
":presentationcore.dll",
":system.dll",
":uiautomationprovider.dll",
":uiautomationtypes.dll",
]
)
net_stdlib(
name = "system.windows.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.windows.forms.datavisualization.design.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Forms.DataVisualization.Design.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Forms.DataVisualization.Design.dll",
deps = [
":mscorlib.dll",
":system.windows.forms.datavisualization.dll",
":system.drawing.dll",
":system.dll",
":system.drawing.design.dll",
]
)
net_stdlib(
name = "system.windows.forms.datavisualization.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Forms.DataVisualization.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Forms.DataVisualization.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.drawing.dll",
]
)
net_stdlib(
name = "system.windows.forms.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Forms.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Forms.dll",
deps = [
":mscorlib.dll",
":system.drawing.dll",
":system.security.dll",
":system.dll",
":system.core.dll",
":accessibility.dll",
":system.deployment.dll",
":system.runtime.serialization.formatters.soap.dll",
]
)
net_stdlib(
name = "system.windows.input.manipulations.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Input.Manipulations.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Input.Manipulations.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.windows.presentation.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Presentation.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Windows.Presentation.dll",
deps = [
":mscorlib.dll",
":system.dll",
":windowsbase.dll",
":system.addin.contract.dll",
":presentationcore.dll",
":system.addin.dll",
]
)
net_stdlib(
name = "system.workflow.activities.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Workflow.Activities.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Workflow.Activities.dll",
deps = [
":mscorlib.dll",
":system.workflow.componentmodel.dll",
":system.dll",
":system.drawing.dll",
":system.workflow.runtime.dll",
":system.web.services.dll",
":system.directoryservices.dll",
":system.web.applicationservices.dll",
]
)
net_stdlib(
name = "system.workflow.componentmodel.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Workflow.ComponentModel.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Workflow.ComponentModel.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.drawing.dll",
":microsoft.build.utilities.v4.0.dll",
":microsoft.build.framework.dll",
":system.core.dll",
":microsoft.build.tasks.v4.0.dll",
]
)
net_stdlib(
name = "system.workflow.runtime.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Workflow.Runtime.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Workflow.Runtime.dll",
deps = [
":mscorlib.dll",
":system.workflow.componentmodel.dll",
":system.activities.dll",
":system.dll",
":system.core.dll",
":system.xml.linq.dll",
":system.runtime.serialization.dll",
":system.messaging.dll",
]
)
net_stdlib(
name = "system.workflowservices.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.WorkflowServices.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.WorkflowServices.dll",
deps = [
":mscorlib.dll",
":system.workflow.componentmodel.dll",
":system.workflow.runtime.dll",
":system.dll",
":system.identitymodel.dll",
":system.drawing.dll",
":system.runtime.serialization.dll",
":system.servicemodel.activities.dll",
":system.activities.dll",
":system.servicemodel.activation.dll",
":system.messaging.dll",
]
)
net_stdlib(
name = "system.xaml.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Xaml.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Xaml.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.xml.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Xml.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Xml.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.data.sqlxml.dll",
]
)
net_stdlib(
name = "system.xml.linq.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Xml.Linq.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Xml.Linq.dll",
deps = [
":mscorlib.dll",
":system.runtime.serialization.dll",
":system.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.xml.serialization.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Xml.Serialization.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/System.Xml.Serialization.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "uiautomationclient.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/UIAutomationClient.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/UIAutomationClient.dll",
deps = [
":mscorlib.dll",
":windowsbase.dll",
":uiautomationtypes.dll",
":uiautomationprovider.dll",
":system.dll",
":accessibility.dll",
]
)
net_stdlib(
name = "uiautomationclientsideproviders.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/UIAutomationClientsideProviders.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/UIAutomationClientsideProviders.dll",
deps = [
":mscorlib.dll",
":uiautomationclient.dll",
":windowsbase.dll",
":accessibility.dll",
":system.dll",
":uiautomationprovider.dll",
":uiautomationtypes.dll",
]
)
net_stdlib(
name = "uiautomationprovider.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/UIAutomationProvider.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/UIAutomationProvider.dll",
deps = [
":mscorlib.dll",
":uiautomationtypes.dll",
":windowsbase.dll",
]
)
net_stdlib(
name = "uiautomationtypes.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/UIAutomationTypes.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/UIAutomationTypes.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "windowsbase.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/WindowsBase.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/WindowsBase.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":system.dll",
":accessibility.dll",
":system.core.dll",
":system.security.dll",
]
)
net_stdlib(
name = "windowsformsintegration.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/WindowsFormsIntegration.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/WindowsFormsIntegration.dll",
deps = [
":mscorlib.dll",
":system.xaml.dll",
":windowsbase.dll",
":presentationcore.dll",
":system.drawing.dll",
":system.dll",
":uiautomationprovider.dll",
]
)
net_stdlib(
name = "xamlbuildtask.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/XamlBuildTask.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/XamlBuildTask.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
":system.xaml.dll",
":microsoft.build.utilities.v4.0.dll",
":microsoft.build.framework.dll",
":system.xml.linq.dll",
]
)
net_stdlib(
name = "system.collections.concurrent.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Collections.Concurrent.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Collections.Concurrent.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.collections.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Collections.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Collections.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
":system.dll",
]
)
net_stdlib(
name = "system.componentmodel.annotations.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ComponentModel.Annotations.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ComponentModel.Annotations.dll",
deps = [
":mscorlib.dll",
":system.componentmodel.dataannotations.dll",
]
)
net_stdlib(
name = "system.componentmodel.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ComponentModel.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ComponentModel.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.componentmodel.eventbasedasync.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ComponentModel.EventBasedAsync.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ComponentModel.EventBasedAsync.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.diagnostics.contracts.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Diagnostics.Contracts.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Diagnostics.Contracts.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.diagnostics.debug.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Diagnostics.Debug.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Diagnostics.Debug.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.diagnostics.tools.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Diagnostics.Tools.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Diagnostics.Tools.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.diagnostics.tracing.dll",
version = "4.0.20.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Diagnostics.Tracing.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Diagnostics.Tracing.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.dynamic.runtime.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Dynamic.Runtime.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Dynamic.Runtime.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.globalization.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Globalization.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Globalization.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.io.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.IO.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.IO.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.linq.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Linq.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Linq.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.linq.expressions.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Linq.Expressions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Linq.Expressions.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.linq.parallel.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Linq.Parallel.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Linq.Parallel.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.linq.queryable.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Linq.Queryable.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Linq.Queryable.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.net.networkinformation.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Net.NetworkInformation.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Net.NetworkInformation.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.net.primitives.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Net.Primitives.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Net.Primitives.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.net.requests.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Net.Requests.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Net.Requests.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.net.webheadercollection.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Net.WebHeaderCollection.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Net.WebHeaderCollection.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.objectmodel.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ObjectModel.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ObjectModel.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.reflection.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.dll",
deps = [
":mscorlib.dll",
]
)
# .NET Framework 4.7 facade reference assemblies (System.Reflection.* and
# System.Resources.*), each exposed as a net_stdlib target. Every target points
# `ref` and `stdlib_path` at the same facade DLL inside the
# Microsoft.NETFramework.ReferenceAssemblies.net47 package and depends at
# minimum on :mscorlib.dll.
net_stdlib(
name = "system.reflection.emit.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Emit.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Emit.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.reflection.emit.ilgeneration.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Emit.ILGeneration.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Emit.ILGeneration.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.reflection.emit.lightweight.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Emit.Lightweight.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Emit.Lightweight.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.reflection.extensions.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Extensions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Extensions.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.reflection.primitives.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Primitives.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Reflection.Primitives.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.resources.resourcemanager.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Resources.ResourceManager.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Resources.ResourceManager.dll",
deps = [
":mscorlib.dll",
]
)
# System.Runtime.* facade targets, including the serialization facades.
# Versions differ per facade (4.0.0.0 / 4.0.10.0 / 4.0.20.0) — they are the
# assembly versions baked into the net47 reference assemblies.
# NOTE(review): the deps beyond :mscorlib.dll presumably mirror each facade's
# forwarding targets (e.g. System.Runtime → System, System.Core) — confirm
# against the reference-assembly metadata before changing them.
net_stdlib(
name = "system.runtime.dll",
version = "4.0.20.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
":system.dll",
":system.componentmodel.composition.dll",
]
)
net_stdlib(
name = "system.runtime.extensions.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Extensions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Extensions.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.runtime.handles.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Handles.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Handles.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.runtime.interopservices.dll",
version = "4.0.20.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.InteropServices.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.InteropServices.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
":system.dll",
]
)
net_stdlib(
name = "system.runtime.interopservices.windowsruntime.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.InteropServices.WindowsRuntime.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.InteropServices.WindowsRuntime.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.runtime.numerics.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Numerics.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Numerics.dll",
deps = [
":mscorlib.dll",
":system.numerics.dll",
]
)
net_stdlib(
name = "system.runtime.serialization.json.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Serialization.Json.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Serialization.Json.dll",
deps = [
":mscorlib.dll",
":system.runtime.serialization.dll",
]
)
net_stdlib(
name = "system.runtime.serialization.primitives.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Serialization.Primitives.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Serialization.Primitives.dll",
deps = [
":mscorlib.dll",
":system.runtime.serialization.dll",
]
)
net_stdlib(
name = "system.runtime.serialization.xml.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Serialization.Xml.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Runtime.Serialization.Xml.dll",
deps = [
":mscorlib.dll",
":system.runtime.serialization.dll",
]
)
# System.Security.Principal and the System.ServiceModel.* (WCF) facade
# targets. All of these facades depend only on :mscorlib.dll.
net_stdlib(
name = "system.security.principal.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Security.Principal.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Security.Principal.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.servicemodel.duplex.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.Duplex.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.Duplex.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.servicemodel.http.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.Http.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.Http.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.servicemodel.nettcp.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.NetTcp.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.NetTcp.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.servicemodel.primitives.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.Primitives.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.Primitives.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.servicemodel.security.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.Security.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.ServiceModel.Security.dll",
deps = [
":mscorlib.dll",
]
)
# System.Text.*, System.Threading.*, and System.Xml.* facade targets.
# Facades that forward into assemblies other than mscorlib list those
# assemblies as additional deps (e.g. System.Xml.XDocument → system.xml.linq).
net_stdlib(
name = "system.text.encoding.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Text.Encoding.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Text.Encoding.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.text.encoding.extensions.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Text.Encoding.Extensions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Text.Encoding.Extensions.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.text.regularexpressions.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Text.RegularExpressions.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Text.RegularExpressions.dll",
deps = [
":mscorlib.dll",
":system.dll",
]
)
net_stdlib(
name = "system.threading.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Threading.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Threading.dll",
deps = [
":mscorlib.dll",
":system.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.threading.tasks.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Threading.Tasks.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Threading.Tasks.dll",
deps = [
":mscorlib.dll",
":system.core.dll",
]
)
net_stdlib(
name = "system.threading.tasks.parallel.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Threading.Tasks.Parallel.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Threading.Tasks.Parallel.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.threading.timer.dll",
version = "4.0.0.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Threading.Timer.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Threading.Timer.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.xml.readerwriter.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Xml.ReaderWriter.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Xml.ReaderWriter.dll",
deps = [
":mscorlib.dll",
]
)
net_stdlib(
name = "system.xml.xdocument.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Xml.XDocument.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Xml.XDocument.dll",
deps = [
":mscorlib.dll",
":system.xml.linq.dll",
]
)
net_stdlib(
name = "system.xml.xmlserializer.dll",
version = "4.0.10.0",
dotnet_context_data = context_data,
ref = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Xml.XmlSerializer.dll",
stdlib_path = "@Microsoft.NETFramework.ReferenceAssemblies.net47.1.0.0//:build/.NETFramework/v4.7/Facades/System.Xml.XmlSerializer.dll",
deps = [
":mscorlib.dll",
]
)
| 44.484951 | 165 | 0.612121 | 11,254 | 104,940 | 5.626 | 0.015817 | 0.02129 | 0.231225 | 0.260128 | 0.973608 | 0.95674 | 0.929101 | 0.897828 | 0.876601 | 0.833531 | 0 | 0.04197 | 0.235982 | 104,940 | 2,358 | 166 | 44.503817 | 0.74773 | 0 | 0 | 0.601188 | 0 | 0.155282 | 0.581199 | 0.496245 | 0 | 0 | 0 | 0 | 0 | 1 | 0.000424 | false | 0 | 0 | 0 | 0.000424 | 0.000424 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
312eeddb3109f18b5f11a636ced509281adb542c | 5,736 | py | Python | tests/sentry/api/serializers/test_team.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2019-05-28T06:18:03.000Z | 2019-05-28T06:18:03.000Z | tests/sentry/api/serializers/test_team.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 6 | 2018-10-19T10:04:23.000Z | 2019-12-09T20:29:12.000Z | tests/sentry/api/serializers/test_team.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2020-07-03T00:52:19.000Z | 2020-07-03T00:52:19.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.team import TeamWithProjectsSerializer
from sentry.testutils import TestCase
class TeamSerializerTest(TestCase):
    """Tests for the default Team serializer.

    The four role-specific tests were copy-pasted variants of one scenario;
    they now share a single parameterized helper.
    """

    def _assert_access_flags(self, role, has_access_when_closed):
        """Run the shared access-flag scenario for one organization role.

        role: org member role to create, or None to use the default role
            (what the original ``test_member_access`` did).
        has_access_when_closed: expected ``hasAccess`` once the org's
            ``allow_joinleave`` flag is cleared (open membership disabled).
        """
        user = self.create_user(username='foo')
        organization = self.create_organization()
        member_kwargs = {'user': user, 'organization': organization}
        if role is not None:
            member_kwargs['role'] = role
        self.create_member(**member_kwargs)
        team = self.create_team(organization=organization)

        result = serialize(team, user)
        result.pop('dateCreated')
        # With open membership (the default) every org member has access.
        assert result['hasAccess'] is True
        assert result['isMember'] is False

        organization.flags.allow_joinleave = False
        organization.save()
        result = serialize(team, user)
        # After changing to allow_joinleave=False access depends on the role.
        assert result['hasAccess'] is has_access_when_closed
        assert result['isMember'] is False

        self.create_team_membership(user=user, team=team)
        result = serialize(team, user)
        # Explicit team membership always grants access.
        assert result['hasAccess'] is True
        assert result['isMember'] is True

    def test_simple(self):
        user = self.create_user(username='foo')
        organization = self.create_organization(owner=user)
        team = self.create_team(organization=organization)

        result = serialize(team, user)
        result.pop('dateCreated')

        assert result == {
            'slug': team.slug,
            'name': team.name,
            'hasAccess': True,
            'isPending': False,
            'isMember': False,
            'id': six.text_type(team.id),
            'avatar': {
                'avatarType': 'letter_avatar',
                'avatarUuid': None,
            },
        }

    def test_member_access(self):
        # Default-role members lose access once open membership is disabled.
        self._assert_access_flags(None, False)

    def test_admin_access(self):
        # Admins are team-scoped, so they also lose access.
        self._assert_access_flags('admin', False)

    def test_manager_access(self):
        # Managers keep org-wide access regardless of open membership.
        self._assert_access_flags('manager', True)

    def test_owner_access(self):
        # Owners keep org-wide access regardless of open membership.
        self._assert_access_flags('owner', True)
class TeamWithProjectsSerializerTest(TestCase):
    def test_simple(self):
        """TeamWithProjectsSerializer embeds the team's serialized projects."""
        user = self.create_user(username='foo')
        org = self.create_organization(owner=user)
        team = self.create_team(organization=org)
        proj_foo = self.create_project(teams=[team], organization=org, name='foo')
        proj_bar = self.create_project(teams=[team], organization=org, name='bar')

        result = serialize(team, user, TeamWithProjectsSerializer())
        result.pop('dateCreated')

        # don't compare dateCreated because of mysql
        expected_projects = serialize([proj_bar, proj_foo], user)
        for entry in expected_projects + result['projects']:
            entry.pop('dateCreated')

        expected = {
            'slug': team.slug,
            'name': team.name,
            'hasAccess': True,
            'isPending': False,
            'isMember': False,
            'id': six.text_type(team.id),
            'projects': expected_projects,
            'avatar': {
                'avatarType': 'letter_avatar',
                'avatarUuid': None,
            },
        }
        assert result == expected
| 34.347305 | 91 | 0.632497 | 604 | 5,736 | 5.903974 | 0.13245 | 0.078519 | 0.074593 | 0.090297 | 0.845766 | 0.845766 | 0.82221 | 0.82221 | 0.791924 | 0.791924 | 0 | 0.000714 | 0.267434 | 5,736 | 166 | 92 | 34.554217 | 0.84793 | 0.062064 | 0 | 0.788618 | 0 | 0 | 0.092941 | 0 | 0 | 0 | 0 | 0 | 0.211382 | 1 | 0.04878 | false | 0 | 0.04065 | 0 | 0.105691 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.