code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
def run_faq(domain_file="config/faq_domain.yml",
            training_data_file='data/stories.md'):
    """Train the FAQ dialogue agent from story data and persist it.

    The agent combines a MemoizationPolicy (exact-history recall) with a
    KerasPolicy (generalizing RNN policy) and is saved to ./models/dialogue.
    """
    dialogue_policies = [
        MemoizationPolicy(max_history=2),
        KerasPolicy(max_history=3, epochs=100, batch_size=50),
    ]
    agent = Agent(domain_file, policies=dialogue_policies)
    training_data = agent.load_data(training_data_file)
    agent.train(training_data)
    agent.persist('./models/dialogue')
if __name__ == '__main__':
    # Configure root logging so rasa_core training progress is visible.
    logging.basicConfig(level="INFO")
    run_faq()
| [
"logging.basicConfig",
"rasa_core.policies.keras_policy.KerasPolicy",
"rasa_core.policies.memoization.MemoizationPolicy"
] | [((700, 733), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""INFO"""'}), "(level='INFO')\n", (719, 733), False, 'import logging\n'), ((440, 472), 'rasa_core.policies.memoization.MemoizationPolicy', 'MemoizationPolicy', ([], {'max_history': '(2)'}), '(max_history=2)\n', (457, 472), False, 'from rasa_core.policies.memoization import MemoizationPolicy\n'), ((474, 527), 'rasa_core.policies.keras_policy.KerasPolicy', 'KerasPolicy', ([], {'max_history': '(3)', 'epochs': '(100)', 'batch_size': '(50)'}), '(max_history=3, epochs=100, batch_size=50)\n', (485, 527), False, 'from rasa_core.policies.keras_policy import KerasPolicy\n')] |
# fmt: off
import logging
from pathlib import Path
from farm.data_handler.data_silo import DataSilo
from farm.data_handler.processor import RegressionProcessor, TextPairClassificationProcessor
from farm.experiment import initialize_optimizer
from farm.infer import Inferencer
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.language_model import LanguageModel
from farm.modeling.prediction_head import RegressionHead, TextClassificationHead
from farm.modeling.tokenization import Tokenizer
from farm.train import Trainer
from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results
from farm.evaluation.msmarco_passage_farm import msmarco_evaluation
def text_pair_classification():
    """Train and evaluate a BERT text-pair classification model on the
    MSMarco passage-ranking data with FARM.

    Pipeline: configure logging and public MLflow tracking, optionally
    reformat the raw MSMarco dumps (one-off), build the
    tokenizer/processor/data silo, train an AdaptiveModel, save it, then run
    inference on the dev set and write + evaluate MSMarco-format predictions.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO)
    # Track this run on deepset's public MLflow server.
    ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
    ml_logger.init_experiment(experiment_name="Public_FARM", run_name="Run_text_pair_classification")
    ##########################
    ########## Settings
    ##########################
    set_all_seeds(seed=42)
    device, n_gpu = initialize_device_settings(use_cuda=True)
    n_epochs = 2
    batch_size = 64
    evaluate_every = 500
    lang_model = "bert-base-cased"
    label_list = ["0", "1"]
    train_filename = "train.tsv"
    dev_filename = "dev_200k.tsv"
    # The source data can be found here https://github.com/microsoft/MSMARCO-Passage-Ranking
    generate_data = False
    data_dir = Path("../data/msmarco_passage")
    predictions_raw_filename = "predictions_raw.txt"
    predictions_filename = "predictions.txt"
    train_source_filename = "triples.train.1m.tsv"
    qrels_filename = "qrels.dev.tsv"
    queries_filename = "queries.dev.tsv"
    passages_filename = "collection.tsv"
    top1000_filename = "top1000.dev"
    # 0. Preprocess and save MSMarco data in a format that can be ingested by FARM models. Only needs to be done once!
    # The final format is a tsv file with 3 columns (text, text_b and label)
    if generate_data:
        reformat_msmarco_train(data_dir / train_source_filename,
                               data_dir / train_filename)
        reformat_msmarco_dev(data_dir / queries_filename,
                             data_dir / passages_filename,
                             data_dir / qrels_filename,
                             data_dir / top1000_filename,
                             data_dir / dev_filename)
    # 1.Create a tokenizer
    tokenizer = Tokenizer.load(
        pretrained_model_name_or_path=lang_model,
        do_lower_case=False)
    # 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
    # Evaluation during training will be performed on a slice of the train set
    # We will be using the msmarco dev set as our final evaluation set
    processor = TextPairClassificationProcessor(tokenizer=tokenizer,
                                                label_list=label_list,
                                                metric="f1_macro",
                                                train_filename=train_filename,
                                                test_filename=None,
                                                dev_split=0.001,
                                                max_seq_len=128,
                                                data_dir=data_dir,
                                                delimiter="\t")
    # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
    data_silo = DataSilo(
        processor=processor,
        batch_size=batch_size)
    # 4. Create an AdaptiveModel
    # a) which consists of a pretrained language model as a basis
    language_model = LanguageModel.load(lang_model)
    # b) and a prediction head on top that is suited for our task
    # Class weights compensate for the label imbalance in the training slice.
    prediction_head = TextClassificationHead(num_labels=len(label_list),
                                             class_weights=data_silo.calculate_class_weights(
                                                 task_name="text_classification"),
                                             )
    model = AdaptiveModel(
        language_model=language_model,
        prediction_heads=[prediction_head],
        embeds_dropout_prob=0.1,
        lm_output_types=["per_sequence_continuous"],
        device=device)
    # 5. Create an optimizer
    model, optimizer, lr_schedule = initialize_optimizer(
        model=model,
        learning_rate=1e-5,
        device=device,
        n_batches=len(data_silo.loaders["train"]),
        n_epochs=n_epochs)
    # 6. Feed everything to the Trainer, which keeps care of growing our model into powerful plant and evaluates it from time to time
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        data_silo=data_silo,
        epochs=n_epochs,
        n_gpu=n_gpu,
        lr_schedule=lr_schedule,
        evaluate_every=evaluate_every,
        device=device)
    # 7. Let it grow
    trainer.train()
    # 8. Hooray! You have a model. Store it:
    save_dir = Path("saved_models/passage_ranking_model")
    model.save(save_dir)
    processor.save(save_dir)
    # 9. Load it & harvest your fruits (Inference)
    # Add your own text adapted to the dataset you provide
    model = Inferencer.load(save_dir, gpu=True, max_seq_len=128, batch_size=128)
    result = model.inference_from_file(data_dir / dev_filename)
    write_msmarco_results(result, save_dir / predictions_raw_filename)
    # Official MSMarco evaluation on the raw predictions.
    msmarco_evaluation(preds_file=save_dir / predictions_raw_filename,
                       dev_file=data_dir / dev_filename,
                       qrels_file=data_dir / qrels_filename,
                       output_file=save_dir / predictions_filename)
    model.close_multiprocessing_pool()
# Run the full example when executed as a script.
if __name__ == "__main__":
    text_pair_classification()
# fmt: on
| [
"logging.basicConfig",
"farm.data_handler.data_silo.DataSilo",
"farm.utils.reformat_msmarco_dev",
"pathlib.Path",
"farm.utils.write_msmarco_results",
"farm.utils.set_all_seeds",
"farm.evaluation.msmarco_passage_farm.msmarco_evaluation",
"farm.infer.Inferencer.load",
"farm.modeling.tokenization.Token... | [((802, 945), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (821, 945), False, 'import logging\n'), ((978, 1040), 'farm.utils.MLFlowLogger', 'MLFlowLogger', ([], {'tracking_uri': '"""https://public-mlflow.deepset.ai/"""'}), "(tracking_uri='https://public-mlflow.deepset.ai/')\n", (990, 1040), False, 'from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results\n'), ((1234, 1256), 'farm.utils.set_all_seeds', 'set_all_seeds', ([], {'seed': '(42)'}), '(seed=42)\n', (1247, 1256), False, 'from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results\n'), ((1277, 1318), 'farm.utils.initialize_device_settings', 'initialize_device_settings', ([], {'use_cuda': '(True)'}), '(use_cuda=True)\n', (1303, 1318), False, 'from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results\n'), ((1646, 1677), 'pathlib.Path', 'Path', (['"""../data/msmarco_passage"""'], {}), "('../data/msmarco_passage')\n", (1650, 1677), False, 'from pathlib import Path\n'), ((2654, 2731), 'farm.modeling.tokenization.Tokenizer.load', 'Tokenizer.load', ([], {'pretrained_model_name_or_path': 'lang_model', 'do_lower_case': '(False)'}), '(pretrained_model_name_or_path=lang_model, do_lower_case=False)\n', (2668, 2731), False, 'from farm.modeling.tokenization import Tokenizer\n'), ((3023, 3245), 'farm.data_handler.processor.TextPairClassificationProcessor', 'TextPairClassificationProcessor', ([], {'tokenizer': 'tokenizer', 'label_list': 'label_list', 'metric': 
'"""f1_macro"""', 'train_filename': 'train_filename', 'test_filename': 'None', 'dev_split': '(0.001)', 'max_seq_len': '(128)', 'data_dir': 'data_dir', 'delimiter': '"""\t"""'}), "(tokenizer=tokenizer, label_list=label_list,\n metric='f1_macro', train_filename=train_filename, test_filename=None,\n dev_split=0.001, max_seq_len=128, data_dir=data_dir, delimiter='\\t')\n", (3054, 3245), False, 'from farm.data_handler.processor import RegressionProcessor, TextPairClassificationProcessor\n'), ((3802, 3854), 'farm.data_handler.data_silo.DataSilo', 'DataSilo', ([], {'processor': 'processor', 'batch_size': 'batch_size'}), '(processor=processor, batch_size=batch_size)\n', (3810, 3854), False, 'from farm.data_handler.data_silo import DataSilo\n'), ((3993, 4023), 'farm.modeling.language_model.LanguageModel.load', 'LanguageModel.load', (['lang_model'], {}), '(lang_model)\n', (4011, 4023), False, 'from farm.modeling.language_model import LanguageModel\n'), ((4400, 4575), 'farm.modeling.adaptive_model.AdaptiveModel', 'AdaptiveModel', ([], {'language_model': 'language_model', 'prediction_heads': '[prediction_head]', 'embeds_dropout_prob': '(0.1)', 'lm_output_types': "['per_sequence_continuous']", 'device': 'device'}), "(language_model=language_model, prediction_heads=[\n prediction_head], embeds_dropout_prob=0.1, lm_output_types=[\n 'per_sequence_continuous'], device=device)\n", (4413, 4575), False, 'from farm.modeling.adaptive_model import AdaptiveModel\n'), ((4994, 5167), 'farm.train.Trainer', 'Trainer', ([], {'model': 'model', 'optimizer': 'optimizer', 'data_silo': 'data_silo', 'epochs': 'n_epochs', 'n_gpu': 'n_gpu', 'lr_schedule': 'lr_schedule', 'evaluate_every': 'evaluate_every', 'device': 'device'}), '(model=model, optimizer=optimizer, data_silo=data_silo, epochs=\n n_epochs, n_gpu=n_gpu, lr_schedule=lr_schedule, evaluate_every=\n evaluate_every, device=device)\n', (5001, 5167), False, 'from farm.train import Trainer\n'), ((5326, 5368), 'pathlib.Path', 'Path', 
(['"""saved_models/passage_ranking_model"""'], {}), "('saved_models/passage_ranking_model')\n", (5330, 5368), False, 'from pathlib import Path\n'), ((5549, 5617), 'farm.infer.Inferencer.load', 'Inferencer.load', (['save_dir'], {'gpu': '(True)', 'max_seq_len': '(128)', 'batch_size': '(128)'}), '(save_dir, gpu=True, max_seq_len=128, batch_size=128)\n', (5564, 5617), False, 'from farm.infer import Inferencer\n'), ((5687, 5753), 'farm.utils.write_msmarco_results', 'write_msmarco_results', (['result', '(save_dir / predictions_raw_filename)'], {}), '(result, save_dir / predictions_raw_filename)\n', (5708, 5753), False, 'from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results\n'), ((5759, 5951), 'farm.evaluation.msmarco_passage_farm.msmarco_evaluation', 'msmarco_evaluation', ([], {'preds_file': '(save_dir / predictions_raw_filename)', 'dev_file': '(data_dir / dev_filename)', 'qrels_file': '(data_dir / qrels_filename)', 'output_file': '(save_dir / predictions_filename)'}), '(preds_file=save_dir / predictions_raw_filename, dev_file\n =data_dir / dev_filename, qrels_file=data_dir / qrels_filename,\n output_file=save_dir / predictions_filename)\n', (5777, 5951), False, 'from farm.evaluation.msmarco_passage_farm import msmarco_evaluation\n'), ((2210, 2297), 'farm.utils.reformat_msmarco_train', 'reformat_msmarco_train', (['(data_dir / train_source_filename)', '(data_dir / train_filename)'], {}), '(data_dir / train_source_filename, data_dir /\n train_filename)\n', (2232, 2297), False, 'from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results\n'), ((2333, 2501), 'farm.utils.reformat_msmarco_dev', 'reformat_msmarco_dev', (['(data_dir / queries_filename)', '(data_dir / passages_filename)', '(data_dir / qrels_filename)', '(data_dir / top1000_filename)', '(data_dir / dev_filename)'], {}), '(data_dir / 
queries_filename, data_dir /\n passages_filename, data_dir / qrels_filename, data_dir /\n top1000_filename, data_dir / dev_filename)\n', (2353, 2501), False, 'from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results\n')] |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import json
import pytz
def _get_data(file):
return pd.read_csv(file)
def _get_prices(data):
    """Normalize the raw Bitcoin price frame into a tidy DataFrame.

    Converts the unix ``Timestamp`` column into a ``time`` column localized
    to UTC and converted to Europe/Rome, drops unused columns, and renames
    the remaining columns to snake_case.
    """
    df = data
    rome_tz = pytz.timezone('Europe/Rome')
    df['time'] = pd.to_datetime(df['Timestamp'], unit='s')
    # BUG FIX: the localize/convert result was previously computed and
    # discarded; assign it back so 'time' is actually tz-aware in Rome time.
    df['time'] = df['time'].dt.tz_localize(pytz.UTC).dt.tz_convert(rome_tz)
    del(df['Timestamp'])
    del(df['Weighted_Price'])
    df = df.rename(columns={'Volume_(BTC)': 'volume_btc', 'Volume_(Currency)': 'volume_fiat'})
    df = df.rename(columns={'Open': 'open', 'Close': 'close'})
    df = df.rename(columns={'Low': 'low', 'High': 'high'})
    return df
def _group(data, step=4):
data['group_info'] = ['data' if (index+1)%step != 0 else 'target' for index, _ in data.iterrows()]
data['type'] = data['group_info'].astype('category')
del(data['group_info'])
return data
def _bundle_groups(data, index, group_size):
return np.concatenate([data.iloc[index + a] for a in range(0, group_size)])
def scale(data_frame):
    """Min-max scale ``data_frame`` into [0, 1] and return it.

    NOTE(review): the augmented assignments mutate the caller's object in
    place — the frame is shifted by its column-wise min, then divided by the
    max of the *shifted* values — and the same object is returned.
    """
    data_frame -= data_frame.min()
    data_frame /= data_frame.max()
    return data_frame
def remove_fields(data, fields):
    """Delete each column named in ``fields`` from ``data`` (in place) and return it."""
    for name in fields:
        del data[name]
    return data
def split_to_X_y(data, groups_size):
    """Split rows into feature bundles ``X`` and targets ``y``.

    Every ``groups_size``-th row becomes a target (only its 'close' value is
    kept); the ``groups_size - 1`` rows preceding it are flattened into one
    feature vector via ``_bundle_groups``.
    """
    semi_grouped = _group(data, step=groups_size)
    grouped_data = semi_grouped.loc[semi_grouped['type'] == 'data']
    grouped_targets = semi_grouped.loc[semi_grouped['type'] == 'target']
    del(grouped_data['type'])
    del(grouped_targets['type'])
    # Make them their own DataFrame to avoid operating on copies of `semi_grouped` one:
    grouped_data = grouped_data.copy()
    grouped_targets = grouped_targets.copy()
    usable_items = groups_size - 1
    X = [_bundle_groups(grouped_data, index, usable_items) for index in range(0, len(grouped_data), usable_items)]
    y = grouped_targets['close'].values.tolist()
    return X, y
def cut_trailing(data, groups_size=4):
    """Drop rows off the end until ``len(data)`` is a multiple of ``groups_size``."""
    # Rows are removed one at a time by their positional label; this assumes
    # the default RangeIndex produced by read_csv.
    while len(data) % groups_size:
        last_label = len(data) - 1
        data = data.drop(last_label)
    return data
def load():
    """Load the Coinbase minute-level CSV and return an enriched ``prices``
    DataFrame with calendar feature columns (day_of_week, day_of_month,
    day_of_month_scaled, month, time_of_day).

    NOTE(review): the original docstring claimed this returns ``X`` and ``y``
    training arrays; it actually returns the prices DataFrame — use
    ``split_to_X_y`` for the X/y split.
    """
    # Get data:
    data = _get_data('coinbaseUSD_1-min_data_2014-12-01_to_2018-03-27.csv')
    prices = _get_prices(data)
    prices['day_of_week'] = prices['time'].dt.dayofweek
    prices['day_of_month'] = prices['time'].dt.day
    prices['day_of_month_scaled'] = prices['time'].dt.day / prices['time'].dt.days_in_month
    prices['month'] = prices['time'].dt.month
    # Hour of day, extracted from the 'HH:MM:SS' string representation.
    prices['time_of_day'] = prices['time'].dt.time.apply(lambda time: str(time).split(':')[0]).astype(int)
    return prices
| [
"pytz.timezone",
"pandas.to_datetime",
"pandas.read_csv"
] | [((120, 137), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (131, 137), True, 'import pandas as pd\n'), ((193, 221), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Rome"""'], {}), "('Europe/Rome')\n", (206, 221), False, 'import pytz\n'), ((240, 281), 'pandas.to_datetime', 'pd.to_datetime', (["df['Timestamp']"], {'unit': '"""s"""'}), "(df['Timestamp'], unit='s')\n", (254, 281), True, 'import pandas as pd\n')] |
import sys
from django.core.management import CommandError, call_command
from django.test import TestCase
from .side_effects import bad_database_check
try:
from unittest.mock import patch
except ImportError:
from mock import patch
# Python 2.7 support
if sys.version_info > (3, 0):
from io import StringIO
else:
from io import BytesIO as StringIO
class CommandTestCase(TestCase):
    """Tests for the ``healthcheck`` management command."""

    def test_command(self):
        """A healthy run prints OK to stdout."""
        buffer = StringIO()
        call_command("healthcheck", stdout=buffer)
        self.assertIn("OK", buffer.getvalue())

    def test_command_failed(self):
        """A failing database check makes the command raise CommandError."""
        patched_check = patch(
            "django_alive.checks.check_database", side_effect=bad_database_check
        )
        with patched_check:
            with self.assertRaises(CommandError):
                call_command("healthcheck")
| [
"mock.patch",
"io.BytesIO",
"django.core.management.call_command"
] | [((444, 454), 'io.BytesIO', 'StringIO', ([], {}), '()\n', (452, 454), True, 'from io import BytesIO as StringIO\n'), ((463, 502), 'django.core.management.call_command', 'call_command', (['"""healthcheck"""'], {'stdout': 'out'}), "('healthcheck', stdout=out)\n", (475, 502), False, 'from django.core.management import CommandError, call_command\n'), ((596, 671), 'mock.patch', 'patch', (['"""django_alive.checks.check_database"""'], {'side_effect': 'bad_database_check'}), "('django_alive.checks.check_database', side_effect=bad_database_check)\n", (601, 671), False, 'from mock import patch\n'), ((761, 788), 'django.core.management.call_command', 'call_command', (['"""healthcheck"""'], {}), "('healthcheck')\n", (773, 788), False, 'from django.core.management import CommandError, call_command\n')] |
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# importing the modules
import os.path
import shutil
import datetime
import time
import re
# Getting the current working directory.
# NOTE(review): `src_dir` is never used below — `folder_to_track` calls
# os.getcwd() again itself; candidate for removal.
src_dir = os.getcwd()
# Announce start-up.
print("########## File-backup started ###########")
class FileHandler(FileSystemEventHandler):
    """Watchdog handler that mirrors every created/modified file into the
    backup destination, prefixing the copy with a timestamp.

    NOTE(review): relies on the module-level globals ``folder_to_track`` and
    ``destination`` defined in this script.
    """

    def on_modified(self, event):
        # Only files are backed up; directory events are ignored.
        if not event.is_directory:
            self.copy_file(event.src_path)

    def on_created(self, event):
        if not event.is_directory:
            self.copy_file(event.src_path)

    def copy_file(self, src):
        """Copy ``src`` into the destination tree under a timestamped name."""
        destination_sub_path = self.extract_changed_sub_path(folder_to_track, src)
        sub_path_list = destination_sub_path.split(os.path.sep)
        changed_file_name = sub_path_list.pop()
        path_to_file = f"{os.path.sep.join(sub_path_list)}{os.path.sep}"
        timestamp = datetime.datetime.now().strftime("%d-%m-%y-%H-%M") # not the prettiest datetime-format, but it's filename-friendly
        target = f"{destination}{path_to_file}{timestamp}-{changed_file_name}"
        print(os.linesep)
        print(src)
        print(" |")
        print(" |")
        print(" V")
        print(target)
        print(os.linesep)
        print("----------------------------------------")
        # Ensure the mirrored directory structure exists before copying.
        os.makedirs(f"{destination}{path_to_file}", exist_ok = True)
        shutil.copy(src, target)

    def extract_changed_sub_path(self, base_path, changed_path):
        """Return the part of ``changed_path`` below ``base_path``, using the
        platform path separator."""
        # This turns the annoying "\" into "/", in case we are on windows
        base_path = base_path.replace(os.path.sep, "/")
        changed_path = changed_path.replace(os.path.sep, "/")
        # BUG FIX: escape the base path so regex metacharacters in directory
        # names (e.g. '(', '[', '+') cannot break or corrupt the lookbehind.
        regex = re.compile(f"(?<={re.escape(base_path)})(.*)")
        match = re.search(regex, changed_path)
        sub_path = match.group().replace("/", os.path.sep)
        return sub_path
# Source and destination trees live under ./testsubject relative to the cwd.
folder_to_track = f"{os.getcwd()}{os.path.sep}testsubject{os.path.sep}source"
destination = f"{os.getcwd()}{os.path.sep}testsubject{os.path.sep}destination"
print(f"{folder_to_track} --> {destination}")
event_handler = FileHandler()
# Watch the source tree recursively; the observer runs on its own thread.
observer = Observer()
observer.schedule(event_handler, folder_to_track, recursive=True)
observer.start()
try:
    # Keep the main thread alive until Ctrl+C.
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()
observer.join()
print("########## File-backup ended ###########")
print("########## File-backup ended ###########") | [
"re.compile",
"time.sleep",
"datetime.datetime.now",
"shutil.copy",
"watchdog.observers.Observer",
"re.search"
] | [((2328, 2338), 'watchdog.observers.Observer', 'Observer', ([], {}), '()\n', (2336, 2338), False, 'from watchdog.observers import Observer\n'), ((1495, 1519), 'shutil.copy', 'shutil.copy', (['src', 'target'], {}), '(src, target)\n', (1506, 1519), False, 'import shutil\n'), ((1912, 1947), 're.compile', 're.compile', (['f"""(?<={base_path})(.*)"""'], {}), "(f'(?<={base_path})(.*)')\n", (1922, 1947), False, 'import re\n'), ((1964, 1994), 're.search', 're.search', (['regex', 'changed_path'], {}), '(regex, changed_path)\n', (1973, 1994), False, 'import re\n'), ((2452, 2466), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2462, 2466), False, 'import time\n'), ((981, 1004), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1002, 1004), False, 'import datetime\n')] |
import os
import pytest
import torch
import torch.distributed as dist
from ignite.distributed.comp_models import has_native_dist_support
if not has_native_dist_support:
pytest.skip("Skip if no native dist support", allow_module_level=True)
else:
from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env
# tests from https://github.com/LLNL/py-hostlist/blob/master/hostlist/unittest_hostlist.py
@pytest.mark.parametrize(
"hostlist, expected",
[
("localhost", "localhost"),
("compute!:b24_[1-2].r", "compute!:b24_1.r,compute!:b24_2.r"),
("quartz[4-8]", "quartz4,quartz5,quartz6,quartz7,quartz8"),
("c1001a-[11,17]", "c1001a-11,c1001a-17"),
("c1001a-s[11,17]", "c1001a-s11,c1001a-s17"),
("c1009a-s17,c1010a-s11", "c1009a-s17,c1010a-s11"),
(
"gpu-compute-on-demand-dy-g4dnxlarge-[1-4]",
"gpu-compute-on-demand-dy-g4dnxlarge-1,"
"gpu-compute-on-demand-dy-g4dnxlarge-2,"
"gpu-compute-on-demand-dy-g4dnxlarge-3,"
"gpu-compute-on-demand-dy-g4dnxlarge-4",
),
(
"node[18-19,1-16,21-22]",
"node1,node2,node3,node4,node5,"
"node6,node7,node8,node9,node10,"
"node11,node12,node13,node14,node15,"
"node16,node18,node19,node21,node22",
),
(
"node[4-8,12,16-20,22,24-26]",
"node4,node5,node6,node7,node8,"
"node12,node16,node17,node18,"
"node19,node20,node22,node24,"
"node25,node26",
),
("machine2-[02-4]vm1", "machine2-02vm1,machine2-03vm1,machine2-04vm1"),
(
"machine2-[02-3]vm1, machine4-[0003-5].vml2",
"machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2",
),
("machine2-[009-11]vm1", "machine2-009vm1,machine2-010vm1,machine2-011vm1"),
("node[1,2,3]", "node1,node2,node3"),
(
"compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]",
"compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,"
"compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,"
"compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,"
"compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13",
),
],
)
def test_expand_hostlist(hostlist, expected):
    """Each hostlist expression expands to the expected comma-separated hosts."""
    expanded = _expand_hostlist(hostlist)
    assert expanded == expected.split(",")
def test_expand_hostlist_invalid():
    """Malformed bracket expressions are rejected with ValueError."""
    bad_expression = "invalid[]"
    with pytest.raises(ValueError, match=r"hostlist invalid"):
        _expand_hostlist(bad_expression)
@pytest.mark.distributed
def test__native_dist_model():
    """available_backends must mirror torch.distributed's compiled-in backends."""
    backends = _NativeDistModel.available_backends
    probes = [
        ("nccl", dist.is_nccl_available),
        ("gloo", dist.is_gloo_available),
        ("mpi", dist.is_mpi_available),
    ]
    for name, is_available in probes:
        assert (name in backends) == is_available()
    # Unknown backend names must be rejected with a helpful error.
    with pytest.raises(ValueError, match=r"Backend should be one of"):
        _NativeDistModel.create_from_backend("abc")
@pytest.mark.distributed
@pytest.mark.skipif(not dist.is_nccl_available(), reason="Skip if nccl not available")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_nccl_but_no_gpu(mock_gpu_is_not_available):
    # Requesting the nccl backend without visible CUDA devices must fail fast.
    with pytest.raises(RuntimeError, match=r"Nccl backend is required but no cuda capable devices"):
        _NativeDistModel(backend="nccl")
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_config():
    import os
    from datetime import timedelta
    # RANK alone is not a complete torch.distributed env configuration
    # (MASTER_ADDR/MASTER_PORT/WORLD_SIZE are missing), so creation must fail.
    os.environ["RANK"] = "1"
    with pytest.raises(RuntimeError, match=r"PyTorch distributed configuration should define env variables"):
        _NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
    del os.environ["RANK"]
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_slurm_config():
    import os
    from datetime import timedelta
    # A SLURM job id without the rest of the SLURM_* variables is incomplete.
    os.environ["SLURM_JOB_ID"] = "1"
    with pytest.raises(RuntimeError, match=r"SLURM distributed configuration is missing"):
        _NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
    # Explicit rank/world_size must not be mixed with SLURM configuration.
    with pytest.raises(ValueError, match=r"Arguments rank and world_size should not be specified with SLURM"):
        _NativeDistModel.create_from_backend(
            backend="gloo", timeout=timedelta(seconds=10), rank=1, init_method="", world_size=1
        )
    # Complete single-node SLURM config plus a conflicting RANK env var:
    # creation succeeds but warns about the detected variables.
    os.environ["SLURM_PROCID"] = "0"
    os.environ["SLURM_LOCALID"] = "0"
    os.environ["SLURM_NTASKS"] = "1"
    os.environ["SLURM_JOB_NODELIST"] = "localhost"
    os.environ["SLURM_JOB_NUM_NODES"] = "1"
    os.environ["RANK"] = "1"
    with pytest.warns(UserWarning, match=r"We detected the following env variables"):
        model = _NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
        model.finalize()
    # Clean up every variable this test injected.
    del os.environ["SLURM_JOB_ID"]
    del os.environ["SLURM_PROCID"]
    del os.environ["SLURM_LOCALID"]
    del os.environ["SLURM_NTASKS"]
    del os.environ["SLURM_JOB_NODELIST"]
    del os.environ["SLURM_JOB_NUM_NODES"]
    del os.environ["RANK"]
def _assert_model(model, true_conf):
assert model.device() == torch.device(true_conf["device"])
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
def _test__native_dist_model_create_from_backend_no_dist(backend, true_device):
    # Helper: create a single-process group from `backend`, verify the model
    # reports a 1-node / 1-proc world, then tear the group down.
    from datetime import timedelta
    model = _NativeDistModel.create_from_backend(backend=backend, timeout=timedelta(seconds=20))
    assert dist.is_available() and dist.is_initialized()
    assert dist.get_backend() == backend
    _assert_model(
        model,
        {
            "device": true_device,
            "local_rank": 0,
            "rank": 0,
            "world_size": 1,
            "node_index": 0,
            "nnodes": 1,
            "nproc_per_node": 1,
        },
    )
    model.finalize()
def _test__native_dist_model_create_from_backend_dist(init_method, local_rank, rank, world_size, backend, true_device):
    # Helper: create a process group via env:// (or an explicit init_method)
    # and verify configuration, double-init protection and env cleanup.
    import os
    from datetime import timedelta
    timeout = timedelta(seconds=20)
    os.environ["RANK"] = f"{rank}"
    assert "MASTER_ADDR" not in os.environ
    assert "MASTER_PORT" not in os.environ
    model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout, init_method=init_method)
    assert dist.is_available() and dist.is_initialized()
    assert dist.get_backend() == backend
    # Creating a second model while the default group exists must fail.
    with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
        _NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
    _assert_model(
        model,
        {
            "device": true_device,
            "local_rank": local_rank,
            "rank": rank,
            "world_size": world_size,
            "node_index": 0,
            "nnodes": 1,
            "nproc_per_node": world_size,
        },
    )
    # Passing no init_method must fall back to the env:// rendezvous.
    if init_method is None:
        assert model._init_method == "env://"
    else:
        assert model._init_method == init_method
    model.finalize()
    del os.environ["RANK"]
    assert "MASTER_ADDR" not in os.environ
    assert "MASTER_PORT" not in os.environ
    assert "RANK" not in os.environ
def _test__native_dist_model_create_from_backend_slurm(local_rank, rank, world_size, backend, true_device):
    # Helper: emulate a single-node SLURM allocation via SLURM_* env vars and
    # check that backend creation picks the configuration up from them.
    import os
    from datetime import timedelta
    timeout = timedelta(seconds=20)
    assert "MASTER_ADDR" not in os.environ
    assert "MASTER_PORT" not in os.environ
    # WORLD_SIZE/LOCAL_RANK must not coexist with SLURM vars; restored at the end.
    del os.environ["WORLD_SIZE"]
    del os.environ["LOCAL_RANK"]
    os.environ["SLURM_JOB_ID"] = "15000"
    os.environ["SLURM_PROCID"] = str(rank)
    os.environ["SLURM_LOCALID"] = str(local_rank)
    os.environ["SLURM_NTASKS"] = str(world_size)
    os.environ["SLURM_JOB_NODELIST"] = "localhost"
    os.environ["SLURM_JOB_NUM_NODES"] = "1"
    model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
    assert dist.is_available() and dist.is_initialized()
    assert dist.get_backend() == backend
    # Creating a second model while the default group exists must fail.
    with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
        _NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
    _assert_model(
        model,
        {
            "device": true_device,
            "local_rank": local_rank,
            "rank": rank,
            "world_size": world_size,
            "node_index": 0,
            "nnodes": 1,
            "nproc_per_node": world_size,
        },
    )
    model.finalize()
    # Remove the injected SLURM configuration and restore the launcher's vars.
    del os.environ["SLURM_JOB_ID"]
    del os.environ["SLURM_PROCID"]
    del os.environ["SLURM_LOCALID"]
    del os.environ["SLURM_NTASKS"]
    del os.environ["SLURM_JOB_NODELIST"]
    del os.environ["SLURM_JOB_NUM_NODES"]
    assert "MASTER_ADDR" not in os.environ
    assert "MASTER_PORT" not in os.environ
    assert "RANK" not in os.environ
    os.environ["WORLD_SIZE"] = str(world_size)
    os.environ["LOCAL_RANK"] = str(local_rank)
def _test__native_dist_model_create_from_context_no_local_rank():
    # Without LOCAL_RANK in the env and without an externally-set local rank,
    # attaching to an existing group should warn that local rank is inferred.
    if "LOCAL_RANK" in os.environ:
        del os.environ["LOCAL_RANK"]
    from ignite.distributed.comp_models.base import ComputationModel
    if ComputationModel._ext_local_rank is not None:
        ComputationModel._ext_local_rank = None
    with pytest.warns(UserWarning, match=r"Local rank information for native distributed setting will be initialized"):
        _NativeDistModel.create_from_context()
def _test__native_dist_model_create_from_context_env_local_rank(true_conf):
    """Attach to an existing group with LOCAL_RANK taken from the environment."""
    import os
    injected_lrank = "LOCAL_RANK" not in os.environ
    if injected_lrank:
        os.environ["LOCAL_RANK"] = str(true_conf["local_rank"])
    model = _NativeDistModel.create_from_context()
    _assert_model(model, true_conf)
    # Only remove the variable if this helper added it.
    if injected_lrank:
        del os.environ["LOCAL_RANK"]
def _test__native_dist_model_create_from_context_set_local_rank(true_conf):
    """Attach with the local rank set programmatically instead of via env."""
    from ignite.distributed.comp_models.base import ComputationModel
    # Stash any LOCAL_RANK set by the launcher so it can be restored.
    saved_lrank = os.environ.pop("LOCAL_RANK", None)
    ComputationModel._ext_local_rank = true_conf["local_rank"]
    model = _NativeDistModel.create_from_context()
    _assert_model(model, true_conf)
    ComputationModel._ext_local_rank = None
    if saved_lrank is not None:
        os.environ["LOCAL_RANK"] = saved_lrank
def _test__native_dist_model_create_from_context_no_dist(true_backend, true_device):
    # Without an initialized default group, create_from_context returns None.
    assert _NativeDistModel.create_from_context() is None
    dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=1, rank=0)
    dist.barrier()
    _test__native_dist_model_create_from_context_no_local_rank()
    true_conf = {
        "device": true_device,
        "local_rank": 0,
        "rank": 0,
        "world_size": 1,
        "node_index": 0,
        "nnodes": 1,
        "nproc_per_node": 1,
    }
    _test__native_dist_model_create_from_context_env_local_rank(true_conf)
    _test__native_dist_model_create_from_context_set_local_rank(true_conf)
    dist.destroy_process_group()
def _test__native_dist_model_create_from_context_dist(local_rank, rank, world_size, true_backend, true_device):
    # Same as the no-dist variant but for a multi-process group; each worker
    # pins its own CUDA device when one is available.
    assert _NativeDistModel.create_from_context() is None
    dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=world_size, rank=rank)
    dist.barrier()
    if torch.cuda.is_available():
        torch.cuda.set_device(local_rank)
    true_conf = {
        "device": true_device,
        "local_rank": local_rank,
        "rank": rank,
        "world_size": world_size,
        "node_index": 0,
        "nnodes": 1,
        "nproc_per_node": world_size,
    }
    _test__native_dist_model_create_from_context_env_local_rank(true_conf)
    _test__native_dist_model_create_from_context_set_local_rank(true_conf)
    dist.destroy_process_group()
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
def test__native_dist_model_create_no_dist_gloo(clean_env):
    """Single-process gloo group: backend-first and context-first creation."""
    backend = "gloo"
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    _test__native_dist_model_create_from_backend_no_dist(backend, device)
    _test__native_dist_model_create_from_context_no_dist(backend, device)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_no_dist_nccl(clean_env):
    """nccl backend, single process: create model from backend and from context."""
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    for check in (
        _test__native_dist_model_create_from_backend_no_dist,
        _test__native_dist_model_create_from_context_no_dist,
    ):
        check("nccl", device)
@pytest.mark.distributed
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_gloo_1(init_method, get_fixed_dirname, local_rank, world_size):
    """gloo backend: create model from backend with several init methods (and SLURM when env-based)."""
    if init_method == "FILE":
        init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_gloo_1')}/shared"

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}")
    else:
        device = torch.device("cpu")
    _test__native_dist_model_create_from_backend_dist(init_method, local_rank, local_rank, world_size, "gloo", device)

    # The SLURM path only applies to the default (env-based) initialization.
    if init_method is None:
        _test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
def test__native_dist_model_create_dist_gloo_2(local_rank, world_size):
    """gloo backend: create model from an existing context and from a SLURM-like env."""
    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}")
    else:
        device = torch.device("cpu")
    for check in (
        _test__native_dist_model_create_from_context_dist,
        _test__native_dist_model_create_from_backend_slurm,
    ):
        check(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_nccl_1(init_method, get_fixed_dirname, local_rank, world_size):
    """nccl backend: create model from backend with several init methods (and SLURM when env-based)."""
    if init_method == "FILE":
        init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_nccl_1')}/shared"

    device = f"cuda:{local_rank}"
    _test__native_dist_model_create_from_backend_dist(init_method, local_rank, local_rank, world_size, "nccl", device)
    # The SLURM path only applies to the default (env-based) initialization.
    if init_method is None:
        _test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "nccl", device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_dist_nccl_2(local_rank, world_size):
    """nccl backend: create model from an existing distributed context."""
    device = f"cuda:{local_rank}"
    _test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "nccl", device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Skip if less than 2 GPUs")
def test__native_dist_model_warning_index_less_localrank(local_rank, world_size):
    """A device index below the local rank should trigger a warning on ``device()``."""
    assert _NativeDistModel.create_from_context() is None

    dist.init_process_group("nccl", "tcp://0.0.0.0:2222", world_size=world_size, rank=local_rank)
    dist.barrier()
    # We deliberately incorrectly set cuda device to 0
    torch.cuda.set_device(0)

    model = _NativeDistModel.create_from_context()
    assert isinstance(model, _NativeDistModel), f"{type(model)} vs _NativeDistModel"
    # Only rank 1 sees device index (0) < local rank (1) and should warn.
    if local_rank == 1:
        with pytest.warns(UserWarning, match=r"Current device index is less than current local rank."):
            model.device()

    dist.destroy_process_group()
def _test_dist_spawn_fn(local_rank, backend, world_size, device):
    """Worker body for spawn tests: verify the process-wide dist model is set up."""
    from ignite.distributed.utils import _model

    # The spawned worker must land in an initialized process group of the requested backend.
    assert dist.is_available() and dist.is_initialized()
    assert dist.get_backend() == backend

    assert isinstance(_model, _NativeDistModel), f"{type(_model)} vs _NativeDistModel"
    assert _model.get_local_rank() == local_rank
    assert _model.get_world_size() == world_size
    assert _model.device().type == torch.device(device).type
def _test__native_dist_model_spawn(backend, num_workers_per_machine, device, init_method=None, **spawn_kwargs):
    """Spawn ``num_workers_per_machine`` workers running the distributed check function."""
    launch_kwargs = dict(
        args=(backend, num_workers_per_machine, device),
        kwargs_dict={},
        backend=backend,
        nproc_per_node=num_workers_per_machine,
        init_method=init_method,
    )
    launch_kwargs.update(spawn_kwargs)
    _NativeDistModel.spawn(_test_dist_spawn_fn, **launch_kwargs)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize("init_method", [None, "env://", "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_gloo(init_method, dirname):
    """Spawn gloo workers with each init method; on CPU also exercise the fork start method."""
    if init_method == "FILE":
        init_method = f"file://{dirname}/shared"

    if torch.cuda.is_available():
        nproc = torch.cuda.device_count()
        device = torch.device("cuda")
    else:
        nproc = 4
        device = torch.device("cpu")
    _test__native_dist_model_spawn("gloo", num_workers_per_machine=nproc, device=device, init_method=init_method)
    # CUDA is incompatible with fork, so only try that start method on CPU.
    if device.type == "cpu":
        _test__native_dist_model_spawn(
            "gloo", num_workers_per_machine=nproc, device=device, start_method="fork", init_method=init_method
        )
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_nccl(init_method, dirname):
    """Spawn one nccl worker per visible GPU with each init method."""
    if init_method == "FILE":
        init_method = f"file://{dirname}/shared"

    _test__native_dist_model_spawn(
        "nccl",
        num_workers_per_machine=torch.cuda.device_count(),
        device="cuda",
        init_method=init_method,
    )
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_none(world_size):
    """Without an init method, passing rank/world_size explicitly must be rejected."""
    expected_msg = r"Arguments rank and world_size should be None"
    with pytest.raises(ValueError, match=expected_msg):
        _NativeDistModel.create_from_backend(backend="gloo", world_size=world_size)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_not_none(world_size, local_rank, get_fixed_dirname):
    """With an explicit init method, rank and world_size must both be provided."""
    init_method = f"file://{get_fixed_dirname('native_dist_model_init_method_is_not_none')}/shared"
    # Supplying only one of rank / world_size is invalid either way around.
    for partial_kwargs in ({"world_size": world_size}, {"rank": local_rank}):
        with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
            _NativeDistModel.create_from_backend(backend="gloo", init_method=init_method, **partial_kwargs)
@pytest.mark.parametrize(
    "environ, expected",
    [
        # fmt: off
        # usual SLURM env
        (
            {
                "SLURM_PROCID": "1", "SLURM_LOCALID": "1", "SLURM_NTASKS": "2", "SLURM_JOB_NUM_NODES": "1",
                "SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
            },
            [1, 1, 2, "c1", 17345]
        ),
        # usual SLURM env mnode
        (
            {
                "SLURM_PROCID": "5", "SLURM_LOCALID": "1", "SLURM_NTASKS": "8", "SLURM_JOB_NUM_NODES": "2",
                "SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
            },
            [5, 1, 8, "c1", 17345]
        ),
        # usual SLURM env 1 node, 1 task + torch.distributed.launch
        (
            {
                "SLURM_PROCID": "0", "SLURM_LOCALID": "0", "SLURM_NTASKS": "1", "SLURM_JOB_NUM_NODES": "1",
                "SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
                "MASTER_ADDR": "127.0.0.1", "MASTER_PORT": "2233", "RANK": "2", "LOCAL_RANK": "2", "WORLD_SIZE": "8",
            },
            [2, 2, 8, "127.0.0.1", 2233]
        ),
        # usual SLURM env + enroot's pytorch hook
        (
            {
                "SLURM_PROCID": "3", "SLURM_LOCALID": "3", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "1",
                "SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
                "MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "3", "WORLD_SIZE": "4",
            },
            [3, 3, 4, "c1", 12233]
        ),
        # usual SLURM env mnode + enroot's pytorch hook
        (
            {
                "SLURM_PROCID": "3", "SLURM_LOCALID": "1", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "2",
                "SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
                "MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "1", "WORLD_SIZE": "4"
            },
            [3, 1, 4, "c1", 12233]
        ),
        # fmt: on
    ],
)
def test__setup_ddp_vars_from_slurm_env(environ, expected):
    """Each SLURM-style env must resolve to the expected PyTorch DDP variables."""
    resolved = _setup_ddp_vars_from_slurm_env(environ)
    # `expected` lists values in the canonical DDP-variable order.
    expected_map = dict(
        zip(("RANK", "LOCAL_RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT"), expected)
    )
    for name, want in expected_map.items():
        assert name in resolved
        assert resolved[name] == want
def test__setup_ddp_vars_from_slurm_env_bad_configs():
    """Conflicting or malformed SLURM/DDP env combinations must raise or warn."""

    def slurm_env(procid, ntasks, nnodes, nodelist, **ddp_vars):
        # Build a minimal SLURM job description, optionally extended with DDP vars.
        env = {
            "SLURM_PROCID": procid,
            "SLURM_LOCALID": "1",
            "SLURM_NTASKS": ntasks,
            "SLURM_JOB_NUM_NODES": nnodes,
            "SLURM_JOB_NODELIST": nodelist,
            "SLURM_JOB_ID": "12345",
        }
        env.update(ddp_vars)
        return env

    inconsistent = r"Environment variable defined for PyTorch Distributed context is inconsistent"
    foreign_ddp = {
        "MASTER_ADDR": "another-addr",
        "MASTER_PORT": "12233",
        "RANK": "1",
        "LOCAL_RANK": "1",
        "WORLD_SIZE": "2",
    }

    # DDP vars disagreeing with the SLURM job description (multi-node case).
    with pytest.raises(RuntimeError, match=inconsistent):
        _setup_ddp_vars_from_slurm_env(slurm_env("3", "4", "2", "c1, c2", **foreign_ddp))

    # Same disagreement on a single node.
    with pytest.raises(RuntimeError, match=inconsistent):
        _setup_ddp_vars_from_slurm_env(slurm_env("1", "4", "1", "c1", **foreign_ddp))

    # Partial DDP vars (no master addr/port) should only warn.
    with pytest.warns(UserWarning, match=r"We detected the following env variables"):
        _setup_ddp_vars_from_slurm_env(
            slurm_env("3", "4", "2", "c1, c2", RANK="1", LOCAL_RANK="1", WORLD_SIZE="2")
        )

    # A node list that expands to nothing has no usable hostname.
    with pytest.raises(RuntimeError, match=r"No hostname detected in SLURM_JOB_NODELIST by ignite"):
        _setup_ddp_vars_from_slurm_env(slurm_env("1", "4", "1", "[]"))
| [
"torch.distributed.destroy_process_group",
"torch.cuda.device_count",
"torch.cuda.is_available",
"datetime.timedelta",
"torch.distributed.is_available",
"ignite.distributed.utils._model.get_world_size",
"torch.distributed.barrier",
"ignite.distributed.comp_models.native._setup_ddp_vars_from_slurm_env"... | [((468, 2074), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hostlist, expected"""', "[('localhost', 'localhost'), ('compute!:b24_[1-2].r',\n 'compute!:b24_1.r,compute!:b24_2.r'), ('quartz[4-8]',\n 'quartz4,quartz5,quartz6,quartz7,quartz8'), ('c1001a-[11,17]',\n 'c1001a-11,c1001a-17'), ('c1001a-s[11,17]', 'c1001a-s11,c1001a-s17'), (\n 'c1009a-s17,c1010a-s11', 'c1009a-s17,c1010a-s11'), (\n 'gpu-compute-on-demand-dy-g4dnxlarge-[1-4]',\n 'gpu-compute-on-demand-dy-g4dnxlarge-1,gpu-compute-on-demand-dy-g4dnxlarge-2,gpu-compute-on-demand-dy-g4dnxlarge-3,gpu-compute-on-demand-dy-g4dnxlarge-4'\n ), ('node[18-19,1-16,21-22]',\n 'node1,node2,node3,node4,node5,node6,node7,node8,node9,node10,node11,node12,node13,node14,node15,node16,node18,node19,node21,node22'\n ), ('node[4-8,12,16-20,22,24-26]',\n 'node4,node5,node6,node7,node8,node12,node16,node17,node18,node19,node20,node22,node24,node25,node26'\n ), ('machine2-[02-4]vm1',\n 'machine2-02vm1,machine2-03vm1,machine2-04vm1'), (\n 'machine2-[02-3]vm1, machine4-[0003-5].vml2',\n 'machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2'\n ), ('machine2-[009-11]vm1',\n 'machine2-009vm1,machine2-010vm1,machine2-011vm1'), ('node[1,2,3]',\n 'node1,node2,node3'), (\n 'compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]',\n 'compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13'\n )]"], {}), "('hostlist, expected', [('localhost', 'localhost'),\n ('compute!:b24_[1-2].r', 'compute!:b24_1.r,compute!:b24_2.r'), (\n 'quartz[4-8]', 'quartz4,quartz5,quartz6,quartz7,quartz8'), (\n 'c1001a-[11,17]', 'c1001a-11,c1001a-17'), ('c1001a-s[11,17]',\n 'c1001a-s11,c1001a-s17'), 
('c1009a-s17,c1010a-s11',\n 'c1009a-s17,c1010a-s11'), ('gpu-compute-on-demand-dy-g4dnxlarge-[1-4]',\n 'gpu-compute-on-demand-dy-g4dnxlarge-1,gpu-compute-on-demand-dy-g4dnxlarge-2,gpu-compute-on-demand-dy-g4dnxlarge-3,gpu-compute-on-demand-dy-g4dnxlarge-4'\n ), ('node[18-19,1-16,21-22]',\n 'node1,node2,node3,node4,node5,node6,node7,node8,node9,node10,node11,node12,node13,node14,node15,node16,node18,node19,node21,node22'\n ), ('node[4-8,12,16-20,22,24-26]',\n 'node4,node5,node6,node7,node8,node12,node16,node17,node18,node19,node20,node22,node24,node25,node26'\n ), ('machine2-[02-4]vm1',\n 'machine2-02vm1,machine2-03vm1,machine2-04vm1'), (\n 'machine2-[02-3]vm1, machine4-[0003-5].vml2',\n 'machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2'\n ), ('machine2-[009-11]vm1',\n 'machine2-009vm1,machine2-010vm1,machine2-011vm1'), ('node[1,2,3]',\n 'node1,node2,node3'), (\n 'compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]',\n 'compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13'\n )])\n", (491, 2074), False, 'import pytest\n'), ((3480, 3571), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Skip if launched as multiproc"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Skip if launched as multiproc')\n", (3498, 3571), False, 'import pytest\n'), ((3799, 3890), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Skip if launched as multiproc"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Skip if launched as multiproc')\n", (3817, 3890), False, 'import pytest\n'), ((4286, 4377), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Skip if launched as multiproc"""'}), 
"('WORLD_SIZE' in os.environ, reason=\n 'Skip if launched as multiproc')\n", (4304, 4377), False, 'import pytest\n'), ((12766, 12852), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Should be no-dist config"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Should be no-dist config')\n", (12784, 12852), False, 'import pytest\n'), ((13158, 13244), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Should be no-dist config"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Should be no-dist config')\n", (13176, 13244), False, 'import pytest\n'), ((13626, 13703), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init_method"""', "[None, 'tcp://0.0.0.0:22334', 'FILE']"], {}), "('init_method', [None, 'tcp://0.0.0.0:22334', 'FILE'])\n", (13649, 13703), False, 'import pytest\n'), ((14790, 14867), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init_method"""', "[None, 'tcp://0.0.0.0:22334', 'FILE']"], {}), "('init_method', [None, 'tcp://0.0.0.0:22334', 'FILE'])\n", (14813, 14867), False, 'import pytest\n'), ((17382, 17473), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Skip if launched as multiproc"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Skip if launched as multiproc')\n", (17400, 17473), False, 'import pytest\n'), ((17470, 17561), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init_method"""', "[None, 'env://', 'tcp://0.0.0.0:22334', 'FILE']"], {}), "('init_method', [None, 'env://',\n 'tcp://0.0.0.0:22334', 'FILE'])\n", (17493, 17561), False, 'import pytest\n'), ((18180, 18271), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Skip if launched as multiproc"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Skip if launched as multiproc')\n", (18198, 18271), False, 'import pytest\n'), ((18344, 18421), 'pytest.mark.parametrize', 'pytest.mark.parametrize', 
(['"""init_method"""', "[None, 'tcp://0.0.0.0:22334', 'FILE']"], {}), "('init_method', [None, 'tcp://0.0.0.0:22334', 'FILE'])\n", (18367, 18421), False, 'import pytest\n'), ((18794, 18885), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Skip if launched as multiproc"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Skip if launched as multiproc')\n", (18812, 18885), False, 'import pytest\n'), ((18882, 18975), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_native_dist_support)'], {'reason': '"""Skip if no native dist support"""'}), "(not has_native_dist_support, reason=\n 'Skip if no native dist support')\n", (18900, 18975), False, 'import pytest\n'), ((19235, 19326), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('WORLD_SIZE' in os.environ)"], {'reason': '"""Skip if launched as multiproc"""'}), "('WORLD_SIZE' in os.environ, reason=\n 'Skip if launched as multiproc')\n", (19253, 19326), False, 'import pytest\n'), ((19323, 19416), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_native_dist_support)'], {'reason': '"""Skip if no native dist support"""'}), "(not has_native_dist_support, reason=\n 'Skip if no native dist support')\n", (19341, 19416), False, 'import pytest\n'), ((20004, 21292), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""environ, expected"""', "[({'SLURM_PROCID': '1', 'SLURM_LOCALID': '1', 'SLURM_NTASKS': '2',\n 'SLURM_JOB_NUM_NODES': '1', 'SLURM_JOB_NODELIST': 'c1', 'SLURM_JOB_ID':\n '12345'}, [1, 1, 2, 'c1', 17345]), ({'SLURM_PROCID': '5',\n 'SLURM_LOCALID': '1', 'SLURM_NTASKS': '8', 'SLURM_JOB_NUM_NODES': '2',\n 'SLURM_JOB_NODELIST': 'c1, c2', 'SLURM_JOB_ID': '12345'}, [5, 1, 8,\n 'c1', 17345]), ({'SLURM_PROCID': '0', 'SLURM_LOCALID': '0',\n 'SLURM_NTASKS': '1', 'SLURM_JOB_NUM_NODES': '1', 'SLURM_JOB_NODELIST':\n 'c1', 'SLURM_JOB_ID': '12345', 'MASTER_ADDR': '127.0.0.1',\n 'MASTER_PORT': '2233', 'RANK': '2', 'LOCAL_RANK': '2', 'WORLD_SIZE':\n '8'}, [2, 2, 8, '127.0.0.1', 
2233]), ({'SLURM_PROCID': '3',\n 'SLURM_LOCALID': '3', 'SLURM_NTASKS': '4', 'SLURM_JOB_NUM_NODES': '1',\n 'SLURM_JOB_NODELIST': 'c1', 'SLURM_JOB_ID': '12345', 'MASTER_ADDR':\n 'c1', 'MASTER_PORT': '12233', 'RANK': '3', 'LOCAL_RANK': '3',\n 'WORLD_SIZE': '4'}, [3, 3, 4, 'c1', 12233]), ({'SLURM_PROCID': '3',\n 'SLURM_LOCALID': '1', 'SLURM_NTASKS': '4', 'SLURM_JOB_NUM_NODES': '2',\n 'SLURM_JOB_NODELIST': 'c1, c2', 'SLURM_JOB_ID': '12345', 'MASTER_ADDR':\n 'c1', 'MASTER_PORT': '12233', 'RANK': '3', 'LOCAL_RANK': '1',\n 'WORLD_SIZE': '4'}, [3, 1, 4, 'c1', 12233])]"], {}), "('environ, expected', [({'SLURM_PROCID': '1',\n 'SLURM_LOCALID': '1', 'SLURM_NTASKS': '2', 'SLURM_JOB_NUM_NODES': '1',\n 'SLURM_JOB_NODELIST': 'c1', 'SLURM_JOB_ID': '12345'}, [1, 1, 2, 'c1', \n 17345]), ({'SLURM_PROCID': '5', 'SLURM_LOCALID': '1', 'SLURM_NTASKS':\n '8', 'SLURM_JOB_NUM_NODES': '2', 'SLURM_JOB_NODELIST': 'c1, c2',\n 'SLURM_JOB_ID': '12345'}, [5, 1, 8, 'c1', 17345]), ({'SLURM_PROCID':\n '0', 'SLURM_LOCALID': '0', 'SLURM_NTASKS': '1', 'SLURM_JOB_NUM_NODES':\n '1', 'SLURM_JOB_NODELIST': 'c1', 'SLURM_JOB_ID': '12345', 'MASTER_ADDR':\n '127.0.0.1', 'MASTER_PORT': '2233', 'RANK': '2', 'LOCAL_RANK': '2',\n 'WORLD_SIZE': '8'}, [2, 2, 8, '127.0.0.1', 2233]), ({'SLURM_PROCID':\n '3', 'SLURM_LOCALID': '3', 'SLURM_NTASKS': '4', 'SLURM_JOB_NUM_NODES':\n '1', 'SLURM_JOB_NODELIST': 'c1', 'SLURM_JOB_ID': '12345', 'MASTER_ADDR':\n 'c1', 'MASTER_PORT': '12233', 'RANK': '3', 'LOCAL_RANK': '3',\n 'WORLD_SIZE': '4'}, [3, 3, 4, 'c1', 12233]), ({'SLURM_PROCID': '3',\n 'SLURM_LOCALID': '1', 'SLURM_NTASKS': '4', 'SLURM_JOB_NUM_NODES': '2',\n 'SLURM_JOB_NODELIST': 'c1, c2', 'SLURM_JOB_ID': '12345', 'MASTER_ADDR':\n 'c1', 'MASTER_PORT': '12233', 'RANK': '3', 'LOCAL_RANK': '1',\n 'WORLD_SIZE': '4'}, [3, 1, 4, 'c1', 12233])])\n", (20027, 21292), False, 'import pytest\n'), ((176, 246), 'pytest.skip', 'pytest.skip', (['"""Skip if no native dist support"""'], {'allow_module_level': '(True)'}), "('Skip if no native 
dist support', allow_module_level=True)\n", (187, 246), False, 'import pytest\n'), ((2844, 2868), 'torch.distributed.is_nccl_available', 'dist.is_nccl_available', ([], {}), '()\n', (2866, 2868), True, 'import torch.distributed as dist\n'), ((2980, 3004), 'torch.distributed.is_gloo_available', 'dist.is_gloo_available', ([], {}), '()\n', (3002, 3004), True, 'import torch.distributed as dist\n'), ((3116, 3139), 'torch.distributed.is_mpi_available', 'dist.is_mpi_available', ([], {}), '()\n', (3137, 3139), True, 'import torch.distributed as dist\n'), ((6923, 6944), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (6932, 6944), False, 'from datetime import timedelta\n'), ((7080, 7179), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_backend', '_NativeDistModel.create_from_backend', ([], {'backend': 'backend', 'timeout': 'timeout', 'init_method': 'init_method'}), '(backend=backend, timeout=timeout,\n init_method=init_method)\n', (7116, 7179), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((8243, 8264), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (8252, 8264), False, 'from datetime import timedelta\n'), ((8711, 8781), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_backend', '_NativeDistModel.create_from_backend', ([], {'backend': 'backend', 'timeout': 'timeout'}), '(backend=backend, timeout=timeout)\n', (8747, 8781), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((10575, 10613), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_context', '_NativeDistModel.create_from_context', ([], {}), '()\n', (10611, 10613), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((11065, 11103), 
'ignite.distributed.comp_models.native._NativeDistModel.create_from_context', '_NativeDistModel.create_from_context', ([], {}), '()\n', (11101, 11103), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((11404, 11489), 'torch.distributed.init_process_group', 'dist.init_process_group', (['true_backend', '"""tcp://0.0.0.0:2222"""'], {'world_size': '(1)', 'rank': '(0)'}), "(true_backend, 'tcp://0.0.0.0:2222', world_size=1,\n rank=0)\n", (11427, 11489), True, 'import torch.distributed as dist\n'), ((11490, 11504), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (11502, 11504), True, 'import torch.distributed as dist\n'), ((11927, 11955), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (11953, 11955), True, 'import torch.distributed as dist\n'), ((12134, 12232), 'torch.distributed.init_process_group', 'dist.init_process_group', (['true_backend', '"""tcp://0.0.0.0:2222"""'], {'world_size': 'world_size', 'rank': 'rank'}), "(true_backend, 'tcp://0.0.0.0:2222', world_size=\n world_size, rank=rank)\n", (12157, 12232), True, 'import torch.distributed as dist\n'), ((12232, 12246), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (12244, 12246), True, 'import torch.distributed as dist\n'), ((12254, 12279), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12277, 12279), False, 'import torch\n'), ((12709, 12737), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (12735, 12737), True, 'import torch.distributed as dist\n'), ((15979, 16076), 'torch.distributed.init_process_group', 'dist.init_process_group', (['"""nccl"""', '"""tcp://0.0.0.0:2222"""'], {'world_size': 'world_size', 'rank': 'local_rank'}), "('nccl', 'tcp://0.0.0.0:2222', world_size=world_size,\n rank=local_rank)\n", (16002, 16076), True, 'import torch.distributed as dist\n'), ((16077, 16091), 
'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (16089, 16091), True, 'import torch.distributed as dist\n'), ((16151, 16175), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (16172, 16175), False, 'import torch\n'), ((16189, 16227), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_context', '_NativeDistModel.create_from_context', ([], {}), '()\n', (16225, 16227), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((16474, 16502), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (16500, 16502), True, 'import torch.distributed as dist\n'), ((17084, 17303), 'ignite.distributed.comp_models.native._NativeDistModel.spawn', '_NativeDistModel.spawn', (['_test_dist_spawn_fn'], {'args': '(backend, num_workers_per_machine, device)', 'kwargs_dict': '{}', 'backend': 'backend', 'nproc_per_node': 'num_workers_per_machine', 'init_method': 'init_method'}), '(_test_dist_spawn_fn, args=(backend,\n num_workers_per_machine, device), kwargs_dict={}, backend=backend,\n nproc_per_node=num_workers_per_machine, init_method=init_method, **\n spawn_kwargs)\n', (17106, 17303), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((18594, 18619), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (18617, 18619), False, 'import torch\n'), ((22161, 22200), 'ignite.distributed.comp_models.native._setup_ddp_vars_from_slurm_env', '_setup_ddp_vars_from_slurm_env', (['environ'], {}), '(environ)\n', (22191, 22200), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((2528, 2554), 'ignite.distributed.comp_models.native._expand_hostlist', '_expand_hostlist', (['hostlist'], {}), '(hostlist)\n', (2544, 2554), False, 'from 
ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((2625, 2676), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""hostlist invalid"""'}), "(ValueError, match='hostlist invalid')\n", (2638, 2676), False, 'import pytest\n'), ((2687, 2716), 'ignite.distributed.comp_models.native._expand_hostlist', '_expand_hostlist', (['"""invalid[]"""'], {}), "('invalid[]')\n", (2703, 2716), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((3251, 3310), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Backend should be one of"""'}), "(ValueError, match='Backend should be one of')\n", (3264, 3310), False, 'import pytest\n'), ((3321, 3364), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_backend', '_NativeDistModel.create_from_backend', (['"""abc"""'], {}), "('abc')\n", (3357, 3364), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((3638, 3732), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Nccl backend is required but no cuda capable devices"""'}), "(RuntimeError, match=\n 'Nccl backend is required but no cuda capable devices')\n", (3651, 3732), False, 'import pytest\n'), ((3738, 3770), 'ignite.distributed.comp_models.native._NativeDistModel', '_NativeDistModel', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (3754, 3770), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((3416, 3440), 'torch.distributed.is_nccl_available', 'dist.is_nccl_available', ([], {}), '()\n', (3438, 3440), True, 'import torch.distributed as dist\n'), ((4037, 4140), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""PyTorch distributed configuration should define env variables"""'}), "(RuntimeError, match=\n 
'PyTorch distributed configuration should define env variables')\n", (4050, 4140), False, 'import pytest\n'), ((4538, 4617), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""SLURM distributed configuration is missing"""'}), "(RuntimeError, match='SLURM distributed configuration is missing')\n", (4551, 4617), False, 'import pytest\n'), ((4722, 4826), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Arguments rank and world_size should not be specified with SLURM"""'}), "(ValueError, match=\n 'Arguments rank and world_size should not be specified with SLURM')\n", (4735, 4826), False, 'import pytest\n'), ((5224, 5298), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""We detected the following env variables"""'}), "(UserWarning, match='We detected the following env variables')\n", (5236, 5298), False, 'import pytest\n'), ((5747, 5780), 'torch.device', 'torch.device', (["true_conf['device']"], {}), "(true_conf['device'])\n", (5759, 5780), False, 'import torch\n'), ((6362, 6381), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (6379, 6381), True, 'import torch.distributed as dist\n'), ((6386, 6407), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (6405, 6407), True, 'import torch.distributed as dist\n'), ((6419, 6437), 'torch.distributed.get_backend', 'dist.get_backend', ([], {}), '()\n', (6435, 6437), True, 'import torch.distributed as dist\n'), ((7188, 7207), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (7205, 7207), True, 'import torch.distributed as dist\n'), ((7212, 7233), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (7231, 7233), True, 'import torch.distributed as dist\n'), ((7245, 7263), 'torch.distributed.get_backend', 'dist.get_backend', ([], {}), '()\n', (7261, 7263), True, 'import torch.distributed as dist\n'), ((7285, 7389), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Can not 
create new distributed process group if default one is"""'}), "(RuntimeError, match=\n 'Can not create new distributed process group if default one is')\n", (7298, 7389), False, 'import pytest\n'), ((7395, 7465), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_backend', '_NativeDistModel.create_from_backend', ([], {'backend': 'backend', 'timeout': 'timeout'}), '(backend=backend, timeout=timeout)\n', (7431, 7465), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((8794, 8813), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (8811, 8813), True, 'import torch.distributed as dist\n'), ((8818, 8839), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (8837, 8839), True, 'import torch.distributed as dist\n'), ((8851, 8869), 'torch.distributed.get_backend', 'dist.get_backend', ([], {}), '()\n', (8867, 8869), True, 'import torch.distributed as dist\n'), ((8891, 8995), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Can not create new distributed process group if default one is"""'}), "(RuntimeError, match=\n 'Can not create new distributed process group if default one is')\n", (8904, 8995), False, 'import pytest\n'), ((9001, 9071), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_backend', '_NativeDistModel.create_from_backend', ([], {'backend': 'backend', 'timeout': 'timeout'}), '(backend=backend, timeout=timeout)\n', (9037, 9071), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((10155, 10273), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Local rank information for native distributed setting will be initialized"""'}), "(UserWarning, match=\n 'Local rank information for native distributed setting will be initialized'\n )\n", (10167, 10273), False, 'import pytest\n'), ((10274, 10312), 
'ignite.distributed.comp_models.native._NativeDistModel.create_from_context', '_NativeDistModel.create_from_context', ([], {}), '()\n', (10310, 10312), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((11352, 11390), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_context', '_NativeDistModel.create_from_context', ([], {}), '()\n', (11388, 11390), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((12082, 12120), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_context', '_NativeDistModel.create_from_context', ([], {}), '()\n', (12118, 12120), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((12289, 12322), 'torch.cuda.set_device', 'torch.cuda.set_device', (['local_rank'], {}), '(local_rank)\n', (12310, 12322), False, 'import torch\n'), ((13260, 13285), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (13283, 13285), False, 'import torch\n'), ((14733, 14758), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14756, 14758), False, 'import torch\n'), ((15472, 15497), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15495, 15497), False, 'import torch\n'), ((15927, 15965), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_context', '_NativeDistModel.create_from_context', ([], {}), '()\n', (15963, 15965), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((15767, 15792), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15790, 15792), False, 'import torch\n'), ((16631, 16650), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (16648, 16650), True, 'import torch.distributed as 
dist\n'), ((16655, 16676), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (16674, 16676), True, 'import torch.distributed as dist\n'), ((16688, 16706), 'torch.distributed.get_backend', 'dist.get_backend', ([], {}), '()\n', (16704, 16706), True, 'import torch.distributed as dist\n'), ((16818, 16841), 'ignite.distributed.utils._model.get_local_rank', '_model.get_local_rank', ([], {}), '()\n', (16839, 16841), False, 'from ignite.distributed.utils import _model\n'), ((16867, 16890), 'ignite.distributed.utils._model.get_world_size', '_model.get_world_size', ([], {}), '()\n', (16888, 16890), False, 'from ignite.distributed.utils import _model\n'), ((17741, 17766), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17764, 17766), False, 'import torch\n'), ((17712, 17737), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (17735, 17737), False, 'import torch\n'), ((18287, 18312), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (18310, 18312), False, 'import torch\n'), ((19041, 19120), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Arguments rank and world_size should be None"""'}), "(ValueError, match='Arguments rank and world_size should be None')\n", (19054, 19120), False, 'import pytest\n'), ((19131, 19206), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_backend', '_NativeDistModel.create_from_backend', ([], {'backend': '"""gloo"""', 'world_size': 'world_size'}), "(backend='gloo', world_size=world_size)\n", (19167, 19206), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((19617, 19695), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Both rank and world_size should be provided"""'}), "(ValueError, match='Both rank and world_size should be provided')\n", (19630, 19695), False, 'import pytest\n'), ((19706, 19810), 
'ignite.distributed.comp_models.native._NativeDistModel.create_from_backend', '_NativeDistModel.create_from_backend', ([], {'backend': '"""gloo"""', 'world_size': 'world_size', 'init_method': 'init_method'}), "(backend='gloo', world_size=world_size,\n init_method=init_method)\n", (19742, 19810), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((19817, 19895), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Both rank and world_size should be provided"""'}), "(ValueError, match='Both rank and world_size should be provided')\n", (19830, 19895), False, 'import pytest\n'), ((19906, 20004), 'ignite.distributed.comp_models.native._NativeDistModel.create_from_backend', '_NativeDistModel.create_from_backend', ([], {'backend': '"""gloo"""', 'rank': 'local_rank', 'init_method': 'init_method'}), "(backend='gloo', rank=local_rank,\n init_method=init_method)\n", (19942, 20004), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((22383, 22506), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Environment variable defined for PyTorch Distributed context is inconsistent"""'}), "(RuntimeError, match=\n 'Environment variable defined for PyTorch Distributed context is inconsistent'\n )\n", (22396, 22506), False, 'import pytest\n'), ((22938, 22977), 'ignite.distributed.comp_models.native._setup_ddp_vars_from_slurm_env', '_setup_ddp_vars_from_slurm_env', (['environ'], {}), '(environ)\n', (22968, 22977), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((22988, 23111), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Environment variable defined for PyTorch Distributed context is inconsistent"""'}), "(RuntimeError, match=\n 'Environment variable defined for PyTorch Distributed context is 
inconsistent'\n )\n", (23001, 23111), False, 'import pytest\n'), ((23539, 23578), 'ignite.distributed.comp_models.native._setup_ddp_vars_from_slurm_env', '_setup_ddp_vars_from_slurm_env', (['environ'], {}), '(environ)\n', (23569, 23578), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((23589, 23663), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""We detected the following env variables"""'}), "(UserWarning, match='We detected the following env variables')\n", (23601, 23663), False, 'import pytest\n'), ((24012, 24051), 'ignite.distributed.comp_models.native._setup_ddp_vars_from_slurm_env', '_setup_ddp_vars_from_slurm_env', (['environ'], {}), '(environ)\n', (24042, 24051), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((24062, 24156), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""No hostname detected in SLURM_JOB_NODELIST by ignite"""'}), "(RuntimeError, match=\n 'No hostname detected in SLURM_JOB_NODELIST by ignite')\n", (24075, 24156), False, 'import pytest\n'), ((24409, 24448), 'ignite.distributed.comp_models.native._setup_ddp_vars_from_slurm_env', '_setup_ddp_vars_from_slurm_env', (['environ'], {}), '(environ)\n', (24439, 24448), False, 'from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env\n'), ((6327, 6348), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (6336, 6348), False, 'from datetime import timedelta\n'), ((12946, 12971), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12969, 12971), False, 'import torch\n'), ((13414, 13439), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13437, 13439), False, 'import torch\n'), ((13988, 14013), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14011, 
14013), False, 'import torch\n'), ((14435, 14460), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14458, 14460), False, 'import torch\n'), ((16351, 16444), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Current device index is less than current local rank."""'}), "(UserWarning, match=\n 'Current device index is less than current local rank.')\n", (16363, 16444), False, 'import pytest\n'), ((16916, 16931), 'ignite.distributed.utils._model.device', '_model.device', ([], {}), '()\n', (16929, 16931), False, 'from ignite.distributed.utils import _model\n'), ((16940, 16960), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (16952, 16960), False, 'import torch\n'), ((17810, 17835), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17833, 17835), False, 'import torch\n'), ((4207, 4228), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (4216, 4228), False, 'from datetime import timedelta\n'), ((4689, 4710), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (4698, 4710), False, 'from datetime import timedelta\n'), ((4906, 4927), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (4915, 4927), False, 'from datetime import timedelta\n'), ((5378, 5399), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (5387, 5399), False, 'from datetime import timedelta\n')] |
import pandas as pd
from .apriori_opt import apriori as apriori_opt
from .apriori_basic import apriori as apriori_basic
# from memory_profiler import profile
from .utils import log
def get_frequent_items_in_time(tweets, s, r, a, start=None, end=None, basic=False):
    """Return itemsets that are frequent in enough fixed-length time periods.

    The tweets are bucketed into periods of ``a`` seconds starting at
    ``start``.  Within each period, itemsets with support >= ``s`` are mined
    with apriori; an itemset is reported if it was frequent in at least
    ``r * <number of periods>`` periods.

    Args:
        tweets: DataFrame with at least a ``time`` (datetime) column and a
            ``tokens`` column (input to the apriori implementation).
        s: minimum support passed to the apriori implementation.
        r: fraction of periods in which an itemset must be frequent.
        a: period length in seconds.
        start: origin of the first period; defaults to midnight of the
            earliest tweet (``None`` means "derive from data").
        end: cutoff timestamp — periods starting at or after it are skipped;
            defaults to the latest tweet time.
        basic: if True use the basic apriori implementation instead of the
            optimized one.

    Returns:
        List of itemsets frequent in at least ``r`` of the periods
        (empty list for an empty input frame).
    """
    if tweets.empty:
        return []
    # Use explicit None checks: a falsy-but-valid timestamp must not be replaced.
    if start is None:
        start = pd.Timestamp(tweets.time.min().date())
    if end is None:
        end = tweets.time.max()
    frequent_itemset_f = apriori_basic if basic else apriori_opt
    log("File read")

    topics_counter = {}  # itemset -> number of periods in which it was frequent
    time_periods = 0
    grouper = pd.Grouper(key="time", origin=start, freq=f"{a}s")
    for period_start, batch in tweets.groupby(grouper):
        if period_start >= end:
            break
        log(f"Period of {period_start}")
        for itemset in frequent_itemset_f(batch.tokens, s):
            topics_counter[itemset] = topics_counter.get(itemset, 0) + 1
        time_periods += 1

    min_support = r * time_periods
    return [itemset for itemset, count in topics_counter.items() if count >= min_support]
| [
"pandas.Grouper"
] | [((634, 684), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""time"""', 'origin': 'start', 'freq': 'f"""{a}s"""'}), "(key='time', origin=start, freq=f'{a}s')\n", (644, 684), True, 'import pandas as pd\n')] |
# Import modules
from __future__ import print_function
import sys
import numpy as np
from polytope import box2poly
from tulip import hybrid
from tulip.abstract import prop2part, discretize
import Interface.DSL as DSL
from Interface import Statechart as dumpsmach
from Interface.Reduce import *
from Interface.Transform import *
# ---------------------------------------------------------------------------
# Example script: abstract a 4D LTI "aircraft" model into a finite transition
# system, synthesize a GR(1) controller that keeps visiting regions A and B
# while avoiding two obstacle regions, and export the strategy as an xmi
# statechart.
# ---------------------------------------------------------------------------
print("----------------------------------\n Script options \n----------------------------------")
verbose = 1  # 0 = reduced printing, 1 = extra printing and saving of FSM figures
print("""----------------------------------\n System Definition \n----------------------------------
 -- System Constants
 -- System Label State Space & partition
 """)
# System constants
input_bound = 1.0
disturbance_bound = 0.1
# The system dynamics: discrete-time LTI update x+ = A x + B u + E w.
# Rows 1-2 look like positions driven by rows 3-4 (velocity-like states);
# TODO confirm the intended physical interpretation.
A = np.array([[1., 0, 2., 0], [0, 1., 0, 2], [0, 0, 0.5, 0], [0, 0, 0, 0.5]])
B = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [5, -5, 0, 0], [0, 0, 5, -5]])
E = np.array([[1., 0, 0, 0], [0, 1., 0, 0], [0, 0, 1., 0], [0, 0, 0, 1.]])
# $x^+=Ax+Bu+E W$
# Size of the sets: state space X, input set U, disturbance set W
X = box2poly([[0, 100.], [0, 100.], [-5, 5.], [-5, 5.]])
U = box2poly(input_bound*np.array([[0, 1], [0, 1], [0, 1], [0, 1]]))
W = box2poly(disturbance_bound*np.array([[0, 10], [0, 10], [-0.1, 0.1], [-0.1, 0.1]]))

print("----------------------------------\n Define system\n----------------------------------")
# Intermezzo polytope tutorial
# https://github.com/tulip-control/polytope/blob/master/doc/tutorial.md
sys_dyn = hybrid.LtiSysDyn(A, B, E, None, U, W, X)
print(str(sys_dyn))

print("----------------------------------\n Define labelling \n----------------------------------")
# Atomic propositions: two target regions (A, B) and two obstacle regions.
cprops ={}
cprops["inA"] = box2poly([[0, 10], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])
cprops["inB"] = box2poly([[90, 100], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])
cprops["inObj1"] = box2poly([[15, 35], [30, 70], [-5, 5], [-5, 5]])
cprops["inObj2"] = box2poly([[65, 85], [30, 70], [-5, 5], [-5, 5]])

# Proposition-preserving partition of the state space.
cpartition = prop2part(X, cprops)
if verbose == 1:
    print("partition before refinement")
    print(cpartition)

print("---------------------------------\n System partition State Space \n----------------------------------")
# Refine the partition into a finite abstraction of the dynamics.
disc_dynamics = discretize(cpartition, sys_dyn, N=5, min_cell_volume=1, closed_loop=True, conservative=True)
# Every abstract state labelled 'inA' is an admissible initial state.
states=[state for (state, label) in disc_dynamics.ts.states.find(with_attr_dict={'ap': {'inA'}})]
disc_dynamics.ts.states.initial|=states

print("----------------------------------\n Define specification \n----------------------------------")
# Specifications
# Environment variables and assumptions (none in this example)
env_vars = list()
env_init = list()
env_safe = list()
env_prog = list()
# System variables and requirements
sys_vars = ['inA', 'inB']
sys_init = ['inA']
sys_safe = ['!inObj1', '!inObj2']  # always stay out of both obstacles
sys_prog = ['inA', 'inB']          # GR(1) progress goals: revisit A and B
# Encode the abstract transition system as a spec with a 'ctrl' state variable.
(ctrl_modes, grspec) = transform2control(disc_dynamics.ts, statevar='ctrl')

print("----------------------------------\n Combine sys and spec \n----------------------------------")
phi = grspec | spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
                            env_safe, sys_safe, env_prog, sys_prog)
phi.qinit = '\A \E'
phi.moore = False
phi.plus_one = False
ctrl = synth.synthesize(phi,ignore_sys_init=True)
#
# Optional state-reduction step, currently disabled:
# print("----------------------------------\n Reduce states \n----------------------------------")
#
# Events_init = {('fullGas', True)}
#
#
# ctrl_red=reduce_mealy(ctrl,relabel=False,outputs={'ctrl'}, prune_set=Events_init, combine_trans=False)
#
print("----------------------------------\n Output results \n----------------------------------")
if verbose == 1:
    print(" (Verbose) ")
    # Saving the transition systems is best-effort (may fail without graphviz).
    try:
        disc_dynamics.ts.save("cimple_aircraft_orig.png")
        ctrl_modes.save("cimple_aircraft_modes.png")
        # ctrl_red.save('cimple_aircraft_ctrl_red.png')
        ctrl.save("cimple_aircraft_ctrl_orig.png")
        print(" (Verbose): saved all Finite State Transition Systems ")
    except Exception:
        pass
print('nodes in ctrl:')
print(len(ctrl.nodes()))
print(len(ctrl.transitions()))
print('\n')
#
# print('nodes in ctrl_red:')
# print(len(ctrl_red.nodes()))
# print(len(ctrl_red.transitions()))
# print('\n')
#
#
print("----------------------------------\n Convert controller to Xmi \n----------------------------------")
sys.stdout.flush()

# --------------- Writing the statechart -----------
# Derive the output file name from this script's name; fall back to
# "test_gen" when __file__ is undefined (e.g. interactive session).
try:
    filename = str(__file__)
    filename = filename[0:-3] + "_gen"
except NameError:
    filename = "test_gen"
# write strategy plus control modes at the same time to a statechart
with open(filename+".xml", "w") as f:
    # f.write(dumpsmach.tulip_to_xmi(ctrl_red,ctrl_modes))
f.write(dumpsmach.tulip_to_xmi(ctrl, ctrl_modes)) | [
"tulip.hybrid.LtiSysDyn",
"Interface.Statechart.tulip_to_xmi",
"tulip.abstract.prop2part",
"polytope.box2poly",
"numpy.array",
"sys.stdout.flush",
"tulip.abstract.discretize"
] | [((769, 845), 'numpy.array', 'np.array', (['[[1.0, 0, 2.0, 0], [0, 1.0, 0, 2], [0, 0, 0.5, 0], [0, 0, 0, 0.5]]'], {}), '([[1.0, 0, 2.0, 0], [0, 1.0, 0, 2], [0, 0, 0.5, 0], [0, 0, 0, 0.5]])\n', (777, 845), True, 'import numpy as np\n'), ((847, 915), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 0, 0, 0], [5, -5, 0, 0], [0, 0, 5, -5]]'], {}), '([[0, 0, 0, 0], [0, 0, 0, 0], [5, -5, 0, 0], [0, 0, 5, -5]])\n', (855, 915), True, 'import numpy as np\n'), ((920, 994), 'numpy.array', 'np.array', (['[[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]]'], {}), '([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])\n', (928, 994), True, 'import numpy as np\n'), ((1033, 1089), 'polytope.box2poly', 'box2poly', (['[[0, 100.0], [0, 100.0], [-5, 5.0], [-5, 5.0]]'], {}), '([[0, 100.0], [0, 100.0], [-5, 5.0], [-5, 5.0]])\n', (1041, 1089), False, 'from polytope import box2poly\n'), ((1452, 1492), 'tulip.hybrid.LtiSysDyn', 'hybrid.LtiSysDyn', (['A', 'B', 'E', 'None', 'U', 'W', 'X'], {}), '(A, B, E, None, U, W, X)\n', (1468, 1492), False, 'from tulip import hybrid\n'), ((1643, 1698), 'polytope.box2poly', 'box2poly', (['[[0, 10], [45, 55], [-0.1, 0.1], [-0.1, 0.1]]'], {}), '([[0, 10], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])\n', (1651, 1698), False, 'from polytope import box2poly\n'), ((1715, 1772), 'polytope.box2poly', 'box2poly', (['[[90, 100], [45, 55], [-0.1, 0.1], [-0.1, 0.1]]'], {}), '([[90, 100], [45, 55], [-0.1, 0.1], [-0.1, 0.1]])\n', (1723, 1772), False, 'from polytope import box2poly\n'), ((1793, 1841), 'polytope.box2poly', 'box2poly', (['[[15, 35], [30, 70], [-5, 5], [-5, 5]]'], {}), '([[15, 35], [30, 70], [-5, 5], [-5, 5]])\n', (1801, 1841), False, 'from polytope import box2poly\n'), ((1861, 1909), 'polytope.box2poly', 'box2poly', (['[[65, 85], [30, 70], [-5, 5], [-5, 5]]'], {}), '([[65, 85], [30, 70], [-5, 5], [-5, 5]])\n', (1869, 1909), False, 'from polytope import box2poly\n'), ((1925, 1945), 'tulip.abstract.prop2part', 'prop2part', (['X', 
'cprops'], {}), '(X, cprops)\n', (1934, 1945), False, 'from tulip.abstract import prop2part, discretize\n'), ((2155, 2251), 'tulip.abstract.discretize', 'discretize', (['cpartition', 'sys_dyn'], {'N': '(5)', 'min_cell_volume': '(1)', 'closed_loop': '(True)', 'conservative': '(True)'}), '(cpartition, sys_dyn, N=5, min_cell_volume=1, closed_loop=True,\n conservative=True)\n', (2165, 2251), False, 'from tulip.abstract import prop2part, discretize\n'), ((4274, 4292), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4290, 4292), False, 'import sys\n'), ((1111, 1153), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [0, 1], [0, 1]]'], {}), '([[0, 1], [0, 1], [0, 1], [0, 1]])\n', (1119, 1153), True, 'import numpy as np\n'), ((1186, 1240), 'numpy.array', 'np.array', (['[[0, 10], [0, 10], [-0.1, 0.1], [-0.1, 0.1]]'], {}), '([[0, 10], [0, 10], [-0.1, 0.1], [-0.1, 0.1]])\n', (1194, 1240), True, 'import numpy as np\n'), ((4641, 4681), 'Interface.Statechart.tulip_to_xmi', 'dumpsmach.tulip_to_xmi', (['ctrl', 'ctrl_modes'], {}), '(ctrl, ctrl_modes)\n', (4663, 4681), True, 'from Interface import Statechart as dumpsmach\n')] |
from open_publishing.core import SequenceItem, SequenceField, SequenceItemProperty
from open_publishing.core import FieldDescriptor, DatabaseObjectField, SimpleField
from open_publishing.user import User
from open_publishing.core.enums import ValueStatus
from open_publishing.core.enums import ProvisionRuleRole, ProvisionChannelType, ProvisionChannelBase
from open_publishing.core.enums import ProvisionRuleAlgorithm
from .rule import ProvisionRule
from .filter_list import ProvisionFilterList
class Progression(SequenceItem):
    """One step of a progression scale: a ``rate`` that applies from a sales
    ``threshold`` on.

    NOTE(review): business semantics inferred from surrounding provision-rule
    code; confirm against the API documentation.
    """
    def __init__(self,
                 threshold,
                 rate):
        super(Progression, self).__init__(ValueStatus.soft)
        self.threshold = threshold
        self.rate = rate
        # Re-mark as soft: the attribute assignments above go through
        # SequenceItemProperty and presumably flip the status to hard —
        # a freshly built item must not count as locally modified.
        self._status = ValueStatus.soft

    # Attribute access is routed through the SequenceItem machinery.
    threshold = SequenceItemProperty('threshold')
    rate = SequenceItemProperty('rate')

    @classmethod
    def from_gjp(cls, gjp, database_object):
        """Build a Progression from its GJP dict ('threshold' / 'value')."""
        threshold = gjp['threshold']
        rate = gjp['value']
        return cls(threshold,
                   rate)

    def to_gjp(self):
        """Serialize to the GJP wire format (the rate is sent as 'value')."""
        return {'threshold': self.threshold,
                'value': self.rate}
class ChannelProgressionList(SequenceField):
    """Sequence of Progression steps stored under a channel's
    'progressions' key (aspect 'channels.*')."""
    _item_type = Progression
    def __init__(self,
                 rule):
        super(ChannelProgressionList, self).__init__(rule,
                                                     'channels.*',
                                                     'progressions')
    def add(self,
            threshold,
            value):
        """Append a new Progression step; marks the field as modified (hard)."""
        self._list.append(Progression(threshold,
                                      value))
        self._status = ValueStatus.hard
        return self[-1]
    def from_gjp(self,
                 gjp):
        """Rebuild the list from GJP data.

        A missing/empty 'progressions' entry yields an empty list; the
        field status is reset to soft (i.e. in sync with the server).
        """
        self._list = []
        for item in gjp['progressions'] if gjp['progressions'] else []:
            self._list.append(self._item_type.from_gjp(item, self.database_object))
        self._status = ValueStatus.soft
    def to_gjp(self):
        """Serialize every contained Progression for the GJP payload."""
        return [item.to_gjp() for item in self._list]
class ProgressionChannel(SequenceItem):
    """One sales channel of a progression rule: channel type, calculation
    base, optional group, and the list of progression steps."""
    def __init__(self,
                 rule,
                 channel_type,
                 base,
                 group):
        super(ProgressionChannel, self).__init__(ValueStatus.soft)
        self._rule = rule
        self.channel_type = channel_type
        self.base = base
        self.progressions = ChannelProgressionList(self._rule)
        self.group = group
        # Reset to soft after the property assignments above (they presumably
        # mark the item hard); a new channel starts unmodified.
        self._status = ValueStatus.soft

    # Attribute access routed through the SequenceItem machinery.
    channel_type = SequenceItemProperty('channel_type')
    base = SequenceItemProperty('base')
    group = SequenceItemProperty('group')

    @property
    def rule(self):
        # The owning provision rule (read-only).
        return self._rule

    @property
    def status(self):
        # A channel counts as modified when its progressions changed, even
        # if its own scalar fields did not.
        if self.progressions.status is ValueStatus.hard:
            return ValueStatus.hard
        else:
            return super(ProgressionChannel, self).status

    @classmethod
    def from_gjp(cls, gjp, database_object):
        """Build a channel (including its progressions) from a GJP dict."""
        channel_type = ProvisionChannelType.from_id(gjp['channel'])
        base = ProvisionChannelBase.from_id(gjp['basis'])
        group = gjp['group']
        # The wire format uses '' for "no group"; map it to None internally.
        res = cls(database_object,
                  channel_type,
                  base,
                  group if group !='' else None)
        res.progressions.from_gjp(gjp)
        return res

    def to_gjp(self):
        """Serialize to GJP; None group is sent back as ''."""
        res = {'channel': self.channel_type.identifier,
               'basis': self.base.identifier,
               'group': self.group if self.group else '',
               'progressions': self.progressions.to_gjp()}
        return res
class ProgressionChannelsList(SequenceField):
    """Sequence of ProgressionChannel items under the rule's 'channels' key."""
    _item_type = ProgressionChannel
    def __init__(self,
                 rule):
        super(ProgressionChannelsList, self).__init__(rule,
                                                      'channels.*',
                                                      'channels')
    def add(self,
            rate,
            channel_type = ProvisionChannelType.book_and_ebook,
            base = ProvisionChannelBase.net_price,
            progressions = None,
            group = None):
        """Append a new channel with a base rate and optional extra steps.

        The base ``rate`` always becomes the first progression step with
        threshold 1; ``progressions`` may supply additional
        (threshold, rate) pairs. Marks the field as modified (hard).
        """
        progression_channel = ProgressionChannel(self.database_object,
                                                 channel_type,
                                                 base,
                                                 group)
        progression_channel.progressions.add(1, rate)
        if progressions is not None:
            for threshold, rate in progressions:
                progression_channel.progressions.add(threshold, rate)
        self._list.append(progression_channel)
        self._status = ValueStatus.hard
        return self[-1]
class ProgressionRule(ProvisionRule):
    """Provision rule with threshold-based progression of rates per channel."""
    def __init__(self,
                 context,
                 rule_id):
        super(ProgressionRule, self).__init__(context,
                                              rule_id)
        # Register the rule's fields with the framework: who receives the
        # provision, in which role, and the per-channel progression data.
        self._fields['recipient'] = DatabaseObjectField(parent=self,
                                                        aspect='*',
                                                        field_locator='recipient_user_id',
                                                        dtype=User)
        self._fields['role'] = SimpleField(database_object=self,
                                           aspect='*',
                                           field_locator='role',
                                           dtype=ProvisionRuleRole)
        self._fields['channels'] = ProgressionChannelsList(rule=self)

    # Attribute-style access to the fields registered above.
    recipient = FieldDescriptor('recipient')
    role = FieldDescriptor('role')
    channels = FieldDescriptor('channels')
class ProgressionList(ProvisionFilterList):
    """View over a document's provision rules restricted to progression rules."""
    _filter = ProvisionRuleAlgorithm.progression
    def __init__(self,
                 provision_list):
        super(ProgressionList, self).__init__(provision_list)
    def add(self,
            recipient,
            role):
        """Create a new progression rule for this document and register it.

        The rule is created and persisted inside the ``_create`` context
        manager; afterwards it is re-wrapped by id and appended to the
        parent provision list. Returns the newly added rule.
        """
        with ProgressionRule._create(self._provision_list._database_object._context,
                                     channels=[]) as rule:
            rule._algorithm = ProvisionRuleAlgorithm.progression
            rule._source_type = 'DOCUMENT'
            rule._reference_id = self._provision_list._document.document_id
            rule._scope = 'SALE'
            rule.recipient = recipient
            rule.role = role
        new_rule = ProgressionRule(self._provision_list._database_object._context,
                                    rule.rule_id)
        self._provision_list._objects[rule.rule_id] = new_rule
        self._provision_list._ids.append(rule.rule_id)
        return self[-1]
| [
"open_publishing.core.enums.ProvisionChannelBase.from_id",
"open_publishing.core.FieldDescriptor",
"open_publishing.core.SequenceItemProperty",
"open_publishing.core.enums.ProvisionChannelType.from_id",
"open_publishing.core.DatabaseObjectField",
"open_publishing.core.SimpleField"
] | [((782, 815), 'open_publishing.core.SequenceItemProperty', 'SequenceItemProperty', (['"""threshold"""'], {}), "('threshold')\n", (802, 815), False, 'from open_publishing.core import SequenceItem, SequenceField, SequenceItemProperty\n'), ((827, 855), 'open_publishing.core.SequenceItemProperty', 'SequenceItemProperty', (['"""rate"""'], {}), "('rate')\n", (847, 855), False, 'from open_publishing.core import SequenceItem, SequenceField, SequenceItemProperty\n'), ((2541, 2577), 'open_publishing.core.SequenceItemProperty', 'SequenceItemProperty', (['"""channel_type"""'], {}), "('channel_type')\n", (2561, 2577), False, 'from open_publishing.core import SequenceItem, SequenceField, SequenceItemProperty\n'), ((2589, 2617), 'open_publishing.core.SequenceItemProperty', 'SequenceItemProperty', (['"""base"""'], {}), "('base')\n", (2609, 2617), False, 'from open_publishing.core import SequenceItem, SequenceField, SequenceItemProperty\n'), ((2630, 2659), 'open_publishing.core.SequenceItemProperty', 'SequenceItemProperty', (['"""group"""'], {}), "('group')\n", (2650, 2659), False, 'from open_publishing.core import SequenceItem, SequenceField, SequenceItemProperty\n'), ((5632, 5660), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""recipient"""'], {}), "('recipient')\n", (5647, 5660), False, 'from open_publishing.core import FieldDescriptor, DatabaseObjectField, SimpleField\n'), ((5672, 5695), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""role"""'], {}), "('role')\n", (5687, 5695), False, 'from open_publishing.core import FieldDescriptor, DatabaseObjectField, SimpleField\n'), ((5711, 5738), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""channels"""'], {}), "('channels')\n", (5726, 5738), False, 'from open_publishing.core import FieldDescriptor, DatabaseObjectField, SimpleField\n'), ((3018, 3062), 'open_publishing.core.enums.ProvisionChannelType.from_id', 'ProvisionChannelType.from_id', (["gjp['channel']"], {}), 
"(gjp['channel'])\n", (3046, 3062), False, 'from open_publishing.core.enums import ProvisionRuleRole, ProvisionChannelType, ProvisionChannelBase\n'), ((3078, 3120), 'open_publishing.core.enums.ProvisionChannelBase.from_id', 'ProvisionChannelBase.from_id', (["gjp['basis']"], {}), "(gjp['basis'])\n", (3106, 3120), False, 'from open_publishing.core.enums import ProvisionRuleRole, ProvisionChannelType, ProvisionChannelBase\n'), ((4982, 5078), 'open_publishing.core.DatabaseObjectField', 'DatabaseObjectField', ([], {'parent': 'self', 'aspect': '"""*"""', 'field_locator': '"""recipient_user_id"""', 'dtype': 'User'}), "(parent=self, aspect='*', field_locator=\n 'recipient_user_id', dtype=User)\n", (5001, 5078), False, 'from open_publishing.core import FieldDescriptor, DatabaseObjectField, SimpleField\n'), ((5274, 5371), 'open_publishing.core.SimpleField', 'SimpleField', ([], {'database_object': 'self', 'aspect': '"""*"""', 'field_locator': '"""role"""', 'dtype': 'ProvisionRuleRole'}), "(database_object=self, aspect='*', field_locator='role', dtype=\n ProvisionRuleRole)\n", (5285, 5371), False, 'from open_publishing.core import FieldDescriptor, DatabaseObjectField, SimpleField\n')] |
import re
from typing import Dict, Tuple, List, NamedTuple, Optional
from lib.utils.decorators import with_exception_retry
from .helpers.common import (
split_hostport,
get_parsed_variables,
merge_hostport,
random_choice,
)
from .helpers.zookeeper import get_hostname_and_port_from_zk
# TODO: make these configurable?
MAX_URI_FETCH_ATTEMPTS = 10
MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC = 5
class RawHiveConnectionConf(NamedTuple):
    # Raw Connection Configuration that's from a string -> dict transformation,
    # i.e. parsed straight out of the JDBC URL before any host selection or
    # Zookeeper resolution happens.
    hosts: List[Tuple[str, Optional[int]]]  # candidate (hostname, port) pairs; port may be absent
    default_db: str  # database from the URL path ('default' when empty)
    session_variables: Dict[str, str]  # ';key=value' section (e.g. serviceDiscoveryMode)
    conf_list: Dict[str, str]  # '?key=value' Hive configuration section
    var_list: Dict[str, str]  # '#key=value' Hive variables section
class HiveConnectionConf(NamedTuple):
    # Resolved connection target: a single concrete host/port plus the
    # database and Hive configuration to use when connecting.
    host: str  # chosen HiveServer2 hostname
    port: Optional[int]  # chosen port; may be None if the URL omitted it
    default_db: str  # database to connect to
    configuration: Dict[str, str]  # Hive conf key/value pairs from the URL
def _extract_connection_url(connection_string: str) -> RawHiveConnectionConf:
    """Parse a Hive JDBC connection string into its raw components.

    Loosely based on
    https://cwiki.apache.org/confluence/display/Hive/HiveServer2+Clients#HiveServer2Clients-JDBC
    The accepted shape is roughly::

        [jdbc:]hive2://host1[:p1][,host2[:p2]...]/[db][;k=v...][?k=v...][#k=v...]

    Args:
        connection_string: the raw JDBC URL.

    Returns:
        RawHiveConnectionConf with the parsed host candidates, default
        database, session variables, configuration list and variable list.

    Raises:
        ValueError: if the string does not look like a Hive JDBC URL
            (previously this surfaced as an opaque AttributeError).
    """
    match = re.search(
        r"^(?:jdbc:)?hive2:\/\/([\w.-]+(?:\:\d+)?(?:,[\w.-]+(?:\:\d+)?)*)\/(\w*)((?:;[\w.-]+=[\w.-]+)*)(\?[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?(\#[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?$",  # noqa: E501
        connection_string,
    )
    if match is None:
        # Fail fast with a clear message instead of an AttributeError below.
        raise ValueError("Invalid Hive connection string: %r" % connection_string)

    hosts = match.group(1)
    default_db = match.group(2) or "default"  # empty path -> 'default' db
    session_variables = match.group(3) or ""
    conf_list = match.group(4) or ""
    var_list = match.group(5) or ""

    parsed_hosts = [split_hostport(hostport) for hostport in hosts.split(",")]

    # Each variable section starts with its delimiter (';', '?' or '#'),
    # which is stripped before parsing the key=value pairs.
    parsed_session_variables = get_parsed_variables(session_variables[1:])
    parsed_conf_list = get_parsed_variables(conf_list[1:])
    parsed_var_list = get_parsed_variables(var_list[1:])

    return RawHiveConnectionConf(
        hosts=parsed_hosts,
        default_db=default_db,
        session_variables=parsed_session_variables,
        conf_list=parsed_conf_list,
        var_list=parsed_var_list,
    )
@with_exception_retry(
    max_retry=MAX_URI_FETCH_ATTEMPTS,
    get_retry_delay=lambda retry: min(MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC, retry),
)
def get_hive_host_port_from_zk(
    connection_conf: RawHiveConnectionConf,
) -> Tuple[str, int]:
    """Pick a (hostname, port) for HiveServer2 via Zookeeper service discovery.

    The hosts in ``connection_conf`` form the Zookeeper quorum; the chroot
    namespace comes from the ``zooKeeperNamespace`` session variable. One of
    the advertised server URIs is chosen at random. The decorator retries the
    whole lookup on failure with a capped delay.

    Raises:
        Exception: if no usable server URI could be obtained from Zookeeper.
    """
    # Quorum string of the form "host1:port1,host2:port2,...".
    zk_quorum = ",".join(
        map(lambda hostport: merge_hostport(hostport), connection_conf.hosts)
    )
    zk_namespace = connection_conf.session_variables.get("zooKeeperNamespace")
    raw_server_uris = get_hostname_and_port_from_zk(zk_quorum, zk_namespace) or []
    # Drop entries that don't match the expected
    # "serverUri=...;version=...;sequence=..." shape.
    server_uri_dicts = filter(
        lambda d: d is not None,
        [_server_uri_to_dict(raw_server_uri) for raw_server_uri in raw_server_uris],
    )
    server_uris = list(map(lambda d: d["serverUri"], server_uri_dicts))
    random_server_uri = random_choice(server_uris)
    if not random_server_uri:
        raise Exception("Failed to get hostname and port from Zookeeper")
    return split_hostport(random_server_uri)
def _server_uri_to_dict(server_uri: str) -> Optional[Dict[str, str]]:
match = re.search(r"serverUri=(.*);version=(.*);sequence=(.*)", server_uri)
if match:
return {
"serverUri": match.group(1),
"version": match.group(2),
"sequence": match.group(3),
}
def get_hive_connection_conf(connection_string: str) -> HiveConnectionConf:
    """Resolve a Hive JDBC connection string to a concrete connection target.

    When the URL requests Zookeeper service discovery, the host/port pair is
    looked up via Zookeeper; otherwise one host is picked at random from the
    URL's host list ((None, None) if the list is empty).
    """
    raw_conf = _extract_connection_url(connection_string)

    discovery_mode = raw_conf.session_variables.get("serviceDiscoveryMode")
    if discovery_mode == "zooKeeper":
        # Service discovery: ask Zookeeper which HiveServer2 instance to use.
        host, port = get_hive_host_port_from_zk(raw_conf)
    else:
        # Static host list: pick one entry at random.
        host, port = random_choice(raw_conf.hosts, default=(None, None))

    return HiveConnectionConf(
        host=host,
        port=port,
        default_db=raw_conf.default_db,
        configuration=raw_conf.conf_list,
    )
| [
"re.search"
] | [((1058, 1298), 're.search', 're.search', (['"""^(?:jdbc:)?hive2:\\\\/\\\\/([\\\\w.-]+(?:\\\\:\\\\d+)?(?:,[\\\\w.-]+(?:\\\\:\\\\d+)?)*)\\\\/(\\\\w*)((?:;[\\\\w.-]+=[\\\\w.-]+)*)(\\\\?[\\\\w.-]+=[\\\\w.-]+(?:;[\\\\w.-]+=[\\\\w.-]+)*)?(\\\\#[\\\\w.-]+=[\\\\w.-]+(?:;[\\\\w.-]+=[\\\\w.-]+)*)?$"""', 'connection_string'], {}), "(\n '^(?:jdbc:)?hive2:\\\\/\\\\/([\\\\w.-]+(?:\\\\:\\\\d+)?(?:,[\\\\w.-]+(?:\\\\:\\\\d+)?)*)\\\\/(\\\\w*)((?:;[\\\\w.-]+=[\\\\w.-]+)*)(\\\\?[\\\\w.-]+=[\\\\w.-]+(?:;[\\\\w.-]+=[\\\\w.-]+)*)?(\\\\#[\\\\w.-]+=[\\\\w.-]+(?:;[\\\\w.-]+=[\\\\w.-]+)*)?$'\n , connection_string)\n", (1067, 1298), False, 'import re\n'), ((3054, 3120), 're.search', 're.search', (['"""serverUri=(.*);version=(.*);sequence=(.*)"""', 'server_uri'], {}), "('serverUri=(.*);version=(.*);sequence=(.*)', server_uri)\n", (3063, 3120), False, 'import re\n')] |
import argparse
import json
from data_management.DatasetFactory import datasetFactory
from config import cfg
import numpy as np
if __name__ == "__main__":
    # Compute multi-label classification metrics from a saved results file.
    parser = argparse.ArgumentParser(description='Calculates metrics from output of a Classification network.' +
                                         ' Run `run_network.py <config> test` first.')
    parser.add_argument('config_file', help='config file path')
    parser.add_argument('results_file', help='results file path')
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # Load and freeze the experiment configuration.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    refer = datasetFactory(cfg)
    # Per-class TP/FP/FN counters; one extra slot beyond N_LABELS —
    # presumably a "no label" bucket, confirm against the network output.
    hamming_loss = 0.0
    TP = np.zeros((cfg.IMG_NET.N_LABELS+1,))
    FP = np.zeros((cfg.IMG_NET.N_LABELS+1,))
    FN = np.zeros((cfg.IMG_NET.N_LABELS+1,))
    total = 0.0
    # load generation outputs (JSON list of per-sample result dicts)
    with open(args.results_file, 'r') as f:
        genData = json.load(f)
        for row in genData:
            total += 1.0
            hamming_loss += row['Hamming_Loss']
            # The *_classes entries appear to be lists of class indices;
            # NOTE(review): numpy fancy-index += counts duplicate indices
            # only once per statement — confirm indices are unique per row.
            TP[row['TP_classes']] += 1
            FP[row['FP_classes']] += 1
            FN[row['FN_classes']] += 1
    # Micro-averaged metrics over all classes.
    print("Mean Hamming Loss: %3.3f" % (hamming_loss/total))
    print("Mean precision: %3.3f" % (np.sum(TP)/(np.sum(TP)+np.sum(FP))))
    print("Mean recall: %3.3f" % (np.sum(TP)/(np.sum(TP)+np.sum(FN))))
    # Per-class precision/recall, keyed by the dataset's category names.
    print("Class\tPrecision\tRecall")
    for idx in range(cfg.IMG_NET.N_LABELS):
        label = refer[0].coco.cats[refer[0].coco_cat_map[idx]]
print("%s\t%3.3f\t%3.3f" % (label['name'].ljust(20), TP[idx]/(TP[idx]+FP[idx]), TP[idx]/(TP[idx]+FN[idx]))) | [
"argparse.ArgumentParser",
"config.cfg.freeze",
"config.cfg.merge_from_file",
"numpy.sum",
"numpy.zeros",
"data_management.DatasetFactory.datasetFactory",
"json.load",
"config.cfg.merge_from_list"
] | [((171, 325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('Calculates metrics from output of a Classification network.' +\n ' Run `run_network.py <config> test` first.')"}), "(description=\n 'Calculates metrics from output of a Classification network.' +\n ' Run `run_network.py <config> test` first.')\n", (194, 325), False, 'import argparse\n'), ((696, 733), 'config.cfg.merge_from_file', 'cfg.merge_from_file', (['args.config_file'], {}), '(args.config_file)\n', (715, 733), False, 'from config import cfg\n'), ((738, 768), 'config.cfg.merge_from_list', 'cfg.merge_from_list', (['args.opts'], {}), '(args.opts)\n', (757, 768), False, 'from config import cfg\n'), ((773, 785), 'config.cfg.freeze', 'cfg.freeze', ([], {}), '()\n', (783, 785), False, 'from config import cfg\n'), ((799, 818), 'data_management.DatasetFactory.datasetFactory', 'datasetFactory', (['cfg'], {}), '(cfg)\n', (813, 818), False, 'from data_management.DatasetFactory import datasetFactory\n'), ((852, 889), 'numpy.zeros', 'np.zeros', (['(cfg.IMG_NET.N_LABELS + 1,)'], {}), '((cfg.IMG_NET.N_LABELS + 1,))\n', (860, 889), True, 'import numpy as np\n'), ((897, 934), 'numpy.zeros', 'np.zeros', (['(cfg.IMG_NET.N_LABELS + 1,)'], {}), '((cfg.IMG_NET.N_LABELS + 1,))\n', (905, 934), True, 'import numpy as np\n'), ((942, 979), 'numpy.zeros', 'np.zeros', (['(cfg.IMG_NET.N_LABELS + 1,)'], {}), '((cfg.IMG_NET.N_LABELS + 1,))\n', (950, 979), True, 'import numpy as np\n'), ((1087, 1099), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1096, 1099), False, 'import json\n'), ((1418, 1428), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1424, 1428), True, 'import numpy as np\n'), ((1489, 1499), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1495, 1499), True, 'import numpy as np\n'), ((1430, 1440), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1436, 1440), True, 'import numpy as np\n'), ((1441, 1451), 'numpy.sum', 'np.sum', (['FP'], {}), '(FP)\n', (1447, 1451), True, 'import numpy as 
np\n'), ((1501, 1511), 'numpy.sum', 'np.sum', (['TP'], {}), '(TP)\n', (1507, 1511), True, 'import numpy as np\n'), ((1512, 1522), 'numpy.sum', 'np.sum', (['FN'], {}), '(FN)\n', (1518, 1522), True, 'import numpy as np\n')] |
# Copyright 2002-2018 MarkLogic Corporation. All Rights Reserved.
import boto3
import botocore
import logging
import hashlib
import json
import time
from botocore.exceptions import ClientError
# Shared logger for all handlers in this module; INFO level so attach/retry
# progress is visible in CloudWatch logs.
log = logging.getLogger()
log.setLevel(logging.INFO)
# global variables: AWS clients/resources created once at module load and
# shared by the functions below.
ec2_client = boto3.client('ec2')
asg_client = boto3.client('autoscaling')
ec2_resource = boto3.resource('ec2')
def eni_wait_for_attachment(eni_id):
    """Block until the given ENI reports its attachment as "attached".

    Polls the network interface up to ``max_retry`` times, sleeping
    ``sleep_interval`` seconds between polls while the describe call fails,
    the attachment record is not present yet, or the attachment is still in
    the transient "attaching" state.  If the retry budget is exhausted a
    warning is logged and the function returns normally.

    :param eni_id: id of the elastic network interface to wait on
    """
    max_retry = 10        # fixed typo: was "max_rety"
    sleep_interval = 10   # seconds between polls
    retries = 0
    # Dropped the redundant "True and" from the original loop condition.
    while retries < max_retry:
        try:
            eni_info = ec2_resource.NetworkInterface(id=eni_id)
        except ClientError:
            log.exception("Failed to get network interface by id %s" % eni_id)
            time.sleep(sleep_interval)
            retries += 1
            continue
        if not eni_info.attachment:
            # The attachment record may lag behind the attach call.
            time.sleep(sleep_interval)
            retries += 1
            continue
        status = eni_info.attachment["Status"]
        if status == "attached":
            break
        if status == "attaching":
            time.sleep(sleep_interval)
            retries += 1
            continue
        # Unexpected states (e.g. "detaching") consume a retry immediately,
        # without sleeping -- same as the original behavior.
        log.warning(
            "Network interface %s in unexpected status: %s" % (eni_id, status)
        )
        retries += 1
    else:
        log.warning(
            "Waiting for network interface %s attachment timed out" % eni_id
        )
def handler(event, context):
    """Lambda entry point for auto-scaling lifecycle notifications via SNS.

    Parses the SNS message out of the event; on an
    ``EC2_INSTANCE_LAUNCHING`` transition it attaches a cluster ENI to the
    new instance (``on_launch``) and then completes the lifecycle hook so
    the instance can continue launching.
    """
    notification = json.loads(event["Records"][0]["Sns"]["Message"])
    if notification.get("LifecycleTransition") == "autoscaling:EC2_INSTANCE_LAUNCHING":
        log.info("Handle EC2_INSTANCE_LAUNCHING event %s" % (json.dumps(event, indent=2)))
        on_launch(notification)
        # Signal the ASG that the hook finished so the launch proceeds.
        try:
            asg_client.complete_lifecycle_action(
                LifecycleHookName=notification['LifecycleHookName'],
                AutoScalingGroupName=notification['AutoScalingGroupName'],
                LifecycleActionToken=notification['LifecycleActionToken'],
                LifecycleActionResult='CONTINUE'
            )
        except botocore.exceptions.ClientError:
            log.exception("Error completing life cycle hook for instance")
            time.sleep(5)  # give the exception info time to reach CloudWatch
def on_launch(msg):
    """Attach a pre-created, cluster-tagged ENI to a newly launched instance.

    Reads the MarkLogic stack identity from the instance's tags, derives the
    ENI tag prefix ``"<stack-name>-<md5(stack-id)>_"``, then scans tag
    indices 0..199 for an ENI in the same subnet that is still "available"
    and attaches the first one that succeeds at device index 1.

    :param msg: parsed lifecycle SNS message containing "EC2InstanceId"
    :return: False on a describe failure; otherwise None
    """
    instance_id = msg["EC2InstanceId"]
    log.info("Launch event of instance %s" % instance_id)
    try:
        instance = ec2_client.describe_instances(InstanceIds=[instance_id])
    except botocore.exceptions.ClientError as e:
        reason = "Failed to describe instance %s" % instance_id
        log.exception(reason)
        time.sleep(5)  # let the exception log flush before returning
        return False
    # manage ENI
    subnet_id = instance['Reservations'][0]['Instances'][0]['SubnetId']
    tags = instance['Reservations'][0]['Instances'][0]['Tags']
    stack_name = None
    stack_id = None
    # The CloudFormation stack identity is carried on instance tags.
    for tag in tags:
        if tag["Key"] == "marklogic:stack:name":
            stack_name = tag["Value"]
        if tag["Key"] == "marklogic:stack:id":
            stack_id = tag["Value"]
    if stack_name and stack_id:
        log.info("Subnet: %s, Stack Name: %s, Stack Id: %s" % (str(subnet_id), stack_name, stack_id))
        # Recompute the same hash that was used when the ENIs were tagged.
        id_hash = hashlib.md5(stack_id.encode()).hexdigest()
        eni_tag_prefix = stack_name + "-" + id_hash + "_"
        for i in range(0,200):
            tag = eni_tag_prefix + str(i)
            log.info("Querying unattached ENI with tag %s" % tag)
            # query: only ENIs that are still unattached ("available") and
            # in the instance's own subnet qualify
            response = ec2_client.describe_network_interfaces(
                Filters=[
                    {
                        "Name": "tag:cluster-eni-id",
                        "Values": [tag]
                    },
                    {
                        "Name": "status",
                        "Values": ["available"]
                    },
                    {
                        "Name": "subnet-id",
                        "Values": [subnet_id]
                    }
                ]
            )
            if len(response["NetworkInterfaces"]) == 0:
                log.info("No qualified ENI found")
                continue
            # attach the available ENI
            for eni_info in response["NetworkInterfaces"]:
                eni_id = eni_info["NetworkInterfaceId"]
                try:
                    attachment = ec2_client.attach_network_interface(
                        NetworkInterfaceId=eni_id,
                        InstanceId=instance_id,
                        DeviceIndex=1
                    )
                    log.info("Attaching ENI %s to instance %s" % (eni_id, instance_id))
                except botocore.exceptions.ClientError as e:
                    # Another launching instance may have raced us for this
                    # ENI; try the next candidate.
                    reason = "Error attaching network interface %s" % eni_id
                    log.exception(reason)
                    time.sleep(5)
                    continue
                eni_wait_for_attachment(eni_id)
                break  # attached successfully; stop scanning candidates
            else:
                continue  # all candidates failed to attach; try next tag index
            break  # inner loop attached an ENI; stop the tag scan
    else:
        log.warning("Tags for stack name or stack id not found")
"logging.getLogger",
"json.loads",
"boto3.client",
"json.dumps",
"time.sleep",
"boto3.resource"
] | [((202, 221), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (219, 221), False, 'import logging\n'), ((282, 301), 'boto3.client', 'boto3.client', (['"""ec2"""'], {}), "('ec2')\n", (294, 301), False, 'import boto3\n'), ((315, 342), 'boto3.client', 'boto3.client', (['"""autoscaling"""'], {}), "('autoscaling')\n", (327, 342), False, 'import boto3\n'), ((358, 379), 'boto3.resource', 'boto3.resource', (['"""ec2"""'], {}), "('ec2')\n", (372, 379), False, 'import boto3\n'), ((1571, 1591), 'json.loads', 'json.loads', (['msg_text'], {}), '(msg_text)\n', (1581, 1591), False, 'import json\n'), ((886, 912), 'time.sleep', 'time.sleep', (['sleep_interval'], {}), '(sleep_interval)\n', (896, 912), False, 'import time\n'), ((2807, 2820), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2817, 2820), False, 'import time\n'), ((764, 790), 'time.sleep', 'time.sleep', (['sleep_interval'], {}), '(sleep_interval)\n', (774, 790), False, 'import time\n'), ((1105, 1131), 'time.sleep', 'time.sleep', (['sleep_interval'], {}), '(sleep_interval)\n', (1115, 1131), False, 'import time\n'), ((1783, 1810), 'json.dumps', 'json.dumps', (['event'], {'indent': '(2)'}), '(event, indent=2)\n', (1793, 1810), False, 'import json\n'), ((2367, 2380), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2377, 2380), False, 'import time\n'), ((4996, 5009), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5006, 5009), False, 'import time\n')] |
import unittest
from xsertion.layers import *
from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten
from keras.models import Model
import json
def desc(model : Model):
    """Return *model*'s layer-graph configuration parsed from its JSON dump."""
    return json.loads(model.to_json())['config']
def topo_check(layerlist):
    """Verify that *layerlist* is in a valid topological order.

    For every layer, all of its inbound layers must sit at or before its own
    position in the list, and all of its outbound layers at or after it.
    Returns True when the ordering is consistent, False otherwise.
    """
    position = {layer: idx for idx, layer in enumerate(layerlist)}
    for idx, layer in enumerate(layerlist):
        inbound_ok = all(position[src] <= idx for src in layer.get_inbound())
        outbound_ok = all(position[dst] >= idx for dst in layer._get_outbound())
        if not (inbound_ok and outbound_ok):
            return False
    return True
class ParsingTestCase(unittest.TestCase):
    """Tests for parse_model_description on Keras 1.x functional models."""
    def test_layer_con_and_config(self):
        """A single layer's config and in/out links survive parsing."""
        it = Input(shape=(3, 32, 32), name='TestInput')
        c1 = Convolution2D(32, 3, 3, activation='relu', name='TestLayer')
        config = json.loads(json.dumps(c1.get_config())) # transform all tuples to lists
        model=Model(input=it, output=c1(it))
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        self.assertDictEqual(config, layers[1].config)
        self.assertEqual(layers[0], list(layers[1].get_inbound())[0])
        self.assertEqual(layers[1], list(layers[0]._get_outbound())[0])
        self.assertTrue(topo_check(layers))
    def test_linear_model(self):
        """A straight input->conv->flatten->dense chain parses to 4 layers."""
        it = Input(shape=(3,32,32), name='TestInput')
        c1 = Convolution2D(32, 3, 3, activation='relu')(it)
        a1 = Flatten()(c1)
        d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
        model = Model(input=it, output=d1)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        self.assertEqual(4, len(layers))
        self.assertEqual(1, len(model_inputs))
        self.assertEqual(1, len(model_outputs))
        self.assertEqual("TestInput", layers[0].get_name())
        self.assertEqual("TestOutput", layers[-1].get_name())
        self.assertTrue(topo_check(layers))
    def test_branching_model(self):
        """Three parallel conv branches merged by sum parse to 7 layers."""
        it = Input(shape=(3,32,32), name='TestInput')
        c1 = Convolution2D(32, 3, 3, activation='relu')(it)
        c2 = Convolution2D(32, 3, 3, activation='relu')(it)
        c3 = Convolution2D(32, 3, 3, activation='relu')(it)
        m1 = merge([c1, c2, c3], mode='sum')
        a1 = Flatten()(m1)
        d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
        model = Model(input=it, output=d1)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        self.assertEqual(7, len(layers))
        self.assertEqual(1, len(model_inputs))
        self.assertEqual(1, len(model_outputs))
        self.assertEqual("TestInput", layers[0].get_name())
        self.assertEqual("TestOutput", layers[-1].get_name())
        self.assertTrue(topo_check(layers))
    def test_branching_multistage_model(self):
        """Branches of conv+activation pairs merged by sum parse to 10 layers."""
        it = Input(shape=(3,32,32), name='TestInput')
        c1 = Convolution2D(32, 3, 3, activation='relu')(it)
        b1 = Activation('relu')(c1)
        c2 = Convolution2D(32, 3, 3, activation='relu')(it)
        b2 = Activation('relu')(c2)
        c3 = Convolution2D(32, 3, 3, activation='relu')(it)
        b3 = Activation('relu')(c3)
        m1 = merge([b1, b2, b3], mode='sum')
        a1 = Flatten()(m1)
        d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
        model = Model(input=it, output=d1)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        self.assertEqual(10, len(layers))
        self.assertEqual(1, len(model_inputs))
        self.assertEqual(1, len(model_outputs))
        self.assertEqual("TestInput", layers[0].get_name())
        self.assertEqual("TestOutput", layers[-1].get_name())
        self.assertTrue(topo_check(layers))
    def test_skip_connnection(self):
        """A residual-style skip (input merged with conv output) parses."""
        it = Input(shape=(3,32,32), name='TestInput')
        c1 = Convolution2D(3, 3, 3, border_mode='same', dim_ordering='th')(it) #dim_ordering to force match on inputshape
        b1 = Activation('relu')(c1)
        m1 = merge([b1, it], mode='sum')
        a1 = Flatten()(m1)
        d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
        model = Model(input=it, output=d1)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        self.assertEqual(6, len(layers))
        self.assertEqual(1, len(model_inputs))
        self.assertEqual(1, len(model_outputs))
        self.assertEqual("TestInput", layers[0].get_name())
        self.assertEqual("TestOutput", layers[-1].get_name())
        self.assertTrue(topo_check(layers))
    def test_complex_skip(self):
        """A DAG with multiple interleaved merges yields a topological order."""
        l1 = Input((3,32,32), name='1')
        l2 = Activation('relu', name='2')(l1)
        l3 = Activation('relu', name='3')(l1)
        l4 = Activation('relu', name='4')(l2)
        l5 = merge([l2,l3], name='5')
        l6 = merge([l1,l4], name='6')
        l7 = Activation('relu', name='7')(l5)
        l8 = merge([l6,l7], name='8')
        model = Model(input=l1, output=l8)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        self.assertEqual('1', layers[0].get_name())
        self.assertTrue(topo_check(layers))
        # Exact expected ordering for this particular graph.
        self.assertListEqual(['1','2','3','4','5','7','6','8'], [l.get_name() for l in layers])
class ReplicationTestCase(unittest.TestCase):
    """Tests for replicate_layerlist: copies must mirror the originals."""
    def test_replication_layer_properties(self):
        """Replicas keep class name, layer name and config of the originals."""
        #use keras layers to quickly fill the list
        l1 = Input((3, 32, 32), name='1')
        l2 = Activation('relu', name='2')(l1)
        l3 = Activation('relu', name='3')(l1)
        l4 = Activation('relu', name='4')(l2)
        l5 = merge([l2, l3], name='5')
        l6 = merge([l1, l4], name='6')
        l7 = Activation('relu', name='7')(l5)
        l8 = merge([l6, l7], name='8')
        model = Model(input=l1, output=l8)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        repl_list = replicate_layerlist(layers)
        for l1, l2 in zip(layers, repl_list):
            self.assertEqual(l1.class_name, l2.class_name)
            self.assertEqual(l1.get_name(), l2.get_name())
            self.assertDictEqual(l1.config, l2.config)
    def test_replication_layer_connections(self):
        """Replicas at matching positions have matching inbound layers."""
        # use keras layers to quickly fill the list
        l1 = Input((3, 32, 32), name='1')
        l2 = Activation('relu', name='2')(l1)
        l3 = Activation('relu', name='3')(l1)
        l4 = Activation('relu', name='4')(l2)
        l5 = merge([l2, l3], name='5')
        l6 = merge([l1, l4], name='6')
        l7 = Activation('relu', name='7')(l5)
        l8 = merge([l6, l7], name='8')
        model = Model(input=l1, output=l8)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        def assertSameLayer(l1, l2):
            # Helper: two layer objects describe the same layer.
            self.assertEqual(l1.class_name, l2.class_name)
            self.assertEqual(l1.get_name(), l2.get_name())
            self.assertDictEqual(l1.config, l2.config)
        repl_list = replicate_layerlist(layers)
        for l1, l2 in zip(layers, repl_list):
            # build matching inbound lists
            for il in l1.get_inbound():
                for il2 in l2.get_inbound():
                    if layers.index(il) == repl_list.index(il2):
                        assertSameLayer(il, il2)
    def test_replication_layer_con_consitency(self):
        """Replicating a sublist drops connections to layers outside it."""
        # use keras layers to quickly fill the list
        l1 = Input((3, 32, 32), name='1')
        l2 = Activation('relu', name='2')(l1)
        l3 = Activation('relu', name='3')(l1)
        l4 = Activation('relu', name='4')(l2)
        l5 = merge([l2, l3], name='5')
        l6 = merge([l1, l4], name='6')
        l7 = Activation('relu', name='7')(l5)
        l8 = merge([l6, l7], name='8')
        model = Model(input=l1, output=l8)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        llayers = layers[3:] # only take 4, 5, 6, 7, 8
        repl_layers = replicate_layerlist(llayers)
        self.assertEqual(0, len(repl_layers[0].get_inbound())) # no connections for 4 been inserted
        self.assertEqual(0, len(repl_layers[1].get_inbound())) # no connections for 5 has been inserted
        self.assertEqual(1, len(repl_layers[3].get_inbound())) # only connection to 4 has been included for 6
        def assertSameLayer(l1, l2):
            # Helper: two layer objects describe the same layer.
            self.assertEqual(l1.class_name, l2.class_name)
            self.assertEqual(l1.get_name(), l2.get_name())
            self.assertDictEqual(l1.config, l2.config)
        assertSameLayer(list(repl_layers[3].get_inbound())[0], layers[3])
    def test_xspot_replication(self):
        """Inserted XSpot layers are replicated with their position and link."""
        # use keras layers to quickly fill the list
        l1 = Input((3, 32, 32), name='1')
        l2 = Activation('relu', name='2')(l1)
        l3 = Activation('relu', name='3')(l1)
        l4 = Activation('relu', name='4')(l2)
        l5 = merge([l2, l3], name='5')
        l6 = merge([l1, l4], name='6')
        l7 = Activation('relu', name='7')(l5)
        l8 = merge([l6, l7], name='8')
        model = Model(input=l1, output=l8)
        layers, model_inputs, model_outputs = parse_model_description(desc(model))
        xspot = XLayerBP.insertXspot(layers[4], 16)
        layers.insert(5, xspot)
        repl_layers = replicate_layerlist(layers)
        self.assertEqual('XSpot', repl_layers[5].class_name)
        self.assertEqual(4, repl_layers.index(list(repl_layers[5].get_inbound())[0]))
class XLayerTestCase(unittest.TestCase):
    """Tests for XLayerBP.insertXspot on a parsed fan-out model."""

    def _parsed_fanout_layers(self):
        # One input feeding three Activation branches, merged back together.
        inp = Input((3, 32, 32), name='1')
        branch_a = Activation('relu', name='2')(inp)
        branch_b = Activation('relu', name='3')(inp)
        branch_c = Activation('relu', name='4')(inp)
        merged = merge([branch_a, branch_b, branch_c], name='5')
        layers, _model_inputs, _model_outputs = parse_model_description(desc(Model(inp, merged)))
        return layers

    def test_xspot_insertion_simple(self):
        """Inserting an xspot after a layer reroutes that layer's outbound edge."""
        layers = self._parsed_fanout_layers()
        xspot = XLayerBP.insertXspot(layers[2], 16)  # insert after layer '3'
        # layer '3' must now feed only the new xspot
        self.assertEqual(list(layers[2]._get_outbound())[0], xspot)

    def test_xspot_insertion_branching(self):
        """The merge layer keeps its untouched inputs and gains the xspot."""
        layers = self._parsed_fanout_layers()
        xspot = XLayerBP.insertXspot(layers[2], 16)  # insert after layer '3'
        inbound = layers[-1].get_inbound()
        self.assertTrue(xspot in inbound)
        self.assertTrue(layers[1] in inbound)
        self.assertTrue(layers[3] in inbound)
class RenderingTestCase(unittest.TestCase):
    """Tests that render() reproduces a parsed model description faithfully.

    Each test compares the rendered description against the original Keras
    JSON description: input/output layer references must match, and every
    layer must reappear with the same config and inbound connections.  The
    for/else blocks assert failure when no matching entry is found.
    """
    def test_parse_render(self):
        """parse followed by render round-trips an unmodified model."""
        it = Input(shape=(3, 32, 32), name='TestInput')
        c1 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b1 = Activation('relu')(c1)
        c2 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b2 = Activation('relu')(c2)
        c3 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b3 = Activation('relu')(c3)
        m1 = merge([b1, b2, b3], mode='sum')
        a1 = Flatten()(m1)
        d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
        model = Model(input=it, output=d1)
        mdescs = desc(model)
        layers, model_inputs, model_outputs = parse_model_description(mdescs)
        rend_descs = render(model_inputs, layers, model_outputs)
        # Every original input reference must appear in the rendered output.
        for inp in mdescs['input_layers']:
            nm, p1, p2 = inp[0], inp[1], inp[2]
            for inp2 in rend_descs['input_layers']:
                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                if nm2 == nm and p12==p1 and p22==p2:
                    self.assertTrue(True)
                    break
            else:
                self.assertTrue(False)
        # Same for the output references.
        for inp in mdescs['output_layers']:
            nm, p1, p2 = inp[0], inp[1], inp[2]
            for inp2 in rend_descs['output_layers']:
                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                if nm2 == nm and p12==p1 and p22==p2:
                    self.assertTrue(True)
                    break
            else:
                self.assertTrue(False)
        # Every layer must reappear with identical config and inbound nodes.
        for layer in mdescs['layers']:
            for llayer in rend_descs['layers']:
                if layer['name'] == llayer['name']:
                    self.assertDictEqual(layer['config'], llayer['config'])
                    if len(layer['inbound_nodes']) > 0:
                        for inp in layer['inbound_nodes'][0]:
                            nm, p1, p2 = inp[0], inp[1], inp[2]
                            for inp2 in llayer['inbound_nodes'][0]:
                                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                                if nm2 == nm and p12 == p1 and p22 == p2:
                                    self.assertTrue(True)
                                    break
                            else:
                                self.assertTrue(False)
                    break
            else:
                self.assertTrue(False)
    def test_render_xsport_skip(self):
        """Rendering with skipped xspots after convolutions stays faithful."""
        it = Input(shape=(3, 32, 32), name='TestInput')
        c1 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b1 = Activation('relu')(c1)
        c2 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b2 = Activation('relu')(c2)
        c3 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b3 = Activation('relu')(c3)
        m1 = merge([b1, b2, b3], mode='sum')
        a1 = Flatten()(m1)
        d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
        model = Model(input=it, output=d1)
        mdescs = desc(model)
        layers, model_inputs, model_outputs = parse_model_description(mdescs)
        # Insert an xspot after every convolution before rendering.
        xspots = []
        for i,layer in enumerate(layers):
            if layer.class_name == "Convolution2D":
                xspot = XLayerBP.insertXspot(layer, 32)
                xspots.append((i, xspot))
        for c, (i, xs) in enumerate(xspots):
            layers.insert(i+1+c, xs)
        rend_descs = render(model_inputs, layers, model_outputs)
        # Every original input reference must appear in the rendered output.
        for inp in mdescs['input_layers']:
            nm, p1, p2 = inp[0], inp[1], inp[2]
            for inp2 in rend_descs['input_layers']:
                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                if nm2 == nm and p12==p1 and p22==p2:
                    self.assertTrue(True)
                    break
            else:
                self.assertTrue(False)
        # Same for the output references.
        for inp in mdescs['output_layers']:
            nm, p1, p2 = inp[0], inp[1], inp[2]
            for inp2 in rend_descs['output_layers']:
                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                if nm2 == nm and p12==p1 and p22==p2:
                    self.assertTrue(True)
                    break
            else:
                self.assertTrue(False)
        # Every original layer must reappear unchanged (xspots are skipped).
        for layer in mdescs['layers']:
            for llayer in rend_descs['layers']:
                if layer['name'] == llayer['name']:
                    self.assertDictEqual(layer['config'], llayer['config'])
                    if len(layer['inbound_nodes']) > 0:
                        for inp in layer['inbound_nodes'][0]:
                            nm, p1, p2 = inp[0], inp[1], inp[2]
                            for inp2 in llayer['inbound_nodes'][0]:
                                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                                if nm2 == nm and p12 == p1 and p22 == p2:
                                    self.assertTrue(True)
                                    break
                            else:
                                self.assertTrue(False)
                    break
            else:
                self.assertTrue(False)
    def test_render_xsport_skip_merge(self):
        """Rendering with xspots after activations (feeding the merge) stays faithful."""
        it = Input(shape=(3, 32, 32), name='TestInput')
        c1 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b1 = Activation('relu')(c1)
        c2 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b2 = Activation('relu')(c2)
        c3 = Convolution2D(32, 3, 3, activation='relu', dim_ordering='th')(it)
        b3 = Activation('relu')(c3)
        m1 = merge([b1, b2, b3], mode='sum')
        a1 = Flatten()(m1)
        d1 = Dense(10, activation='softmax', name="TestOutput")(a1)
        model = Model(input=it, output=d1)
        mdescs = desc(model)
        layers, model_inputs, model_outputs = parse_model_description(mdescs)
        # Insert an xspot after every activation before rendering.
        xspots = []
        for i, layer in enumerate(layers):
            if layer.class_name == "Activation":
                xspot = XLayerBP.insertXspot(layer, 32)
                xspots.append((i, xspot))
        for c, (i, xs) in enumerate(xspots):
            layers.insert(i + 1 + c, xs)
        rend_descs = render(model_inputs, layers, model_outputs)
        # Every original input reference must appear in the rendered output.
        for inp in mdescs['input_layers']:
            nm, p1, p2 = inp[0], inp[1], inp[2]
            for inp2 in rend_descs['input_layers']:
                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                if nm2 == nm and p12==p1 and p22==p2:
                    self.assertTrue(True)
                    break
            else:
                self.assertTrue(False)
        # Same for the output references.
        for inp in mdescs['output_layers']:
            nm, p1, p2 = inp[0], inp[1], inp[2]
            for inp2 in rend_descs['output_layers']:
                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                if nm2 == nm and p12==p1 and p22==p2:
                    self.assertTrue(True)
                    break
            else:
                self.assertTrue(False)
        # Every original layer must reappear unchanged (xspots are skipped).
        for layer in mdescs['layers']:
            for llayer in rend_descs['layers']:
                if layer['name'] == llayer['name']:
                    self.assertDictEqual(layer['config'], llayer['config'])
                    if len(layer['inbound_nodes']) > 0:
                        for inp in layer['inbound_nodes'][0]:
                            nm, p1, p2 = inp[0], inp[1], inp[2]
                            for inp2 in llayer['inbound_nodes'][0]:
                                nm2, p12, p22 = inp2[0], inp2[1], inp2[2]
                                if nm2 == nm and p12 == p1 and p22 == p2:
                                    self.assertTrue(True)
                                    break
                            else:
                                self.assertTrue(False)
                    break
            else:
                self.assertTrue(False)
# Allow running this test module directly: discovers and runs all TestCases.
if __name__=="__main__":
    unittest.main()
"keras.layers.Convolution2D",
"keras.layers.Flatten",
"keras.layers.merge",
"keras.layers.Dense",
"keras.layers.Input",
"keras.models.Model",
"keras.layers.Activation",
"unittest.main"
] | [((18754, 18769), 'unittest.main', 'unittest.main', ([], {}), '()\n', (18767, 18769), False, 'import unittest\n'), ((769, 811), 'keras.layers.Input', 'Input', ([], {'shape': '(3, 32, 32)', 'name': '"""TestInput"""'}), "(shape=(3, 32, 32), name='TestInput')\n", (774, 811), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((825, 885), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'name': '"""TestLayer"""'}), "(32, 3, 3, activation='relu', name='TestLayer')\n", (838, 885), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((1392, 1434), 'keras.layers.Input', 'Input', ([], {'shape': '(3, 32, 32)', 'name': '"""TestInput"""'}), "(shape=(3, 32, 32), name='TestInput')\n", (1397, 1434), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((1604, 1630), 'keras.models.Model', 'Model', ([], {'input': 'it', 'output': 'd1'}), '(input=it, output=d1)\n', (1609, 1630), False, 'from keras.models import Model\n'), ((2069, 2111), 'keras.layers.Input', 'Input', ([], {'shape': '(3, 32, 32)', 'name': '"""TestInput"""'}), "(shape=(3, 32, 32), name='TestInput')\n", (2074, 2111), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((2303, 2334), 'keras.layers.merge', 'merge', (['[c1, c2, c3]'], {'mode': '"""sum"""'}), "([c1, c2, c3], mode='sum')\n", (2308, 2334), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((2446, 2472), 'keras.models.Model', 'Model', ([], {'input': 'it', 'output': 'd1'}), '(input=it, output=d1)\n', (2451, 2472), False, 'from keras.models import Model\n'), ((2923, 2965), 'keras.layers.Input', 'Input', ([], {'shape': '(3, 32, 32)', 'name': '"""TestInput"""'}), "(shape=(3, 32, 32), name='TestInput')\n", (2928, 2965), False, 'from 
keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3265, 3296), 'keras.layers.merge', 'merge', (['[b1, b2, b3]'], {'mode': '"""sum"""'}), "([b1, b2, b3], mode='sum')\n", (3270, 3296), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3408, 3434), 'keras.models.Model', 'Model', ([], {'input': 'it', 'output': 'd1'}), '(input=it, output=d1)\n', (3413, 3434), False, 'from keras.models import Model\n'), ((3876, 3918), 'keras.layers.Input', 'Input', ([], {'shape': '(3, 32, 32)', 'name': '"""TestInput"""'}), "(shape=(3, 32, 32), name='TestInput')\n", (3881, 3918), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4088, 4115), 'keras.layers.merge', 'merge', (['[b1, it]'], {'mode': '"""sum"""'}), "([b1, it], mode='sum')\n", (4093, 4115), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4227, 4253), 'keras.models.Model', 'Model', ([], {'input': 'it', 'output': 'd1'}), '(input=it, output=d1)\n', (4232, 4253), False, 'from keras.models import Model\n'), ((4690, 4718), 'keras.layers.Input', 'Input', (['(3, 32, 32)'], {'name': '"""1"""'}), "((3, 32, 32), name='1')\n", (4695, 4718), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4868, 4893), 'keras.layers.merge', 'merge', (['[l2, l3]'], {'name': '"""5"""'}), "([l2, l3], name='5')\n", (4873, 4893), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4906, 4931), 'keras.layers.merge', 'merge', (['[l1, l4]'], {'name': '"""6"""'}), "([l1, l4], name='6')\n", (4911, 4931), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4990, 5015), 'keras.layers.merge', 'merge', (['[l6, l7]'], {'name': '"""8"""'}), "([l6, l7], name='8')\n", (4995, 
5015), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((5031, 5057), 'keras.models.Model', 'Model', ([], {'input': 'l1', 'output': 'l8'}), '(input=l1, output=l8)\n', (5036, 5057), False, 'from keras.models import Model\n'), ((5497, 5525), 'keras.layers.Input', 'Input', (['(3, 32, 32)'], {'name': '"""1"""'}), "((3, 32, 32), name='1')\n", (5502, 5525), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((5677, 5702), 'keras.layers.merge', 'merge', (['[l2, l3]'], {'name': '"""5"""'}), "([l2, l3], name='5')\n", (5682, 5702), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((5716, 5741), 'keras.layers.merge', 'merge', (['[l1, l4]'], {'name': '"""6"""'}), "([l1, l4], name='6')\n", (5721, 5741), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((5801, 5826), 'keras.layers.merge', 'merge', (['[l6, l7]'], {'name': '"""8"""'}), "([l6, l7], name='8')\n", (5806, 5826), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((5843, 5869), 'keras.models.Model', 'Model', ([], {'input': 'l1', 'output': 'l8'}), '(input=l1, output=l8)\n', (5848, 5869), False, 'from keras.models import Model\n'), ((6337, 6365), 'keras.layers.Input', 'Input', (['(3, 32, 32)'], {'name': '"""1"""'}), "((3, 32, 32), name='1')\n", (6342, 6365), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((6517, 6542), 'keras.layers.merge', 'merge', (['[l2, l3]'], {'name': '"""5"""'}), "([l2, l3], name='5')\n", (6522, 6542), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((6556, 6581), 'keras.layers.merge', 'merge', (['[l1, l4]'], {'name': '"""6"""'}), "([l1, l4], name='6')\n", (6561, 6581), False, 'from 
keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((6641, 6666), 'keras.layers.merge', 'merge', (['[l6, l7]'], {'name': '"""8"""'}), "([l6, l7], name='8')\n", (6646, 6666), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((6683, 6709), 'keras.models.Model', 'Model', ([], {'input': 'l1', 'output': 'l8'}), '(input=l1, output=l8)\n', (6688, 6709), False, 'from keras.models import Model\n'), ((7460, 7488), 'keras.layers.Input', 'Input', (['(3, 32, 32)'], {'name': '"""1"""'}), "((3, 32, 32), name='1')\n", (7465, 7488), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((7640, 7665), 'keras.layers.merge', 'merge', (['[l2, l3]'], {'name': '"""5"""'}), "([l2, l3], name='5')\n", (7645, 7665), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((7679, 7704), 'keras.layers.merge', 'merge', (['[l1, l4]'], {'name': '"""6"""'}), "([l1, l4], name='6')\n", (7684, 7704), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((7764, 7789), 'keras.layers.merge', 'merge', (['[l6, l7]'], {'name': '"""8"""'}), "([l6, l7], name='8')\n", (7769, 7789), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((7806, 7832), 'keras.models.Model', 'Model', ([], {'input': 'l1', 'output': 'l8'}), '(input=l1, output=l8)\n', (7811, 7832), False, 'from keras.models import Model\n'), ((8727, 8755), 'keras.layers.Input', 'Input', (['(3, 32, 32)'], {'name': '"""1"""'}), "((3, 32, 32), name='1')\n", (8732, 8755), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((8907, 8932), 'keras.layers.merge', 'merge', (['[l2, l3]'], {'name': '"""5"""'}), "([l2, l3], name='5')\n", (8912, 8932), False, 'from keras.layers import Input, 
MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((8946, 8971), 'keras.layers.merge', 'merge', (['[l1, l4]'], {'name': '"""6"""'}), "([l1, l4], name='6')\n", (8951, 8971), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((9031, 9056), 'keras.layers.merge', 'merge', (['[l6, l7]'], {'name': '"""8"""'}), "([l6, l7], name='8')\n", (9036, 9056), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((9073, 9099), 'keras.models.Model', 'Model', ([], {'input': 'l1', 'output': 'l8'}), '(input=l1, output=l8)\n', (9078, 9099), False, 'from keras.models import Model\n'), ((9565, 9593), 'keras.layers.Input', 'Input', (['(3, 32, 32)'], {'name': '"""1"""'}), "((3, 32, 32), name='1')\n", (9570, 9593), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((9745, 9774), 'keras.layers.merge', 'merge', (['[l2, l3, l4]'], {'name': '"""5"""'}), "([l2, l3, l4], name='5')\n", (9750, 9774), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((9791, 9804), 'keras.models.Model', 'Model', (['l1', 'l5'], {}), '(l1, l5)\n', (9796, 9804), False, 'from keras.models import Model\n'), ((10141, 10169), 'keras.layers.Input', 'Input', (['(3, 32, 32)'], {'name': '"""1"""'}), "((3, 32, 32), name='1')\n", (10146, 10169), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((10321, 10350), 'keras.layers.merge', 'merge', (['[l2, l3, l4]'], {'name': '"""5"""'}), "([l2, l3, l4], name='5')\n", (10326, 10350), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((10367, 10380), 'keras.models.Model', 'Model', (['l1', 'l5'], {}), '(l1, l5)\n', (10372, 10380), False, 'from keras.models import Model\n'), ((10841, 10883), 'keras.layers.Input', 'Input', ([], 
{'shape': '(3, 32, 32)', 'name': '"""TestInput"""'}), "(shape=(3, 32, 32), name='TestInput')\n", (10846, 10883), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((11242, 11273), 'keras.layers.merge', 'merge', (['[b1, b2, b3]'], {'mode': '"""sum"""'}), "([b1, b2, b3], mode='sum')\n", (11247, 11273), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((11385, 11411), 'keras.models.Model', 'Model', ([], {'input': 'it', 'output': 'd1'}), '(input=it, output=d1)\n', (11390, 11411), False, 'from keras.models import Model\n'), ((13287, 13329), 'keras.layers.Input', 'Input', ([], {'shape': '(3, 32, 32)', 'name': '"""TestInput"""'}), "(shape=(3, 32, 32), name='TestInput')\n", (13292, 13329), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13688, 13719), 'keras.layers.merge', 'merge', (['[b1, b2, b3]'], {'mode': '"""sum"""'}), "([b1, b2, b3], mode='sum')\n", (13693, 13719), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13831, 13857), 'keras.models.Model', 'Model', ([], {'input': 'it', 'output': 'd1'}), '(input=it, output=d1)\n', (13836, 13857), False, 'from keras.models import Model\n'), ((16034, 16076), 'keras.layers.Input', 'Input', ([], {'shape': '(3, 32, 32)', 'name': '"""TestInput"""'}), "(shape=(3, 32, 32), name='TestInput')\n", (16039, 16076), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16435, 16466), 'keras.layers.merge', 'merge', (['[b1, b2, b3]'], {'mode': '"""sum"""'}), "([b1, b2, b3], mode='sum')\n", (16440, 16466), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16578, 16604), 'keras.models.Model', 'Model', ([], {'input': 'it', 'output': 'd1'}), '(input=it, output=d1)\n', (16583, 16604), False, 
'from keras.models import Model\n'), ((1446, 1488), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(32, 3, 3, activation='relu')\n", (1459, 1488), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((1506, 1515), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1513, 1515), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((1533, 1583), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""TestOutput"""'}), "(10, activation='softmax', name='TestOutput')\n", (1538, 1583), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((2123, 2165), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(32, 3, 3, activation='relu')\n", (2136, 2165), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((2183, 2225), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(32, 3, 3, activation='relu')\n", (2196, 2225), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((2243, 2285), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(32, 3, 3, activation='relu')\n", (2256, 2285), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((2348, 2357), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2355, 2357), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((2375, 2425), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""TestOutput"""'}), "(10, activation='softmax', name='TestOutput')\n", (2380, 2425), False, 
'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((2977, 3019), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(32, 3, 3, activation='relu')\n", (2990, 3019), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3037, 3055), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3047, 3055), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3073, 3115), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(32, 3, 3, activation='relu')\n", (3086, 3115), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3133, 3151), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3143, 3151), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3169, 3211), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(32, 3, 3, activation='relu')\n", (3182, 3211), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3229, 3247), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3239, 3247), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3310, 3319), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3317, 3319), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((3337, 3387), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""TestOutput"""'}), "(10, activation='softmax', name='TestOutput')\n", (3342, 3387), False, 'from keras.layers import Input, MaxPooling2D, 
Convolution2D, Activation, merge, Dense, Flatten\n'), ((3930, 3991), 'keras.layers.Convolution2D', 'Convolution2D', (['(3)', '(3)', '(3)'], {'border_mode': '"""same"""', 'dim_ordering': '"""th"""'}), "(3, 3, 3, border_mode='same', dim_ordering='th')\n", (3943, 3991), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4052, 4070), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4062, 4070), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4129, 4138), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4136, 4138), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4156, 4206), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""TestOutput"""'}), "(10, activation='softmax', name='TestOutput')\n", (4161, 4206), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4730, 4758), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""2"""'}), "('relu', name='2')\n", (4740, 4758), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4776, 4804), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""3"""'}), "('relu', name='3')\n", (4786, 4804), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4822, 4850), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""4"""'}), "('relu', name='4')\n", (4832, 4850), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((4944, 4972), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""7"""'}), "('relu', name='7')\n", (4954, 4972), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, 
Activation, merge, Dense, Flatten\n'), ((5539, 5567), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""2"""'}), "('relu', name='2')\n", (5549, 5567), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((5585, 5613), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""3"""'}), "('relu', name='3')\n", (5595, 5613), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((5631, 5659), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""4"""'}), "('relu', name='4')\n", (5641, 5659), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((5755, 5783), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""7"""'}), "('relu', name='7')\n", (5765, 5783), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((6379, 6407), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""2"""'}), "('relu', name='2')\n", (6389, 6407), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((6425, 6453), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""3"""'}), "('relu', name='3')\n", (6435, 6453), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((6471, 6499), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""4"""'}), "('relu', name='4')\n", (6481, 6499), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((6595, 6623), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""7"""'}), "('relu', name='7')\n", (6605, 6623), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((7502, 7530), 
'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""2"""'}), "('relu', name='2')\n", (7512, 7530), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((7548, 7576), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""3"""'}), "('relu', name='3')\n", (7558, 7576), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((7594, 7622), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""4"""'}), "('relu', name='4')\n", (7604, 7622), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((7718, 7746), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""7"""'}), "('relu', name='7')\n", (7728, 7746), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((8769, 8797), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""2"""'}), "('relu', name='2')\n", (8779, 8797), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((8815, 8843), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""3"""'}), "('relu', name='3')\n", (8825, 8843), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((8861, 8889), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""4"""'}), "('relu', name='4')\n", (8871, 8889), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((8985, 9013), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""7"""'}), "('relu', name='7')\n", (8995, 9013), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((9607, 9635), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': 
'"""2"""'}), "('relu', name='2')\n", (9617, 9635), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((9653, 9681), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""3"""'}), "('relu', name='3')\n", (9663, 9681), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((9699, 9727), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""4"""'}), "('relu', name='4')\n", (9709, 9727), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((10183, 10211), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""2"""'}), "('relu', name='2')\n", (10193, 10211), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((10229, 10257), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""3"""'}), "('relu', name='3')\n", (10239, 10257), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((10275, 10303), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""4"""'}), "('relu', name='4')\n", (10285, 10303), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((10897, 10958), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (10910, 10958), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((10976, 10994), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10986, 10994), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((11012, 11073), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', 
'(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (11025, 11073), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((11091, 11109), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11101, 11109), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((11127, 11188), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (11140, 11188), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((11206, 11224), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11216, 11224), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((11287, 11296), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (11294, 11296), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((11314, 11364), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""TestOutput"""'}), "(10, activation='softmax', name='TestOutput')\n", (11319, 11364), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13343, 13404), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (13356, 13404), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13422, 13440), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13432, 13440), False, 'from keras.layers import Input, MaxPooling2D, 
Convolution2D, Activation, merge, Dense, Flatten\n'), ((13458, 13519), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (13471, 13519), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13537, 13555), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13547, 13555), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13573, 13634), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (13586, 13634), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13652, 13670), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13662, 13670), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13733, 13742), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (13740, 13742), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((13760, 13810), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""TestOutput"""'}), "(10, activation='softmax', name='TestOutput')\n", (13765, 13810), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16090, 16151), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (16103, 16151), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16169, 16187), 
'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (16179, 16187), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16205, 16266), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (16218, 16266), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16284, 16302), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (16294, 16302), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16320, 16381), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'activation': '"""relu"""', 'dim_ordering': '"""th"""'}), "(32, 3, 3, activation='relu', dim_ordering='th')\n", (16333, 16381), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16399, 16417), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (16409, 16417), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16480, 16489), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (16487, 16489), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n'), ((16507, 16557), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""TestOutput"""'}), "(10, activation='softmax', name='TestOutput')\n", (16512, 16557), False, 'from keras.layers import Input, MaxPooling2D, Convolution2D, Activation, merge, Dense, Flatten\n')] |
import time
from annotypes import Anno, add_call_types
from malcolm.core import PartRegistrar
from malcolm.modules import builtin
# Pull re-used annotypes into our namespace in case we are subclassed
APartName = builtin.parts.APartName
AMri = builtin.parts.AMri
# Anno attaches the description string to the aliased type so generated
# APIs can display it (annotypes convention)
with Anno("The demand value to move our counter motor to"):
    ADemand = float
with Anno("The amount of time to get to the demand position"):
    ADuration = float
# How long between ticks of the "motor" position while moving (seconds)
UPDATE_TICK = 0.1
# We will set these attributes on the child block, so don't save them
@builtin.util.no_save("counter")
class CounterMovePart(builtin.parts.ChildPart):
    """Provides control of a `counter_block` within a `ManagerController`"""
    def __init__(self, name, mri):
        # type: (APartName, AMri) -> None
        """Create the part.

        Args:
            name: Name of this part within the parent controller
            mri: Malcolm resource id of the child counter block
        """
        super(CounterMovePart, self).__init__(
            name, mri, stateful=False, initial_visibility=True)
    def setup(self, registrar):
        # type: (PartRegistrar) -> None
        """Register the move() Method with the parent controller."""
        super(CounterMovePart, self).setup(registrar)
        # Method
        # Publish move() under the name "<name>Move"; needs_context=True so a
        # context object is injected when the Method is called
        registrar.add_method_model(
            self.move, self.name + "Move", needs_context=True)
    @add_call_types
    def move(self, context, demand, duration=0):
        # type: (builtin.hooks.AContext, ADemand, ADuration) -> None
        """Move the counter to the demand value, taking duration seconds like
        a motor would do"""
        start = time.time()
        child = context.block_view(self.mri)
        # Total distance to travel from the current counter value
        distance = demand - child.counter.value
        remaining = duration
        # "Move" the motor, ticking at UPDATE_TICK rate
        while remaining > 0:
            # Linearly interpolate towards demand by the fraction of time left
            child.counter.put_value(demand - distance * remaining / duration)
            context.sleep(min(remaining, UPDATE_TICK))
            remaining = start + duration - time.time()
        # Final move to make sure we end up at the right place
        child.counter.put_value(demand)
| [
"malcolm.modules.builtin.util.no_save",
"annotypes.Anno",
"time.time"
] | [((585, 616), 'malcolm.modules.builtin.util.no_save', 'builtin.util.no_save', (['"""counter"""'], {}), "('counter')\n", (605, 616), False, 'from malcolm.modules import builtin\n'), ((271, 324), 'annotypes.Anno', 'Anno', (['"""The demand value to move our counter motor to"""'], {}), "('The demand value to move our counter motor to')\n", (275, 324), False, 'from annotypes import Anno, add_call_types\n'), ((351, 407), 'annotypes.Anno', 'Anno', (['"""The amount of time to get to the demand position"""'], {}), "('The amount of time to get to the demand position')\n", (355, 407), False, 'from annotypes import Anno, add_call_types\n'), ((1435, 1446), 'time.time', 'time.time', ([], {}), '()\n', (1444, 1446), False, 'import time\n'), ((1830, 1841), 'time.time', 'time.time', ([], {}), '()\n', (1839, 1841), False, 'import time\n')] |
# Copyright 2021 JD.com, Inc., JD AI
"""
@author: <NAME>
@contact: <EMAIL>
"""
import torch
import torch.nn as nn
__all__ = ["AttentionPooler"]  # names exported via ``from <module> import *``
class AttentionPooler(nn.Module):
    """Pool a sequence of hidden states into one vector via learned
    additive attention, then project the result to ``output_size``.
    """

    def __init__(
        self,
        *,
        hidden_size: int,
        output_size: int,
        dropout: float,
        use_bn: bool
    ):
        super().__init__()
        # Two-layer MLP that scores every sequence position with one logit.
        scorer_layers = [
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout),
            nn.Linear(hidden_size, 1),
        ]
        self.att = nn.Sequential(*scorer_layers)
        self.embed = nn.Linear(hidden_size, output_size)
        self.softmax = nn.Softmax(dim=-1)
        self.bn = nn.BatchNorm1d(output_size) if use_bn else None

    def forward(self, hidden_states, masks=None, **kwargs):
        """Compute an attention-weighted summary of ``hidden_states``.

        Args:
            hidden_states: Tensor shaped (batch, steps, hidden_size) -- the
                matmul below implies this layout.
            masks: Optional additive mask reshaped to (batch, steps); large
                negative entries suppress those positions after softmax.

        Returns:
            Tensor shaped (batch, output_size).
        """
        logits = self.att(hidden_states).squeeze(-1)
        if masks is not None:
            logits = logits + masks.view(logits.size(0), -1)
        weights = self.softmax(logits)
        # Weighted sum over the sequence dimension, then project.
        pooled = weights.unsqueeze(1).matmul(hidden_states).squeeze(1)
        pooled = self.embed(pooled)
        return pooled if self.bn is None else self.bn(pooled)
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.nn.BatchNorm1d",
"torch.nn.Linear"
] | [((599, 634), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (608, 634), True, 'import torch.nn as nn\n'), ((658, 676), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (668, 676), True, 'import torch.nn as nn\n'), ((423, 458), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (432, 458), True, 'import torch.nn as nn\n'), ((472, 493), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (479, 493), True, 'import torch.nn as nn\n'), ((507, 528), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (517, 528), True, 'import torch.nn as nn\n'), ((542, 567), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (551, 567), True, 'import torch.nn as nn\n'), ((695, 722), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['output_size'], {}), '(output_size)\n', (709, 722), True, 'import torch.nn as nn\n')] |
"""
Signal endpoints that can be used in testbenches
"""
import textwrap
from typing import Dict
import sonar.base_types as base
class Endpoint(base.SonarObject):
    """Base class for signal endpoints used in generated testbenches.

    Subclasses override :meth:`instantiate` to emit endpoint logic and
    populate :attr:`arguments` with their configuration values.
    """

    # Configuration values consumed by the endpoint's generated logic.
    arguments: Dict[str, int] = {}

    @classmethod
    def instantiate(cls, _indent):
        """Return the text that instantiates this endpoint's logic.

        Args:
            _indent (str): Indentation to add to each line

        Returns:
            str: Instantiation text (empty for the base class)
        """
        return ""

    @classmethod
    def asdict(cls):
        """Return a dict describing which hooks this endpoint implements."""
        return {"instantiate": False}
class InterfaceEndpoint(Endpoint):
    """Base class for endpoints that drive a testbench interface."""

    # Metadata describing the actions this endpoint supports.
    actions: Dict[str, Dict] = {}

    @staticmethod
    def import_packages_local(_interface):
        """Specify packages that must be imported once per endpoint.

        Args:
            _interface (Interface): The interface belonging to the endpoint

        Returns:
            str: Packages to be imported (none for the base class)
        """
        return ""

    @staticmethod
    def initial_blocks(_indent):
        """Return text destined for separate ``initial`` blocks.

        Args:
            _indent (str): Indentation to add to each line

        Returns:
            list[str]: One string per initial block (empty here)
        """
        return []

    @staticmethod
    def prologue(_indent):
        """Return testbench prologue text such as variable declarations,
        placed outside any blocks.

        Args:
            _indent (str): Indentation to add to each line

        Returns:
            str: Prologue text (empty for the base class)
        """
        return ""

    @staticmethod
    def source_tcl(_interface, _path):
        """Source any TCL files needed to initialize the interface.

        Args:
            _interface (AXI4LiteSlave): AXI4LiteSlave object
            _path (str): Path where to place the TCL source files
        """
        return None

    @classmethod
    def asdict(cls):
        """Describe which hooks this endpoint class implements."""
        tmp = super().asdict()
        tmp.update(
            {
                "import_packages_local": False,
                "initial_blocks": False,
                "source_tcl": False,
                "prologue": False,
            }
        )
        return tmp
class PeriodicSignal(Endpoint):
    """Endpoint that toggles a signal forever with a fixed period."""

    @classmethod
    def instantiate(cls, indent):
        """Emit an SV ``initial`` block that toggles the signal forever.

        Reads ``name``, ``value`` and ``period`` from :attr:`arguments`.

        Args:
            indent (str): Indentation to add to each line

        Returns:
            str: The indented initial block
        """
        signal = cls.arguments["name"]
        start_value = cls.arguments["value"]
        period = cls.arguments["period"]
        raw = textwrap.dedent(
            f"""\
            initial begin
                {signal}_endpoint[$$endpointIndex] = {start_value};
                forever begin
                    #({period}/2) {signal}_endpoint[$$endpointIndex] <= ~{signal}_endpoint[$$endpointIndex];
                end
            end
            """
        )
        return textwrap.indent(raw, indent)

    @classmethod
    def asdict(cls):
        """Return the hook-description dict (mirrors the base class)."""
        return {"instantiate": False}
| [
"textwrap.dedent"
] | [((2842, 3153), 'textwrap.dedent', 'textwrap.dedent', (['f""" initial begin\n {name}_endpoint[$$endpointIndex] = {initial_value};\n forever begin\n #({period}/2) {name}_endpoint[$$endpointIndex] <= ~{name}_endpoint[$$endpointIndex];\n end\n end\n """'], {}), '(\n f""" initial begin\n {name}_endpoint[$$endpointIndex] = {initial_value};\n forever begin\n #({period}/2) {name}_endpoint[$$endpointIndex] <= ~{name}_endpoint[$$endpointIndex];\n end\n end\n """\n )\n', (2857, 3153), False, 'import textwrap\n')] |
#!/usr/bin/env python
"""
Generate the file classes_to_be_wrapped.hpp, which contains includes,
instantiation and naming typedefs for all classes that are to be
automatically wrapped.
"""
import os
import ntpath
class CppHeaderCollectionWriter():

    """
    This class manages generation of the header collection file for
    parsing by CastXML
    """

    def __init__(self, package_info, wrapper_root):
        """
        Args:
            package_info: Description of the package (modules, classes,
                free functions, source hpp files)
            wrapper_root (str): Directory the header collection is written to
        """

        self.wrapper_root = wrapper_root
        self.package_info = package_info
        self.header_file_name = "wrapper_header_collection.hpp"
        self.hpp_string = ""

        # Index class and free-function info by name for quick lookup
        self.class_dict = {}
        self.free_func_dict = {}
        for eachModule in self.package_info.module_info:
            for eachClassInfo in eachModule.class_info:
                self.class_dict[eachClassInfo.name] = eachClassInfo
            for eachFuncInfo in eachModule.free_function_info:
                self.free_func_dict[eachFuncInfo.name] = eachFuncInfo

    def add_custom_header_code(self):

        """
        Hook for subclasses to append custom header code; no-op here.
        """

        pass

    def write_file(self):

        """
        Write the collected header string to disk, creating the output
        directory if needed.
        """

        if not os.path.exists(self.wrapper_root + "/"):
            os.makedirs(self.wrapper_root + "/")
        file_path = self.wrapper_root + "/" + self.header_file_name
        # Context manager ensures the file is closed even if the write fails
        with open(file_path, 'w') as hpp_file:
            hpp_file.write(self.hpp_string)

    def should_include_all(self):

        """
        Return whether all source files in the module source locs should be included
        """

        return any(eachModule.use_all_classes or eachModule.use_all_free_functions
                   for eachModule in self.package_info.module_info)

    def write(self):

        """
        Main method for generating the header file output string
        """

        hpp_header_dict = {'package_name': self.package_info.name}
        hpp_header_template = """\
#ifndef {package_name}_HEADERS_HPP_
#define {package_name}_HEADERS_HPP_

// Includes
"""
        self.hpp_string = hpp_header_template.format(**hpp_header_dict)

        # Now our own includes: either every source hpp, or only the files
        # referenced by the collected class/function info
        if self.should_include_all():
            for eachFile in self.package_info.source_hpp_files:
                include_name = ntpath.basename(eachFile)
                self.hpp_string += '#include "' + include_name + '"\n'
        else:
            for eachModule in self.package_info.module_info:
                for eachClassInfo in eachModule.class_info:
                    if eachClassInfo.source_file is not None:
                        self.hpp_string += '#include "' + eachClassInfo.source_file + '"\n'
                    elif eachClassInfo.source_file_full_path is not None:
                        include_name = ntpath.basename(eachClassInfo.source_file_full_path)
                        self.hpp_string += '#include "' + include_name + '"\n'
                for eachFuncInfo in eachModule.free_function_info:
                    if eachFuncInfo.source_file_full_path is not None:
                        include_name = ntpath.basename(eachFuncInfo.source_file_full_path)
                        self.hpp_string += '#include "' + include_name + '"\n'

        # Add the template instantiations; classes with a single (non-template)
        # name need no explicit instantiation
        self.hpp_string += "\n// Instantiate Template Classes \n"
        for eachModule in self.package_info.module_info:
            for eachClassInfo in eachModule.class_info:
                full_names = eachClassInfo.get_full_names()
                if len(full_names) == 1:
                    continue
                prefix = "template class "
                for eachTemplateName in full_names:
                    self.hpp_string += prefix + eachTemplateName.replace(" ","") + ";\n"

        # Add typedefs so each template instantiation gets a short name
        self.hpp_string += "\n// Typedef for nicer naming\n"
        self.hpp_string += "namespace cppwg{ \n"
        for eachModule in self.package_info.module_info:
            for eachClassInfo in eachModule.class_info:
                full_names = eachClassInfo.get_full_names()
                if len(full_names) == 1:
                    continue
                short_names = eachClassInfo.get_short_names()
                for idx, eachTemplateName in enumerate(full_names):
                    short_name = short_names[idx]
                    typdef_prefix = "typedef " + eachTemplateName.replace(" ","") + " "
                    self.hpp_string += typdef_prefix + short_name + ";\n"
        self.hpp_string += "}\n"

        self.add_custom_header_code()
        self.hpp_string += "\n#endif // {}_HEADERS_HPP_\n".format(self.package_info.name)
        self.write_file()
| [
"os.path.exists",
"ntpath.basename",
"os.makedirs"
] | [((1194, 1233), 'os.path.exists', 'os.path.exists', (["(self.wrapper_root + '/')"], {}), "(self.wrapper_root + '/')\n", (1208, 1233), False, 'import os\n'), ((1247, 1283), 'os.makedirs', 'os.makedirs', (["(self.wrapper_root + '/')"], {}), "(self.wrapper_root + '/')\n", (1258, 1283), False, 'import os\n'), ((2358, 2383), 'ntpath.basename', 'ntpath.basename', (['eachFile'], {}), '(eachFile)\n', (2373, 2383), False, 'import ntpath\n'), ((3166, 3217), 'ntpath.basename', 'ntpath.basename', (['eachFuncInfo.source_file_full_path'], {}), '(eachFuncInfo.source_file_full_path)\n', (3181, 3217), False, 'import ntpath\n'), ((2857, 2909), 'ntpath.basename', 'ntpath.basename', (['eachClassInfo.source_file_full_path'], {}), '(eachClassInfo.source_file_full_path)\n', (2872, 2909), False, 'import ntpath\n')] |
"""add separate reported and deleted tables
Revision ID: 491383f70589
Revises: <PASSWORD>
Create Date: 2020-06-26 05:23:30.267933
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '491383f70589'  # unique id of this migration
down_revision = '<PASSWORD>'  # parent revision id (appears redacted in this copy)
branch_labels = None
depends_on = None
def upgrade():
    """Create the separate ``deleted`` and ``reported`` tables and drop the
    ``suspended`` columns they replace."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Records of facts a user deleted, keyed to fact and user
    op.create_table('deleted',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('fact_id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('date_deleted', sa.TIMESTAMP(timezone=True), nullable=False),
    sa.ForeignKeyConstraint(['fact_id'], ['fact.fact_id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_deleted_id'), 'deleted', ['id'], unique=False)
    # Records of facts a user reported, with an optional JSONB suggestion
    op.create_table('reported',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('fact_id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('date_reported', sa.TIMESTAMP(timezone=True), nullable=False),
    sa.Column('suggestion', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
    sa.ForeignKeyConstraint(['fact_id'], ['fact.fact_id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_reported_id'), 'reported', ['id'], unique=False)
    # These columns are superseded by the two new tables
    op.drop_column('suspended', 'comment')
    op.drop_column('suspended', 'suspend_type')
    op.drop_column('suspended', 'suggestion')
    # ### end Alembic commands ###
def downgrade():
    """Restore the dropped ``suspended`` columns and remove the
    ``reported`` and ``deleted`` tables (reverse of :func:`upgrade`).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): re-adding the ENUM column assumes the PostgreSQL type
    # ``suspendtype`` still exists in the database -- confirm it is not
    # dropped by a later migration before relying on this downgrade.
    op.add_column('suspended', sa.Column('suggestion', postgresql.JSONB(astext_type=sa.Text()), autoincrement=False, nullable=True))
    op.add_column('suspended', sa.Column('suspend_type', postgresql.ENUM('delete', 'suspend', 'report', name='suspendtype'), autoincrement=False, nullable=False))
    op.add_column('suspended', sa.Column('comment', sa.VARCHAR(), autoincrement=False, nullable=True))
    # Drop indexes before their tables, matching the creation order above.
    op.drop_index(op.f('ix_reported_id'), table_name='reported')
    op.drop_table('reported')
    op.drop_index(op.f('ix_deleted_id'), table_name='deleted')
    op.drop_table('deleted')
    # ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"alembic.op.f",
"alembic.op.drop_column",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.VARCHAR",
"sqlalchemy.Integer",
"sqlalchemy.Text",
"sqlalchemy.TIMESTAMP",
"sqlalchemy.dialects.postgresql.ENUM"
] | [((1548, 1586), 'alembic.op.drop_column', 'op.drop_column', (['"""suspended"""', '"""comment"""'], {}), "('suspended', 'comment')\n", (1562, 1586), False, 'from alembic import op\n'), ((1591, 1634), 'alembic.op.drop_column', 'op.drop_column', (['"""suspended"""', '"""suspend_type"""'], {}), "('suspended', 'suspend_type')\n", (1605, 1634), False, 'from alembic import op\n'), ((1639, 1680), 'alembic.op.drop_column', 'op.drop_column', (['"""suspended"""', '"""suggestion"""'], {}), "('suspended', 'suggestion')\n", (1653, 1680), False, 'from alembic import op\n'), ((2269, 2294), 'alembic.op.drop_table', 'op.drop_table', (['"""reported"""'], {}), "('reported')\n", (2282, 2294), False, 'from alembic import op\n'), ((2362, 2386), 'alembic.op.drop_table', 'op.drop_table', (['"""deleted"""'], {}), "('deleted')\n", (2375, 2386), False, 'from alembic import op\n'), ((719, 773), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['fact_id']", "['fact.fact_id']"], {}), "(['fact_id'], ['fact.fact_id'])\n", (742, 773), True, 'import sqlalchemy as sa\n'), ((781, 830), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['user.id']"], {}), "(['user_id'], ['user.id'])\n", (804, 830), True, 'import sqlalchemy as sa\n'), ((838, 867), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (861, 867), True, 'import sqlalchemy as sa\n'), ((894, 915), 'alembic.op.f', 'op.f', (['"""ix_deleted_id"""'], {}), "('ix_deleted_id')\n", (898, 915), False, 'from alembic import op\n'), ((1311, 1365), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['fact_id']", "['fact.fact_id']"], {}), "(['fact_id'], ['fact.fact_id'])\n", (1334, 1365), True, 'import sqlalchemy as sa\n'), ((1373, 1422), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['user.id']"], {}), "(['user_id'], ['user.id'])\n", (1396, 1422), True, 'import sqlalchemy as sa\n'), ((1430, 1459), 
'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1453, 1459), True, 'import sqlalchemy as sa\n'), ((1486, 1508), 'alembic.op.f', 'op.f', (['"""ix_reported_id"""'], {}), "('ix_reported_id')\n", (1490, 1508), False, 'from alembic import op\n'), ((2218, 2240), 'alembic.op.f', 'op.f', (['"""ix_reported_id"""'], {}), "('ix_reported_id')\n", (2222, 2240), False, 'from alembic import op\n'), ((2313, 2334), 'alembic.op.f', 'op.f', (['"""ix_deleted_id"""'], {}), "('ix_deleted_id')\n", (2317, 2334), False, 'from alembic import op\n'), ((496, 508), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (506, 508), True, 'import sqlalchemy as sa\n'), ((552, 564), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (562, 564), True, 'import sqlalchemy as sa\n'), ((608, 620), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (618, 620), True, 'import sqlalchemy as sa\n'), ((669, 696), 'sqlalchemy.TIMESTAMP', 'sa.TIMESTAMP', ([], {'timezone': '(True)'}), '(timezone=True)\n', (681, 696), True, 'import sqlalchemy as sa\n'), ((1002, 1014), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1012, 1014), True, 'import sqlalchemy as sa\n'), ((1058, 1070), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1068, 1070), True, 'import sqlalchemy as sa\n'), ((1114, 1126), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1124, 1126), True, 'import sqlalchemy as sa\n'), ((1176, 1203), 'sqlalchemy.TIMESTAMP', 'sa.TIMESTAMP', ([], {'timezone': '(True)'}), '(timezone=True)\n', (1188, 1203), True, 'import sqlalchemy as sa\n'), ((1991, 2057), 'sqlalchemy.dialects.postgresql.ENUM', 'postgresql.ENUM', (['"""delete"""', '"""suspend"""', '"""report"""'], {'name': '"""suspendtype"""'}), "('delete', 'suspend', 'report', name='suspendtype')\n", (2006, 2057), False, 'from sqlalchemy.dialects import postgresql\n'), ((2149, 2161), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {}), '()\n', (2159, 2161), True, 'import sqlalchemy as sa\n'), ((1279, 
1288), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (1286, 1288), True, 'import sqlalchemy as sa\n'), ((1885, 1894), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (1892, 1894), True, 'import sqlalchemy as sa\n')] |
"""Test the viscous fluid helper functions."""
__copyright__ = """
Copyright (C) 2021 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import numpy.random
import numpy.linalg as la # noqa
import pyopencl.clmath # noqa
import logging
import pytest # noqa
from pytools.obj_array import make_obj_array
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL
import grudge.op as op
from grudge.eager import (
EagerDGDiscretization,
interior_trace_pair
)
from meshmode.array_context import ( # noqa
pytest_generate_tests_for_pyopencl_array_context
as pytest_generate_tests)
from mirgecom.fluid import make_conserved
from mirgecom.transport import (
SimpleTransport,
PowerLawTransport
)
from mirgecom.eos import IdealSingleGas
logger = logging.getLogger(__name__)
@pytest.mark.parametrize("transport_model", [0, 1])
def test_viscous_stress_tensor(actx_factory, transport_model):
    """Compare the computed viscous stress tensor against an exact one.

    The velocity field is linear in the coordinates, so its gradient is a
    known constant matrix and tau can be assembled analytically.
    """
    actx = actx_factory()
    dim = 3
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
        a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(4,) * dim
    )
    discr = EagerDGDiscretization(actx, mesh, order=1)
    nodes = thaw(actx, discr.nodes())
    zeros = discr.zeros(actx)
    ones = zeros + 1.0
    # Linear velocity -> constant gradient [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    u = nodes[0] + 2*nodes[1] + 3*nodes[2]
    v = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
    w = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
    vel = make_obj_array([u, v, w])
    mass = 2*ones
    energy = zeros + 2.5
    cv = make_conserved(dim, mass=mass, energy=energy, momentum=mass*vel)
    grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
    # Exercise both a constant-coefficient model and the power-law model
    tv_model = (SimpleTransport(bulk_viscosity=1.0, viscosity=0.5)
                if transport_model else PowerLawTransport())
    eos = IdealSingleGas(transport_model=tv_model)
    mu = tv_model.viscosity(eos, cv)
    lam = tv_model.volume_viscosity(eos, cv)
    # tau = mu*(grad(v) + grad(v)^T) + lambda*div(v)*I, assembled exactly
    grad_v_exact = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    div_v_exact = 15
    tau_exact = (mu*(grad_v_exact + grad_v_exact.T)
                 + lam*div_v_exact*np.eye(3))
    from mirgecom.viscous import viscous_stress_tensor
    tau = viscous_stress_tensor(discr, eos, cv, grad_cv)
    # Remaining error stems from the discrete velocity gradient
    assert discr.norm(tau - tau_exact, np.inf) < 1e-12
# Box grid generator widget lifted from @majosm and slightly bent
def _get_box_mesh(dim, a, b, n, t=None):
    """Build a box mesh with per-face boundary tags ("-1"/"+1" -> "-x"/"+x", ...)."""
    axis_names = ("x", "y", "z")
    boundary_tag_to_face = {
        f"{sign}{axis + 1}": [f"{sign}{axis_names[axis]}"]
        for axis in range(dim)
        for sign in ("-", "+")
    }
    from meshmode.mesh.generation import generate_regular_rect_mesh as gen
    return gen(a=a, b=b, npoints_per_axis=n,
               boundary_tag_to_face=boundary_tag_to_face, mesh_type=t)
@pytest.mark.parametrize("order", [2, 3, 4])
@pytest.mark.parametrize("kappa", [0.0, 1.0, 2.3])
def test_poiseuille_fluxes(actx_factory, order, kappa):
    """Test the viscous fluxes using a Poiseuille input state.

    Runs a mesh-refinement study (nfac = 1, 2, 4) on a 2D channel with the
    exact planar-Poiseuille solution as input, checking the computed CV
    gradient, pressure/temperature gradients, conductive heat flux, and
    viscous energy/momentum fluxes against their analytic counterparts,
    then asserting the expected convergence order (or near-zero error).
    """
    actx = actx_factory()
    dim = 2
    from pytools.convergence import EOCRecorder
    # Convergence-rate trackers for the energy and momentum flux errors
    e_eoc_rec = EOCRecorder()
    p_eoc_rec = EOCRecorder()
    base_pressure = 100000.0
    pressure_ratio = 1.001
    mu = 42  # arbitrary
    left_boundary_location = 0
    right_boundary_location = 0.1
    ybottom = 0.
    ytop = .02
    nspecies = 0
    spec_diffusivity = 0 * np.ones(nspecies)
    transport_model = SimpleTransport(viscosity=mu, thermal_conductivity=kappa,
                                      species_diffusivity=spec_diffusivity)
    # Constant streamwise pressure gradient driving the channel flow
    xlen = right_boundary_location - left_boundary_location
    p_low = base_pressure
    p_hi = pressure_ratio*base_pressure
    dpdx = (p_low - p_hi) / xlen
    rho = 1.0
    eos = IdealSingleGas(transport_model=transport_model)
    from mirgecom.initializers import PlanarPoiseuille
    initializer = PlanarPoiseuille(density=rho, mu=mu)
    # Total element-boundary flux: interior faces plus all domain boundaries
    def _elbnd_flux(discr, compute_interior_flux, compute_boundary_flux,
                    int_tpair, boundaries):
        return (compute_interior_flux(int_tpair)
                + sum(compute_boundary_flux(btag) for btag in boundaries))
    from mirgecom.flux import gradient_flux_central
    # Central numerical flux on interior faces, projected to all faces
    def cv_flux_interior(int_tpair):
        normal = thaw(actx, discr.normal(int_tpair.dd))
        flux_weak = gradient_flux_central(int_tpair, normal)
        return discr.project(int_tpair.dd, "all_faces", flux_weak)
    # Boundary flux built from the exact solution on both trace sides
    def cv_flux_boundary(btag):
        boundary_discr = discr.discr_from_dd(btag)
        bnd_nodes = thaw(actx, boundary_discr.nodes())
        cv_bnd = initializer(x_vec=bnd_nodes, eos=eos)
        bnd_nhat = thaw(actx, discr.normal(btag))
        from grudge.trace_pair import TracePair
        bnd_tpair = TracePair(btag, interior=cv_bnd, exterior=cv_bnd)
        flux_weak = gradient_flux_central(bnd_tpair, bnd_nhat)
        return discr.project(bnd_tpair.dd, "all_faces", flux_weak)
    for nfac in [1, 2, 4]:
        # Refine resolution by nfac in each direction
        npts_axis = nfac*(11, 21)
        box_ll = (left_boundary_location, ybottom)
        box_ur = (right_boundary_location, ytop)
        mesh = _get_box_mesh(2, a=box_ll, b=box_ur, n=npts_axis)
        logger.info(
            f"Number of {dim}d elements: {mesh.nelements}"
        )
        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())
        # compute max element size (abscissa for the EOC fit)
        from grudge.dt_utils import h_max_from_volume
        h_max = h_max_from_volume(discr)
        # form exact cv
        cv = initializer(x_vec=nodes, eos=eos)
        cv_int_tpair = interior_trace_pair(discr, cv)
        boundaries = [BTAG_ALL]
        cv_flux_bnd = _elbnd_flux(discr, cv_flux_interior, cv_flux_boundary,
                                 cv_int_tpair, boundaries)
        from mirgecom.operators import grad_operator
        grad_cv = make_conserved(dim, q=grad_operator(discr, cv.join(),
                                                     cv_flux_bnd.join()))
        # Analytic gradient of the exact state for comparison
        xp_grad_cv = initializer.exact_grad(x_vec=nodes, eos=eos, cv_exact=cv)
        xp_grad_v = 1/cv.mass * xp_grad_cv.momentum
        xp_tau = mu * (xp_grad_v + xp_grad_v.transpose())
        # sanity check the gradient:
        relerr_scale_e = 1.0 / discr.norm(xp_grad_cv.energy, np.inf)
        relerr_scale_p = 1.0 / discr.norm(xp_grad_cv.momentum, np.inf)
        graderr_e = discr.norm((grad_cv.energy - xp_grad_cv.energy), np.inf)
        graderr_p = discr.norm((grad_cv.momentum - xp_grad_cv.momentum), np.inf)
        graderr_e *= relerr_scale_e
        graderr_p *= relerr_scale_p
        assert graderr_e < 5e-7
        assert graderr_p < 5e-11
        zeros = discr.zeros(actx)
        ones = zeros + 1
        pressure = eos.pressure(cv)
        # grad of p should be dp/dx
        xp_grad_p = make_obj_array([dpdx*ones, zeros])
        grad_p = op.local_grad(discr, pressure)
        dpscal = 1.0/np.abs(dpdx)
        temperature = eos.temperature(cv)
        tscal = rho*eos.gas_const()*dpscal
        xp_grad_t = xp_grad_p/(cv.mass*eos.gas_const())
        grad_t = op.local_grad(discr, temperature)
        # sanity check
        assert discr.norm(grad_p - xp_grad_p, np.inf)*dpscal < 5e-9
        assert discr.norm(grad_t - xp_grad_t, np.inf)*tscal < 5e-9
        # verify heat flux (q = -kappa * grad(T); trivial when kappa == 0)
        from mirgecom.viscous import conductive_heat_flux
        heat_flux = conductive_heat_flux(discr, eos, cv, grad_t)
        xp_heat_flux = -kappa*xp_grad_t
        assert discr.norm(heat_flux - xp_heat_flux, np.inf) < 2e-8
        # verify diffusive mass flux is zilch (no scalar components)
        from mirgecom.viscous import diffusive_flux
        j = diffusive_flux(discr, eos, cv, grad_cv)
        assert len(j) == 0
        # Expected viscous energy flux: tau . v minus the heat flux
        xp_e_flux = np.dot(xp_tau, cv.velocity) - xp_heat_flux
        xp_mom_flux = xp_tau
        from mirgecom.viscous import viscous_flux
        vflux = viscous_flux(discr, eos, cv, grad_cv, grad_t)
        # Relative errors fed to the convergence recorders
        efluxerr = (
            discr.norm(vflux.energy - xp_e_flux, np.inf)
            / discr.norm(xp_e_flux, np.inf)
        )
        momfluxerr = (
            discr.norm(vflux.momentum - xp_mom_flux, np.inf)
            / discr.norm(xp_mom_flux, np.inf)
        )
        assert discr.norm(vflux.mass, np.inf) == 0
        e_eoc_rec.add_data_point(h_max, efluxerr)
        p_eoc_rec.add_data_point(h_max, momfluxerr)
    # Either the fluxes converge at (order - 0.5) or are already tiny
    assert (
        e_eoc_rec.order_estimate() >= order - 0.5
        or e_eoc_rec.max_error() < 3e-9
    )
    assert (
        p_eoc_rec.order_estimate() >= order - 0.5
        or p_eoc_rec.max_error() < 2e-12
    )
def test_species_diffusive_flux(actx_factory):
    """Verify the species diffusive flux j_alpha = -rho d_alpha grad(Y_alpha).

    Mass fractions are linear in the coordinates, so each species has a
    constant, analytically-known gradient; even/odd species pairs carry
    equal and opposite fractions.
    """
    actx = actx_factory()
    dim = 3
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
        a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(4,) * dim
    )
    discr = EagerDGDiscretization(actx, mesh, order=1)
    nodes = thaw(actx, discr.nodes())
    zeros = discr.zeros(actx)
    ones = zeros + 1.0
    # Linear velocity field -> simple, distinct gradient components
    vel = make_obj_array([
        nodes[0] + 2*nodes[1] + 3*nodes[2],
        4*nodes[0] + 5*nodes[1] + 6*nodes[2],
        7*nodes[0] + 8*nodes[1] + 9*nodes[2],
    ])
    # Mass fractions linear in x with a unique gradient per species
    nspecies = 2*dim
    spec_y = make_obj_array([ones for _ in range(nspecies)])
    for axis in range(dim):
        spec = 2*axis
        ramp = sum((k + 1)*nodes[k] for k in range(dim))
        spec_y[spec] = (spec + 1)*(axis*dim + 1)*ramp
        spec_y[spec + 1] = -spec_y[spec]
    rho0 = 2
    mass = rho0*ones
    energy = zeros + 2.5
    cv = make_conserved(dim, mass=mass, energy=energy, momentum=mass*vel,
                        species_mass=mass*spec_y)
    grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
    # One distinct diffusivity per species: d_alpha = 1, 2, ..., nspecies
    d_alpha = np.arange(1, nspecies + 1)
    tv_model = SimpleTransport(bulk_viscosity=1.0, viscosity=0.5,
                               thermal_conductivity=5.0,
                               species_diffusivity=d_alpha)
    eos = IdealSingleGas(transport_model=tv_model)
    from mirgecom.viscous import diffusive_flux
    j = diffusive_flux(discr, eos, cv, grad_cv)
    tol = 1e-10
    for axis in range(dim):
        spec = 2*axis
        # Analytic grad(Y) for this species pair
        dy_exact = (spec + 1)*(axis*dim + 1)*np.arange(1, dim + 1)
        assert discr.norm(j[spec] - (-rho0*d_alpha[spec]*dy_exact),
                          np.inf) < tol
        assert discr.norm(j[spec + 1] - rho0*d_alpha[spec + 1]*dy_exact,
                          np.inf) < tol
def test_diffusive_heat_flux(actx_factory):
    """Test diffusive heat flux and values against exact.

    NOTE(review): this body is identical to ``test_species_diffusive_flux``
    above -- it only exercises ``diffusive_flux`` and never calls a heat-flux
    routine (e.g. ``conductive_heat_flux``). This looks like a copy/paste
    placeholder; confirm intent and extend it to actually check the
    (diffusive) heat-flux contribution.
    """
    actx = actx_factory()
    dim = 3
    nel_1d = 4
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
        a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
    )
    order = 1
    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())
    zeros = discr.zeros(actx)
    ones = zeros + 1.0
    # assemble velocities for simple, unique grad components
    velocity_x = nodes[0] + 2*nodes[1] + 3*nodes[2]
    velocity_y = 4*nodes[0] + 5*nodes[1] + 6*nodes[2]
    velocity_z = 7*nodes[0] + 8*nodes[1] + 9*nodes[2]
    velocity = make_obj_array([velocity_x, velocity_y, velocity_z])
    # assemble y so that each one has simple, but unique grad components
    nspecies = 2*dim
    y = make_obj_array([ones for _ in range(nspecies)])
    for idim in range(dim):
        ispec = 2*idim
        y[ispec] = (ispec+1)*(idim*dim+1)*sum([(iidim+1)*nodes[iidim]
                                               for iidim in range(dim)])
        y[ispec+1] = -y[ispec]
    massval = 2
    mass = massval*ones
    energy = zeros + 2.5
    mom = mass * velocity
    species_mass = mass*y
    cv = make_conserved(dim, mass=mass, energy=energy, momentum=mom,
                        species_mass=species_mass)
    grad_cv = make_conserved(dim, q=op.local_grad(discr, cv.join()))
    mu_b = 1.0
    mu = 0.5
    kappa = 5.0
    # assemble d_alpha so that every species has a unique j
    d_alpha = np.array([(ispec+1) for ispec in range(nspecies)])
    tv_model = SimpleTransport(bulk_viscosity=mu_b, viscosity=mu,
                               thermal_conductivity=kappa,
                               species_diffusivity=d_alpha)
    eos = IdealSingleGas(transport_model=tv_model)
    from mirgecom.viscous import diffusive_flux
    j = diffusive_flux(discr, eos, cv, grad_cv)
    tol = 1e-10
    for idim in range(dim):
        ispec = 2*idim
        # expected grad(Y) for this species; j = -rho * d_alpha * grad(Y)
        exact_dy = np.array([((ispec+1)*(idim*dim+1))*(iidim+1)
                             for iidim in range(dim)])
        exact_j = -massval * d_alpha[ispec] * exact_dy
        assert discr.norm(j[ispec] - exact_j, np.inf) < tol
        exact_j = massval * d_alpha[ispec+1] * exact_dy
        assert discr.norm(j[ispec+1] - exact_j, np.inf) < tol
@pytest.mark.parametrize("array_valued", [False, True])
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_local_max_species_diffusivity(actx_factory, dim, array_valued):
    """Check that the local max species diffusivity picks the largest entry.

    Diffusivities are .1/.2/.3 (optionally modulated by a spatial factor),
    so the expected maximum is the third species' value everywhere.
    """
    actx = actx_factory()
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
        a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(4,) * dim
    )
    discr = EagerDGDiscretization(actx, mesh, order=1)
    nodes = thaw(actx, discr.nodes())
    zeros = discr.zeros(actx)
    ones = zeros + 1.0
    # Uniform flow state; the species_mass content is irrelevant here
    velocity = make_obj_array([zeros + .32 for _ in range(dim)])
    mass = 1*ones
    energy = zeros + 1.0 / (1.4*.4)
    cv = make_conserved(dim, mass=mass, energy=energy, momentum=mass*velocity,
                        species_mass=np.array([1., 2., 3.], dtype=object))
    diffusivity = np.array([.1, .2, .3])
    if array_valued:
        # Spatially-varying modulation exercises the array-valued path
        modulation = 1 + 0.1*actx.np.sin(nodes[0])
        diffusivity *= modulation
    tv_model = SimpleTransport(species_diffusivity=diffusivity)
    eos = IdealSingleGas(transport_model=tv_model)
    d_alpha = tv_model.species_diffusivity(eos, cv)
    from mirgecom.viscous import get_local_max_species_diffusivity
    expected = .3*ones
    if array_valued:
        expected *= modulation
    actual = get_local_max_species_diffusivity(actx, discr, d_alpha)
    assert discr.norm(actual - expected, np.inf) == 0
@pytest.mark.parametrize("dim", [1, 2, 3])
@pytest.mark.parametrize("mu", [-1, 0, 1, 2])
@pytest.mark.parametrize("vel", [0, 1])
def test_viscous_timestep(actx_factory, dim, mu, vel):
    """Verify get_viscous_timestep against the explicit dt formula
    dt = h / (|v| + c + mu/h), including the inviscid (mu <= 0) case.
    """
    actx = actx_factory()
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
        a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(4,) * dim
    )
    discr = EagerDGDiscretization(actx, mesh, order=1)
    zeros = discr.zeros(actx)
    ones = zeros + 1.0
    velocity = make_obj_array([zeros + vel for _ in range(dim)])
    mass = 1*ones
    # Energy chosen so the sound speed works out to c = 1.0
    energy = zeros + 1.0 / (1.4*.4)
    cv = make_conserved(dim, mass=mass, energy=energy, momentum=mass*velocity,
                        species_mass=None)
    from grudge.dt_utils import characteristic_lengthscales
    chlen = characteristic_lengthscales(actx, discr)
    from grudge.op import nodal_min
    chlen_min = nodal_min(discr, "vol", chlen)
    # Scale viscosity by the smallest cell size; negative means inviscid
    mu = mu*chlen_min
    if mu < 0:
        mu = 0
        tv_model = None
    else:
        tv_model = SimpleTransport(viscosity=mu)
    eos = IdealSingleGas(transport_model=tv_model)
    from mirgecom.viscous import get_viscous_timestep
    dt_actual = get_viscous_timestep(discr, eos, cv)
    total_speed = actx.np.sqrt(np.dot(velocity, velocity)) + eos.sound_speed(cv)
    dt_expected = chlen / (total_speed + mu/chlen)
    rel_error = (dt_expected - dt_actual) / dt_expected
    assert discr.norm(rel_error, np.inf) == 0
| [
"logging.getLogger",
"mirgecom.transport.PowerLawTransport",
"mirgecom.viscous.conductive_heat_flux",
"grudge.op.nodal_min",
"pytools.convergence.EOCRecorder",
"numpy.array",
"grudge.op.local_grad",
"mirgecom.flux.gradient_flux_central",
"pytools.obj_array.make_obj_array",
"grudge.dt_utils.h_max_f... | [((1839, 1866), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1856, 1866), False, 'import logging\n'), ((1870, 1920), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transport_model"""', '[0, 1]'], {}), "('transport_model', [0, 1])\n", (1893, 1920), False, 'import pytest\n'), ((4128, 4171), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[2, 3, 4]'], {}), "('order', [2, 3, 4])\n", (4151, 4171), False, 'import pytest\n'), ((4173, 4222), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kappa"""', '[0.0, 1.0, 2.3]'], {}), "('kappa', [0.0, 1.0, 2.3])\n", (4196, 4222), False, 'import pytest\n'), ((14728, 14782), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""array_valued"""', '[False, True]'], {}), "('array_valued', [False, True])\n", (14751, 14782), False, 'import pytest\n'), ((14784, 14825), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', '[1, 2, 3]'], {}), "('dim', [1, 2, 3])\n", (14807, 14825), False, 'import pytest\n'), ((16280, 16321), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', '[1, 2, 3]'], {}), "('dim', [1, 2, 3])\n", (16303, 16321), False, 'import pytest\n'), ((16323, 16367), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mu"""', '[-1, 0, 1, 2]'], {}), "('mu', [-1, 0, 1, 2])\n", (16346, 16367), False, 'import pytest\n'), ((16369, 16407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""vel"""', '[0, 1]'], {}), "('vel', [0, 1])\n", (16392, 16407), False, 'import pytest\n'), ((2177, 2275), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (2203, 2275), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((2314, 2360), 
'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (2335, 2360), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((2689, 2741), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['[velocity_x, velocity_y, velocity_z]'], {}), '([velocity_x, velocity_y, velocity_z])\n', (2703, 2741), False, 'from pytools.obj_array import make_obj_array\n'), ((2822, 2881), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom'}), '(dim, mass=mass, energy=energy, momentum=mom)\n', (2836, 2881), False, 'from mirgecom.fluid import make_conserved\n'), ((3106, 3146), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (3120, 3146), False, 'from mirgecom.eos import IdealSingleGas\n'), ((3274, 3317), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (3282, 3317), True, 'import numpy as np\n'), ((3337, 3380), 'numpy.array', 'np.array', (['[[1, 4, 7], [2, 5, 8], [3, 6, 9]]'], {}), '([[1, 4, 7], [2, 5, 8], [3, 6, 9]])\n', (3345, 3380), True, 'import numpy as np\n'), ((3554, 3600), 'mirgecom.viscous.viscous_stress_tensor', 'viscous_stress_tensor', (['discr', 'eos', 'cv', 'grad_cv'], {}), '(discr, eos, cv, grad_cv)\n', (3575, 3600), False, 'from mirgecom.viscous import viscous_stress_tensor\n'), ((4051, 4124), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'gen', ([], {'a': 'a', 'b': 'b', 'npoints_per_axis': 'n', 'boundary_tag_to_face': 'bttf', 'mesh_type': 't'}), '(a=a, b=b, npoints_per_axis=n, boundary_tag_to_face=bttf, mesh_type=t)\n', (4054, 4124), True, 'from meshmode.mesh.generation import generate_regular_rect_mesh as gen\n'), ((4448, 4461), 'pytools.convergence.EOCRecorder', 'EOCRecorder', ([], {}), '()\n', (4459, 4461), False, 'from pytools.convergence import 
EOCRecorder\n'), ((4478, 4491), 'pytools.convergence.EOCRecorder', 'EOCRecorder', ([], {}), '()\n', (4489, 4491), False, 'from pytools.convergence import EOCRecorder\n'), ((4755, 4854), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'viscosity': 'mu', 'thermal_conductivity': 'kappa', 'species_diffusivity': 'spec_diffusivity'}), '(viscosity=mu, thermal_conductivity=kappa,\n species_diffusivity=spec_diffusivity)\n', (4770, 4854), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((5074, 5121), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'transport_model'}), '(transport_model=transport_model)\n', (5088, 5121), False, 'from mirgecom.eos import IdealSingleGas\n'), ((5196, 5232), 'mirgecom.initializers.PlanarPoiseuille', 'PlanarPoiseuille', ([], {'density': 'rho', 'mu': 'mu'}), '(density=rho, mu=mu)\n', (5212, 5232), False, 'from mirgecom.initializers import PlanarPoiseuille\n'), ((10135, 10233), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (10161, 10233), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((10272, 10318), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (10293, 10318), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((10647, 10699), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['[velocity_x, velocity_y, velocity_z]'], {}), '([velocity_x, velocity_y, velocity_z])\n', (10661, 10699), False, 'from pytools.obj_array import make_obj_array\n'), ((11204, 11295), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom', 'species_mass': 'species_mass'}), '(dim, 
mass=mass, energy=energy, momentum=mom, species_mass=\n species_mass)\n', (11218, 11295), False, 'from mirgecom.fluid import make_conserved\n'), ((11571, 11683), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'bulk_viscosity': 'mu_b', 'viscosity': 'mu', 'thermal_conductivity': 'kappa', 'species_diffusivity': 'd_alpha'}), '(bulk_viscosity=mu_b, viscosity=mu, thermal_conductivity=\n kappa, species_diffusivity=d_alpha)\n', (11586, 11683), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((11752, 11792), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (11766, 11792), False, 'from mirgecom.eos import IdealSingleGas\n'), ((11850, 11889), 'mirgecom.viscous.diffusive_flux', 'diffusive_flux', (['discr', 'eos', 'cv', 'grad_cv'], {}), '(discr, eos, cv, grad_cv)\n', (11864, 11889), False, 'from mirgecom.viscous import diffusive_flux\n'), ((12551, 12649), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (12577, 12649), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((12688, 12734), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (12709, 12734), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((13063, 13115), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['[velocity_x, velocity_y, velocity_z]'], {}), '([velocity_x, velocity_y, velocity_z])\n', (13077, 13115), False, 'from pytools.obj_array import make_obj_array\n'), ((13620, 13711), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom', 'species_mass': 'species_mass'}), '(dim, mass=mass, 
energy=energy, momentum=mom, species_mass=\n species_mass)\n', (13634, 13711), False, 'from mirgecom.fluid import make_conserved\n'), ((13986, 14098), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'bulk_viscosity': 'mu_b', 'viscosity': 'mu', 'thermal_conductivity': 'kappa', 'species_diffusivity': 'd_alpha'}), '(bulk_viscosity=mu_b, viscosity=mu, thermal_conductivity=\n kappa, species_diffusivity=d_alpha)\n', (14001, 14098), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((14167, 14207), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (14181, 14207), False, 'from mirgecom.eos import IdealSingleGas\n'), ((14265, 14304), 'mirgecom.viscous.diffusive_flux', 'diffusive_flux', (['discr', 'eos', 'cv', 'grad_cv'], {}), '(discr, eos, cv, grad_cv)\n', (14279, 14304), False, 'from mirgecom.viscous import diffusive_flux\n'), ((15075, 15173), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (15101, 15173), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((15212, 15258), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (15233, 15258), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((15551, 15590), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {'dtype': 'object'}), '([1.0, 2.0, 3.0], dtype=object)\n', (15559, 15590), True, 'import numpy as np\n'), ((15598, 15689), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom', 'species_mass': 'species_mass'}), '(dim, mass=mass, energy=energy, momentum=mom, species_mass=\n species_mass)\n', (15612, 15689), 
False, 'from mirgecom.fluid import make_conserved\n'), ((15730, 15755), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3]'], {}), '([0.1, 0.2, 0.3])\n', (15738, 15755), True, 'import numpy as np\n'), ((15859, 15909), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'species_diffusivity': 'd_alpha_input'}), '(species_diffusivity=d_alpha_input)\n', (15874, 15909), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((15920, 15960), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (15934, 15960), False, 'from mirgecom.eos import IdealSingleGas\n'), ((16164, 16219), 'mirgecom.viscous.get_local_max_species_diffusivity', 'get_local_max_species_diffusivity', (['actx', 'discr', 'd_alpha'], {}), '(actx, discr, d_alpha)\n', (16197, 16219), False, 'from mirgecom.viscous import get_local_max_species_diffusivity\n'), ((16615, 16713), 'meshmode.mesh.generation.generate_regular_rect_mesh', 'generate_regular_rect_mesh', ([], {'a': '((1.0,) * dim)', 'b': '((2.0,) * dim)', 'nelements_per_axis': '((nel_1d,) * dim)'}), '(a=(1.0,) * dim, b=(2.0,) * dim,\n nelements_per_axis=(nel_1d,) * dim)\n', (16641, 16713), False, 'from meshmode.mesh.generation import generate_regular_rect_mesh\n'), ((16752, 16798), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (16773, 16798), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((17101, 17192), 'mirgecom.fluid.make_conserved', 'make_conserved', (['dim'], {'mass': 'mass', 'energy': 'energy', 'momentum': 'mom', 'species_mass': 'species_mass'}), '(dim, mass=mass, energy=energy, momentum=mom, species_mass=\n species_mass)\n', (17115, 17192), False, 'from mirgecom.fluid import make_conserved\n'), ((17285, 17325), 'grudge.dt_utils.characteristic_lengthscales', 'characteristic_lengthscales', (['actx', 'discr'], {}), '(actx, discr)\n', 
(17312, 17325), False, 'from grudge.dt_utils import characteristic_lengthscales\n'), ((17378, 17408), 'grudge.op.nodal_min', 'nodal_min', (['discr', '"""vol"""', 'chlen'], {}), "(discr, 'vol', chlen)\n", (17387, 17408), False, 'from grudge.op import nodal_min\n'), ((17556, 17596), 'mirgecom.eos.IdealSingleGas', 'IdealSingleGas', ([], {'transport_model': 'tv_model'}), '(transport_model=tv_model)\n', (17570, 17596), False, 'from mirgecom.eos import IdealSingleGas\n'), ((17667, 17703), 'mirgecom.viscous.get_viscous_timestep', 'get_viscous_timestep', (['discr', 'eos', 'cv'], {}), '(discr, eos, cv)\n', (17687, 17703), False, 'from mirgecom.viscous import get_viscous_timestep\n'), ((2995, 3045), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'bulk_viscosity': '(1.0)', 'viscosity': '(0.5)'}), '(bulk_viscosity=1.0, viscosity=0.5)\n', (3010, 3045), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((3075, 3094), 'mirgecom.transport.PowerLawTransport', 'PowerLawTransport', ([], {}), '()\n', (3092, 3094), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((4715, 4732), 'numpy.ones', 'np.ones', (['nspecies'], {}), '(nspecies)\n', (4722, 4732), True, 'import numpy as np\n'), ((5642, 5682), 'mirgecom.flux.gradient_flux_central', 'gradient_flux_central', (['int_tpair', 'normal'], {}), '(int_tpair, normal)\n', (5663, 5682), False, 'from mirgecom.flux import gradient_flux_central\n'), ((6062, 6111), 'grudge.trace_pair.TracePair', 'TracePair', (['btag'], {'interior': 'cv_bnd', 'exterior': 'cv_bnd'}), '(btag, interior=cv_bnd, exterior=cv_bnd)\n', (6071, 6111), False, 'from grudge.trace_pair import TracePair\n'), ((6132, 6174), 'mirgecom.flux.gradient_flux_central', 'gradient_flux_central', (['bnd_tpair', 'bnd_nhat'], {}), '(bnd_tpair, bnd_nhat)\n', (6153, 6174), False, 'from mirgecom.flux import gradient_flux_central\n'), ((6578, 6624), 'grudge.eager.EagerDGDiscretization', 'EagerDGDiscretization', (['actx', 
'mesh'], {'order': 'order'}), '(actx, mesh, order=order)\n', (6599, 6624), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((6773, 6797), 'grudge.dt_utils.h_max_from_volume', 'h_max_from_volume', (['discr'], {}), '(discr)\n', (6790, 6797), False, 'from grudge.dt_utils import h_max_from_volume\n'), ((6893, 6923), 'grudge.eager.interior_trace_pair', 'interior_trace_pair', (['discr', 'cv'], {}), '(discr, cv)\n', (6912, 6923), False, 'from grudge.eager import EagerDGDiscretization, interior_trace_pair\n'), ((8108, 8144), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['[dpdx * ones, zeros]'], {}), '([dpdx * ones, zeros])\n', (8122, 8144), False, 'from pytools.obj_array import make_obj_array\n'), ((8160, 8190), 'grudge.op.local_grad', 'op.local_grad', (['discr', 'pressure'], {}), '(discr, pressure)\n', (8173, 8190), True, 'import grudge.op as op\n'), ((8384, 8417), 'grudge.op.local_grad', 'op.local_grad', (['discr', 'temperature'], {}), '(discr, temperature)\n', (8397, 8417), True, 'import grudge.op as op\n'), ((8683, 8727), 'mirgecom.viscous.conductive_heat_flux', 'conductive_heat_flux', (['discr', 'eos', 'cv', 'grad_t'], {}), '(discr, eos, cv, grad_t)\n', (8703, 8727), False, 'from mirgecom.viscous import conductive_heat_flux\n'), ((8969, 9008), 'mirgecom.viscous.diffusive_flux', 'diffusive_flux', (['discr', 'eos', 'cv', 'grad_cv'], {}), '(discr, eos, cv, grad_cv)\n', (8983, 9008), False, 'from mirgecom.viscous import diffusive_flux\n'), ((9195, 9240), 'mirgecom.viscous.viscous_flux', 'viscous_flux', (['discr', 'eos', 'cv', 'grad_cv', 'grad_t'], {}), '(discr, eos, cv, grad_cv, grad_t)\n', (9207, 9240), False, 'from mirgecom.viscous import viscous_flux\n'), ((17515, 17544), 'mirgecom.transport.SimpleTransport', 'SimpleTransport', ([], {'viscosity': 'mu'}), '(viscosity=mu)\n', (17530, 17544), False, 'from mirgecom.transport import SimpleTransport, PowerLawTransport\n'), ((3477, 3486), 'numpy.eye', 'np.eye', (['(3)'], {}), 
'(3)\n', (3483, 3486), True, 'import numpy as np\n'), ((8212, 8224), 'numpy.abs', 'np.abs', (['dpdx'], {}), '(dpdx)\n', (8218, 8224), True, 'import numpy as np\n'), ((9057, 9084), 'numpy.dot', 'np.dot', (['xp_tau', 'cv.velocity'], {}), '(xp_tau, cv.velocity)\n', (9063, 9084), True, 'import numpy as np\n'), ((17736, 17762), 'numpy.dot', 'np.dot', (['velocity', 'velocity'], {}), '(velocity, velocity)\n', (17742, 17762), True, 'import numpy as np\n')] |
# x_3_4
#
# mathモジュールからfloor関数だけインポートして切り捨て計算を行ってください
from statistics import mean
data = [7, 4, 3, 9]
print(mean(data))
| [
"statistics.mean"
] | [((111, 121), 'statistics.mean', 'mean', (['data'], {}), '(data)\n', (115, 121), False, 'from statistics import mean\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-30 23:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('widget_def', '0058_auto_20160617_1124'),
]
operations = [
migrations.AlterUniqueTogether(
name='geodatasetdeclaration',
unique_together=set([]),
),
migrations.RemoveField(
model_name='geodatasetdeclaration',
name='dataset',
),
migrations.RemoveField(
model_name='geodatasetdeclaration',
name='frequency',
),
migrations.RemoveField(
model_name='geodatasetdeclaration',
name='location',
),
migrations.RemoveField(
model_name='geodatasetdeclaration',
name='theme',
),
migrations.RemoveField(
model_name='location',
name='geo_window',
),
migrations.AlterUniqueTogether(
name='widgetdeclaration',
unique_together=set([]),
),
migrations.RemoveField(
model_name='widgetdeclaration',
name='definition',
),
migrations.RemoveField(
model_name='widgetdeclaration',
name='frequency',
),
migrations.RemoveField(
model_name='widgetdeclaration',
name='location',
),
migrations.RemoveField(
model_name='widgetdeclaration',
name='theme',
),
migrations.AddField(
model_name='graphdefinition',
name='cluster_label',
field=models.CharField(default=b'cluster', help_text=b'Not used for line graphs', max_length=120),
),
migrations.AddField(
model_name='graphdefinition',
name='dataset_label',
field=models.CharField(default=b'dataset', max_length=120),
),
migrations.DeleteModel(
name='Frequency',
),
migrations.DeleteModel(
name='GeoDatasetDeclaration',
),
migrations.DeleteModel(
name='Location',
),
migrations.DeleteModel(
name='Theme',
),
migrations.DeleteModel(
name='WidgetDeclaration',
),
]
| [
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((432, 506), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""geodatasetdeclaration"""', 'name': '"""dataset"""'}), "(model_name='geodatasetdeclaration', name='dataset')\n", (454, 506), False, 'from django.db import migrations, models\n'), ((551, 627), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""geodatasetdeclaration"""', 'name': '"""frequency"""'}), "(model_name='geodatasetdeclaration', name='frequency')\n", (573, 627), False, 'from django.db import migrations, models\n'), ((672, 747), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""geodatasetdeclaration"""', 'name': '"""location"""'}), "(model_name='geodatasetdeclaration', name='location')\n", (694, 747), False, 'from django.db import migrations, models\n'), ((792, 864), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""geodatasetdeclaration"""', 'name': '"""theme"""'}), "(model_name='geodatasetdeclaration', name='theme')\n", (814, 864), False, 'from django.db import migrations, models\n'), ((909, 973), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""location"""', 'name': '"""geo_window"""'}), "(model_name='location', name='geo_window')\n", (931, 973), False, 'from django.db import migrations, models\n'), ((1144, 1217), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""widgetdeclaration"""', 'name': '"""definition"""'}), "(model_name='widgetdeclaration', name='definition')\n", (1166, 1217), False, 'from django.db import migrations, models\n'), ((1262, 1334), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""widgetdeclaration"""', 'name': '"""frequency"""'}), "(model_name='widgetdeclaration', name='frequency')\n", (1284, 1334), False, 'from django.db import migrations, models\n'), ((1379, 1450), 'django.db.migrations.RemoveField', 
'migrations.RemoveField', ([], {'model_name': '"""widgetdeclaration"""', 'name': '"""location"""'}), "(model_name='widgetdeclaration', name='location')\n", (1401, 1450), False, 'from django.db import migrations, models\n'), ((1495, 1563), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""widgetdeclaration"""', 'name': '"""theme"""'}), "(model_name='widgetdeclaration', name='theme')\n", (1517, 1563), False, 'from django.db import migrations, models\n'), ((2023, 2063), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Frequency"""'}), "(name='Frequency')\n", (2045, 2063), False, 'from django.db import migrations, models\n'), ((2096, 2148), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""GeoDatasetDeclaration"""'}), "(name='GeoDatasetDeclaration')\n", (2118, 2148), False, 'from django.db import migrations, models\n'), ((2181, 2220), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Location"""'}), "(name='Location')\n", (2203, 2220), False, 'from django.db import migrations, models\n'), ((2253, 2289), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Theme"""'}), "(name='Theme')\n", (2275, 2289), False, 'from django.db import migrations, models\n'), ((2322, 2370), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""WidgetDeclaration"""'}), "(name='WidgetDeclaration')\n", (2344, 2370), False, 'from django.db import migrations, models\n'), ((1723, 1818), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'cluster'", 'help_text': "b'Not used for line graphs'", 'max_length': '(120)'}), "(default=b'cluster', help_text=b'Not used for line graphs',\n max_length=120)\n", (1739, 1818), False, 'from django.db import migrations, models\n'), ((1950, 2002), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'dataset'", 'max_length': '(120)'}), 
"(default=b'dataset', max_length=120)\n", (1966, 2002), False, 'from django.db import migrations, models\n')] |
# ******************************************************************************
# Copyright 2017-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node, run_op_numeric_data
from tests import xfail_issue_40957
def test_concat():
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
axis = 0
expected = np.concatenate((a, b), axis=0)
runtime = get_runtime()
parameter_a = ng.parameter(list(a.shape), name="A", dtype=np.float32)
parameter_b = ng.parameter(list(b.shape), name="B", dtype=np.float32)
node = ng.concat([parameter_a, parameter_b], axis)
computation = runtime.computation(node, parameter_a, parameter_b)
result = computation(a, b)
assert np.allclose(result, expected)
@xfail_issue_40957
@pytest.mark.parametrize(
"val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))]
)
def test_constant_from_bool(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"val_type, value",
[
pytest.param(np.float32, np.float32(0.1234), marks=xfail_issue_40957),
pytest.param(np.float64, np.float64(0.1234), marks=xfail_issue_40957),
pytest.param(np.int8, np.int8(-63), marks=xfail_issue_40957),
pytest.param(np.int16, np.int16(-12345), marks=xfail_issue_40957),
pytest.param(np.int32, np.int32(-123456), marks=xfail_issue_40957),
pytest.param(np.int64, np.int64(-1234567), marks=xfail_issue_40957),
pytest.param(np.uint8, np.uint8(63), marks=xfail_issue_40957),
pytest.param(np.uint16, np.uint16(12345), marks=xfail_issue_40957),
pytest.param(np.uint32, np.uint32(123456), marks=xfail_issue_40957),
pytest.param(np.uint64, np.uint64(1234567), marks=xfail_issue_40957),
],
)
def test_constant_from_scalar(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"val_type",
[
pytest.param(np.float32, marks=xfail_issue_40957),
pytest.param(np.float64, marks=xfail_issue_40957),
],
)
def test_constant_from_float_array(val_type):
np.random.seed(133391)
input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
result = run_op_numeric_data(input_data, ng.constant, val_type)
assert np.allclose(result, input_data)
@xfail_issue_40957
@pytest.mark.parametrize(
"val_type, range_start, range_end",
[
(np.int8, -8, 8),
(np.int16, -64, 64),
(np.int32, -1024, 1024),
(np.int64, -16383, 16383),
(np.uint8, 0, 8),
(np.uint16, 0, 64),
(np.uint32, 0, 1024),
(np.uint64, 0, 16383),
],
)
def test_constant_from_integer_array(val_type, range_start, range_end):
np.random.seed(133391)
input_data = np.array(
np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type
)
result = run_op_numeric_data(input_data, ng.constant, val_type)
assert np.allclose(result, input_data)
def test_broadcast_numpy():
data_shape = [16, 1, 1]
target_shape_shape = [4]
data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
target_shape_parameter = ng.parameter(
target_shape_shape, name="Target_shape", dtype=np.int64
)
node = ng.broadcast(data_parameter, target_shape_parameter)
assert node.get_type_name() == "Broadcast"
assert node.get_output_size() == 1
def test_broadcast_bidirectional():
data_shape = [16, 1, 1]
target_shape_shape = [4]
data_parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
target_shape_parameter = ng.parameter(
target_shape_shape, name="Target_shape", dtype=np.int64
)
node = ng.broadcast(data_parameter, target_shape_parameter, "BIDIRECTIONAL")
assert node.get_type_name() == "Broadcast"
assert node.get_output_size() == 1
def test_gather():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32
).reshape((3, 3))
input_indices = np.array([0, 2], np.int32).reshape(1, 2)
input_axes = np.array([1], np.int32)
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2)
)
result = run_op_node([input_data], ng.gather, input_indices, input_axes)
assert np.allclose(result, expected)
def test_transpose():
input_tensor = np.arange(3 * 3 * 224 * 224, dtype=np.int32).reshape(
(3, 3, 224, 224)
)
input_order = np.array([0, 2, 3, 1], dtype=np.int32)
result = run_op_node([input_tensor], ng.transpose, input_order)
expected = np.transpose(input_tensor, input_order)
assert np.allclose(result, expected)
@pytest.mark.xfail(
reason="Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation."
)
def test_tile():
input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3))
repeats = np.array([2, 1], dtype=np.int32)
result = run_op_node([input_tensor], ng.tile, repeats)
expected = np.array([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]).reshape((2, 2, 3))
assert np.allclose(result, expected)
@pytest.mark.xfail(
reason="RuntimeError: Check 'shape_size(get_input_shape(0)) == shape_size(output_shape)'"
)
def test_strided_slice():
input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4))
begin = np.array([1, 0], dtype=np.int32)
end = np.array([0, 0], dtype=np.int32)
strides = np.array([1, 1], dtype=np.int32)
begin_mask = np.array([0, 0, 0], dtype=np.int32)
end_mask = np.array([0, 0, 0], dtype=np.int32)
new_axis_mask = np.array([0, 1, 0], dtype=np.int32)
shrink_axis_mask = np.array([1, 0, 0], dtype=np.int32)
ellipsis_mask = np.array([0, 0, 0], dtype=np.int32)
result = run_op_node(
[input_tensor],
ng.strided_slice,
begin,
end,
strides,
begin_mask,
end_mask,
new_axis_mask,
shrink_axis_mask,
ellipsis_mask,
)
expected = np.array(
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32
).reshape((1, 3, 4))
assert np.allclose(result, expected)
def test_reshape_v1():
A = np.arange(1200, dtype=np.float32).reshape((2, 5, 5, 24))
shape = np.array([0, -1, 4], dtype=np.int32)
special_zero = True
expected_shape = np.array([2, 150, 4])
expected = np.reshape(A, expected_shape)
result = run_op_node([A], ng.reshape, shape, special_zero)
assert np.allclose(result, expected)
def test_shape_of():
input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
result = run_op_node([input_tensor], ng.shape_of)
assert np.allclose(result, [3, 3])
| [
"numpy.uint8",
"numpy.random.rand",
"numpy.int32",
"numpy.array",
"numpy.arange",
"numpy.int8",
"numpy.int64",
"numpy.reshape",
"ngraph.concat",
"pytest.mark.xfail",
"numpy.float64",
"numpy.uint64",
"numpy.uint32",
"numpy.empty",
"numpy.random.seed",
"numpy.concatenate",
"ngraph.para... | [((3265, 3515), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val_type, range_start, range_end"""', '[(np.int8, -8, 8), (np.int16, -64, 64), (np.int32, -1024, 1024), (np.int64,\n -16383, 16383), (np.uint8, 0, 8), (np.uint16, 0, 64), (np.uint32, 0, \n 1024), (np.uint64, 0, 16383)]'], {}), "('val_type, range_start, range_end', [(np.int8, -8, \n 8), (np.int16, -64, 64), (np.int32, -1024, 1024), (np.int64, -16383, \n 16383), (np.uint8, 0, 8), (np.uint16, 0, 64), (np.uint32, 0, 1024), (np\n .uint64, 0, 16383)])\n", (3288, 3515), False, 'import pytest\n'), ((5610, 5744), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation."""'}), "(reason=\n 'Tile operation has a form that is not supported. Tile_2 should be converted to TileIE operation.'\n )\n", (5627, 5744), False, 'import pytest\n'), ((6059, 6177), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""RuntimeError: Check \'shape_size(get_input_shape(0)) == shape_size(output_shape)\'"""'}), '(reason=\n "RuntimeError: Check \'shape_size(get_input_shape(0)) == shape_size(output_shape)\'"\n )\n', (6076, 6177), False, 'import pytest\n'), ((973, 999), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (981, 999), True, 'import numpy as np\n'), ((1008, 1026), 'numpy.array', 'np.array', (['[[5, 6]]'], {}), '([[5, 6]])\n', (1016, 1026), True, 'import numpy as np\n'), ((1055, 1085), 'numpy.concatenate', 'np.concatenate', (['(a, b)'], {'axis': '(0)'}), '((a, b), axis=0)\n', (1069, 1085), True, 'import numpy as np\n'), ((1101, 1114), 'tests.runtime.get_runtime', 'get_runtime', ([], {}), '()\n', (1112, 1114), False, 'from tests.runtime import get_runtime\n'), ((1274, 1317), 'ngraph.concat', 'ng.concat', (['[parameter_a, parameter_b]', 'axis'], {}), '([parameter_a, parameter_b], axis)\n', (1283, 1317), True, 'import ngraph as ng\n'), ((1430, 1459), 
'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (1441, 1459), True, 'import numpy as np\n'), ((1647, 1678), 'numpy.array', 'np.array', (['value'], {'dtype': 'val_type'}), '(value, dtype=val_type)\n', (1655, 1678), True, 'import numpy as np\n'), ((1692, 1741), 'tests.test_ngraph.util.run_op_numeric_data', 'run_op_numeric_data', (['value', 'ng.constant', 'val_type'], {}), '(value, ng.constant, val_type)\n', (1711, 1741), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((1753, 1782), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (1764, 1782), True, 'import numpy as np\n'), ((2670, 2701), 'numpy.array', 'np.array', (['value'], {'dtype': 'val_type'}), '(value, dtype=val_type)\n', (2678, 2701), True, 'import numpy as np\n'), ((2715, 2764), 'tests.test_ngraph.util.run_op_numeric_data', 'run_op_numeric_data', (['value', 'ng.constant', 'val_type'], {}), '(value, ng.constant, val_type)\n', (2734, 2764), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((2776, 2805), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (2787, 2805), True, 'import numpy as np\n'), ((3033, 3055), 'numpy.random.seed', 'np.random.seed', (['(133391)'], {}), '(133391)\n', (3047, 3055), True, 'import numpy as np\n'), ((3145, 3199), 'tests.test_ngraph.util.run_op_numeric_data', 'run_op_numeric_data', (['input_data', 'ng.constant', 'val_type'], {}), '(input_data, ng.constant, val_type)\n', (3164, 3199), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((3211, 3242), 'numpy.allclose', 'np.allclose', (['result', 'input_data'], {}), '(result, input_data)\n', (3222, 3242), True, 'import numpy as np\n'), ((3659, 3681), 'numpy.random.seed', 'np.random.seed', (['(133391)'], {}), '(133391)\n', (3673, 3681), True, 'import numpy as np\n'), ((3807, 3861), 'tests.test_ngraph.util.run_op_numeric_data', 
'run_op_numeric_data', (['input_data', 'ng.constant', 'val_type'], {}), '(input_data, ng.constant, val_type)\n', (3826, 3861), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((3873, 3904), 'numpy.allclose', 'np.allclose', (['result', 'input_data'], {}), '(result, input_data)\n', (3884, 3904), True, 'import numpy as np\n'), ((4014, 4069), 'ngraph.parameter', 'ng.parameter', (['data_shape'], {'name': '"""Data"""', 'dtype': 'np.float32'}), "(data_shape, name='Data', dtype=np.float32)\n", (4026, 4069), True, 'import ngraph as ng\n'), ((4099, 4168), 'ngraph.parameter', 'ng.parameter', (['target_shape_shape'], {'name': '"""Target_shape"""', 'dtype': 'np.int64'}), "(target_shape_shape, name='Target_shape', dtype=np.int64)\n", (4111, 4168), True, 'import ngraph as ng\n'), ((4195, 4247), 'ngraph.broadcast', 'ng.broadcast', (['data_parameter', 'target_shape_parameter'], {}), '(data_parameter, target_shape_parameter)\n', (4207, 4247), True, 'import ngraph as ng\n'), ((4452, 4507), 'ngraph.parameter', 'ng.parameter', (['data_shape'], {'name': '"""Data"""', 'dtype': 'np.float32'}), "(data_shape, name='Data', dtype=np.float32)\n", (4464, 4507), True, 'import ngraph as ng\n'), ((4537, 4606), 'ngraph.parameter', 'ng.parameter', (['target_shape_shape'], {'name': '"""Target_shape"""', 'dtype': 'np.int64'}), "(target_shape_shape, name='Target_shape', dtype=np.int64)\n", (4549, 4606), True, 'import ngraph as ng\n'), ((4633, 4702), 'ngraph.broadcast', 'ng.broadcast', (['data_parameter', 'target_shape_parameter', '"""BIDIRECTIONAL"""'], {}), "(data_parameter, target_shape_parameter, 'BIDIRECTIONAL')\n", (4645, 4702), True, 'import ngraph as ng\n'), ((5004, 5027), 'numpy.array', 'np.array', (['[1]', 'np.int32'], {}), '([1], np.int32)\n', (5012, 5027), True, 'import numpy as np\n'), ((5150, 5213), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_data]', 'ng.gather', 'input_indices', 'input_axes'], {}), '([input_data], ng.gather, 
input_indices, input_axes)\n', (5161, 5213), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((5225, 5254), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (5236, 5254), True, 'import numpy as np\n'), ((5401, 5439), 'numpy.array', 'np.array', (['[0, 2, 3, 1]'], {'dtype': 'np.int32'}), '([0, 2, 3, 1], dtype=np.int32)\n', (5409, 5439), True, 'import numpy as np\n'), ((5454, 5508), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_tensor]', 'ng.transpose', 'input_order'], {}), '([input_tensor], ng.transpose, input_order)\n', (5465, 5508), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((5525, 5564), 'numpy.transpose', 'np.transpose', (['input_tensor', 'input_order'], {}), '(input_tensor, input_order)\n', (5537, 5564), True, 'import numpy as np\n'), ((5577, 5606), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (5588, 5606), True, 'import numpy as np\n'), ((5839, 5871), 'numpy.array', 'np.array', (['[2, 1]'], {'dtype': 'np.int32'}), '([2, 1], dtype=np.int32)\n', (5847, 5871), True, 'import numpy as np\n'), ((5886, 5931), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_tensor]', 'ng.tile', 'repeats'], {}), '([input_tensor], ng.tile, repeats)\n', (5897, 5931), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((6026, 6055), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (6037, 6055), True, 'import numpy as np\n'), ((6289, 6321), 'numpy.array', 'np.array', (['[1, 0]'], {'dtype': 'np.int32'}), '([1, 0], dtype=np.int32)\n', (6297, 6321), True, 'import numpy as np\n'), ((6332, 6364), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.int32'}), '([0, 0], dtype=np.int32)\n', (6340, 6364), True, 'import numpy as np\n'), ((6379, 6411), 'numpy.array', 'np.array', (['[1, 1]'], {'dtype': 'np.int32'}), '([1, 1], dtype=np.int32)\n', (6387, 
6411), True, 'import numpy as np\n'), ((6429, 6464), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0], dtype=np.int32)\n', (6437, 6464), True, 'import numpy as np\n'), ((6480, 6515), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0], dtype=np.int32)\n', (6488, 6515), True, 'import numpy as np\n'), ((6536, 6571), 'numpy.array', 'np.array', (['[0, 1, 0]'], {'dtype': 'np.int32'}), '([0, 1, 0], dtype=np.int32)\n', (6544, 6571), True, 'import numpy as np\n'), ((6595, 6630), 'numpy.array', 'np.array', (['[1, 0, 0]'], {'dtype': 'np.int32'}), '([1, 0, 0], dtype=np.int32)\n', (6603, 6630), True, 'import numpy as np\n'), ((6651, 6686), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0], dtype=np.int32)\n', (6659, 6686), True, 'import numpy as np\n'), ((6701, 6841), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_tensor]', 'ng.strided_slice', 'begin', 'end', 'strides', 'begin_mask', 'end_mask', 'new_axis_mask', 'shrink_axis_mask', 'ellipsis_mask'], {}), '([input_tensor], ng.strided_slice, begin, end, strides,\n begin_mask, end_mask, new_axis_mask, shrink_axis_mask, ellipsis_mask)\n', (6712, 6841), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((7063, 7092), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (7074, 7092), True, 'import numpy as np\n'), ((7195, 7231), 'numpy.array', 'np.array', (['[0, -1, 4]'], {'dtype': 'np.int32'}), '([0, -1, 4], dtype=np.int32)\n', (7203, 7231), True, 'import numpy as np\n'), ((7278, 7299), 'numpy.array', 'np.array', (['[2, 150, 4]'], {}), '([2, 150, 4])\n', (7286, 7299), True, 'import numpy as np\n'), ((7315, 7344), 'numpy.reshape', 'np.reshape', (['A', 'expected_shape'], {}), '(A, expected_shape)\n', (7325, 7344), True, 'import numpy as np\n'), ((7358, 7407), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[A]', 'ng.reshape', 'shape', 'special_zero'], {}), 
'([A], ng.reshape, shape, special_zero)\n', (7369, 7407), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((7420, 7449), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (7431, 7449), True, 'import numpy as np\n'), ((7492, 7553), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {'dtype': 'np.float32'}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\n', (7500, 7553), True, 'import numpy as np\n'), ((7568, 7608), 'tests.test_ngraph.util.run_op_node', 'run_op_node', (['[input_tensor]', 'ng.shape_of'], {}), '([input_tensor], ng.shape_of)\n', (7579, 7608), False, 'from tests.test_ngraph.util import run_op_node, run_op_numeric_data\n'), ((7621, 7648), 'numpy.allclose', 'np.allclose', (['result', '[3, 3]'], {}), '(result, [3, 3])\n', (7632, 7648), True, 'import numpy as np\n'), ((2864, 2913), 'pytest.param', 'pytest.param', (['np.float32'], {'marks': 'xfail_issue_40957'}), '(np.float32, marks=xfail_issue_40957)\n', (2876, 2913), False, 'import pytest\n'), ((2923, 2972), 'pytest.param', 'pytest.param', (['np.float64'], {'marks': 'xfail_issue_40957'}), '(np.float64, marks=xfail_issue_40957)\n', (2935, 2972), False, 'import pytest\n'), ((3717, 3771), 'numpy.random.randint', 'np.random.randint', (['range_start', 'range_end'], {'size': '(2, 2)'}), '(range_start, range_end, size=(2, 2))\n', (3734, 3771), True, 'import numpy as np\n'), ((1553, 1581), 'numpy.empty', 'np.empty', (['(2, 2)'], {'dtype': 'bool'}), '((2, 2), dtype=bool)\n', (1561, 1581), True, 'import numpy as np\n'), ((1873, 1891), 'numpy.float32', 'np.float32', (['(0.1234)'], {}), '(0.1234)\n', (1883, 1891), True, 'import numpy as np\n'), ((1952, 1970), 'numpy.float64', 'np.float64', (['(0.1234)'], {}), '(0.1234)\n', (1962, 1970), True, 'import numpy as np\n'), ((2028, 2040), 'numpy.int8', 'np.int8', (['(-63)'], {}), '(-63)\n', (2035, 2040), True, 'import numpy as np\n'), ((2099, 2115), 'numpy.int16', 'np.int16', 
(['(-12345)'], {}), '(-12345)\n', (2107, 2115), True, 'import numpy as np\n'), ((2174, 2191), 'numpy.int32', 'np.int32', (['(-123456)'], {}), '(-123456)\n', (2182, 2191), True, 'import numpy as np\n'), ((2250, 2268), 'numpy.int64', 'np.int64', (['(-1234567)'], {}), '(-1234567)\n', (2258, 2268), True, 'import numpy as np\n'), ((2327, 2339), 'numpy.uint8', 'np.uint8', (['(63)'], {}), '(63)\n', (2335, 2339), True, 'import numpy as np\n'), ((2399, 2415), 'numpy.uint16', 'np.uint16', (['(12345)'], {}), '(12345)\n', (2408, 2415), True, 'import numpy as np\n'), ((2475, 2492), 'numpy.uint32', 'np.uint32', (['(123456)'], {}), '(123456)\n', (2484, 2492), True, 'import numpy as np\n'), ((2552, 2570), 'numpy.uint64', 'np.uint64', (['(1234567)'], {}), '(1234567)\n', (2561, 2570), True, 'import numpy as np\n'), ((4828, 4895), 'numpy.array', 'np.array', (['[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2]', 'np.float32'], {}), '([1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32)\n', (4836, 4895), True, 'import numpy as np\n'), ((4946, 4972), 'numpy.array', 'np.array', (['[0, 2]', 'np.int32'], {}), '([0, 2], np.int32)\n', (4954, 4972), True, 'import numpy as np\n'), ((5044, 5102), 'numpy.array', 'np.array', (['[1.0, 1.2, 2.0, 2.2, 3.0, 3.2]'], {'dtype': 'np.float32'}), '([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32)\n', (5052, 5102), True, 'import numpy as np\n'), ((5298, 5342), 'numpy.arange', 'np.arange', (['(3 * 3 * 224 * 224)'], {'dtype': 'np.int32'}), '(3 * 3 * 224 * 224, dtype=np.int32)\n', (5307, 5342), True, 'import numpy as np\n'), ((5777, 5805), 'numpy.arange', 'np.arange', (['(6)'], {'dtype': 'np.int32'}), '(6, dtype=np.int32)\n', (5786, 5805), True, 'import numpy as np\n'), ((5948, 5994), 'numpy.array', 'np.array', (['[0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]'], {}), '([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5])\n', (5956, 5994), True, 'import numpy as np\n'), ((6219, 6257), 'numpy.arange', 'np.arange', (['(2 * 3 * 4)'], {'dtype': 'np.float32'}), '(2 * 3 * 4, 
dtype=np.float32)\n', (6228, 6257), True, 'import numpy as np\n'), ((6941, 7017), 'numpy.array', 'np.array', (['[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]'], {'dtype': 'np.float32'}), '([12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32)\n', (6949, 7017), True, 'import numpy as np\n'), ((7126, 7159), 'numpy.arange', 'np.arange', (['(1200)'], {'dtype': 'np.float32'}), '(1200, dtype=np.float32)\n', (7135, 7159), True, 'import numpy as np\n'), ((3087, 3110), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (3101, 3110), True, 'import numpy as np\n')] |
# Copyright (c) 2013 Red Hat, Inc.
# Copyright 2014 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from oslo_db.sqlalchemy import engines
from osprofiler import profiler
from osprofiler import sqlalchemy as sa_tracer
import sqlalchemy as sa
from zaqar.common import decorators
from zaqar.conf import drivers_management_store_sqlalchemy
from zaqar import storage
from zaqar.storage.sqlalchemy import controllers
class ControlDriver(storage.ControlDriverBase):
    """SQLAlchemy-backed control-plane (management store) driver.

    Exposes pools, queue, catalogue and flavors controllers backed by a
    relational database, each optionally wrapped with osprofiler tracing.
    """

    def __init__(self, conf, cache):
        super(ControlDriver, self).__init__(conf, cache)
        self.conf.register_opts(
            drivers_management_store_sqlalchemy.ALL_OPTS,
            group=drivers_management_store_sqlalchemy.GROUP_NAME)
        self.sqlalchemy_conf = self.conf[
            drivers_management_store_sqlalchemy.GROUP_NAME]

    def _mysql_on_connect(self, conn, record):
        # NOTE(flaper87): This is necessary in order
        # to ensure that all date operations in mysql
        # happen in UTC, `now()` for example.
        conn.query('SET time_zone = "+0:00"')

    @decorators.lazy_property(write=False)
    def engine(self):
        """Create (once, lazily) the SQLAlchemy engine for this driver."""
        uri = self.sqlalchemy_conf.uri
        engine = engines.create_engine(uri, sqlite_fk=True)
        if (uri.startswith('mysql://') or
                uri.startswith('mysql+pymysql://')):
            # oslo_db.create_engine makes a test connection, throw that out
            # first. mysql time_zone can be added to oslo_db as a
            # startup option
            engine.dispose()
            sa.event.listen(engine, 'connect',
                            self._mysql_on_connect)
        if (self.conf.profiler.enabled and
                self.conf.profiler.trace_message_store):
            sa_tracer.add_tracing(sa, engine, "db")
        return engine

    def _maybe_trace(self, controller, name, requested):
        """Wrap *controller* with an osprofiler tracer when profiling is on.

        :param controller: controller instance to return (possibly wrapped)
        :param name: trace class name reported to osprofiler
        :param requested: whether tracing was requested for the store(s)
            this controller belongs to
        """
        if self.conf.profiler.enabled and requested:
            return profiler.trace_cls(name)(controller)
        return controller

    # TODO(cpp-cabrera): expose connect/close as a context manager
    # that acquires the connection to the DB for the desired scope and
    # closes it once the operations are completed
    # TODO(wangxiyuan): we should migrate to oslo.db asap.
    def run(self, *args, **kwargs):
        return self.engine.execute(*args, **kwargs)

    def close(self):
        pass

    @property
    def pools_controller(self):
        return self._maybe_trace(
            controllers.PoolsController(self),
            "sqlalchemy_pools_controller",
            self.conf.profiler.trace_management_store)

    @property
    def queue_controller(self):
        # Queues are reachable from both the message and management APIs,
        # so either trace flag enables tracing here.
        return self._maybe_trace(
            controllers.QueueController(self),
            "sqlalchemy_queue_controller",
            self.conf.profiler.trace_message_store or
            self.conf.profiler.trace_management_store)

    @property
    def catalogue_controller(self):
        return self._maybe_trace(
            controllers.CatalogueController(self),
            "sqlalchemy_catalogue_controller",
            self.conf.profiler.trace_management_store)

    @property
    def flavors_controller(self):
        return self._maybe_trace(
            controllers.FlavorsController(self),
            "sqlalchemy_flavors_controller",
            self.conf.profiler.trace_management_store)

    @property
    def topic_controller(self):
        # Topics are not implemented by the sqlalchemy management store.
        pass
| [
"zaqar.storage.sqlalchemy.controllers.PoolsController",
"zaqar.common.decorators.lazy_property",
"zaqar.storage.sqlalchemy.controllers.CatalogueController",
"zaqar.storage.sqlalchemy.controllers.QueueController",
"oslo_db.sqlalchemy.engines.create_engine",
"zaqar.storage.sqlalchemy.controllers.FlavorsCont... | [((1585, 1622), 'zaqar.common.decorators.lazy_property', 'decorators.lazy_property', ([], {'write': '(False)'}), '(write=False)\n', (1609, 1622), False, 'from zaqar.common import decorators\n'), ((1701, 1743), 'oslo_db.sqlalchemy.engines.create_engine', 'engines.create_engine', (['uri'], {'sqlite_fk': '(True)'}), '(uri, sqlite_fk=True)\n', (1722, 1743), False, 'from oslo_db.sqlalchemy import engines\n'), ((2756, 2789), 'zaqar.storage.sqlalchemy.controllers.PoolsController', 'controllers.PoolsController', (['self'], {}), '(self)\n', (2783, 2789), False, 'from zaqar.storage.sqlalchemy import controllers\n'), ((3127, 3160), 'zaqar.storage.sqlalchemy.controllers.QueueController', 'controllers.QueueController', (['self'], {}), '(self)\n', (3154, 3160), False, 'from zaqar.storage.sqlalchemy import controllers\n'), ((3566, 3603), 'zaqar.storage.sqlalchemy.controllers.CatalogueController', 'controllers.CatalogueController', (['self'], {}), '(self)\n', (3597, 3603), False, 'from zaqar.storage.sqlalchemy import controllers\n'), ((3947, 3982), 'zaqar.storage.sqlalchemy.controllers.FlavorsController', 'controllers.FlavorsController', (['self'], {}), '(self)\n', (3976, 3982), False, 'from zaqar.storage.sqlalchemy import controllers\n'), ((2054, 2112), 'sqlalchemy.event.listen', 'sa.event.listen', (['engine', '"""connect"""', 'self._mysql_on_connect'], {}), "(engine, 'connect', self._mysql_on_connect)\n", (2069, 2112), True, 'import sqlalchemy as sa\n'), ((2254, 2293), 'osprofiler.sqlalchemy.add_tracing', 'sa_tracer.add_tracing', (['sa', 'engine', '"""db"""'], {}), "(sa, engine, 'db')\n", (2275, 2293), True, 'from osprofiler import sqlalchemy as sa_tracer\n'), ((2912, 2961), 'osprofiler.profiler.trace_cls', 'profiler.trace_cls', (['"""sqlalchemy_pools_controller"""'], {}), "('sqlalchemy_pools_controller')\n", (2930, 2961), False, 'from osprofiler import profiler\n'), ((3347, 3396), 'osprofiler.profiler.trace_cls', 
'profiler.trace_cls', (['"""sqlalchemy_queue_controller"""'], {}), "('sqlalchemy_queue_controller')\n", (3365, 3396), False, 'from osprofiler import profiler\n'), ((3726, 3779), 'osprofiler.profiler.trace_cls', 'profiler.trace_cls', (['"""sqlalchemy_catalogue_controller"""'], {}), "('sqlalchemy_catalogue_controller')\n", (3744, 3779), False, 'from osprofiler import profiler\n'), ((4105, 4156), 'osprofiler.profiler.trace_cls', 'profiler.trace_cls', (['"""sqlalchemy_flavors_controller"""'], {}), "('sqlalchemy_flavors_controller')\n", (4123, 4156), False, 'from osprofiler import profiler\n')] |
### this works on linux only
try:
    import fcntl
    import subprocess
    import signal
    import os
except ImportError:
    # fcntl only exists on Unix-like systems.  Catch ImportError
    # specifically (a bare ``except`` would mask unrelated failures)
    # and bounce the user back to the site page.
    session.flash = 'sorry, only on Unix systems'
    redirect(URL(request.application, 'default', 'site'))

# Effectively-infinite lifetime (in seconds) for cache.ram entries.
forever = 10**8
def kill():
    """Terminate the cached appcfg upload process, if one is running.

    Returns 'oops' when no live process is cached; otherwise sends
    SIGKILL and evicts the cached handle.
    """
    process = cache.ram('gae_upload', lambda: None, forever)
    if not process or process.poll() is not None:
        return 'oops'
    os.kill(process.pid, signal.SIGKILL)
    # Force the cache entry to expire so a new upload can start.
    cache.ram('gae_upload', lambda: None, -1)
def deploy():
    """Render the GAE deployment form and, on submit, launch appcfg.

    Rewrites ``app.yaml`` so only the selected applications are uploaded,
    then starts ``appcfg ... update`` as a background process cached under
    the key 'gae_upload'; callback() streams its output to the browser.
    """
    if not os.path.exists(GAE_APPCFG):
        redirect(URL(request.application,'default','site'))
    regex = re.compile('^\w+$')
    # Candidate applications: every plain-word directory in the web2py tree.
    apps = sorted([(file.upper(), file) for file in \
                       os.listdir(apath(r=request)) if regex.match(file)])
    options = [OPTION(item[1]) for item in apps]
    form = FORM(TABLE(TR('Applications to deploy',
                          SELECT(_name='applications',_multiple='multiple',
                                 _id='applications',*options)),
                      TR('GAE Email:',
                         INPUT(_name='email',requires=IS_EMAIL())),
                      TR('GAE Password:',
                         INPUT(_name='password',_type='password',
                               requires=IS_NOT_EMPTY())),
                      TR('',INPUT(_type='submit',value='deploy'))))
    cmd = output = errors= ""
    if form.accepts(request.vars,session):
        # Stop any previous upload that may still be running.
        try:
            kill()
        except:
            pass
        # Exclude every application the user did not select.
        ignore_apps = [item[1] for item in apps \
                           if not item[1] in request.vars.applications]
        regex = re.compile('\(applications/\(.*')
        yaml = apath('../app.yaml', r=request)
        data=open(yaml,'r').read()
        data = regex.sub('(applications/(%s)/.*)|' % '|'.join(ignore_apps),data)
        open(yaml,'w').write(data)
        path = request.env.web2py_path
        # --passin makes appcfg read the password from stdin (written below).
        cmd = '%s --email=%s --passin update %s' % \
            (GAE_APPCFG, form.vars.email, path)
        # Launch appcfg in the background; the -1 lifetime forces a fresh
        # cache entry for this run.
        p = cache.ram('gae_upload',
                      lambda s=subprocess,c=cmd:s.Popen(c, shell=True,
                      stdin=s.PIPE,
                      stdout=s.PIPE,
                      stderr=s.PIPE, close_fds=True),-1)
        p.stdin.write(form.vars.password)
        # Non-blocking pipes so callback() can poll without stalling.
        fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
        fcntl.fcntl(p.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
    return dict(form=form,command=cmd)
def callback():
    """Poll the cached upload process and return its new output as HTML."""
    process = cache.ram('gae_upload', lambda: None, forever)
    if not process or process.poll() is not None:
        return '<done/>'

    def _drain(stream):
        # Non-blocking pipes raise when no data is available yet.
        try:
            return stream.read()
        except:
            return ''

    text = _drain(process.stdout) + _drain(process.stderr)
    return text.replace('\n', '<br/>')
| [
"os.path.exists",
"os.kill"
] | [((356, 386), 'os.kill', 'os.kill', (['p.pid', 'signal.SIGKILL'], {}), '(p.pid, signal.SIGKILL)\n', (363, 386), False, 'import os\n'), ((456, 482), 'os.path.exists', 'os.path.exists', (['GAE_APPCFG'], {}), '(GAE_APPCFG)\n', (470, 482), False, 'import os\n')] |
import torch
import torch.nn as nn
from basicts.archs.AGCRN_arch.AGCN import AVWGCN
class AGCRNCell(nn.Module):
    """GRU-style recurrent cell whose gates are adaptive graph
    convolutions (AVWGCN) instead of linear layers."""

    def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim):
        super(AGCRNCell, self).__init__()
        self.node_num = node_num
        self.hidden_dim = dim_out
        combined_dim = dim_in + self.hidden_dim
        # One graph conv yields both the update (z) and reset (r) gates.
        self.gate = AVWGCN(combined_dim, 2 * dim_out, cheb_k, embed_dim)
        # A second graph conv yields the candidate hidden state.
        self.update = AVWGCN(combined_dim, dim_out, cheb_k, embed_dim)

    def forward(self, x, state, node_embeddings):
        # x: (B, num_nodes, input_dim); state: (B, num_nodes, hidden_dim)
        state = state.to(x.device)
        gate_in = torch.cat((x, state), dim=-1)
        gates = torch.sigmoid(self.gate(gate_in, node_embeddings))
        z, r = torch.split(gates, self.hidden_dim, dim=-1)
        cand_in = torch.cat((x, z * state), dim=-1)
        candidate = torch.tanh(self.update(cand_in, node_embeddings))
        return r * state + (1 - r) * candidate

    def init_hidden_state(self, batch_size):
        """Return an all-zero state of shape (B, num_nodes, hidden_dim)."""
        return torch.zeros(batch_size, self.node_num, self.hidden_dim)
"torch.split",
"basicts.archs.AGCRN_arch.AGCN.AVWGCN",
"torch.zeros",
"torch.cat"
] | [((322, 386), 'basicts.archs.AGCRN_arch.AGCN.AVWGCN', 'AVWGCN', (['(dim_in + self.hidden_dim)', '(2 * dim_out)', 'cheb_k', 'embed_dim'], {}), '(dim_in + self.hidden_dim, 2 * dim_out, cheb_k, embed_dim)\n', (328, 386), False, 'from basicts.archs.AGCRN_arch.AGCN import AVWGCN\n'), ((406, 466), 'basicts.archs.AGCRN_arch.AGCN.AVWGCN', 'AVWGCN', (['(dim_in + self.hidden_dim)', 'dim_out', 'cheb_k', 'embed_dim'], {}), '(dim_in + self.hidden_dim, dim_out, cheb_k, embed_dim)\n', (412, 466), False, 'from basicts.archs.AGCRN_arch.AGCN import AVWGCN\n'), ((660, 689), 'torch.cat', 'torch.cat', (['(x, state)'], {'dim': '(-1)'}), '((x, state), dim=-1)\n', (669, 689), False, 'import torch\n'), ((780, 821), 'torch.split', 'torch.split', (['z_r', 'self.hidden_dim'], {'dim': '(-1)'}), '(z_r, self.hidden_dim, dim=-1)\n', (791, 821), False, 'import torch\n'), ((843, 876), 'torch.cat', 'torch.cat', (['(x, z * state)'], {'dim': '(-1)'}), '((x, z * state), dim=-1)\n', (852, 876), False, 'import torch\n'), ((1055, 1110), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.node_num', 'self.hidden_dim'], {}), '(batch_size, self.node_num, self.hidden_dim)\n', (1066, 1110), False, 'import torch\n')] |
from numpy import log10, isnan
def signOfFeasible(p):
    """Return '+' when the current iterate p.xk is feasible, '-' otherwise."""
    return '+' if p.isFeas(p.xk) else '-'
# Maps a text-output column name to a formatter; each formatter receives
# the problem instance ``p`` and returns the string printed in that column.
textOutputDict = {\
    # Objective value, de-negated when the problem was inverted internally
    'objFunVal': lambda p: p.iterObjFunTextFormat % (-p.Fk if p.invertObjFunc else p.Fk),
    # 1e-100 guards log10 against a zero residual
    'log10(maxResidual)': lambda p: '%0.2f' % log10(p.rk+1e-100),
    'log10(MaxResidual/ConTol)':lambda p: '%0.2f' % log10(max((p.rk/p.contol, 1e-100))),
    'residual':lambda p: '%0.1e' % p._Residual,
    'isFeasible': signOfFeasible,
    'nSolutions': lambda p: '%d' % p._nObtainedSolutions,
    'front length':lambda p: '%d' % p._frontLength,
    # Signed counters; rendered empty when zero to keep the log clean
    'outcome': lambda p: ('%+d' % -p._nOutcome if p._nOutcome != 0 else ''),
    'income': lambda p: ('%+d' % p._nIncome if p._nIncome != 0 else ''),
    # Bound/distance estimates may be NaN before any estimate exists
    'f*_distance_estim': lambda p: ('%0.1g' % p.f_bound_distance if not isnan(p.f_bound_distance) else 'N/A'),
    'f*_bound_estim': lambda p: (p.iterObjFunTextFormat % \
                                 p.f_bound_estimation) if not isnan(p.f_bound_estimation) else 'N/A',
    }
delimiter = ' '
class ooTextOutput:
    """Mixin that renders the per-iteration text log of a solver run.

    Expects the host object to provide: ``iter``, ``lastPrintedIter``,
    ``iprint`` (print cadence; negative disables output), ``isFinished``,
    ``data4TextOutput`` (ordered column names), ``disp`` (output sink),
    and the fields consumed by the formatters in ``textOutputDict``.
    """
    def __init__(self):
        pass

    def iterPrint(self):
        # Print each iteration at most once.
        if self.lastPrintedIter == self.iter: return
        if self.iter == 0 and self.iprint >= 0: # 0th iter (start)
            # Emit the header row with the column names.
            s = ' iter' + delimiter
            for fn in self.data4TextOutput:
                s += fn + delimiter
            self.disp(s)
        elif self.iprint<0 or \
        (((self.iprint>0 and self.iter % self.iprint != 0) or self.iprint==0) and not(self.isFinished or self.iter == 0)):
            # Skip: printing disabled, or this iteration is off-cadence
            # and the run has neither just started nor finished.
            return
        # Render one data row: iteration number plus each formatted column.
        s = str(self.iter).rjust(5) + '  '
        for columnName in self.data4TextOutput:
            val = textOutputDict[columnName](self)
            #nWhole = length(columnName)
            s += val.rjust(len(columnName)) + '   '
        self.disp(s)
        self.lastPrintedIter = self.iter
| [
"numpy.log10",
"numpy.isnan"
] | [((260, 280), 'numpy.log10', 'log10', (['(p.rk + 1e-100)'], {}), '(p.rk + 1e-100)\n', (265, 280), False, 'from numpy import log10, isnan\n'), ((759, 784), 'numpy.isnan', 'isnan', (['p.f_bound_distance'], {}), '(p.f_bound_distance)\n', (764, 784), False, 'from numpy import log10, isnan\n'), ((884, 911), 'numpy.isnan', 'isnan', (['p.f_bound_estimation'], {}), '(p.f_bound_estimation)\n', (889, 911), False, 'from numpy import log10, isnan\n')] |
import os
from main import main
from pprint import pprint
def parse(lines):
    """Parse a tower-defense level description into a dict.

    Expected line layout:
      0: "<wx> <wy>"           world bounds
      1: "<x> <y>"             initial position
      2: "<cmd> <steps> ..."   alternating command/step pairs
      3: "<health> <speed>"
      4: "<nspawn>"            number of spawn times
      5..4+nspawn: one spawn time per line
      next: "<damage> <range>"
      next: "<t>"              number of tower queries
      next t lines: "<towerx> <towery>"

    :param lines: the raw input lines of a level file
    :return: dict with keys wx, wy, x, y, cmds, speed, health, damage,
        range, spawns, towers
    """
    # world bounds
    wx = int(lines[0].split()[0])
    # BUGFIX: wy previously read split()[0] again, discarding the second
    # bound; the y value lives in the second token (cf. x/y below).
    wy = int(lines[0].split()[1])
    # initial position
    x = int(lines[1].split()[0])
    y = int(lines[1].split()[1])
    cmds = []
    # command / step pair
    it = iter(lines[2].split())
    for e in it:
        cmds.append((e, int(next(it))))
    # health and speed
    health = float(lines[3].split()[0])
    speed = float(lines[3].split()[1])
    # spawn times
    nspawn = int(lines[4])
    spawns = []
    for i in range(nspawn):
        spawns.append(int(lines[4 + i + 1]))
    # damage and range
    damage = float(lines[4 + nspawn + 1].split()[0])
    towerrange = int(lines[4 + nspawn + 1].split()[1])
    # queries
    t = int(lines[4 + nspawn + 2])
    towers = []
    for i in range(t):
        towertxt = lines[4 + nspawn + 3 + i]
        towerx = int(towertxt.split()[0])
        towery = int(towertxt.split()[1])
        towers.append((towerx, towery))

    return {
        "wx": wx, "wy": wy,
        "x": x, "y": y,
        "cmds": cmds,
        "speed": speed,
        "health": health,
        "damage": damage,
        "range": towerrange,
        "spawns": spawns,
        "towers": towers
    }
if __name__ == "__main__":
    # Run every quest of the chosen level: parse the .in file, solve it
    # with main(), print the result and write it next to the input as .out.
    level, quests = 4, 5
    for i in range(1, quests + 1):
        input_file = r'..\data\level{0}\level{0}_{1}.in'.format(level, i)
        output_file = os.path.splitext(input_file)[0] + ".out"
        with open(input_file, 'r') as fi:
            data = parse(fi.readlines())
        # pprint(data)
        print("=== Output {}".format(i))
        print("======================")
        result = main(data)
        pprint(result)
        with open(output_file, 'w+') as fo:
            fo.write(result)
| [
"main.main",
"os.path.splitext",
"pprint.pprint"
] | [((1713, 1723), 'main.main', 'main', (['data'], {}), '(data)\n', (1717, 1723), False, 'from main import main\n'), ((1736, 1750), 'pprint.pprint', 'pprint', (['result'], {}), '(result)\n', (1742, 1750), False, 'from pprint import pprint\n'), ((1449, 1477), 'os.path.splitext', 'os.path.splitext', (['input_file'], {}), '(input_file)\n', (1465, 1477), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
import os

# Directory holding the simulation state snapshots (one file per frame).
path = "/Users/petermarinov/msci project/electrode data/test data/data/"
filenames = []
for f in os.listdir(path):
    if not f.startswith('.'):
        filenames.append(f)

# Pick one frame near the end of the run.
i = -12
data = np.genfromtxt(path + filenames[i])

# Rebuild the 200x200 membrane-voltage map from the cell states:
# state 0 -> resting (-90 mV), state 1 -> fully excited (+20 mV),
# state > 1 -> repolarising linearly down by 110 mV over the refractory
# period stored in the second column.
V = np.zeros((200, 200))
for i in range(0, 200):
    for j in range(0, 200):
        state = data[j + 200 * i][0]
        if state == 0:
            V[i, j] = -90.0
        if state > 1:
            V[i, j] = 20. - (110. / data[j + 200 * i][1]) * (state - 1)
        if state == 1:
            V[i, j] = 20.

i1 = 50  # electrode x position (grid units)
k = 3    # electrode height above the tissue plane
total = []
x = 0    # accumulator for the current electrode sample
# (The old, never-read 200^3 ``elec`` buffer — roughly 64 MB — was removed.)
for j1 in range(0, 200):
    for i in range(1, 200):
        for j in range(1, 200):
            # Dipole contribution grad(V).r / |r|^3 with
            # |r|^2 = (i-i1)^2 + (j-j1)^2 + k^2.
            # BUGFIX: ``np.float`` was removed in NumPy 1.24 (use the
            # builtin float) and ``** 3 / 2`` computed (r^2)^3 / 2
            # instead of the intended r^3 = (r^2)**1.5 — cf. the formula
            # in the original commented-out lines.
            num = ((i - i1) * (V[i, j] - V[i - 1, j])
                   + (j - j1) * (V[i, j] - V[i, j - 1]))
            den = ((i - i1) ** 2 + (j - j1) ** 2 + k ** 2) ** 1.5
            x += float(num) / float(den)
    total.append(x)
    x = 0

plt.plot(total)
plt.xlabel("time [dimentionless]", fontsize=18)
plt.ylabel("Voltage [mV]", fontsize=18)
plt.title("Electrode measurement for a healthy pacing heart")
plt.grid()
plt.show()
"os.listdir",
"matplotlib.pyplot.grid",
"numpy.float",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.genfromtxt",
"matplotlib.pyplot.show"
] | [((167, 183), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (177, 183), False, 'import os\n'), ((264, 298), 'numpy.genfromtxt', 'np.genfromtxt', (['(path + filenames[i])'], {}), '(path + filenames[i])\n', (277, 298), True, 'import numpy as np\n'), ((304, 324), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (312, 324), True, 'import numpy as np\n'), ((656, 681), 'numpy.zeros', 'np.zeros', (['(200, 200, 200)'], {}), '((200, 200, 200))\n', (664, 681), True, 'import numpy as np\n'), ((1211, 1226), 'matplotlib.pyplot.plot', 'plt.plot', (['total'], {}), '(total)\n', (1219, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1228, 1275), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [dimentionless]"""'], {'fontsize': '(18)'}), "('time [dimentionless]', fontsize=18)\n", (1238, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1318), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Voltage [mV]"""'], {'fontsize': '(18)'}), "('Voltage [mV]', fontsize=18)\n", (1289, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1384), 'matplotlib.pyplot.title', 'plt.title', (['"""Electrode measurement for a healthy pacing heart"""'], {}), "('Electrode measurement for a healthy pacing heart')\n", (1332, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1386, 1396), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1394, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1408), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1406, 1408), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1116), 'numpy.float', 'np.float', (['((i - i1) * (V[i, j] - V[i - 1, j]) + (j - j1) * (V[i, j] - V[i, j - 1]))'], {}), '((i - i1) * (V[i, j] - V[i - 1, j]) + (j - j1) * (V[i, j] - V[i, j -\n 1]))\n', (1039, 1116), True, 'import numpy as np\n'), ((1091, 1150), 'numpy.float', 'np.float', (['(((i - i1) ** 2 + (j - j1) ** 2 + k ** 2) ** 3 / 2)'], {}), '(((i - i1) ** 2 + (j - j1) ** 2 + k ** 2) ** 3 / 2)\n', (1099, 1150), 
True, 'import numpy as np\n')] |
from data_reader.reader import CsvReader
from util import *
import numpy as np
import matplotlib.pyplot as plt
class LogisticRegression(object):
    """Binary logistic-regression classifier trained by full-batch
    gradient ascent on the log-likelihood.

    Attributes set by fit():
      w_    -- ndarray of shape (1 + n_features,); w_[0] is the bias.
      cost_ -- list of the logistic cost recorded once per epoch.
    """

    def __init__(self, learning_rate=0.01, epochs=50):
        self.__epochs = epochs
        self.__learning_rate = learning_rate

    def fit(self, X, y):
        """Fit to data X of shape (n_samples, n_features) and labels y in {0, 1}."""
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []

        for _ in range(self.__epochs):
            # 1- Net input, 2- sigmoid activation
            z = self.__net_input(X)
            h = self.__activation(z)
            errors = y - h
            # Gradient of the log-likelihood: X^T (y - h) for the weights,
            # sum(y - h) for the bias.
            self.w_[1:] += self.__learning_rate * X.T.dot(errors)
            # BUGFIX: the bias gradient is the sum of the raw errors; the
            # old code summed the per-feature weight gradients instead.
            self.w_[0] += self.__learning_rate * errors.sum()
            self.cost_.append(self.__logit_cost(y, self.__activation(z)))

    def __logit_cost(self, y, y_val):
        # Negative log-likelihood of the Bernoulli model.
        logit = -y.dot(np.log(y_val)) - ((1 - y).dot(np.log(1 - y_val)))
        return logit

    def __sigmoid(self, z):
        return 1.0 / (1.0 + np.exp(-z))

    def __net_input(self, X):
        # W^T x + b
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def __activation(self, X):
        return self.__sigmoid(X)

    def predict(self, X):
        """Return hard class labels (0 or 1) for the samples in X."""
        return np.where(self.__activation(self.__net_input(X)) >= 0.5, 1, 0)
reader = CsvReader("./data/Iris.csv")
iris_features, iris_labels = reader.get_iris_data()
# Drop the virginica class so the task becomes binary
# (setosa vs versicolor).
ignore_verginica = [i for i, v in enumerate(iris_labels) if v == 'Iris-virginica']
iris_features = [v for i, v in enumerate(iris_features) if i not in ignore_verginica]
iris_labels = [v for i, v in enumerate(iris_labels) if i not in ignore_verginica]
print(len(iris_features))
print(len(iris_labels))
# Shuffle, one-hot encode, then collapse one-hot rows back to class indices.
iris_features, iris_labels = shuffle(iris_features, iris_labels)
iris_labels = to_onehot(iris_labels)
iris_labels = list(map(lambda v: v.index(max(v)), iris_labels))
# 89/11 train-test split of the remaining 100 samples.
train_x, train_y, test_x, test_y = iris_features[0:89], iris_labels[0:89], iris_features[89:], iris_labels[89:]
train_x, train_y, test_x, test_y = np.asarray(train_x), np.asarray(train_y), np.asarray(test_x), np.asarray(test_y)
# Standardize the test set with the TRAINING means/stds to avoid leakage.
train_x, means, stds = standardize(train_x)
test_x = standardize(test_x, means, stds)
lr = LogisticRegression(learning_rate=0.1, epochs=50)
lr.fit(train_x, train_y)
# Plot the log10 of the training cost per epoch.
plt.plot(range(1, len(lr.cost_) + 1), np.log10(lr.cost_))
plt.xlabel('Epochs')
plt.ylabel('Cost')
plt.title('Logistic Regression - Learning rate 0.1')
plt.tight_layout()
plt.show()
predicted_test = lr.predict(test_x)
print("Test Accuracy: " + str(((sum([predicted_test[i] == test_y[i] for i in range(0, len(predicted_test))]) / len(predicted_test)) * 100.0)) + "%")
| [
"numpy.log10",
"matplotlib.pyplot.ylabel",
"data_reader.reader.CsvReader",
"matplotlib.pyplot.xlabel",
"numpy.log",
"numpy.asarray",
"numpy.exp",
"numpy.zeros",
"numpy.dot",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1601, 1629), 'data_reader.reader.CsvReader', 'CsvReader', (['"""./data/Iris.csv"""'], {}), "('./data/Iris.csv')\n", (1610, 1629), False, 'from data_reader.reader import CsvReader\n'), ((2608, 2628), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2618, 2628), True, 'import matplotlib.pyplot as plt\n'), ((2629, 2647), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost"""'], {}), "('Cost')\n", (2639, 2647), True, 'import matplotlib.pyplot as plt\n'), ((2648, 2700), 'matplotlib.pyplot.title', 'plt.title', (['"""Logistic Regression - Learning rate 0.1"""'], {}), "('Logistic Regression - Learning rate 0.1')\n", (2657, 2700), True, 'import matplotlib.pyplot as plt\n'), ((2702, 2720), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2718, 2720), True, 'import matplotlib.pyplot as plt\n'), ((2721, 2731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2729, 2731), True, 'import matplotlib.pyplot as plt\n'), ((2301, 2320), 'numpy.asarray', 'np.asarray', (['train_x'], {}), '(train_x)\n', (2311, 2320), True, 'import numpy as np\n'), ((2322, 2341), 'numpy.asarray', 'np.asarray', (['train_y'], {}), '(train_y)\n', (2332, 2341), True, 'import numpy as np\n'), ((2343, 2361), 'numpy.asarray', 'np.asarray', (['test_x'], {}), '(test_x)\n', (2353, 2361), True, 'import numpy as np\n'), ((2363, 2381), 'numpy.asarray', 'np.asarray', (['test_y'], {}), '(test_y)\n', (2373, 2381), True, 'import numpy as np\n'), ((2588, 2606), 'numpy.log10', 'np.log10', (['lr.cost_'], {}), '(lr.cost_)\n', (2596, 2606), True, 'import numpy as np\n'), ((321, 345), 'numpy.zeros', 'np.zeros', (['(1 + X.shape[1])'], {}), '(1 + X.shape[1])\n', (329, 345), True, 'import numpy as np\n'), ((1236, 1258), 'numpy.dot', 'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (1242, 1258), True, 'import numpy as np\n'), ((1079, 1096), 'numpy.log', 'np.log', (['(1 - y_val)'], {}), '(1 - y_val)\n', (1085, 1096), True, 'import numpy as np\n'), 
((1178, 1188), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (1184, 1188), True, 'import numpy as np\n'), ((1049, 1062), 'numpy.log', 'np.log', (['y_val'], {}), '(y_val)\n', (1055, 1062), True, 'import numpy as np\n')] |
from wavefront_reader.wavefront_classes.objfile import ObjFile
from .readface import read_face
def read_objfile(fname):
    """Takes .obj filename and return an ObjFile class."""
    obj_file = ObjFile()
    with open(fname) as f:
        lines = f.read().splitlines()
    if 'OBJ' not in lines[0]:
        raise ValueError("File not .obj-formatted.")
    # todo: assumes one object per .obj file, which is wrong
    # todo: doesn't properly ignore comments
    for line in lines:
        if not line:
            continue
        prefix, value = line.split(' ', 1)
        if prefix == 'o':
            # Start a new object property block.
            obj_file.add_prop(value)
        elif not obj_file.has_prop():
            # Data seen before any object declaration goes to misc.
            obj_file.misc[prefix] = value
        elif prefix == 'v':
            obj_file.last_obj_prop.vertices.append(
                [float(item) for item in value.split(' ')])
        elif prefix == 'vn':
            obj_file.last_obj_prop.vertex_normals.append(
                [float(item) for item in value.split(' ')])
        elif prefix == 'vt':
            obj_file.last_obj_prop.vertex_textures.append(
                [float(item) for item in value.split(' ')])
        elif prefix == 'usemtl':
            obj_file.last_obj_prop.material_name = value
        elif prefix == 'f':
            obj_file.last_obj_prop.faces.append(
                read_face(value, obj_file.last_obj_prop))
        # Unknown prefixes inside an object block are silently ignored,
        # matching the historical behaviour.
    return obj_file
| [
"wavefront_reader.wavefront_classes.objfile.ObjFile"
] | [((195, 204), 'wavefront_reader.wavefront_classes.objfile.ObjFile', 'ObjFile', ([], {}), '()\n', (202, 204), False, 'from wavefront_reader.wavefront_classes.objfile import ObjFile\n')] |
#!/usr/bin/env python3
"""
Created on March 18 2020
@author: <NAME>
@description: Extract the taxonomy ID from an SBML file
"""
import argparse
import tempfile
import os
import logging
import shutil
import docker
def main(inputfile, output):
    """Call the extractTaxonomy docker to return the JSON file

    :param inputfile: The path to the SBML file
    :param output: The path to the output json file

    :type inputfile: str
    :type output: str

    :rtype: None
    :return: None
    """
    docker_client = docker.from_env()
    image_str = 'brsynth/extracttaxonomy-standalone'
    try:
        image = docker_client.images.get(image_str)
    except docker.errors.ImageNotFound:
        # Image missing locally: try to fetch it from the registry.
        logging.warning('Could not find the image, trying to pull it')
        try:
            docker_client.images.pull(image_str)
            image = docker_client.images.get(image_str)
        except docker.errors.ImageNotFound:
            logging.error('Cannot pull image: '+str(image_str))
            exit(1)
    with tempfile.TemporaryDirectory() as tmpOutputFolder:
        if os.path.exists(inputfile):
            # Stage the input under a fixed name inside the shared volume.
            shutil.copy(inputfile, tmpOutputFolder+'/input.dat')
            command = ['/home/tool_extractTaxonomy.py',
                       '-input',
                       '/home/tmp_output/input.dat',
                       '-output',
                       '/home/tmp_output/output.dat']
            # Run the tool with the temp dir bind-mounted read/write.
            container = docker_client.containers.run(image_str,
                                                     command,
                                                     detach=True,
                                                     stderr=True,
                                                     volumes={tmpOutputFolder+'/': {'bind': '/home/tmp_output', 'mode': 'rw'}})
            container.wait()
            # Surface anything the tool wrote to stderr.
            err = container.logs(stdout=False, stderr=True)
            err_str = err.decode('utf-8')
            if 'ERROR' in err_str:
                print(err_str)
            elif 'WARNING' in err_str:
                print(err_str)
            if not os.path.exists(tmpOutputFolder+'/output.dat'):
                print('ERROR: Cannot find the output file: '+str(tmpOutputFolder+'/output.dat'))
            else:
                # Copy the result out before the temp dir is destroyed.
                shutil.copy(tmpOutputFolder+'/output.dat', output)
            container.remove()
        else:
            logging.error('Cannot find the input file: '+str(inputfile))
            exit(1)
if __name__ == "__main__":
    # Command-line entry point: -input SBML file, -output JSON result.
    cli = argparse.ArgumentParser('Extract the t')
    cli.add_argument('-input', type=str)
    cli.add_argument('-output', type=str)
    args = cli.parse_args()
    main(args.input, args.output)
| [
"tempfile.TemporaryDirectory",
"os.path.exists",
"argparse.ArgumentParser",
"logging.warning",
"docker.from_env",
"shutil.copy"
] | [((519, 536), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (534, 536), False, 'import docker\n'), ((2485, 2525), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Extract the t"""'], {}), "('Extract the t')\n", (2508, 2525), False, 'import argparse\n'), ((1017, 1046), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1044, 1046), False, 'import tempfile\n'), ((1078, 1103), 'os.path.exists', 'os.path.exists', (['inputfile'], {}), '(inputfile)\n', (1092, 1103), False, 'import os\n'), ((699, 761), 'logging.warning', 'logging.warning', (['"""Could not find the image, trying to pull it"""'], {}), "('Could not find the image, trying to pull it')\n", (714, 761), False, 'import logging\n'), ((1117, 1171), 'shutil.copy', 'shutil.copy', (['inputfile', "(tmpOutputFolder + '/input.dat')"], {}), "(inputfile, tmpOutputFolder + '/input.dat')\n", (1128, 1171), False, 'import shutil\n'), ((2076, 2123), 'os.path.exists', 'os.path.exists', (["(tmpOutputFolder + '/output.dat')"], {}), "(tmpOutputFolder + '/output.dat')\n", (2090, 2123), False, 'import os\n'), ((2254, 2306), 'shutil.copy', 'shutil.copy', (["(tmpOutputFolder + '/output.dat')", 'output'], {}), "(tmpOutputFolder + '/output.dat', output)\n", (2265, 2306), False, 'import shutil\n')] |
import os
import sys
import posthoganalytics
from django.apps import AppConfig
from django.conf import settings
from posthog.utils import get_git_branch, get_git_commit, get_machine_id
from posthog.version import VERSION
class PostHogConfig(AppConfig):
    """Django app config; wires up posthoganalytics and plugin sync on boot."""
    name = "posthog"
    verbose_name = "PostHog"

    def ready(self):
        posthoganalytics.api_key = "<KEY>"
        posthoganalytics.personal_api_key = os.environ.get("POSTHOG_PERSONAL_API_KEY")

        # Skip plugin sync in manage.py scripts and in tests
        # (the database tables might not yet be created)
        if (
            not settings.TEST
            and not "makemigrations" in sys.argv
            and not "migrate" in sys.argv
            and not "manage.py" in " ".join(sys.argv)
            and not "/mypy" in sys.argv[0]
        ):
            from posthog.plugins import sync_plugin_config

            # syncs posthog.json['plugins'] and the Plugin/PluginConfig models
            sync_plugin_config()

        if settings.DEBUG:
            # log development server launch to posthog
            # RUN_MAIN is "true" only in the autoreloader's child process,
            # so the event fires once rather than twice under runserver.
            if os.getenv("RUN_MAIN") == "true":
                posthoganalytics.capture(
                    get_machine_id(),
                    "development server launched",
                    {"posthog_version": VERSION, "git_rev": get_git_commit(), "git_branch": get_git_branch(),},
                )
            # After the launch event, never capture from a DEBUG server.
            posthoganalytics.disabled = True
        elif settings.TEST or os.environ.get("OPT_OUT_CAPTURE"):
            posthoganalytics.disabled = True
| [
"os.getenv",
"posthog.utils.get_git_branch",
"posthog.utils.get_git_commit",
"posthog.utils.get_machine_id",
"posthog.plugins.sync_plugin_config",
"os.environ.get"
] | [((416, 458), 'os.environ.get', 'os.environ.get', (['"""POSTHOG_PERSONAL_API_KEY"""'], {}), "('POSTHOG_PERSONAL_API_KEY')\n", (430, 458), False, 'import os\n'), ((971, 991), 'posthog.plugins.sync_plugin_config', 'sync_plugin_config', ([], {}), '()\n', (989, 991), False, 'from posthog.plugins import sync_plugin_config\n'), ((1090, 1111), 'os.getenv', 'os.getenv', (['"""RUN_MAIN"""'], {}), "('RUN_MAIN')\n", (1099, 1111), False, 'import os\n'), ((1459, 1492), 'os.environ.get', 'os.environ.get', (['"""OPT_OUT_CAPTURE"""'], {}), "('OPT_OUT_CAPTURE')\n", (1473, 1492), False, 'import os\n'), ((1185, 1201), 'posthog.utils.get_machine_id', 'get_machine_id', ([], {}), '()\n', (1199, 1201), False, 'from posthog.utils import get_git_branch, get_git_commit, get_machine_id\n'), ((1314, 1330), 'posthog.utils.get_git_commit', 'get_git_commit', ([], {}), '()\n', (1328, 1330), False, 'from posthog.utils import get_git_branch, get_git_commit, get_machine_id\n'), ((1346, 1362), 'posthog.utils.get_git_branch', 'get_git_branch', ([], {}), '()\n', (1360, 1362), False, 'from posthog.utils import get_git_branch, get_git_commit, get_machine_id\n')] |
import os
import json
import time
from datetime import timedelta
class TaskTimer:
    """Wall-clock timer for named tasks, with JSON persistence."""

    def __init__(self):
        # task name -> elapsed time rendered as str(timedelta)
        self.time_performance = {}
        # task name -> epoch timestamp recorded by start()
        self.start_times = {}

    def start(self, task):
        """Record the start time of *task* and log it."""
        started = time.time()
        self.start_times[task] = started
        print('--- [{}] Start "{}"'.format(time.ctime(started), task))

    def end(self, task):
        """Record the elapsed time for *task* (started earlier) and log it."""
        finished = time.time()
        elapsed = timedelta(seconds=(finished - self.start_times[task]))
        self.time_performance[task] = str(elapsed)
        message = '--- [{}] End "{}" in {} seconds'.format(
            time.ctime(finished), task, self.time_performance[task]
        )
        print(message)

    def save(self, folder):
        """Write all collected timings to <folder>/time_performance.json."""
        target = os.path.join(folder, "time_performance.json")
        with open(target, "w") as fp:
            json.dump(self.time_performance, fp)
| [
"time.ctime",
"os.path.join",
"datetime.timedelta",
"time.time",
"json.dump"
] | [((235, 246), 'time.time', 'time.time', ([], {}), '()\n', (244, 246), False, 'import time\n'), ((380, 391), 'time.time', 'time.time', ([], {}), '()\n', (389, 391), False, 'import time\n'), ((447, 501), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(saving_end - self.start_times[task])'}), '(seconds=saving_end - self.start_times[task])\n', (456, 501), False, 'from datetime import timedelta\n'), ((799, 835), 'json.dump', 'json.dump', (['self.time_performance', 'fp'], {}), '(self.time_performance, fp)\n', (808, 835), False, 'import json\n'), ((290, 324), 'time.ctime', 'time.ctime', (['self.start_times[task]'], {}), '(self.start_times[task])\n', (300, 324), False, 'import time\n'), ((599, 621), 'time.ctime', 'time.ctime', (['saving_end'], {}), '(saving_end)\n', (609, 621), False, 'import time\n'), ((728, 773), 'os.path.join', 'os.path.join', (['folder', '"""time_performance.json"""'], {}), "(folder, 'time_performance.json')\n", (740, 773), False, 'import os\n')] |
import datetime
import os, sys
import pprint
import requests
from pandas.io.json import json_normalize
import pandas as pd
# Base endpoint of the WSN query API (v2).
URL = 'https://wsn.latice.eu/api/query/v2/'
#URL = 'http://localhost:8000/wsn/api/query/v2/'
#TOKEN = os.getenv('WSN_TOKEN')
# API token read from the environment; may be None if unset (see get_token()).
TOKEN = os.getenv('WSN_TOKEN')
# Directory the process was launched from; used to locate the biomet CSV.
path = os.getcwd()
def query(
        limit=100,      # Pagination
        fields=None,    # Fields to return (all by default)
        tags=None,      # Tags to return (all by default)
        interval=None,  # If given will return the average in the interval
        debug=False,    # Not sent to the API
        # Filters
        time__gte=None, time__lte=None,  # Time is special
        **kw):
    """Send an authenticated GET to the WSN API and return the decoded JSON.

    The time bounds travel as POSIX timestamps.  Extra keyword arguments
    become filters on the stored JSON frames: datetime values are turned
    into integer timestamps, and integer values get an ``:int`` suffix on
    the key so the server can cast them back.
    """
    params = {
        'limit': limit,  # Pagination
        'time__gte': time__gte.timestamp() if time__gte else None,
        'time__lte': time__lte.timestamp() if time__lte else None,
        'fields': fields,
        'tags': tags,
        'interval': interval,
    }
    # Filters inside the json payload.
    for name, val in kw.items():
        if val is None:
            params[name] = None
            continue
        if type(val) is datetime.datetime:
            val = int(val.timestamp())
        if isinstance(val, int):
            name += ':int'
        params[name] = val
    # Perform the request and decode the body.
    headers = {'Authorization': 'Token %s' % TOKEN}
    response = requests.get(URL, headers=headers, params=params)
    response.raise_for_status()
    payload = response.json()
    if debug:
        pprint.pprint(params)
        pprint.pprint(payload)
        print()
    return payload
def get_token():
    """Return the WSN API token from the environment, or exit with a hint."""
    token = os.environ.get('WSN_TOKEN')
    if token is None:
        print("Please set the environment variable WSN_TOKEN in .bashrc as follow: \n\t export WSN_TOKEN=xxxxxxxxxxxxxxxxx ")
        sys.exit(1)
    return token
def query_df(
        limit=100,      # Pagination
        fields=None,    # Fields to return (all by default)
        tags=None,      # Tags to return (all by default)
        interval=None,  # If given will return the average in the interval
        debug=False,    # Not sent to the API
        # Filters
        time__gte=None, time__lte=None,  # Time is special
        **kw):
    """Like :func:`query`, but return the results as a pandas DataFrame.

    The parameter encoding, HTTP request and debug printing are delegated
    to :func:`query` (previously the whole body was duplicated here);
    this wrapper only flattens the ``results`` list into a DataFrame and
    parses the ``time`` column when present.
    """
    payload = query(limit=limit, fields=fields, tags=tags, interval=interval,
                    debug=debug, time__gte=time__gte, time__lte=time__lte, **kw)
    # Flatten the list of result objects into a table.
    df = json_normalize(payload['results'])
    try:
        df.time = pd.to_datetime(df.time)
    except AttributeError:
        # The response carried no 'time' column (was a bare `except:` before,
        # which also hid genuine errors).
        print('WARNING: no timestamp')
    return df
def biomet_metadata():
    """Load the stationary biomet variable descriptions from the local CSV.

    The file is expected in the current working directory (module-level
    ``path``); columns are ';'-separated.
    """
    meta = pd.read_csv(path + '/FINSE-stationary_variables_biomet.csv', sep=';')
    return meta
if __name__ == '__main__':
    # Usage examples for the query API; each one hits the live endpoint.
    # We need an authentication token
    TOKEN = os.getenv('WSN_TOKEN')
    # Number of elements to return in every query
    limit = 100
    # Example 1: Get all the fields and tags of a given mote from a given time.
    # This is good to explore the data, but bad on performance.
    response = query(limit=limit,
                     serial=0x1F566F057C105487,
                     time__gte=datetime.datetime(2017, 11, 15),
                     debug=True,
                     )
    # Example 2: Get the RSSI of an Xbee module identified by its address
    print('==============================================')
    response = query(limit=limit,
                     source_addr_long=0x0013A2004105D4B6,
                     fields=['rssi'],
                     debug=True,
                     )
    # Example 3: Get the battery and internal temperature from all motes,
    # include the serial tag to tell them apart.
    # Frames that don't have at least one of the fields we ask for will not be
    # included.
    print('==============================================')
    response = query(limit=limit,
                     fields=['bat', 'in_temp'],
                     tags=['serial'],
                     debug=True,
                     )
    # Example 4: Get the time the frame was received by the Pi
    print('==============================================')
    response = query(limit=limit,
                     serial=408520806,
                     fields=['received'],
                     debug=True,
                     )
    # Example 5: Get the battery once every hour
    response = query(limit=10,
                     serial=0x1F566F057C105487,
                     fields=['bat'],
                     interval=3600,
                     debug=True,
                     )
| [
"datetime.datetime",
"pandas.read_csv",
"os.getenv",
"pandas.to_datetime",
"requests.get",
"os.getcwd",
"sys.exit",
"pprint.pprint",
"pandas.io.json.json_normalize"
] | [((257, 279), 'os.getenv', 'os.getenv', (['"""WSN_TOKEN"""'], {}), "('WSN_TOKEN')\n", (266, 279), False, 'import os, sys\n'), ((288, 299), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (297, 299), False, 'import os, sys\n'), ((1447, 1496), 'requests.get', 'requests.get', (['URL'], {'headers': 'headers', 'params': 'params'}), '(URL, headers=headers, params=params)\n', (1459, 1496), False, 'import requests\n'), ((3044, 3093), 'requests.get', 'requests.get', (['URL'], {'headers': 'headers', 'params': 'params'}), '(URL, headers=headers, params=params)\n', (3056, 3093), False, 'import requests\n'), ((3264, 3295), 'pandas.io.json.json_normalize', 'json_normalize', (["json['results']"], {}), "(json['results'])\n", (3278, 3295), False, 'from pandas.io.json import json_normalize\n'), ((3490, 3559), 'pandas.read_csv', 'pd.read_csv', (["(path + '/FINSE-stationary_variables_biomet.csv')"], {'sep': '""";"""'}), "(path + '/FINSE-stationary_variables_biomet.csv', sep=';')\n", (3501, 3559), True, 'import pandas as pd\n'), ((3655, 3677), 'os.getenv', 'os.getenv', (['"""WSN_TOKEN"""'], {}), "('WSN_TOKEN')\n", (3664, 3677), False, 'import os, sys\n'), ((1591, 1612), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (1604, 1612), False, 'import pprint\n'), ((1621, 1640), 'pprint.pprint', 'pprint.pprint', (['json'], {}), '(json)\n', (1634, 1640), False, 'import pprint\n'), ((3188, 3209), 'pprint.pprint', 'pprint.pprint', (['params'], {}), '(params)\n', (3201, 3209), False, 'import pprint\n'), ((3218, 3237), 'pprint.pprint', 'pprint.pprint', (['json'], {}), '(json)\n', (3231, 3237), False, 'import pprint\n'), ((3366, 3389), 'pandas.to_datetime', 'pd.to_datetime', (['df.time'], {}), '(df.time)\n', (3380, 3389), True, 'import pandas as pd\n'), ((1917, 1928), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1925, 1928), False, 'import os, sys\n'), ((3977, 4008), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(11)', '(15)'], {}), '(2017, 11, 15)\n', (3994, 4008), 
False, 'import datetime\n')] |
from __future__ import annotations
import elasticache_auto_discovery
from pymemcache.client.hash import HashClient
# elasticache settings
# Replace with the real ElastiCache configuration endpoint ("host:port").
elasticache_config_endpoint = "your-elasticache-cluster-endpoint:port"
# Auto-discover the cluster nodes; each entry yields (name, ip, port).
nodes = elasticache_auto_discovery.discover(elasticache_config_endpoint)
# Keep only (ip, port) pairs for the consistent-hashing client below.
nodes = map(lambda x: (x[1], int(x[2])), nodes)
memcache_client = HashClient(nodes)
def put(requestId, event):
    """Store *event* in memcache under the key *requestId*.

    Memcache is hosted on ElastiCache; the write goes through the
    module-level HashClient.
    """
    # Put the UUID to the cache.
    memcache_client.set(requestId, event)
def get(requestId):
    """Return the cached value for *requestId* (None on a miss, per
    pymemcache's default — confirm if a custom default is configured)."""
    # Get item (UUID) from the cache.
    item = memcache_client.get(requestId)
    return item
| [
"elasticache_auto_discovery.discover",
"pymemcache.client.hash.HashClient"
] | [((219, 283), 'elasticache_auto_discovery.discover', 'elasticache_auto_discovery.discover', (['elasticache_config_endpoint'], {}), '(elasticache_config_endpoint)\n', (254, 283), False, 'import elasticache_auto_discovery\n'), ((350, 367), 'pymemcache.client.hash.HashClient', 'HashClient', (['nodes'], {}), '(nodes)\n', (360, 367), False, 'from pymemcache.client.hash import HashClient\n')] |
from django.contrib import admin
from proposals.models import Proposal, ProposalSessionType
# Expose session types with the default ModelAdmin.
admin.site.register(ProposalSessionType)
# Register Proposal with extra options; Django builds an ad-hoc
# ModelAdmin subclass from the keyword arguments.
admin.site.register(Proposal,
    list_display = ["title", "session_type", "audience_level", "cancelled", "extreme_pycon", "invited"]
)
"django.contrib.admin.site.register"
] | [((95, 135), 'django.contrib.admin.site.register', 'admin.site.register', (['ProposalSessionType'], {}), '(ProposalSessionType)\n', (114, 135), False, 'from django.contrib import admin\n'), ((136, 268), 'django.contrib.admin.site.register', 'admin.site.register', (['Proposal'], {'list_display': "['title', 'session_type', 'audience_level', 'cancelled', 'extreme_pycon',\n 'invited']"}), "(Proposal, list_display=['title', 'session_type',\n 'audience_level', 'cancelled', 'extreme_pycon', 'invited'])\n", (155, 268), False, 'from django.contrib import admin\n')] |
import random
# Ex. takes in 2d20 and outputs the string Rolling 2 d20
def roll_str(rolls):
    """Describe a dice spec such as '2d20' as the string 'Rolling 2 d20'.

    A missing count (e.g. 'd20') is announced as a single die.
    """
    count = rolls.split('d')[0]
    sides = rolls.split('d')[1]
    if not count:
        count = 1
    return "Rolling %s d%s" % (count, sides)
# Ex. takes in 2d20 and outputs resultString = 11, 19 results = 30 numDice = 2
def roll(rolls):
    """Roll dice described by *rolls*, e.g. '2d20' (a bare 'd20' rolls one die).

    Returns a 3-tuple ``(resultString, results, numDice)``:
    ``resultString`` is a comma-separated list of the individual rolls,
    ``results`` is their sum, and ``numDice`` is the raw count prefix of
    the input (kept as a string for backward compatibility).
    Malformed input yields the string "Use proper format!".

    Bug fixed: previously only ``split('d')[0]`` was guarded, so input
    without exactly one 'd' (or with non-numeric parts) crashed at the
    unpacking/int() lines instead of reporting the format error.
    """
    try:
        numDice = rolls.split('d')[0]
        count_s, limit_s = rolls.split('d')
        count = int(count_s) if count_s else 1
        limit = int(limit_s)
    except (ValueError, AttributeError) as e:
        # Not an '<N>d<M>' spec: missing/extra 'd', non-numeric parts,
        # or a non-string argument.
        print(e)
        return "Use proper format!"
    results = 0
    parts = []
    for _ in range(count):
        number = random.randint(1, limit)
        results += number
        parts.append(str(number))
    resultString = ', '.join(parts)
    # Returns 3 variables, make sure to store in 3 variables
    return resultString, results, numDice
| [
"random.randint"
] | [((700, 724), 'random.randint', 'random.randint', (['(1)', 'limit'], {}), '(1, limit)\n', (714, 724), False, 'import random\n')] |
#
# Copyright (C) 2013 - 2017 <NAME> <<EMAIL>>
# License: MIT
#
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods
# pylint: disable=ungrouped-imports
from __future__ import absolute_import
import anyconfig.backend.configobj as TT
import tests.backend.common as TBC
from anyconfig.compat import OrderedDict as ODict
CNF_0_S = """\
# This is the 'initial_comment'
# Which may be several lines
keyword1 = value1
'keyword 2' = 'value 2'
[ "section 1" ]
# This comment goes with keyword 3
keyword 3 = value 3
'keyword 4' = value4, value 5, 'value 6'
[[ sub-section ]] # an inline comment
# sub-section is inside "section 1"
'keyword 5' = 'value 7'
'keyword 6' = '''A multiline value,
that spans more than one line :-)
The line breaks are included in the value.'''
[[[ sub-sub-section ]]]
# sub-sub-section is *in* 'sub-section'
# which is in 'section 1'
'keyword 7' = 'value 8'
[section 2] # an inline comment
keyword8 = "value 9"
keyword9 = value10 # an inline comment
# The 'final_comment'
# Which also may be several lines
"""
_ML_0 = """A multiline value,
that spans more than one line :-)
The line breaks are included in the value."""
CNF_0 = ODict((('keyword1', 'value1'),
('keyword 2', 'value 2'),
('section 1',
ODict((('keyword 3', 'value 3'),
('keyword 4', ['value4', 'value 5', 'value 6']),
('sub-section',
ODict((('keyword 5', 'value 7'),
('keyword 6', _ML_0),
('sub-sub-section',
ODict((('keyword 7', 'value 8'), ))))))))),
('section 2',
ODict((('keyword8', 'value 9'), ('keyword9', 'value10'))))))
class HasParserTrait(TBC.HasParserTrait):
    # Concrete parser under test plus the expected (parsed, serialized) pair.
    psr = TT.Parser()
    cnf = CNF_0
    cnf_s = CNF_0_S
class Test_10(TBC.Test_10_dumps_and_loads, HasParserTrait):
    # Options forwarded to configobj on the load/dump round-trips.
    load_options = dict(raise_errors=True)
    dump_options = dict(indent_type="    ")
class Test_20(TBC.Test_10_dumps_and_loads, HasParserTrait):
    pass
| [
"anyconfig.compat.OrderedDict",
"anyconfig.backend.configobj.Parser"
] | [((1892, 1903), 'anyconfig.backend.configobj.Parser', 'TT.Parser', ([], {}), '()\n', (1901, 1903), True, 'import anyconfig.backend.configobj as TT\n'), ((1776, 1833), 'anyconfig.compat.OrderedDict', 'ODict', (["(('keyword8', 'value 9'), ('keyword9', 'value10'))"], {}), "((('keyword8', 'value 9'), ('keyword9', 'value10')))\n", (1781, 1833), True, 'from anyconfig.compat import OrderedDict as ODict\n'), ((1687, 1721), 'anyconfig.compat.OrderedDict', 'ODict', (["(('keyword 7', 'value 8'),)"], {}), "((('keyword 7', 'value 8'),))\n", (1692, 1721), True, 'from anyconfig.compat import OrderedDict as ODict\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
===================
prospect.viewer.cds
===================
Class containing all bokeh's ColumnDataSource objects needed in viewer.py
"""
import numpy as np
from pkg_resources import resource_filename
import bokeh.plotting as bk
from bokeh.models import ColumnDataSource
_specutils_imported = True
try:
from specutils import Spectrum1D, SpectrumList
except ImportError:
_specutils_imported = False
from ..coaddcam import coaddcam_prospect
from ..utilities import supported_desitarget_masks, vi_file_fields
def _airtovac(w):
"""Convert air wavelengths to vacuum wavelengths. Don't convert less than 2000 Å.
Parameters
----------
w : :class:`float`
Wavelength [Å] of the line in air.
Returns
-------
:class:`float`
Wavelength [Å] of the line in vacuum.
"""
if w < 2000.0:
return w;
vac = w
for iter in range(2):
sigma2 = (1.0e4/vac)*(1.0e4/vac)
fact = 1.0 + 5.792105e-2/(238.0185 - sigma2) + 1.67917e-3/(57.362 - sigma2)
vac = w*fact
return vac
class ViewerCDS(object):
    """
    Encapsulates Bokeh ColumnDataSource objects to be passed to js callback functions.
    """
    def __init__(self):
        # One ColumnDataSource handle per dataset; each is filled in by the
        # corresponding load_*/init_* method below and stays None until then.
        self.cds_spectra = None
        self.cds_median_spectra = None
        self.cds_coaddcam_spec = None
        self.cds_model = None
        self.cds_model_2ndfit = None
        self.cds_othermodel = None
        self.cds_metadata = None
    def load_spectra(self, spectra, with_noise=True):
        """ Creates column data source for observed spectra """
        self.cds_spectra = list()
        is_desispec = False
        if _specutils_imported and isinstance(spectra, SpectrumList):
            s = spectra
            bands = spectra.bands
        elif _specutils_imported and isinstance(spectra, Spectrum1D):
            s = [spectra]
            bands = ['coadd']
        else : # Assume desispec Spectra obj
            is_desispec = True
            s = spectra
            bands = spectra.bands
        # One CDS per band; 'plotwave'/'plotflux' are the mutable columns the
        # js callbacks update, 'orig*' keep pristine copies per spectrum.
        for j, band in enumerate(bands):
            input_wave = s.wave[band] if is_desispec else s[j].spectral_axis.value
            input_nspec = spectra.num_spectra() if is_desispec else s[j].flux.shape[0]
            cdsdata = dict(
                origwave = input_wave.copy(),
                plotwave = input_wave.copy(),
                )
            for i in range(input_nspec):
                key = 'origflux'+str(i)
                input_flux = spectra.flux[band][i] if is_desispec else s[j].flux.value[i, :]
                cdsdata[key] = input_flux.copy()
                if with_noise :
                    key = 'orignoise'+str(i)
                    input_ivar = spectra.ivar[band][i] if is_desispec else s[j].uncertainty.array[i, :]
                    # ivar -> 1/sqrt(ivar); entries with ivar <= 0 keep noise = 0.
                    noise = np.zeros(len(input_ivar))
                    w, = np.where( (input_ivar > 0) )
                    noise[w] = 1/np.sqrt(input_ivar[w])
                    cdsdata[key] = noise
            cdsdata['plotflux'] = cdsdata['origflux0']
            if with_noise :
                cdsdata['plotnoise'] = cdsdata['orignoise0']
            self.cds_spectra.append( ColumnDataSource(cdsdata, name=band) )
    def compute_median_spectra(self, spectra):
        """ Stores the median value for each spectrum into CDS.
            Simple concatenation of all values from different bands.
        """
        cdsdata = dict(median=[])
        for i in range(spectra.num_spectra()):
            flux_array = np.concatenate( tuple([spectra.flux[band][i] for band in spectra.bands]) )
            w, = np.where( ~np.isnan(flux_array) )
            # All-NaN spectrum: fall back to 1 so downstream scaling stays finite.
            if len(w)==0 :
                cdsdata['median'].append(1)
            else :
                cdsdata['median'].append(np.median(flux_array[w]))
        self.cds_median_spectra = ColumnDataSource(cdsdata)
    def init_coaddcam_spec(self, spectra, with_noise=True):
        """ Creates column data source for camera-coadded observed spectra
            Do NOT store all coadded spectra in CDS obj, to reduce size of html files
            Except for the first spectrum, coaddition is done later in javascript
        """
        coadd_wave, coadd_flux, coadd_ivar = coaddcam_prospect(spectra)
        cds_coaddcam_data = dict(
            origwave = coadd_wave.copy(),
            plotwave = coadd_wave.copy(),
            plotflux = coadd_flux[0,:].copy(),
            plotnoise = np.ones(len(coadd_wave))
        )
        if with_noise :
            w, = np.where( (coadd_ivar[0,:] > 0) )
            cds_coaddcam_data['plotnoise'][w] = 1/np.sqrt(coadd_ivar[0,:][w])
        self.cds_coaddcam_spec = ColumnDataSource(cds_coaddcam_data)
    def init_model(self, model, second_fit=False):
        """ Creates a CDS for model spectrum """
        mwave, mflux = model
        cdsdata = dict(
            origwave = mwave.copy(),
            plotwave = mwave.copy(),
            plotflux = np.zeros(len(mwave)),
        )
        for i in range(len(mflux)):
            key = 'origflux'+str(i)
            cdsdata[key] = mflux[i]
        cdsdata['plotflux'] = cdsdata['origflux0']
        if second_fit:
            self.cds_model_2ndfit = ColumnDataSource(cdsdata)
        else:
            self.cds_model = ColumnDataSource(cdsdata)
    def init_othermodel(self, zcatalog):
        """ Initialize CDS for the 'other model' curve, from the best fit """
        self.cds_othermodel = ColumnDataSource({
            'plotwave' : self.cds_model.data['plotwave'],
            'origwave' : self.cds_model.data['origwave'],
            'origflux' : self.cds_model.data['origflux0'],
            'plotflux' : self.cds_model.data['origflux0'],
            'zref' : zcatalog['Z'][0]+np.zeros(len(self.cds_model.data['origflux0'])) # Track z reference in model
        })
    def load_metadata(self, spectra, mask_type=None, zcatalog=None, survey='DESI'):
        """ Creates column data source for target-related metadata,
            from fibermap, zcatalog and VI files
        """
        if survey == 'DESI':
            nspec = spectra.num_spectra()
            # Optional metadata:
            fibermap_keys = ['HPXPIXEL', 'MORPHTYPE', 'CAMERA',
                             'COADD_NUMEXP', 'COADD_EXPTIME',
                             'COADD_NUMNIGHT', 'COADD_NUMTILE']
            # Optional metadata, will check matching FIRST/LAST/NUM keys in fibermap:
            special_fm_keys = ['FIBER', 'NIGHT', 'EXPID', 'TILEID']
            # Mandatory keys if zcatalog is set:
            self.zcat_keys = ['Z', 'SPECTYPE', 'SUBTYPE', 'ZERR', 'ZWARN', 'DELTACHI2']
            # Mandatory metadata:
            self.phot_bands = ['G','R','Z', 'W1', 'W2']
            supported_masks = supported_desitarget_masks
            # Galactic extinction coefficients:
            # - Wise bands from https://github.com/dstndstn/tractor/blob/master/tractor/sfd.py
            # - Other bands from desiutil.dust (updated coefficients Apr 2021,
            #    matching https://desi.lbl.gov/trac/wiki/ImagingStandardBandpass)
            R_extinction = {'W1':0.184, 'W2':0.113, 'W3':0.0241, 'W4':0.00910,
                            'G_N':3.258, 'R_N':2.176, 'Z_N':1.199,
                            'G_S':3.212, 'R_S':2.164, 'Z_S':1.211}
        elif survey == 'SDSS':
            nspec = spectra.flux.shape[0]
            # Mandatory keys if zcatalog is set:
            self.zcat_keys = ['Z', 'CLASS', 'SUBCLASS', 'Z_ERR', 'ZWARNING', 'RCHI2DIFF']
            # Mandatory metadata:
            self.phot_bands = ['u', 'g', 'r', 'i', 'z']
            supported_masks = ['PRIMTARGET', 'SECTARGET',
                               'BOSS_TARGET1', 'BOSS_TARGET2',
                               'ANCILLARY_TARGET1', 'ANCILLARY_TARGET2',
                               'EBOSS_TARGET0', 'EBOSS_TARGET1', 'EBOSS_TARGET2']
        else:
            raise ValueError('Wrong survey')
        self.cds_metadata = ColumnDataSource()
        #- Generic metadata
        if survey == 'DESI':
            #- Special case for targetids: No int64 in js !!
            self.cds_metadata.add([str(x) for x in spectra.fibermap['TARGETID']], name='TARGETID')
            #- "Special" keys: check for FIRST/LAST/NUM
            for fm_key in special_fm_keys:
                use_first_last_num = False
                if all([ (x+fm_key in spectra.fibermap.keys()) for x in ['FIRST_','LAST_','NUM_'] ]):
                    if np.any(spectra.fibermap['NUM_'+fm_key] > 1) : # if NUM==1, use fm_key only
                        use_first_last_num = True
                        self.cds_metadata.add(spectra.fibermap['FIRST_'+fm_key], name='FIRST_'+fm_key)
                        self.cds_metadata.add(spectra.fibermap['LAST_'+fm_key], name='LAST_'+fm_key)
                        self.cds_metadata.add(spectra.fibermap['NUM_'+fm_key], name='NUM_'+fm_key)
                if (not use_first_last_num) and fm_key in spectra.fibermap.keys():
                    # Do not load placeholder metadata:
                    if not (np.all(spectra.fibermap[fm_key]==0) or np.all(spectra.fibermap[fm_key]==-1)):
                        self.cds_metadata.add(spectra.fibermap[fm_key], name=fm_key)
            #- "Normal" keys
            for fm_key in fibermap_keys:
                # Arbitrary choice:
                if fm_key == 'COADD_NUMEXP' and 'NUM_EXPID' in self.cds_metadata.data.keys():
                    continue
                if fm_key == 'COADD_NUMNIGHT' and 'NUM_NIGHT' in self.cds_metadata.data.keys():
                    continue
                if fm_key == 'COADD_NUMTILE' and 'NUM_TILEID' in self.cds_metadata.data.keys():
                    continue
                if fm_key in spectra.fibermap.keys():
                    if not (np.all(spectra.fibermap[fm_key]==0) or np.all(spectra.fibermap[fm_key]==-1)):
                        self.cds_metadata.add(spectra.fibermap[fm_key], name=fm_key)
        elif survey == 'SDSS':
            #- Set 'TARGETID' name to OBJID for convenience
            self.cds_metadata.add([str(x.tolist()) for x in spectra.meta['plugmap']['OBJID']], name='TARGETID')
        #- Photometry
        # Fluxes are de-extincted then converted to magnitudes (22.5 zero point).
        for i, bandname in enumerate(self.phot_bands) :
            if survey == 'SDSS':
                mag = spectra.meta['plugmap']['MAG'][:, i]
            else :
                mag = np.zeros(nspec)
                flux = spectra.fibermap['FLUX_'+bandname]
                extinction = np.ones(len(flux))
                if ('MW_TRANSMISSION_'+bandname) in spectra.fibermap.keys():
                    extinction = spectra.fibermap['MW_TRANSMISSION_'+bandname]
                elif ('EBV' in spectra.fibermap.keys()) and (bandname.upper() in ['W1','W2','W3','W4']):
                    extinction = 10**(- R_extinction[bandname.upper()] * spectra.fibermap['EBV'])
                elif all(x in spectra.fibermap.keys() for x in ['EBV','PHOTSYS']) and (bandname.upper() in ['G','R','Z']):
                    for photsys in ['N', 'S']:
                        wphot, = np.where(spectra.fibermap['PHOTSYS'] == photsys)
                        a_band = R_extinction[bandname.upper()+"_"+photsys] * spectra.fibermap['EBV'][wphot]
                        extinction[wphot] = 10**(-a_band / 2.5)
                w, = np.where( (flux>0) & (extinction>0) )
                mag[w] = -2.5*np.log10(flux[w]/extinction[w])+22.5
            self.cds_metadata.add(mag, name='mag_'+bandname)
        #- Targeting masks
        if mask_type is not None:
            if survey == 'DESI':
                if mask_type not in spectra.fibermap.keys():
                    mask_candidates = [x for x in spectra.fibermap.keys() if '_TARGET' in x]
                    raise ValueError(mask_type+" is not in spectra.fibermap.\n Hints of available masks: "+(' '.join(mask_candidates)))
                mask_used = supported_masks[mask_type]
                target_bits = spectra.fibermap[mask_type]
                target_info = [ ' '.join(mask_used.names(x)) for x in target_bits ]
            elif survey == 'SDSS':
                assert mask_type in supported_masks
                target_info = [ mask_type + ' (DUMMY)' for x in spectra.meta['plugmap'] ] # placeholder
            self.cds_metadata.add(target_info, name='Targeting masks')
        #- Software versions
        #- TODO : get template version (from zcatalog...)
        if survey == 'SDSS':
            spec_version = 'SDSS'
        else :
            spec_version = '0'
            for xx,yy in spectra.meta.items() :
                if yy=="desispec" : spec_version = spectra.meta[xx.replace('NAM','VER')]
        self.cds_metadata.add([spec_version for i in range(nspec)], name='spec_version')
        redrock_version = ["-1" for i in range(nspec)]
        if zcatalog is not None:
            if 'RRVER' in zcatalog.keys(): redrock_version = zcatalog['RRVER'].data
        self.cds_metadata.add(redrock_version, name='redrock_version')
        # Placeholder column: template version is not tracked yet (see TODO above).
        self.cds_metadata.add(np.zeros(nspec)-1, name='template_version')
        #- Redshift fit
        if zcatalog is not None:
            for zcat_key in self.zcat_keys:
                if 'TYPE' in zcat_key or 'CLASS' in zcat_key:
                    data = zcatalog[zcat_key].astype('U{0:d}'.format(zcatalog[zcat_key].dtype.itemsize))
                else :
                    data = zcatalog[zcat_key]
                self.cds_metadata.add(data, name=zcat_key)
        #- VI informations
        default_vi_info = [ (x[1],x[3]) for x in vi_file_fields if x[0][0:3]=="VI_" ]
        for vi_key, vi_value in default_vi_info:
            self.cds_metadata.add([vi_value for i in range(nspec)], name=vi_key)
    def load_spectral_lines(self, z=0):
        """Load emission/absorption line lists into a CDS, redshifted by *z*."""
        line_data = dict(
            restwave = [],
            plotwave = [],
            name = [],
            longname = [],
            plotname = [],
            emission = [],
            major = [],
            #y = []
        )
        for line_category in ('emission', 'absorption'):
            # encoding=utf-8 is needed to read greek letters
            line_array = np.genfromtxt(resource_filename('prospect', "data/{0}_lines.txt".format(line_category)),
                                       delimiter=",",
                                       dtype=[("name", "|U20"),
                                              ("longname", "|U20"),
                                              ("wavelength", float),
                                              ("vacuum", bool),
                                              ("major", bool)],
                                       encoding='utf-8')
            # Air-wavelength entries are converted to vacuum; vacuum entries pass through.
            vacuum_wavelengths = line_array['wavelength']
            w, = np.where(line_array['vacuum']==False)
            vacuum_wavelengths[w] = np.array([_airtovac(wave) for wave in line_array['wavelength'][w]])
            line_data['restwave'].extend(vacuum_wavelengths)
            line_data['plotwave'].extend(vacuum_wavelengths * (1+z))
            line_data['name'].extend(line_array['name'])
            line_data['longname'].extend(line_array['longname'])
            line_data['plotname'].extend(line_array['name'])
            emission_flag = True if line_category=='emission' else False
            line_data['emission'].extend([emission_flag for row in line_array])
            line_data['major'].extend(line_array['major'])
        self.cds_spectral_lines = ColumnDataSource(line_data)
| [
"numpy.median",
"numpy.sqrt",
"numpy.log10",
"numpy.where",
"numpy.any",
"numpy.zeros",
"bokeh.models.ColumnDataSource",
"numpy.isnan",
"numpy.all"
] | [((3960, 3985), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cdsdata'], {}), '(cdsdata)\n', (3976, 3985), False, 'from bokeh.models import ColumnDataSource\n'), ((4801, 4836), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cds_coaddcam_data'], {}), '(cds_coaddcam_data)\n', (4817, 4836), False, 'from bokeh.models import ColumnDataSource\n'), ((8134, 8152), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {}), '()\n', (8150, 8152), False, 'from bokeh.models import ColumnDataSource\n'), ((15624, 15651), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['line_data'], {}), '(line_data)\n', (15640, 15651), False, 'from bokeh.models import ColumnDataSource\n'), ((4656, 4686), 'numpy.where', 'np.where', (['(coadd_ivar[0, :] > 0)'], {}), '(coadd_ivar[0, :] > 0)\n', (4664, 4686), True, 'import numpy as np\n'), ((5356, 5381), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cdsdata'], {}), '(cdsdata)\n', (5372, 5381), False, 'from bokeh.models import ColumnDataSource\n'), ((5425, 5450), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cdsdata'], {}), '(cdsdata)\n', (5441, 5450), False, 'from bokeh.models import ColumnDataSource\n'), ((14922, 14961), 'numpy.where', 'np.where', (["(line_array['vacuum'] == False)"], {}), "(line_array['vacuum'] == False)\n", (14930, 14961), True, 'import numpy as np\n'), ((3283, 3319), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['cdsdata'], {'name': 'band'}), '(cdsdata, name=band)\n', (3299, 3319), False, 'from bokeh.models import ColumnDataSource\n'), ((4740, 4768), 'numpy.sqrt', 'np.sqrt', (['coadd_ivar[0, :][w]'], {}), '(coadd_ivar[0, :][w])\n', (4747, 4768), True, 'import numpy as np\n'), ((10551, 10566), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (10559, 10566), True, 'import numpy as np\n'), ((11478, 11517), 'numpy.where', 'np.where', (['((flux > 0) & (extinction > 0))'], {}), '((flux > 0) & (extinction > 0))\n', (11486, 11517), True, 'import numpy as np\n'), 
((13184, 13199), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (13192, 13199), True, 'import numpy as np\n'), ((2975, 2999), 'numpy.where', 'np.where', (['(input_ivar > 0)'], {}), '(input_ivar > 0)\n', (2983, 2999), True, 'import numpy as np\n'), ((3745, 3765), 'numpy.isnan', 'np.isnan', (['flux_array'], {}), '(flux_array)\n', (3753, 3765), True, 'import numpy as np\n'), ((3899, 3923), 'numpy.median', 'np.median', (['flux_array[w]'], {}), '(flux_array[w])\n', (3908, 3923), True, 'import numpy as np\n'), ((8646, 8691), 'numpy.any', 'np.any', (["(spectra.fibermap['NUM_' + fm_key] > 1)"], {}), "(spectra.fibermap['NUM_' + fm_key] > 1)\n", (8652, 8691), True, 'import numpy as np\n'), ((3037, 3059), 'numpy.sqrt', 'np.sqrt', (['input_ivar[w]'], {}), '(input_ivar[w])\n', (3044, 3059), True, 'import numpy as np\n'), ((11546, 11579), 'numpy.log10', 'np.log10', (['(flux[w] / extinction[w])'], {}), '(flux[w] / extinction[w])\n', (11554, 11579), True, 'import numpy as np\n'), ((9241, 9278), 'numpy.all', 'np.all', (['(spectra.fibermap[fm_key] == 0)'], {}), '(spectra.fibermap[fm_key] == 0)\n', (9247, 9278), True, 'import numpy as np\n'), ((9280, 9318), 'numpy.all', 'np.all', (['(spectra.fibermap[fm_key] == -1)'], {}), '(spectra.fibermap[fm_key] == -1)\n', (9286, 9318), True, 'import numpy as np\n'), ((9965, 10002), 'numpy.all', 'np.all', (['(spectra.fibermap[fm_key] == 0)'], {}), '(spectra.fibermap[fm_key] == 0)\n', (9971, 10002), True, 'import numpy as np\n'), ((10004, 10042), 'numpy.all', 'np.all', (['(spectra.fibermap[fm_key] == -1)'], {}), '(spectra.fibermap[fm_key] == -1)\n', (10010, 10042), True, 'import numpy as np\n'), ((11235, 11283), 'numpy.where', 'np.where', (["(spectra.fibermap['PHOTSYS'] == photsys)"], {}), "(spectra.fibermap['PHOTSYS'] == photsys)\n", (11243, 11283), True, 'import numpy as np\n')] |
# -*- coding: latin-1 -*-
import json
import pytest
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.read_registration import RegistrationRead
from oidcendpoint.oidc.registration import Registration
from oidcendpoint.oidc.token import AccessToken
from oidcendpoint.oidc.userinfo import UserInfo
from oidcmsg.oidc import RegistrationRequest
# Key bundle specification: one RSA and one P-256 EC signing key.
KEYDEFS = [
    {"type": "RSA", "key": "", "use": ["sig"]},
    {"type": "EC", "crv": "P-256", "use": ["sig"]},
]
# Every response_type combination the OP advertises.
RESPONSE_TYPES_SUPPORTED = [
    ["code"],
    ["token"],
    ["id_token"],
    ["code", "token"],
    ["code", "id_token"],
    ["id_token", "token"],
    ["code", "token", "id_token"],
    ["none"],
]
# Extra provider capabilities merged into the endpoint context configuration.
CAPABILITIES = {
    "subject_types_supported": ["public", "pairwise"],
    "grant_types_supported": [
        "authorization_code",
        "implicit",
        "urn:ietf:params:oauth:grant-type:jwt-bearer",
        "refresh_token",
    ],
}
# Client registration request payload (OIDC dynamic client registration).
msg = {
    "application_type": "web",
    "redirect_uris": [
        "https://client.example.org/callback",
        "https://client.example.org/callback2",
    ],
    "client_name": "<NAME>",
    "client_name#ja-Jpan-JP": "クライアント名",
    "subject_type": "pairwise",
    "token_endpoint_auth_method": "client_secret_basic",
    "jwks_uri": "https://client.example.org/my_public_keys.jwks",
    "userinfo_encrypted_response_alg": "RSA1_5",
    "userinfo_encrypted_response_enc": "A128CBC-HS256",
    "contacts": ["<EMAIL>", "<EMAIL>"],
    "request_uris": [
        "https://client.example.org/rf.txt#qpXaRLh_n93TT",
        "https://client.example.org/rf.txt",
    ],
    "post_logout_redirect_uris": [
        "https://rp.example.com/pl?foo=bar",
        "https://rp.example.com/pl",
    ],
}
# The registration request object exercised by the tests below.
CLI_REQ = RegistrationRequest(**msg)
class TestEndpoint(object):
    """Round-trip test: dynamic client registration, then read-back via the
    registration API endpoint."""
    @pytest.fixture(autouse=True)
    def create_endpoint(self):
        # Full provider configuration with registration, registration read,
        # authorization, token and userinfo endpoints wired in.
        conf = {
            "issuer": "https://example.com/",
            "password": "<PASSWORD>",
            "token_expires_in": 600,
            "grant_expires_in": 300,
            "refresh_token_expires_in": 86400,
            "verify_ssl": False,
            "capabilities": CAPABILITIES,
            "jwks": {"key_defs": KEYDEFS, "uri_path": "static/jwks.json"},
            "endpoint": {
                "registration": {
                    "path": "registration",
                    "class": Registration,
                    "kwargs": {"client_auth_method": None},
                },
                "registration_api": {
                    "path": "registration_api",
                    "class": RegistrationRead,
                    "kwargs": {"client_authn_method": ["bearer_header"]},
                },
                "authorization": {
                    "path": "authorization",
                    "class": Authorization,
                    "kwargs": {},
                },
                "token": {
                    "path": "token",
                    "class": AccessToken,
                    "kwargs": {
                        "client_authn_method": [
                            "client_secret_post",
                            "client_secret_basic",
                            "client_secret_jwt",
                            "private_key_jwt",
                        ]
                    },
                },
                "userinfo": {"path": "userinfo", "class": UserInfo, "kwargs": {}},
            },
            "template_dir": "template",
        }
        # Build the endpoint context and keep handles to the two endpoints
        # under test.
        endpoint_context = EndpointContext(conf)
        self.registration_endpoint = endpoint_context.endpoint["registration"]
        self.registration_api_endpoint = endpoint_context.endpoint["registration_read"]
    def test_do_response(self):
        # Register a client, then fetch the registration back and compare.
        _req = self.registration_endpoint.parse_request(CLI_REQ.to_json())
        _resp = self.registration_endpoint.process_request(request=_req)
        msg = self.registration_endpoint.do_response(**_resp)
        assert isinstance(msg, dict)
        _msg = json.loads(msg["response"])
        assert _msg
        # Read the registration back, authenticating with the registration
        # access token issued above.
        _api_req = self.registration_api_endpoint.parse_request(
            "client_id={}".format(_resp["response_args"]["client_id"]),
            auth="Bearer {}".format(
                _resp["response_args"]["registration_access_token"]
            ),
        )
        assert set(_api_req.keys()) == {"client_id"}
        _info = self.registration_api_endpoint.process_request(request=_api_req)
        assert set(_info.keys()) == {"response_args"}
        assert _info["response_args"] == _resp["response_args"]
        _endp_response = self.registration_api_endpoint.do_response(_info)
        assert set(_endp_response.keys()) == {"response", "http_headers"}
        assert ("Content-type", "application/json") in _endp_response["http_headers"]
| [
"pytest.fixture",
"json.loads",
"oidcmsg.oidc.RegistrationRequest",
"oidcendpoint.endpoint_context.EndpointContext"
] | [((1798, 1824), 'oidcmsg.oidc.RegistrationRequest', 'RegistrationRequest', ([], {}), '(**msg)\n', (1817, 1824), False, 'from oidcmsg.oidc import RegistrationRequest\n'), ((1860, 1888), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1874, 1888), False, 'import pytest\n'), ((3548, 3569), 'oidcendpoint.endpoint_context.EndpointContext', 'EndpointContext', (['conf'], {}), '(conf)\n', (3563, 3569), False, 'from oidcendpoint.endpoint_context import EndpointContext\n'), ((4032, 4059), 'json.loads', 'json.loads', (["msg['response']"], {}), "(msg['response'])\n", (4042, 4059), False, 'import json\n')] |
from django.contrib import admin
from modelapp.models import Project
# Register your models here.
class Projectadmin(admin.ModelAdmin):
    """Admin options for Project: columns shown in the changelist view."""

    # Schedule dates first, then the descriptive fields.
    list_display = [
        'startdate',
        'enddate',
        'name',
        'assignedto',
        'priority',
    ]
admin.site.register(Project,Projectadmin) | [
"django.contrib.admin.site.register"
] | [((212, 254), 'django.contrib.admin.site.register', 'admin.site.register', (['Project', 'Projectadmin'], {}), '(Project, Projectadmin)\n', (231, 254), False, 'from django.contrib import admin\n')] |
import logging
import time
import numpy as np
from eda import ma_data, tx_data
from sir_fitting_us import seir_experiment, make_csv_from_tx_traj
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

logger.info("Fitting model.")

# initial values taken from previous fit, used to seed MH sampler efficiently.
x0 = np.array([ 0.393, -2.586, -3.241, -5.874, -24.999])

# ma_traj = seir_experiment(ma_data, x0, iterations=10000)
tx_traj = seir_experiment(tx_data, x0, iterations=10000)

# mean_ll = np.mean([ll for (x, ll) in ma_traj])
mean_ll = np.mean([ll for (x, ll) in tx_traj])
logger.info("Model fitting finished with mean log-likelihood: {}".format(mean_ll))

# Sanity check: reject obviously failed fits.  The threshold and the error
# message are derived from one constant so they cannot drift apart again
# (the old message claimed the threshold was -20 while the code checked -2000).
LL_THRESHOLD = -2000
if mean_ll < LL_THRESHOLD:
    raise AssertionError(
        """Mean log-likelihood {} less than threshold of
        {}. This is probably an error.""".format(mean_ll, LL_THRESHOLD)
    )

underscored_time = time.ctime().replace(" ", "_")
# NOTE(review): the data written is the Texas trajectory but the filename
# still says "ma_" -- looks like a leftover from the Massachusetts run;
# confirm with downstream consumers before renaming.
fname = "ma_seir_output_{}.csv".format(underscored_time)
make_csv_from_tx_traj(tx_traj, tx_data, fname)
| [
"logging.getLogger",
"numpy.mean",
"time.ctime",
"sir_fitting_us.make_csv_from_tx_traj",
"numpy.array",
"sir_fitting_us.seir_experiment"
] | [((157, 184), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (174, 184), False, 'import logging\n'), ((331, 381), 'numpy.array', 'np.array', (['[0.393, -2.586, -3.241, -5.874, -24.999]'], {}), '([0.393, -2.586, -3.241, -5.874, -24.999])\n', (339, 381), True, 'import numpy as np\n'), ((456, 502), 'sir_fitting_us.seir_experiment', 'seir_experiment', (['tx_data', 'x0'], {'iterations': '(10000)'}), '(tx_data, x0, iterations=10000)\n', (471, 502), False, 'from sir_fitting_us import seir_experiment, make_csv_from_tx_traj\n'), ((563, 597), 'numpy.mean', 'np.mean', (['[ll for x, ll in tx_traj]'], {}), '([ll for x, ll in tx_traj])\n', (570, 597), True, 'import numpy as np\n'), ((961, 1007), 'sir_fitting_us.make_csv_from_tx_traj', 'make_csv_from_tx_traj', (['tx_traj', 'tx_data', 'fname'], {}), '(tx_traj, tx_data, fname)\n', (982, 1007), False, 'from sir_fitting_us import seir_experiment, make_csv_from_tx_traj\n'), ((873, 885), 'time.ctime', 'time.ctime', ([], {}), '()\n', (883, 885), False, 'import time\n')] |
import csv
import re
import unicodedata
import bs4
import wikia
from modules import utils
from modules.config import Config
class WikiaHandler():
    """Scrape academic-jobs wiki pages and export field/job data as CSV.

    All configuration (years, page titles, output locations, keyword
    dictionaries) is read from ``modules.config.Config``.
    """

    def __init__(self):
        config = Config()
        self.scraping_config = config.get_scraping_config()
        self.parsing_config = config.get_parsing_config()
        self.out_config = config.get_out_config()
        self.make_page_names()

    def make_page_names(self):
        """Build one wiki page title per academic year.

        A handful of years use irregular titles on the wiki, so those are
        patched individually after the generic names are generated.
        """
        years = self.scraping_config['years']
        page_name = self.scraping_config['sub_page']
        names = [f'{page_name} {year}-{year+1}' for year in years]
        names = [x.replace('Art History 2018-2019', 'Art History 2018-19') for x in names]
        names = [x.replace('Art History 2019-2020', 'Art History Jobs 2019-20') for x in names]
        names = [x.replace('Art History 2020-2021', 'Art History 2020-21') for x in names]
        names = [x.replace('Art History 2021-2022', 'Art History 2021-22') for x in names]
        self.page_names = names

    def create_fields_file(self):
        """Collect every field (h2 heading) across all pages and write a CSV
        mapping each field to the comma-separated years it appears in."""
        data = []
        for page_name in self.page_names:
            print(f'Begin processing {page_name}')
            year = self.get_year_from_page_name(page_name)
            html = self.get_html_for_page(page_name)
            sections = utils.get_sections_for_tag(html, 'h2')
            fields_in_page = []  # guard against duplicate headings within one page
            for section in sections:
                soup = bs4.BeautifulSoup(section, 'html.parser')
                section_title_list = soup.select('h2 .mw-headline')
                if self.is_field_title(section_title_list):
                    field = self.clean_text(section_title_list[0].text)
                    if field not in fields_in_page:
                        fields_in_page.append(field)
                        if len(data) > 0:
                            fields_in_list = {item['field'] for item in data}
                            if field not in fields_in_list:
                                data.append({'field': field, 'years': year})
                            else:
                                # Field already known: append this year.
                                for item in data:
                                    if field == item['field']:
                                        item['years'] = item['years'] + ',' + year
                        else:
                            data.append({'field': field, 'years': year})
        self.write_fields_file(data)

    def get_year_from_page_name(self, page_name):
        """Return the starting year (e.g. '2018') embedded in a page title."""
        year_regex = re.compile(r'\d{4,}(?=-)')
        return year_regex.search(page_name).group()

    def get_html_for_page(self, page_name):
        """Fetch one wiki page and return its raw HTML."""
        page_content = wikia.page(self.scraping_config['main_wiki'], page_name)
        return page_content.html()

    def get_sections_for_tag(self, html, tag):
        """Split *html* into chunks, each beginning with an opening *tag*.

        NOTE(review): duplicates utils.get_sections_for_tag, which the rest
        of the class calls; kept for backward compatibility.
        """
        sections = html.split(f'<{tag}>')
        sections = [f'<{tag}>' + section for section in sections][1:]
        return sections

    def write_fields_file(self, data):
        """Write the field/years rows to the configured fields CSV."""
        keys = data[0].keys()
        # BUG FIX: the path used to interpolate the whole out_config dict
        # (f"{self.out_config}/...") instead of its 'path' entry, producing
        # an unusable file name.  Mirrors create_scrape_file below.
        with open(f"{self.out_config['path']}/{self.out_config['fields_file']}", 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, keys)
            writer.writeheader()
            writer.writerows(data)

    def is_field_title(self, section_title_list):
        """True when the heading names an actual field (not excluded, and not
        a tenure-track / non-tenure-track divider heading)."""
        if not section_title_list:
            return False
        title = section_title_list[0].text
        return (not self.section_is_excluded(title)
                and self.scraping_config['tt_key'] not in title
                and self.scraping_config['non_tt_key'] not in title)

    def create_scrape_file(self):
        """Scrape every page into job rows and write the scrape CSV."""
        self.get_fields_dict()
        data = []
        for page in self.page_names:
            print(f'Begin processing {page}')
            page_data = self.get_page_data(page)
            data.extend(page_data)
        keys = data[0].keys()
        print(keys)
        print(data[0])
        self.weight_jobs(data)
        with open(f"{self.out_config['path']}/{self.out_config['new_scrape_file']}", 'w', newline='', encoding='utf-8') as csvfile:
            writer = csv.DictWriter(csvfile, keys)
            writer.writeheader()
            writer.writerows(data)

    def get_fields_dict(self):
        """Load the raw-field -> canonical-field mapping from its CSV."""
        with open(self.parsing_config['fields_dictionary'], mode='r') as infile:
            reader = csv.reader(infile)
            self.fields_dict = {rows[0]: rows[1] for rows in reader}

    def get_page_data(self, page_name):
        """Return the list of job dicts scraped from one wiki page."""
        html = self.get_html_for_page(page_name)
        self.current_year = self.get_year_from_page_name(page_name)
        if self.page_is_segmented_by_tt_status(html):
            data = self.process_page_segmented_by_tt_status(html)
        else:
            data = self.process_unsegmented_page(html)
        return data

    def page_is_segmented_by_tt_status(self, html):
        """True when the page groups its fields under TT / non-TT sections."""
        soup = bs4.BeautifulSoup(html, 'html.parser')
        return soup.find(id='TENURE_TRACK_JOBS') is not None

    def process_page_segmented_by_tt_status(self, html):
        """Extract jobs from a page whose sections are grouped by tenure status."""
        sections = utils.get_sections_for_tag(html, 'h2')
        jobs = []
        grouped_sections = self.group_sections_by_tt_status(sections)
        for section in grouped_sections['tt']:
            jobs.extend(self.get_jobs_from_field_section(section, True))
        for section in grouped_sections['non_tt']:
            jobs.extend(self.get_jobs_from_field_section(section, False))
        return jobs

    def process_unsegmented_page(self, html):
        """Extract jobs from a page with no TT / non-TT grouping."""
        sections = utils.get_sections_for_tag(html, 'h2')
        jobs = []
        for section in sections:
            section_title_list = utils.get_selection_from_content(section, 'h2 .mw-headline')
            if len(section_title_list) > 0 and not self.section_is_excluded(section_title_list[0].text):
                jobs.extend(self.get_jobs_from_field_section(section))
        return jobs

    def group_sections_by_tt_status(self, sections):
        """Split h2 sections into those under the TT divider vs the non-TT one."""
        tt_sections = []
        non_tt_sections = []
        section_type = None  # which divider heading we are currently under
        for section in sections:
            section_title_list = utils.get_selection_from_content(section, 'h2 .mw-headline')
            if len(section_title_list) > 0 and not self.section_is_excluded(section_title_list[0].text):
                section_title = section_title_list[0].text
                if self.scraping_config['tt_key'] in section_title or self.scraping_config['non_tt_key'] in section_title:
                    # A divider heading switches the current group.
                    if self.scraping_config['tt_key'] in section_title:
                        section_type = 'tt'
                    if self.scraping_config['non_tt_key'] in section_title:
                        section_type = 'non_tt'
                else:
                    if section_type == 'tt':
                        tt_sections.append(section)
                    if section_type == 'non_tt':
                        non_tt_sections.append(section)
        return {'tt': tt_sections, 'non_tt': non_tt_sections}

    def section_is_excluded(self, section_title):
        """True when the heading matches any configured excluded section."""
        return any(excluded_section in section_title
                   for excluded_section in self.scraping_config['excluded_sections'])

    def get_jobs_from_field_section(self, html, isTt=None):
        """Parse every h3 job listing inside one field's h2 section.

        isTt is True/False when the page already fixes tenure status, or
        None when it must be inferred from keyword matches.
        """
        original_field = self.get_field(html)
        normalized_field = self.normalize_field(original_field)
        job_listings = utils.get_sections_for_tag(html, 'h3')
        jobs = []
        if len(job_listings) > 0:
            for job in job_listings:
                title_list = utils.get_selection_from_content(job, 'h3 .mw-headline')
                body = bs4.BeautifulSoup(job, 'html.parser').get_text()
                if self.scraping_config['end_marker'] in body:
                    # Drop everything after the configured end-of-listing marker.
                    body = body.split(self.scraping_config['end_marker'])[0]
                if len(title_list) > 0:
                    headline = self.clean_text(title_list[0].get_text())
                    if not 'see also' in headline.lower():
                        body = self.clean_text(body)
                        job_type_keys = self.get_job_type_keys(headline, body)
                        if len(job_type_keys) == 0:
                            print('No job type keys found', headline)
                        if original_field == 'Fellowships':
                            isTt = False
                        data = {
                            'year': self.current_year,
                            'field': normalized_field,
                            'original_field': self.clean_text(original_field),
                            'keys': ', '.join(job_type_keys),
                            'is_tt': self.get_tenure_status(job_type_keys, isTt),
                            'rank': self.get_rank(job_type_keys),
                            'headline': headline,
                            'text': self.clean_body(body),
                        }
                        jobs.append(data)
        return jobs

    def get_field(self, section):
        """Return the cleaned h2 heading text of a field section."""
        field_header = utils.get_selection_from_content(section, 'h2 .mw-headline')
        return self.clean_text(field_header[0].text)

    def normalize_field(self, field):
        """Map a raw wiki field name onto its canonical name."""
        return self.fields_dict[field]

    def clean_text(self, text):
        """Replace configured junk strings with spaces and NFKD-normalize."""
        for string in self.parsing_config['strip']:
            if string == '\xa0':
                # NOTE(review): debug print retained to preserve existing output.
                print(text, string)
            text = text.replace(string, ' ')
        text = unicodedata.normalize('NFKD', text)
        return text

    def clean_body(self, body):
        """Strip square brackets and control characters from a listing body."""
        # BUG FIX: the previous pattern r'[][[:cntrl:]]' used a POSIX class,
        # which Python's re treats as the literal characters ':', 'c', 'n',
        # 't', 'r', 'l' -- it silently deleted those letters from every
        # listing.  Match brackets and real control characters instead.
        weird_stuff_regex = r'[][\x00-\x1f]'
        return re.sub(weird_stuff_regex, '', body)

    def get_location_from_headline(self, headline):
        """Return 'City, ST' from a '(City, ST)' suffix, or None."""
        location_regex = re.compile(r'\([^\n)]*[A-Z]{2}\)')
        match = location_regex.search(headline)  # search once, reuse
        return match.group().replace('(', '').replace(')', '') if match else None

    def get_job_type_keys(self, headline, body):
        """Collect keyword keys matched in the headline plus the body."""
        title_keys = self.get_matching_keys(headline, self.parsing_config['HEADLINE'])
        text_keys = self.get_matching_keys(body, self.parsing_config['BODY'])
        return title_keys + text_keys

    def get_tenure_status(self, keys, isTt):
        """Classify a listing as tenure-track (True/False), or 'manual' when
        the keyword evidence is ambiguous and needs human review.

        isTt is tri-state (True/False/None), so the explicit ``== False`` /
        ``== True`` comparisons below are deliberate.
        """
        if isTt == False:
            return False
        elif len(keys) == 0:
            return 'manual'
        elif 'tt' in keys or isTt == True:
            if 'non_tt' in keys or 'vap' in keys or 'lecturer' in keys or 'postdoc' in keys or 'contract' in keys or isTt == False:
                return 'manual'
            else:
                return True
        elif ('tt' not in keys) and ('assistant_prof' in keys or 'associate_prof' in keys or 'full_prof' in keys):
            if ('vap' not in keys and 'lecturer' not in keys and 'postdoc' not in keys and 'contract' not in keys):
                return True
            else:
                return 'manual'
        elif len(keys) == 1 and 'open_rank' in keys:
            return 'manual'
        else:
            return False

    def get_rank(self, keys):
        """Return the deduplicated rank keys, minus the tenure-status keys."""
        keys = list(set(keys))
        if 'tt' in keys:
            keys.remove('tt')
        if 'non_tt' in keys:
            keys.remove('non_tt')
        return ', '.join(keys)

    def get_matching_keys(self, text, job_type_dict):
        """Return every key whose keyword list has a hit in *text*."""
        return [key for key, value in job_type_dict.items()
                if self.any_in_list_in_text(value, text)]

    def any_in_list_in_text(self, list, text):
        """True if any word in *list* occurs (case-insensitively) in *text*."""
        lowered = text.lower()  # lowercase once, not per word
        return any(word in lowered for word in list)

    def weight_jobs(self, jobs):
        """Give each job a fractional count of 1/N, where N is the number of
        fields the identical listing was cross-posted under in the same year."""
        for job in jobs:
            matches = [job]
            for job_to_compare in jobs:
                # BUG FIX: compare field names by value -- 'is not' compared
                # object identity, so equal-but-distinct strings (e.g. parsed
                # from separate pages) were wrongly counted as cross-posts.
                if (job_to_compare['year'] == job['year']
                        and job_to_compare['headline'] == job['headline']
                        and job_to_compare['text'] == job['text']
                        and job_to_compare['original_field'] != job['original_field']):
                    matches.append(job_to_compare)
            job['count'] = 1 / len(matches)
| [
"csv.DictWriter",
"modules.config.Config",
"wikia.page",
"re.compile",
"modules.utils.get_selection_from_content",
"bs4.BeautifulSoup",
"unicodedata.normalize",
"re.sub",
"csv.reader",
"modules.utils.get_sections_for_tag"
] | [((191, 199), 'modules.config.Config', 'Config', ([], {}), '()\n', (197, 199), False, 'from modules.config import Config\n'), ((2687, 2713), 're.compile', 're.compile', (['"""\\\\d{4,}(?=-)"""'], {}), "('\\\\d{4,}(?=-)')\n", (2697, 2713), False, 'import re\n'), ((2834, 2890), 'wikia.page', 'wikia.page', (["self.scraping_config['main_wiki']", 'page_name'], {}), "(self.scraping_config['main_wiki'], page_name)\n", (2844, 2890), False, 'import wikia\n'), ((4964, 5002), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (4981, 5002), False, 'import bs4\n'), ((5141, 5179), 'modules.utils.get_sections_for_tag', 'utils.get_sections_for_tag', (['html', '"""h2"""'], {}), "(html, 'h2')\n", (5167, 5179), False, 'from modules import utils\n'), ((5667, 5705), 'modules.utils.get_sections_for_tag', 'utils.get_sections_for_tag', (['html', '"""h2"""'], {}), "(html, 'h2')\n", (5693, 5705), False, 'from modules import utils\n'), ((7618, 7656), 'modules.utils.get_sections_for_tag', 'utils.get_sections_for_tag', (['html', '"""h3"""'], {}), "(html, 'h3')\n", (7644, 7656), False, 'from modules import utils\n'), ((9538, 9598), 'modules.utils.get_selection_from_content', 'utils.get_selection_from_content', (['section', '"""h2 .mw-headline"""'], {}), "(section, 'h2 .mw-headline')\n", (9570, 9598), False, 'from modules import utils\n'), ((10097, 10132), 're.sub', 're.sub', (['weird_stuff_regex', '""""""', 'body'], {}), "(weird_stuff_regex, '', body)\n", (10103, 10132), False, 'import re\n'), ((10211, 10247), 're.compile', 're.compile', (['"""\\\\([^\\\\n)]*[A-Z]{2}\\\\)"""'], {}), "('\\\\([^\\\\n)]*[A-Z]{2}\\\\)')\n", (10221, 10247), False, 'import re\n'), ((1275, 1313), 'modules.utils.get_sections_for_tag', 'utils.get_sections_for_tag', (['html', '"""h2"""'], {}), "(html, 'h2')\n", (1301, 1313), False, 'from modules import utils\n'), ((3303, 3332), 'csv.DictWriter', 'csv.DictWriter', (['csvfile', 'keys'], {}), '(csvfile, keys)\n', 
(3317, 3332), False, 'import csv\n'), ((4206, 4235), 'csv.DictWriter', 'csv.DictWriter', (['csvfile', 'keys'], {}), '(csvfile, keys)\n', (4220, 4235), False, 'import csv\n'), ((4442, 4460), 'csv.reader', 'csv.reader', (['infile'], {}), '(infile)\n', (4452, 4460), False, 'import csv\n'), ((5790, 5850), 'modules.utils.get_selection_from_content', 'utils.get_selection_from_content', (['section', '"""h2 .mw-headline"""'], {}), "(section, 'h2 .mw-headline')\n", (5822, 5850), False, 'from modules import utils\n'), ((6277, 6337), 'modules.utils.get_selection_from_content', 'utils.get_selection_from_content', (['section', '"""h2 .mw-headline"""'], {}), "(section, 'h2 .mw-headline')\n", (6309, 6337), False, 'from modules import utils\n'), ((9948, 9983), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'text'], {}), "('NFKD', text)\n", (9969, 9983), False, 'import unicodedata\n'), ((1406, 1447), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['section', '"""html.parser"""'], {}), "(section, 'html.parser')\n", (1423, 1447), False, 'import bs4\n'), ((7775, 7831), 'modules.utils.get_selection_from_content', 'utils.get_selection_from_content', (['job', '"""h3 .mw-headline"""'], {}), "(job, 'h3 .mw-headline')\n", (7807, 7831), False, 'from modules import utils\n'), ((7855, 7892), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['job', '"""html.parser"""'], {}), "(job, 'html.parser')\n", (7872, 7892), False, 'import bs4\n')] |
import mod
# Test reassigning attributes on an imported module object.
# NOTE(review): on CPython module attributes are freely writable, so none of
# the RuntimeError branches fire; presumably this targets a port (e.g.
# MicroPython) where `mod` may be frozen/built-in and assignment raises --
# confirm against the test's expected-output file.
def foo():
    return 1

# Replace mod.foo with a function and call it through the module.
try:
    mod.foo = foo
except RuntimeError:
    print("RuntimeError1")

print(mod.foo())

# Overwrite the same attribute with an int.
try:
    mod.foo = 1
except RuntimeError:
    print("RuntimeError2")

print(mod.foo)

# And overwrite it again with a different int.
try:
    mod.foo = 2
except RuntimeError:
    print("RuntimeError3")

print(mod.foo)

# Entry point expected by the test harness; intentionally empty.
def __main__():
    pass
| [
"mod.foo"
] | [((117, 126), 'mod.foo', 'mod.foo', ([], {}), '()\n', (124, 126), False, 'import mod\n')] |
import json
import os
from setuptools import find_packages, setup
PACKAGE_NAMESPACE_NAME = 'aistorms'

METADATA_FILE_NAME = 'metadata.json'

REQUIREMENTS_FILE_NAME = 'requirements.txt'

# Package metadata (name, version, author, ...) lives in a JSON file inside
# the package namespace directory.
_metadata_path = os.path.join(
    os.path.dirname(__file__),
    PACKAGE_NAMESPACE_NAME,
    METADATA_FILE_NAME)

# Use context managers so both file handles are closed deterministically
# (the previous code leaked the metadata and requirements handles).
with open(_metadata_path) as _metadata_file:
    _metadata = json.load(_metadata_file)

with open(REQUIREMENTS_FILE_NAME) as _requirements_file:
    # Keep every non-comment line (trailing newlines included, as before).
    _install_requires = [s for s in _requirements_file.readlines()
                         if not s.startswith('#')]

setup(
    name=_metadata['PACKAGE'],
    author=_metadata['AUTHOR'],
    author_email=_metadata['AUTHOR_EMAIL'],
    url=_metadata['URL'],
    version=_metadata['VERSION'],
    description=_metadata['DESCRIPTION'],
    long_description=_metadata['DESCRIPTION'],
    keywords=_metadata['DESCRIPTION'],
    packages=find_packages(),
    include_package_data=True,
    install_requires=_install_requires)
| [
"os.path.dirname",
"setuptools.find_packages"
] | [((683, 698), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (696, 698), False, 'from setuptools import find_packages, setup\n'), ((261, 286), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (276, 286), False, 'import os\n')] |
import argparse
import logging
import os
import pathlib
import time
import log
import onenote_auth
import onenote
import pipeline
# Root logger; handlers are attached by log.setup_logging() in main().
logger = logging.getLogger()


def main():
    """Dump all pages of a OneNote notebook into ``output_dir``.

    Honors ``--start-page`` (skip earlier pages) and ``--max-pages``
    (stop once that many pages have been seen).
    """
    args = parse_args()
    if args.verbose:
        log.setup_logging(logging.DEBUG)
    else:
        log.setup_logging(logging.INFO)

    # Allow a redirect URI over plain HTTP (no TLS):
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"

    # Authorize the app:
    s = onenote_auth.get_session(args.new_session)

    output_dir = pathlib.Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    logger.info('Writing to "%s"', output_dir)

    start_time = time.perf_counter()
    pipe = pipeline.Pipeline(s, args.notebook, output_dir)
    pages = 0
    try:
        for page_count, page in enumerate(
            onenote.get_notebook_pages(s, args.notebook), 1
        ):
            # BUG FIX: stop BEFORE processing once the limit is exceeded.
            # The old code checked after processing, so --max-pages N
            # actually dumped N+1 pages.
            if args.max_pages and page_count > args.max_pages:
                break
            log_msg = f'Page {page_count}: {page["title"]}'
            if args.start_page is None or page_count >= args.start_page:
                logger.info(log_msg)
                pipe.add_page(page)
                pages += 1
            else:
                logger.info(log_msg + ' [skipped]')
    except onenote.NotebookNotFound as e:
        logger.error(str(e))
    # Flush the pipeline even after an error so partial output is written.
    pipe.done()
    stop_time = time.perf_counter()
    logger.info('Done!')
    logger.info('%s pages in %.1f seconds', pages, stop_time - start_time)
def parse_args():
    """Parse the command-line options for the OneNote dump tool."""
    parser = argparse.ArgumentParser()
    # Positional arguments.
    parser.add_argument('notebook', help='display name of notebook to dump')
    parser.add_argument('output_dir', help='directory to which to output')
    # Optional flags, declared as (flags, options) pairs.
    optional_arguments = [
        (('-m', '--max-pages'), dict(type=int, help='max pages to dump')),
        (('-s', '--start-page'), dict(type=int, help='start page number to dump')),
        (('-n', '--new-session'), dict(action="store_true", help='ignore saved auth token')),
        (('-v', '--verbose'), dict(action="store_true", help='show verbose output')),
    ]
    for flags, options in optional_arguments:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
main()
| [
"logging.getLogger",
"argparse.ArgumentParser",
"pathlib.Path",
"onenote.get_notebook_pages",
"time.perf_counter",
"onenote_auth.get_session",
"pipeline.Pipeline",
"log.setup_logging"
] | [((141, 160), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (158, 160), False, 'import logging\n'), ((451, 493), 'onenote_auth.get_session', 'onenote_auth.get_session', (['args.new_session'], {}), '(args.new_session)\n', (475, 493), False, 'import onenote_auth\n'), ((512, 541), 'pathlib.Path', 'pathlib.Path', (['args.output_dir'], {}), '(args.output_dir)\n', (524, 541), False, 'import pathlib\n'), ((657, 676), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (674, 676), False, 'import time\n'), ((688, 735), 'pipeline.Pipeline', 'pipeline.Pipeline', (['s', 'args.notebook', 'output_dir'], {}), '(s, args.notebook, output_dir)\n', (705, 735), False, 'import pipeline\n'), ((1365, 1384), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1382, 1384), False, 'import time\n'), ((1518, 1543), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1541, 1543), False, 'import argparse\n'), ((228, 260), 'log.setup_logging', 'log.setup_logging', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (245, 260), False, 'import log\n'), ((279, 310), 'log.setup_logging', 'log.setup_logging', (['logging.INFO'], {}), '(logging.INFO)\n', (296, 310), False, 'import log\n'), ((814, 858), 'onenote.get_notebook_pages', 'onenote.get_notebook_pages', (['s', 'args.notebook'], {}), '(s, args.notebook)\n', (840, 858), False, 'import onenote\n')] |
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import json
import pytest
from ansible.module_utils._text import to_text
# Magic... Incorrectly identified by pylint as unused
from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import
from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import
from ansible_collections.community.aws.plugins.modules import data_pipeline
# Skip this whole module when `boto3` is not installed -- these tests drive
# the AWS Data Pipeline API through it.  NOTE(review): the old comment named
# test_api_gateway.py; likely a copy/paste leftover from another test module.
boto3 = pytest.importorskip('boto3')
@pytest.fixture(scope='module')
def dp_setup():
    """
    Yield a FakeModule object, data pipeline id of a vanilla data pipeline, and data pipeline objects

    This fixture is module-scoped, since this can be reused for multiple tests.

    Behavior depends on the PLACEBO_RECORD environment variable: when set,
    real AWS calls are made (record mode); otherwise canned values that
    match the recorded responses are used (playback mode).
    """
    Dependencies = collections.namedtuple("Dependencies", ["module", "data_pipeline_id", "objects"])

    # get objects to use to test populating and activating the data pipeline
    if not os.getenv('PLACEBO_RECORD'):
        # Playback mode: static objects matching the recorded fixtures.
        objects = [{"name": "Every 1 day",
                    "id": "DefaultSchedule",
                    "fields": []},
                   {"name": "Default",
                    "id": "Default",
                    "fields": []}]
    else:
        # Record mode: fetch the real pipeline objects from S3.
        s3 = boto3.client('s3')
        data = s3.get_object(Bucket="ansible-test-datapipeline", Key="pipeline-object/new.json")
        objects = json.loads(to_text(data['Body'].read()))

    # create a module with vanilla data pipeline parameters
    params = {'name': 'ansible-test-create-pipeline',
              'description': 'ansible-datapipeline-unit-test',
              'state': 'present',
              'timeout': 300,
              'objects': [],
              'tags': {},
              'parameters': [],
              'values': []}
    module = FakeModule(**params)

    # yield a module, the data pipeline id, and the data pipeline objects (that are not yet defining the vanilla data pipeline)
    if not os.getenv('PLACEBO_RECORD'):
        # Playback mode: the pipeline id baked into the recorded responses.
        yield Dependencies(module=module, data_pipeline_id='df-0590406117G8DPQZY2HA', objects=objects)
    else:
        connection = boto3.client('datapipeline')
        changed, result = data_pipeline.create_pipeline(connection, module)
        data_pipeline_id = result['data_pipeline']['pipeline_id']
        yield Dependencies(module=module, data_pipeline_id=data_pipeline_id, objects=objects)

    # remove data pipeline
    # Teardown runs only in record mode; playback never created a pipeline.
    if os.getenv('PLACEBO_RECORD'):
        module.params.update(state='absent')
        data_pipeline.delete_pipeline(connection, module)
class FakeModule(object):
    """Minimal stand-in for AnsibleModule used by these tests.

    Records the arguments passed to exit_json/fail_json instead of exiting.
    """

    def __init__(self, **kwargs):
        # Module parameters are kept verbatim for the code under test.
        self.params = kwargs

    def _record_call(self, args, kwargs):
        # Remember the last exit/fail invocation for assertions in tests.
        self.exit_args = args
        self.exit_kwargs = kwargs

    def fail_json(self, *args, **kwargs):
        self._record_call(args, kwargs)
        raise Exception('FAIL')

    def exit_json(self, *args, **kwargs):
        self._record_call(args, kwargs)
def test_create_pipeline_already_exists(placeboify, maybe_sleep, dp_setup):
    """Creating the fixture's pipeline a second time must be a no-op."""
    client = placeboify.client('datapipeline')
    changed, result = data_pipeline.create_pipeline(client, dp_setup.module)
    assert changed is False
    assert "Data Pipeline ansible-test-create-pipeline is present" in result['msg']


def test_pipeline_field(placeboify, maybe_sleep, dp_setup):
    """A freshly created pipeline reports the PENDING state."""
    client = placeboify.client('datapipeline')
    state = data_pipeline.pipeline_field(client, dp_setup.data_pipeline_id, "@pipelineState")
    assert state == "PENDING"


def test_define_pipeline(placeboify, maybe_sleep, dp_setup):
    """Populating the pipeline with objects reports an update."""
    client = placeboify.client('datapipeline')
    changed, result = data_pipeline.define_pipeline(client, dp_setup.module, dp_setup.objects, dp_setup.data_pipeline_id)
    assert 'has been updated' in result


def test_deactivate_pipeline(placeboify, maybe_sleep, dp_setup):
    """Deactivation reports the pipeline as deactivated."""
    client = placeboify.client('datapipeline')
    changed, result = data_pipeline.deactivate_pipeline(client, dp_setup.module)
    assert "Data Pipeline ansible-test-create-pipeline deactivated" in result['msg']
def test_activate_without_population(placeboify, maybe_sleep, dp_setup):
    """Activating a pipeline before populating it must raise."""
    connection = placeboify.client('datapipeline')
    with pytest.raises(Exception) as error_message:
        changed, result = data_pipeline.activate_pipeline(connection, dp_setup.module)
    # BUG FIX: the old assert compared the ExceptionInfo object to a string,
    # which can never be equal; check the raised exception's message instead.
    assert "You need to populate your pipeline before activation." in str(error_message.value)
def test_create_pipeline(placeboify, maybe_sleep):
    """A brand-new pipeline is created, then cleaned up."""
    client = placeboify.client('datapipeline')
    module = FakeModule(
        name='ansible-unittest-create-pipeline',
        description='ansible-datapipeline-unit-test',
        state='present',
        timeout=300,
        tags={},
    )
    changed, result = data_pipeline.create_pipeline(client, module)
    assert changed is True
    assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline created."
    # Clean up after ourselves.
    data_pipeline.delete_pipeline(client, module)


def test_create_pipeline_with_tags(placeboify, maybe_sleep):
    """A pipeline with tags is created, then cleaned up."""
    client = placeboify.client('datapipeline')
    module = FakeModule(
        name='ansible-unittest-create-pipeline_tags',
        description='ansible-datapipeline-unit-test',
        state='present',
        tags={'ansible': 'test'},
        timeout=300,
    )
    changed, result = data_pipeline.create_pipeline(client, module)
    assert changed is True
    assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline_tags created."
    # Clean up after ourselves.
    data_pipeline.delete_pipeline(client, module)
def test_delete_nonexistent_pipeline(placeboify, maybe_sleep):
    """Deleting a pipeline that does not exist changes nothing."""
    client = placeboify.client('datapipeline')
    module = FakeModule(
        name='ansible-test-nonexistent',
        description='ansible-test-nonexistent',
        state='absent',
        objects=[],
        tags={'ansible': 'test'},
        timeout=300,
    )
    changed, result = data_pipeline.delete_pipeline(client, module)
    assert changed is False


def test_delete_pipeline(placeboify, maybe_sleep):
    """Deleting an existing pipeline reports a change."""
    client = placeboify.client('datapipeline')
    module = FakeModule(
        name='ansible-test-nonexistent',
        description='ansible-test-nonexistent',
        state='absent',
        objects=[],
        tags={'ansible': 'test'},
        timeout=300,
    )
    data_pipeline.create_pipeline(client, module)
    changed, result = data_pipeline.delete_pipeline(client, module)
    assert changed is True
def test_build_unique_id_different():
    """Different descriptions must yield different unique ids."""
    first = FakeModule(name='ansible-unittest-1', description='test-unique-id')
    second = FakeModule(name='ansible-unittest-1', description='test-unique-id-different')
    assert data_pipeline.build_unique_id(first) != data_pipeline.build_unique_id(second)


def test_build_unique_id_same():
    """Identical name/description/tags must yield the same unique id."""
    first = FakeModule(name='ansible-unittest-1', description='test-unique-id', tags={'ansible': 'test'})
    second = FakeModule(name='ansible-unittest-1', description='test-unique-id', tags={'ansible': 'test'})
    assert data_pipeline.build_unique_id(first) == data_pipeline.build_unique_id(second)


def test_build_unique_id_obj():
    # check that the object can be different and the unique id should be the same; should be able to modify objects
    first = FakeModule(name='ansible-unittest-1', objects=[{'first': 'object'}])
    second = FakeModule(name='ansible-unittest-1', objects=[{'second': 'object'}])
    assert data_pipeline.build_unique_id(first) == data_pipeline.build_unique_id(second)
def test_format_tags():
    """format_tags turns a plain dict into boto's key/value pair list."""
    raw_tags = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
    pairs = data_pipeline.format_tags(raw_tags)
    # Every emitted pair must round-trip back to the source dict.
    assert all(raw_tags[pair['key']] == pair['value'] for pair in pairs)


def test_format_empty_tags():
    """An empty tag dict formats to an empty list."""
    assert data_pipeline.format_tags({}) == []
def test_pipeline_description(placeboify, maybe_sleep, dp_setup):
    """Describing the fixture pipeline returns its id."""
    client = placeboify.client('datapipeline')
    description = data_pipeline.pipeline_description(client, dp_setup.data_pipeline_id)
    assert description['pipelineDescriptionList'][0]['pipelineId'] == dp_setup.data_pipeline_id
def test_pipeline_description_nonexistent(placeboify, maybe_sleep):
    """Describing an unknown pipeline id raises DataPipelineNotFound."""
    hypothetical_pipeline_id = "df-015440025PF7YGLDK47C"
    connection = placeboify.client('datapipeline')
    # BUG FIX: the old code asserted `ExceptionInfo == exception class`,
    # which is always False; let pytest.raises check the exception type.
    with pytest.raises(data_pipeline.DataPipelineNotFound):
        data_pipeline.pipeline_description(connection, hypothetical_pipeline_id)
def test_check_dp_exists_true(placeboify, maybe_sleep, dp_setup):
    """check_dp_exists is True for the fixture's pipeline id."""
    client = placeboify.client('datapipeline')
    assert data_pipeline.check_dp_exists(client, dp_setup.data_pipeline_id) is True


def test_check_dp_exists_false(placeboify, maybe_sleep):
    """check_dp_exists is False for an unknown pipeline id."""
    hypothetical_pipeline_id = "df-015440025PF7YGLDK47C"
    client = placeboify.client('datapipeline')
    assert data_pipeline.check_dp_exists(client, hypothetical_pipeline_id) is False


def test_check_dp_status(placeboify, maybe_sleep, dp_setup):
    """The fixture pipeline is in one of the inactive states."""
    inactive_states = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
    client = placeboify.client('datapipeline')
    assert data_pipeline.check_dp_status(client, dp_setup.data_pipeline_id, inactive_states) is True
def test_activate_pipeline(placeboify, maybe_sleep, dp_setup):
    """Activation succeeds once the pipeline has been populated."""
    client = placeboify.client('datapipeline')
    # The pipeline must be defined with objects before it can be activated.
    data_pipeline.define_pipeline(
        client,
        module=dp_setup.module,
        objects=dp_setup.objects,
        dp_id=dp_setup.data_pipeline_id,
    )
    changed, result = data_pipeline.activate_pipeline(client, dp_setup.module)
    assert changed is True
| [
"ansible_collections.community.aws.plugins.modules.data_pipeline.build_unique_id",
"ansible_collections.community.aws.plugins.modules.data_pipeline.check_dp_status",
"collections.namedtuple",
"ansible_collections.community.aws.plugins.modules.data_pipeline.delete_pipeline",
"os.getenv",
"ansible_collectio... | [((839, 867), 'pytest.importorskip', 'pytest.importorskip', (['"""boto3"""'], {}), "('boto3')\n", (858, 867), False, 'import pytest\n'), ((871, 901), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (885, 901), False, 'import pytest\n'), ((1136, 1221), 'collections.namedtuple', 'collections.namedtuple', (['"""Dependencies"""', "['module', 'data_pipeline_id', 'objects']"], {}), "('Dependencies', ['module', 'data_pipeline_id',\n 'objects'])\n", (1158, 1221), False, 'import collections\n'), ((2762, 2789), 'os.getenv', 'os.getenv', (['"""PLACEBO_RECORD"""'], {}), "('PLACEBO_RECORD')\n", (2771, 2789), False, 'import os\n'), ((3326, 3359), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (3343, 3359), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((3382, 3440), 'ansible_collections.community.aws.plugins.modules.data_pipeline.create_pipeline', 'data_pipeline.create_pipeline', (['connection', 'dp_setup.module'], {}), '(connection, dp_setup.module)\n', (3411, 3440), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((3632, 3665), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (3649, 3665), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((3692, 3781), 'ansible_collections.community.aws.plugins.modules.data_pipeline.pipeline_field', 'data_pipeline.pipeline_field', (['connection', 'dp_setup.data_pipeline_id', '"""@pipelineState"""'], {}), "(connection, dp_setup.data_pipeline_id,\n '@pipelineState')\n", (3720, 3781), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((3902, 3935), 
'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (3919, 3935), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((3958, 4065), 'ansible_collections.community.aws.plugins.modules.data_pipeline.define_pipeline', 'data_pipeline.define_pipeline', (['connection', 'dp_setup.module', 'dp_setup.objects', 'dp_setup.data_pipeline_id'], {}), '(connection, dp_setup.module, dp_setup.objects,\n dp_setup.data_pipeline_id)\n', (3987, 4065), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((4186, 4219), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (4203, 4219), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((4242, 4304), 'ansible_collections.community.aws.plugins.modules.data_pipeline.deactivate_pipeline', 'data_pipeline.deactivate_pipeline', (['connection', 'dp_setup.module'], {}), '(connection, dp_setup.module)\n', (4275, 4304), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((4482, 4515), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (4499, 4515), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((4813, 4846), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (4830, 4846), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((5109, 5153), 
'ansible_collections.community.aws.plugins.modules.data_pipeline.create_pipeline', 'data_pipeline.create_pipeline', (['connection', 'm'], {}), '(connection, m)\n', (5138, 5153), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((5272, 5316), 'ansible_collections.community.aws.plugins.modules.data_pipeline.delete_pipeline', 'data_pipeline.delete_pipeline', (['connection', 'm'], {}), '(connection, m)\n', (5301, 5316), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((5397, 5430), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (5414, 5430), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((5715, 5759), 'ansible_collections.community.aws.plugins.modules.data_pipeline.create_pipeline', 'data_pipeline.create_pipeline', (['connection', 'm'], {}), '(connection, m)\n', (5744, 5759), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((5883, 5927), 'ansible_collections.community.aws.plugins.modules.data_pipeline.delete_pipeline', 'data_pipeline.delete_pipeline', (['connection', 'm'], {}), '(connection, m)\n', (5912, 5927), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((6010, 6043), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (6027, 6043), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((6337, 6381), 'ansible_collections.community.aws.plugins.modules.data_pipeline.delete_pipeline', 'data_pipeline.delete_pipeline', (['connection', 'm'], {}), '(connection, m)\n', (6366, 6381), False, 'from ansible_collections.community.aws.plugins.modules import 
data_pipeline\n'), ((6480, 6513), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (6497, 6513), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((6789, 6833), 'ansible_collections.community.aws.plugins.modules.data_pipeline.create_pipeline', 'data_pipeline.create_pipeline', (['connection', 'm'], {}), '(connection, m)\n', (6818, 6833), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((6856, 6900), 'ansible_collections.community.aws.plugins.modules.data_pipeline.delete_pipeline', 'data_pipeline.delete_pipeline', (['connection', 'm'], {}), '(connection, m)\n', (6885, 6900), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((8105, 8148), 'ansible_collections.community.aws.plugins.modules.data_pipeline.format_tags', 'data_pipeline.format_tags', (['unformatted_tags'], {}), '(unformatted_tags)\n', (8130, 8148), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((8331, 8374), 'ansible_collections.community.aws.plugins.modules.data_pipeline.format_tags', 'data_pipeline.format_tags', (['unformatted_tags'], {}), '(unformatted_tags)\n', (8356, 8374), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((8492, 8525), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (8509, 8525), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((8580, 8633), 'ansible_collections.community.aws.plugins.modules.data_pipeline.pipeline_description', 'data_pipeline.pipeline_description', (['connection', 'dp_id'], {}), '(connection, dp_id)\n', (8614, 8633), False, 'from 
ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((8852, 8885), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (8869, 8885), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((9155, 9188), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (9172, 9188), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((9202, 9270), 'ansible_collections.community.aws.plugins.modules.data_pipeline.check_dp_exists', 'data_pipeline.check_dp_exists', (['connection', 'dp_setup.data_pipeline_id'], {}), '(connection, dp_setup.data_pipeline_id)\n', (9231, 9270), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((9430, 9463), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (9447, 9463), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((9477, 9544), 'ansible_collections.community.aws.plugins.modules.data_pipeline.check_dp_exists', 'data_pipeline.check_dp_exists', (['connection', 'hypothetical_pipeline_id'], {}), '(connection, hypothetical_pipeline_id)\n', (9506, 9544), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((9722, 9755), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (9739, 9755), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((9768, 9857), 
'ansible_collections.community.aws.plugins.modules.data_pipeline.check_dp_status', 'data_pipeline.check_dp_status', (['connection', 'dp_setup.data_pipeline_id', 'inactive_states'], {}), '(connection, dp_setup.data_pipeline_id,\n inactive_states)\n', (9797, 9857), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((10016, 10049), 'ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures.placeboify.client', 'placeboify.client', (['"""datapipeline"""'], {}), "('datapipeline')\n", (10033, 10049), False, 'from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify\n'), ((10054, 10183), 'ansible_collections.community.aws.plugins.modules.data_pipeline.define_pipeline', 'data_pipeline.define_pipeline', (['connection'], {'module': 'dp_setup.module', 'objects': 'dp_setup.objects', 'dp_id': 'dp_setup.data_pipeline_id'}), '(connection, module=dp_setup.module, objects=\n dp_setup.objects, dp_id=dp_setup.data_pipeline_id)\n', (10083, 10183), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((10303, 10363), 'ansible_collections.community.aws.plugins.modules.data_pipeline.activate_pipeline', 'data_pipeline.activate_pipeline', (['connection', 'dp_setup.module'], {}), '(connection, dp_setup.module)\n', (10334, 10363), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((1307, 1334), 'os.getenv', 'os.getenv', (['"""PLACEBO_RECORD"""'], {}), "('PLACEBO_RECORD')\n", (1316, 1334), False, 'import os\n'), ((2299, 2326), 'os.getenv', 'os.getenv', (['"""PLACEBO_RECORD"""'], {}), "('PLACEBO_RECORD')\n", (2308, 2326), False, 'import os\n'), ((2517, 2566), 'ansible_collections.community.aws.plugins.modules.data_pipeline.create_pipeline', 'data_pipeline.create_pipeline', (['connection', 'module'], {}), '(connection, module)\n', (2546, 2566), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), 
((2844, 2893), 'ansible_collections.community.aws.plugins.modules.data_pipeline.delete_pipeline', 'data_pipeline.delete_pipeline', (['connection', 'module'], {}), '(connection, module)\n', (2873, 2893), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((4525, 4549), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4538, 4549), False, 'import pytest\n'), ((4594, 4654), 'ansible_collections.community.aws.plugins.modules.data_pipeline.activate_pipeline', 'data_pipeline.activate_pipeline', (['connection', 'dp_setup.module'], {}), '(connection, dp_setup.module)\n', (4625, 4654), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((7162, 7194), 'ansible_collections.community.aws.plugins.modules.data_pipeline.build_unique_id', 'data_pipeline.build_unique_id', (['m'], {}), '(m)\n', (7191, 7194), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((7198, 7231), 'ansible_collections.community.aws.plugins.modules.data_pipeline.build_unique_id', 'data_pipeline.build_unique_id', (['m2'], {}), '(m2)\n', (7227, 7231), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((7509, 7541), 'ansible_collections.community.aws.plugins.modules.data_pipeline.build_unique_id', 'data_pipeline.build_unique_id', (['m'], {}), '(m)\n', (7538, 7541), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((7545, 7578), 'ansible_collections.community.aws.plugins.modules.data_pipeline.build_unique_id', 'data_pipeline.build_unique_id', (['m2'], {}), '(m2)\n', (7574, 7578), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((7916, 7948), 'ansible_collections.community.aws.plugins.modules.data_pipeline.build_unique_id', 'data_pipeline.build_unique_id', (['m'], {}), '(m)\n', (7945, 7948), False, 'from ansible_collections.community.aws.plugins.modules import 
data_pipeline\n'), ((7952, 7985), 'ansible_collections.community.aws.plugins.modules.data_pipeline.build_unique_id', 'data_pipeline.build_unique_id', (['m2'], {}), '(m2)\n', (7981, 7985), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n'), ((8895, 8919), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8908, 8919), False, 'import pytest\n'), ((8938, 9010), 'ansible_collections.community.aws.plugins.modules.data_pipeline.pipeline_description', 'data_pipeline.pipeline_description', (['connection', 'hypothetical_pipeline_id'], {}), '(connection, hypothetical_pipeline_id)\n', (8972, 9010), False, 'from ansible_collections.community.aws.plugins.modules import data_pipeline\n')] |
import math
import requests
from pygitbucket.exceptions import (
UnknownError,
InvalidIDError,
NotFoundIDError,
NotAuthenticatedError,
PermissionError,
)
class Client:
    """Minimal Bitbucket Cloud (api.bitbucket.org) REST client.

    Every request is authenticated with HTTP basic auth (``user``/``password``)
    and targets repositories owned by ``self.username`` — either the
    authenticated user or an explicit ``owner`` (e.g. a team/workspace account
    for shared repositories).
    """

    BASE_URL = "https://api.bitbucket.org/"

    def __init__(self, user: str, password: str, owner=None):
        """Initialize session credentials and resolve the repository owner.

        Args:
            user: Bitbucket username used for HTTP basic authentication.
            password: password (or app password) associated with ``user``.
            owner: account that owns the repositories to operate on; when
                ``None``, the authenticated user's own username is used.
        """
        self.user = user
        self.password = password
        user_data = self.get_user()
        # For shared repositories, target the explicit owner instead of the
        # authenticated user.
        if owner is None:
            owner = user_data.get("username")
        self.username = owner

    def get_user(self, params=None):
        """Returns the currently logged in user.

        Args:
            params: optional query parameters.
        Returns:
            dict describing the authenticated user.
        """
        return self._get("2.0/user", params=params)

    def get_privileges(self, params=None):
        """Gets a list of all the privileges across all an account's repositories.

        If a repository has no individual users with privileges, it does not
        appear in this list. Only the repository owner, a team account
        administrator, or an account with administrative rights on the
        repository can make this call.

        Args:
            params: optional query parameters.
        Returns:
            privilege data for ``self.username``.
        """
        return self._get(f"1.0/privileges/{self.username}", params=params)

    def get_repositories(self, params=None):
        """Returns a paginated list of all repositories owned by the account.

        The result can be narrowed down based on the authenticated user's role
        (e.g. ``?role=contributor``) and supports filtering/sorting via
        ``params``.

        Args:
            params: optional query parameters.
        Returns:
            paginated repository listing.
        """
        return self._get(f"2.0/repositories/{self.username}", params=params)

    def get_repository(self, repository_slug, params=None):
        """Returns the object describing this repository.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            dict describing the repository.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}",
            params=params
        )

    def get_repository_pipelines(self, repository_slug, page=None, params=None):
        """Returns one page of this repository's pipelines.

        Args:
            repository_slug: repository name/slug.
            page: 1-based result page; defaults to the first page.
            params: optional query parameters.
        Returns:
            paginated pipelines listing (10 results per page).
        """
        page_num = str(page) if page else "1"
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/pipelines/?page={page_num}",
            params=params,
        )

    def get_latest_pipelines(self, repository_slug, params=None):
        """Returns the most recent pipelines of this repository.

        Bitbucket returns pipelines 10 per page in chronological order, so
        the newest runs live on the last page(s). Up to the last two pages
        are combined.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            list of pipeline objects (may be empty).
        """
        default_response = self.get_repository_pipelines(repository_slug)
        num_pipelines = default_response["size"]
        # Clamp to 1 so that repositories with 0-10 pipelines do not request
        # page 0 (and page -1 below), which the API rejects.
        pages = max(math.ceil(num_pipelines / 10), 1)
        latest_pipelines = self.get_repository_pipelines(repository_slug, pages)["values"]
        if pages > 1:
            # Include the page before the last one for additional context.
            latest_pipelines += self.get_repository_pipelines(
                repository_slug, pages - 1
            )["values"]
        return latest_pipelines

    def get_last_pipeline(self, repository_slug, branch=None, params=None):
        """Returns the most recently created pipeline, optionally per branch.

        Only the last result page is inspected, so a branch whose newest run
        is older than the last ~10 pipelines may not be found.

        Args:
            repository_slug: repository name/slug.
            branch: when given, only consider pipelines targeting this branch.
            params: optional query parameters.
        Returns:
            the newest matching pipeline object, or ``None`` when no pipeline
            matches.
        """
        default_response = self.get_repository_pipelines(repository_slug)
        num_pipelines = default_response["size"]
        # Clamp to 1 to avoid requesting an invalid page 0 on empty repos.
        pages = max(math.ceil(num_pipelines / 10), 1)
        last_pipelines = self.get_repository_pipelines(repository_slug, pages)["values"]
        if branch:
            last_pipelines = [
                value for value in last_pipelines
                if value['target']['ref_name'] == branch
            ]
        if not last_pipelines:
            # Avoid IndexError when the repository has no (matching) pipelines.
            return None
        last_pipelines.sort(key=lambda x: x['created_on'])
        return last_pipelines[-1]

    def get_repository_branches(self, repository_slug, params=None):
        """Returns the branches of the given repository.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            paginated branch listing.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/refs/branches",
            params=params,
        )

    def get_repository_tags(self, repository_slug, params=None):
        """Returns the tags of the given repository.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            paginated tag listing.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/refs/tags",
            params=params,
        )

    def get_repository_components(self, repository_slug, params=None):
        """Returns the components that have been defined in the issue tracker.

        This resource is only available on repositories that have the issue
        tracker enabled.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            paginated component listing.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/components",
            params=params,
        )

    def get_repository_milestones(self, repository_slug, params=None):
        """Returns the milestones that have been defined in the issue tracker.

        This resource is only available on repositories that have the issue
        tracker enabled.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            paginated milestone listing.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/milestones",
            params=params,
        )

    def get_repository_versions(self, repository_slug, params=None):
        """Returns the versions that have been defined in the issue tracker.

        This resource is only available on repositories that have the issue
        tracker enabled.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            paginated version listing.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/versions",
            params=params,
        )

    def get_repository_source_code(self, repository_slug, params=None):
        """Returns data about the source code of given repository.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            source listing for the repository root.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/src",
            params=params
        )

    def get_repository_commit_path_source_code(
        self, repository_slug, commit_hash, path, params=None
    ):
        """Returns source code of ``path`` at ``commit_hash`` of the repository.

        Args:
            repository_slug: repository name/slug.
            commit_hash: commit at which to read the file/directory.
            path: path within the repository.
            params: optional query parameters.
        Returns:
            file contents or directory listing.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/src/{commit_hash}/{path}",
            params=params,
        )

    def trigger_pipeline(self, repository_slug, branch_name, params=None):
        """Triggers the pipeline for a branch of the repo.

        This call requires authentication; private repositories require an
        account with appropriate authorisation.

        Args:
            repository_slug: repository name/slug.
            branch_name: name of the branch whose pipeline is run.
            params: optional query parameters.
        Returns:
            dict describing the started pipeline.
        """
        data = {
            "target": {
                "ref_type": "branch",
                "type": "pipeline_ref_target",
                "ref_name": branch_name,
            }
        }
        return self._post(
            f"2.0/repositories/{self.username}/{repository_slug}/pipelines/",
            data=data,
            params=params,
        )

    def create_issue(self, repository_slug, title, description="", params=None):
        """Creates a new issue.

        This call requires authentication; the authenticated user becomes the
        issue's reporter.

        Args:
            repository_slug: repository name/slug.
            title: issue title.
            description: raw issue body text.
            params: optional query parameters.
        Returns:
            dict describing the created issue.
        """
        data = {"title": title, "content": {"raw": description}}
        return self._post(
            f"2.0/repositories/{self.username}/{repository_slug}/issues",
            data=data,
            params=params,
        )

    def get_issue(self, repository_slug, issue_id, params=None):
        """Returns the specified issue.

        Args:
            repository_slug: repository name/slug.
            issue_id: numeric issue identifier.
            params: optional query parameters.
        Returns:
            dict describing the issue.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/issues/{issue_id}",
            params=params,
        )

    def get_issues(self, repository_slug, params=None):
        """Returns the issues in the issue tracker.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            paginated issue listing.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/issues",
            params=params
        )

    def delete_issue(self, repository_slug, issue_id, params=None):
        """Deletes the specified issue. This requires write access to the repository.

        Args:
            repository_slug: repository name/slug.
            issue_id: numeric issue identifier.
            params: optional query parameters.
        Returns:
            ``None`` on success (HTTP 204).
        """
        return self._delete(
            f"2.0/repositories/{self.username}/{repository_slug}/issues/{issue_id}",
            params=params,
        )

    def create_webhook(self, repository_slug, data, params=None):
        """Creates a new webhook on the specified repository.

        Example payload::

            {
                "description": "Webhook Description",
                "url": "https://example.com/",
                "active": true,
                "events": ["repo:push", "issue:created", "issue:updated"]
            }

        Note that this call requires the webhook scope, as well as any scope
        that applies to the subscribed events. The url must properly resolve
        and cannot be an internal, non-routed address.

        Args:
            repository_slug: repository name/slug.
            data: webhook definition as shown above.
            params: optional query parameters.
        Returns:
            dict describing the created webhook.
        """
        return self._post(
            f"2.0/repositories/{self.username}/{repository_slug}/hooks",
            data=data,
            params=params,
        )

    def get_webhook(self, repository_slug, webhook_uid, params=None):
        """Returns the webhook with the specified id installed on the repository.

        Args:
            repository_slug: repository name/slug.
            webhook_uid: webhook identifier.
            params: optional query parameters.
        Returns:
            dict describing the webhook.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/hooks/{webhook_uid}",
            params=params,
        )

    def get_webhooks(self, repository_slug, params=None):
        """Returns a paginated list of webhooks installed on this repository.

        Args:
            repository_slug: repository name/slug.
            params: optional query parameters.
        Returns:
            paginated webhook listing.
        """
        return self._get(
            f"2.0/repositories/{self.username}/{repository_slug}/hooks",
            params=params
        )

    def delete_webhook(self, repository_slug, webhook_uid, params=None):
        """Deletes the specified webhook subscription from the given repository.

        Args:
            repository_slug: repository name/slug.
            webhook_uid: webhook identifier.
            params: optional query parameters.
        Returns:
            ``None`` on success (HTTP 204).
        """
        return self._delete(
            f"2.0/repositories/{self.username}/{repository_slug}/hooks/{webhook_uid}",
            params=params,
        )

    def _get(self, endpoint, params=None):
        """Issue an authenticated GET request and parse the response."""
        response = requests.get(
            self.BASE_URL + endpoint, params=params, auth=(self.user, self.password)
        )
        return self._parse(response)

    def _post(self, endpoint, params=None, data=None):
        """Issue an authenticated POST request (JSON body) and parse the response."""
        response = requests.post(
            self.BASE_URL + endpoint,
            params=params,
            json=data,
            auth=(self.user, self.password),
        )
        return self._parse(response)

    def _put(self, endpoint, params=None, data=None):
        """Issue an authenticated PUT request (JSON body) and parse the response."""
        response = requests.put(
            self.BASE_URL + endpoint,
            params=params,
            json=data,
            auth=(self.user, self.password),
        )
        return self._parse(response)

    def _delete(self, endpoint, params=None):
        """Issue an authenticated DELETE request and parse the response."""
        response = requests.delete(
            self.BASE_URL + endpoint, params=params, auth=(self.user, self.password)
        )
        return self._parse(response)

    def _parse(self, response):
        """Decode a response body and translate HTTP errors into exceptions.

        Args:
            response: ``requests.Response`` object.
        Returns:
            decoded JSON (or raw text) for 200/201; ``None`` for 204.
        Raises:
            InvalidIDError: on HTTP 400.
            NotAuthenticatedError: on HTTP 401.
            PermissionError: on HTTP 403.
            NotFoundIDError: on HTTP 404.
            UnknownError: on any other non-success status.
        """
        status_code = response.status_code
        if "application/json" in response.headers["Content-Type"]:
            r = response.json()
        else:
            r = response.text
        if status_code in (200, 201):
            return r
        if status_code == 204:
            return None
        message = None
        try:
            if "errorMessages" in r:
                message = r["errorMessages"]
        except Exception:
            # Body shape did not allow a membership test; keep a placeholder.
            message = "No error message."
        if status_code == 400:
            raise InvalidIDError(message)
        if status_code == 401:
            raise NotAuthenticatedError(message)
        if status_code == 403:
            raise PermissionError(message)
        if status_code == 404:
            raise NotFoundIDError(message)
        raise UnknownError(message)
| [
"requests.post",
"math.ceil",
"pygitbucket.exceptions.PermissionError",
"requests.get",
"requests.delete",
"pygitbucket.exceptions.NotAuthenticatedError",
"pygitbucket.exceptions.NotFoundIDError",
"requests.put",
"pygitbucket.exceptions.InvalidIDError",
"pygitbucket.exceptions.UnknownError"
] | [((3363, 3392), 'math.ceil', 'math.ceil', (['(num_pipelines / 10)'], {}), '(num_pipelines / 10)\n', (3372, 3392), False, 'import math\n'), ((4033, 4062), 'math.ceil', 'math.ceil', (['(num_pipelines / 10)'], {}), '(num_pipelines / 10)\n', (4042, 4062), False, 'import math\n'), ((12627, 12718), 'requests.get', 'requests.get', (['(self.BASE_URL + endpoint)'], {'params': 'params', 'auth': '(self.user, self.password)'}), '(self.BASE_URL + endpoint, params=params, auth=(self.user, self\n .password))\n', (12639, 12718), False, 'import requests\n'), ((12848, 12951), 'requests.post', 'requests.post', (['(self.BASE_URL + endpoint)'], {'params': 'params', 'json': 'data', 'auth': '(self.user, self.password)'}), '(self.BASE_URL + endpoint, params=params, json=data, auth=(\n self.user, self.password))\n', (12861, 12951), False, 'import requests\n'), ((13117, 13219), 'requests.put', 'requests.put', (['(self.BASE_URL + endpoint)'], {'params': 'params', 'json': 'data', 'auth': '(self.user, self.password)'}), '(self.BASE_URL + endpoint, params=params, json=data, auth=(self\n .user, self.password))\n', (13129, 13219), False, 'import requests\n'), ((13377, 13470), 'requests.delete', 'requests.delete', (['(self.BASE_URL + endpoint)'], {'params': 'params', 'auth': '(self.user, self.password)'}), '(self.BASE_URL + endpoint, params=params, auth=(self.user,\n self.password))\n', (13392, 13470), False, 'import requests\n'), ((14360, 14381), 'pygitbucket.exceptions.UnknownError', 'UnknownError', (['message'], {}), '(message)\n', (14372, 14381), False, 'from pygitbucket.exceptions import UnknownError, InvalidIDError, NotFoundIDError, NotAuthenticatedError, PermissionError\n'), ((14094, 14117), 'pygitbucket.exceptions.InvalidIDError', 'InvalidIDError', (['message'], {}), '(message)\n', (14108, 14117), False, 'from pygitbucket.exceptions import UnknownError, InvalidIDError, NotFoundIDError, NotAuthenticatedError, PermissionError\n'), ((14167, 14197), 
'pygitbucket.exceptions.NotAuthenticatedError', 'NotAuthenticatedError', (['message'], {}), '(message)\n', (14188, 14197), False, 'from pygitbucket.exceptions import UnknownError, InvalidIDError, NotFoundIDError, NotAuthenticatedError, PermissionError\n'), ((14247, 14271), 'pygitbucket.exceptions.PermissionError', 'PermissionError', (['message'], {}), '(message)\n', (14262, 14271), False, 'from pygitbucket.exceptions import UnknownError, InvalidIDError, NotFoundIDError, NotAuthenticatedError, PermissionError\n'), ((14321, 14345), 'pygitbucket.exceptions.NotFoundIDError', 'NotFoundIDError', (['message'], {}), '(message)\n', (14336, 14345), False, 'from pygitbucket.exceptions import UnknownError, InvalidIDError, NotFoundIDError, NotAuthenticatedError, PermissionError\n')] |
"""
@file
@brief Test for :epkg:`cartopy`.
"""
import numpy
import numba
@numba.jit(nopython=True, parallel=True)
def logistic_regression(Y, X, w, iterations):
    "Runs *iterations* gradient steps of logistic regression and returns w."
    for _step in range(iterations):
        margin = numpy.dot(X, w)
        sigma = 1.0 / (1.0 + numpy.exp(-Y * margin))
        w -= numpy.dot((sigma - 1.0) * Y, X)
    return w
def check_numba():
    """
    Runs a sample with :epkg:`numba`.
    """
    labels = numpy.random.rand(10).astype(numpy.double)
    features = numpy.random.rand(10, 2).astype(numpy.double)
    weights = numpy.random.rand(2).astype(numpy.double)
    return logistic_regression(labels, features, weights, 2)
| [
"numpy.dot",
"numba.jit",
"numpy.random.rand"
] | [((76, 115), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'parallel': '(True)'}), '(nopython=True, parallel=True)\n', (85, 115), False, 'import numba\n'), ((411, 432), 'numpy.random.rand', 'numpy.random.rand', (['(10)'], {}), '(10)\n', (428, 432), False, 'import numpy\n'), ((462, 486), 'numpy.random.rand', 'numpy.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (479, 486), False, 'import numpy\n'), ((516, 536), 'numpy.random.rand', 'numpy.random.rand', (['(2)'], {}), '(2)\n', (533, 536), False, 'import numpy\n'), ((281, 296), 'numpy.dot', 'numpy.dot', (['X', 'w'], {}), '(X, w)\n', (290, 296), False, 'import numpy\n')] |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# Locate and load the SWIG-generated low-level extension module ``__block``.
# On Python >= 2.6 the ``imp`` machinery loads it from this package's own
# directory; older interpreters fall back to a plain import.
if version_info >= (2,6,0):
    def swig_import_helper():
        # Find the compiled extension next to this generated wrapper module.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('__block', [dirname(__file__)])
        except ImportError:
            # Not found beside this file: fall back to a normal import
            # (e.g. a statically linked or path-installed build).
            import __block
            return __block
        if fp is not None:
            try:
                _mod = imp.load_module('__block', fp, pathname, description)
            finally:
                # Always release the file handle opened by find_module.
                fp.close()
            return _mod
    __block = swig_import_helper()
    del swig_import_helper
else:
    import __block
del version_info
# Keep a reference to the ``property`` builtin when the interpreter has one;
# the generated proxy classes use it for attribute accessors.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Attribute setter used by SWIG proxy classes: writes are routed through
    # the class's __swig_setmethods__ table; with ``static`` set, assigning
    # an unknown attribute name is rejected.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: unknown attribute names are added to the instance.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Attribute getter used by SWIG proxy classes via __swig_getmethods__.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # repr() for proxy objects, mentioning the wrapped SWIG object when one
    # is attached (``self.this`` may be absent on partially built proxies).
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Probe for new-style classes: ``_newclass`` tells the generated proxies
# whether they may subclass ``object`` (absent on pre-2.2 interpreters).
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def gsl_vector_set_zero(*args, **kwargs):
return __block.gsl_vector_set_zero(*args, **kwargs)
gsl_vector_set_zero = __block.gsl_vector_set_zero
def gsl_vector_set_all(*args, **kwargs):
return __block.gsl_vector_set_all(*args, **kwargs)
gsl_vector_set_all = __block.gsl_vector_set_all
def gsl_vector_set_basis(*args, **kwargs):
return __block.gsl_vector_set_basis(*args, **kwargs)
gsl_vector_set_basis = __block.gsl_vector_set_basis
def gsl_vector_fread(*args, **kwargs):
return __block.gsl_vector_fread(*args, **kwargs)
gsl_vector_fread = __block.gsl_vector_fread
def gsl_vector_fwrite(*args, **kwargs):
return __block.gsl_vector_fwrite(*args, **kwargs)
gsl_vector_fwrite = __block.gsl_vector_fwrite
def gsl_vector_fscanf(*args, **kwargs):
return __block.gsl_vector_fscanf(*args, **kwargs)
gsl_vector_fscanf = __block.gsl_vector_fscanf
def gsl_vector_fprintf(*args, **kwargs):
return __block.gsl_vector_fprintf(*args, **kwargs)
gsl_vector_fprintf = __block.gsl_vector_fprintf
def gsl_vector_reverse(*args, **kwargs):
return __block.gsl_vector_reverse(*args, **kwargs)
gsl_vector_reverse = __block.gsl_vector_reverse
def gsl_vector_swap(*args, **kwargs):
return __block.gsl_vector_swap(*args, **kwargs)
gsl_vector_swap = __block.gsl_vector_swap
def gsl_vector_swap_elements(*args, **kwargs):
return __block.gsl_vector_swap_elements(*args, **kwargs)
gsl_vector_swap_elements = __block.gsl_vector_swap_elements
def gsl_vector_max(*args, **kwargs):
return __block.gsl_vector_max(*args, **kwargs)
gsl_vector_max = __block.gsl_vector_max
def gsl_vector_min(*args, **kwargs):
return __block.gsl_vector_min(*args, **kwargs)
gsl_vector_min = __block.gsl_vector_min
def gsl_vector_minmax(*args, **kwargs):
return __block.gsl_vector_minmax(*args, **kwargs)
gsl_vector_minmax = __block.gsl_vector_minmax
def gsl_vector_max_index(*args, **kwargs):
return __block.gsl_vector_max_index(*args, **kwargs)
gsl_vector_max_index = __block.gsl_vector_max_index
def gsl_vector_min_index(*args, **kwargs):
return __block.gsl_vector_min_index(*args, **kwargs)
gsl_vector_min_index = __block.gsl_vector_min_index
def gsl_vector_minmax_index(*args, **kwargs):
return __block.gsl_vector_minmax_index(*args, **kwargs)
gsl_vector_minmax_index = __block.gsl_vector_minmax_index
def gsl_vector_isnull(*args, **kwargs):
return __block.gsl_vector_isnull(*args, **kwargs)
gsl_vector_isnull = __block.gsl_vector_isnull
def gsl_matrix_set_zero(*args, **kwargs):
return __block.gsl_matrix_set_zero(*args, **kwargs)
gsl_matrix_set_zero = __block.gsl_matrix_set_zero
def gsl_matrix_set_all(*args, **kwargs):
return __block.gsl_matrix_set_all(*args, **kwargs)
# Re-export GSL matrix (double) routines straight from the compiled
# `__block` extension module.  The generated per-function `def` wrappers
# were dead code: each one was immediately rebound by the
# `name = __block.name` assignment that followed it, so only the direct
# aliases are kept.  Module-level bindings are unchanged
# (each name is still the identical `__block` callable).
gsl_matrix_set_all = __block.gsl_matrix_set_all
gsl_matrix_set_identity = __block.gsl_matrix_set_identity
gsl_matrix_fread = __block.gsl_matrix_fread
gsl_matrix_fwrite = __block.gsl_matrix_fwrite
gsl_matrix_fscanf = __block.gsl_matrix_fscanf
gsl_matrix_fprintf = __block.gsl_matrix_fprintf
gsl_matrix_swap = __block.gsl_matrix_swap
gsl_matrix_swap_rows = __block.gsl_matrix_swap_rows
gsl_matrix_swap_columns = __block.gsl_matrix_swap_columns
gsl_matrix_swap_rowcol = __block.gsl_matrix_swap_rowcol
gsl_matrix_transpose = __block.gsl_matrix_transpose
gsl_matrix_max = __block.gsl_matrix_max
gsl_matrix_min = __block.gsl_matrix_min
gsl_matrix_minmax = __block.gsl_matrix_minmax
gsl_matrix_max_index = __block.gsl_matrix_max_index
gsl_matrix_min_index = __block.gsl_matrix_min_index
gsl_matrix_minmax_index = __block.gsl_matrix_minmax_index
gsl_matrix_isnull = __block.gsl_matrix_isnull
gsl_matrix_diagonal = __block.gsl_matrix_diagonal
gsl_matrix_subdiagonal = __block.gsl_matrix_subdiagonal
gsl_matrix_superdiagonal = __block.gsl_matrix_superdiagonal
# Re-export GSL vector<float> routines from the compiled `__block`
# extension module.  The per-function `def` wrappers that the generator
# emitted were dead: each was immediately shadowed by the alias
# assignment below it, so the assignments alone reproduce the module's
# public bindings exactly.
gsl_vector_float_set_zero = __block.gsl_vector_float_set_zero
gsl_vector_float_set_all = __block.gsl_vector_float_set_all
gsl_vector_float_set_basis = __block.gsl_vector_float_set_basis
gsl_vector_float_fread = __block.gsl_vector_float_fread
gsl_vector_float_fwrite = __block.gsl_vector_float_fwrite
gsl_vector_float_fscanf = __block.gsl_vector_float_fscanf
gsl_vector_float_fprintf = __block.gsl_vector_float_fprintf
gsl_vector_float_reverse = __block.gsl_vector_float_reverse
gsl_vector_float_swap = __block.gsl_vector_float_swap
gsl_vector_float_swap_elements = __block.gsl_vector_float_swap_elements
gsl_vector_float_max = __block.gsl_vector_float_max
gsl_vector_float_min = __block.gsl_vector_float_min
gsl_vector_float_minmax = __block.gsl_vector_float_minmax
gsl_vector_float_max_index = __block.gsl_vector_float_max_index
gsl_vector_float_min_index = __block.gsl_vector_float_min_index
gsl_vector_float_minmax_index = __block.gsl_vector_float_minmax_index
gsl_vector_float_isnull = __block.gsl_vector_float_isnull
# Re-export GSL matrix<float> routines from the compiled `__block`
# extension module.  The generated `def` wrappers were dead code (each
# was rebound immediately by the following assignment); keeping only the
# direct aliases leaves every module-level name bound to the same
# `__block` callable as before.
gsl_matrix_float_set_zero = __block.gsl_matrix_float_set_zero
gsl_matrix_float_set_all = __block.gsl_matrix_float_set_all
gsl_matrix_float_set_identity = __block.gsl_matrix_float_set_identity
gsl_matrix_float_fread = __block.gsl_matrix_float_fread
gsl_matrix_float_fwrite = __block.gsl_matrix_float_fwrite
gsl_matrix_float_fscanf = __block.gsl_matrix_float_fscanf
gsl_matrix_float_fprintf = __block.gsl_matrix_float_fprintf
gsl_matrix_float_swap = __block.gsl_matrix_float_swap
gsl_matrix_float_swap_rows = __block.gsl_matrix_float_swap_rows
gsl_matrix_float_swap_columns = __block.gsl_matrix_float_swap_columns
gsl_matrix_float_swap_rowcol = __block.gsl_matrix_float_swap_rowcol
gsl_matrix_float_transpose = __block.gsl_matrix_float_transpose
gsl_matrix_float_max = __block.gsl_matrix_float_max
gsl_matrix_float_min = __block.gsl_matrix_float_min
gsl_matrix_float_minmax = __block.gsl_matrix_float_minmax
gsl_matrix_float_max_index = __block.gsl_matrix_float_max_index
gsl_matrix_float_min_index = __block.gsl_matrix_float_min_index
gsl_matrix_float_minmax_index = __block.gsl_matrix_float_minmax_index
gsl_matrix_float_isnull = __block.gsl_matrix_float_isnull
gsl_matrix_float_diagonal = __block.gsl_matrix_float_diagonal
gsl_matrix_float_subdiagonal = __block.gsl_matrix_float_subdiagonal
gsl_matrix_float_superdiagonal = __block.gsl_matrix_float_superdiagonal
# Re-export GSL vector<long> routines from the compiled `__block`
# extension module.  The generated per-function `def` wrappers were
# immediately shadowed by these assignments and never callable, so they
# are dropped as dead code; the public bindings are unchanged.
gsl_vector_long_set_zero = __block.gsl_vector_long_set_zero
gsl_vector_long_set_all = __block.gsl_vector_long_set_all
gsl_vector_long_set_basis = __block.gsl_vector_long_set_basis
gsl_vector_long_fread = __block.gsl_vector_long_fread
gsl_vector_long_fwrite = __block.gsl_vector_long_fwrite
gsl_vector_long_fscanf = __block.gsl_vector_long_fscanf
gsl_vector_long_fprintf = __block.gsl_vector_long_fprintf
gsl_vector_long_reverse = __block.gsl_vector_long_reverse
gsl_vector_long_swap = __block.gsl_vector_long_swap
gsl_vector_long_swap_elements = __block.gsl_vector_long_swap_elements
gsl_vector_long_max = __block.gsl_vector_long_max
gsl_vector_long_min = __block.gsl_vector_long_min
gsl_vector_long_minmax = __block.gsl_vector_long_minmax
gsl_vector_long_max_index = __block.gsl_vector_long_max_index
gsl_vector_long_min_index = __block.gsl_vector_long_min_index
gsl_vector_long_minmax_index = __block.gsl_vector_long_minmax_index
gsl_vector_long_isnull = __block.gsl_vector_long_isnull
# Re-export GSL matrix<long> routines from the compiled `__block`
# extension module.  The generated `def` wrappers were dead code — each
# was rebound by the assignment right after it — so only the direct
# aliases remain.  Every module-level name stays bound to the identical
# `__block` callable.
gsl_matrix_long_set_zero = __block.gsl_matrix_long_set_zero
gsl_matrix_long_set_all = __block.gsl_matrix_long_set_all
gsl_matrix_long_set_identity = __block.gsl_matrix_long_set_identity
gsl_matrix_long_fread = __block.gsl_matrix_long_fread
gsl_matrix_long_fwrite = __block.gsl_matrix_long_fwrite
gsl_matrix_long_fscanf = __block.gsl_matrix_long_fscanf
gsl_matrix_long_fprintf = __block.gsl_matrix_long_fprintf
gsl_matrix_long_swap = __block.gsl_matrix_long_swap
gsl_matrix_long_swap_rows = __block.gsl_matrix_long_swap_rows
gsl_matrix_long_swap_columns = __block.gsl_matrix_long_swap_columns
gsl_matrix_long_swap_rowcol = __block.gsl_matrix_long_swap_rowcol
gsl_matrix_long_transpose = __block.gsl_matrix_long_transpose
gsl_matrix_long_max = __block.gsl_matrix_long_max
gsl_matrix_long_min = __block.gsl_matrix_long_min
gsl_matrix_long_minmax = __block.gsl_matrix_long_minmax
gsl_matrix_long_max_index = __block.gsl_matrix_long_max_index
gsl_matrix_long_min_index = __block.gsl_matrix_long_min_index
gsl_matrix_long_minmax_index = __block.gsl_matrix_long_minmax_index
gsl_matrix_long_isnull = __block.gsl_matrix_long_isnull
gsl_matrix_long_diagonal = __block.gsl_matrix_long_diagonal
gsl_matrix_long_subdiagonal = __block.gsl_matrix_long_subdiagonal
gsl_matrix_long_superdiagonal = __block.gsl_matrix_long_superdiagonal
# Re-export GSL vector<int> routines from the compiled `__block`
# extension module.  The generated per-function `def` wrappers were
# immediately shadowed by these alias assignments (dead code), so the
# assignments alone are kept; public bindings are unchanged.
gsl_vector_int_set_zero = __block.gsl_vector_int_set_zero
gsl_vector_int_set_all = __block.gsl_vector_int_set_all
gsl_vector_int_set_basis = __block.gsl_vector_int_set_basis
gsl_vector_int_fread = __block.gsl_vector_int_fread
gsl_vector_int_fwrite = __block.gsl_vector_int_fwrite
gsl_vector_int_fscanf = __block.gsl_vector_int_fscanf
gsl_vector_int_fprintf = __block.gsl_vector_int_fprintf
gsl_vector_int_reverse = __block.gsl_vector_int_reverse
gsl_vector_int_swap = __block.gsl_vector_int_swap
gsl_vector_int_swap_elements = __block.gsl_vector_int_swap_elements
gsl_vector_int_max = __block.gsl_vector_int_max
gsl_vector_int_min = __block.gsl_vector_int_min
gsl_vector_int_minmax = __block.gsl_vector_int_minmax
gsl_vector_int_max_index = __block.gsl_vector_int_max_index
gsl_vector_int_min_index = __block.gsl_vector_int_min_index
gsl_vector_int_minmax_index = __block.gsl_vector_int_minmax_index
gsl_vector_int_isnull = __block.gsl_vector_int_isnull
# Re-export GSL matrix<int> routines from the compiled `__block`
# extension module.  Each generated `def` wrapper was rebound by the
# assignment immediately following it, so the defs never took effect;
# they are removed as dead code and the direct aliases retained.
gsl_matrix_int_set_zero = __block.gsl_matrix_int_set_zero
gsl_matrix_int_set_all = __block.gsl_matrix_int_set_all
gsl_matrix_int_set_identity = __block.gsl_matrix_int_set_identity
gsl_matrix_int_fread = __block.gsl_matrix_int_fread
gsl_matrix_int_fwrite = __block.gsl_matrix_int_fwrite
gsl_matrix_int_fscanf = __block.gsl_matrix_int_fscanf
gsl_matrix_int_fprintf = __block.gsl_matrix_int_fprintf
gsl_matrix_int_swap = __block.gsl_matrix_int_swap
gsl_matrix_int_swap_rows = __block.gsl_matrix_int_swap_rows
gsl_matrix_int_swap_columns = __block.gsl_matrix_int_swap_columns
gsl_matrix_int_swap_rowcol = __block.gsl_matrix_int_swap_rowcol
gsl_matrix_int_transpose = __block.gsl_matrix_int_transpose
gsl_matrix_int_max = __block.gsl_matrix_int_max
gsl_matrix_int_min = __block.gsl_matrix_int_min
gsl_matrix_int_minmax = __block.gsl_matrix_int_minmax
gsl_matrix_int_max_index = __block.gsl_matrix_int_max_index
gsl_matrix_int_min_index = __block.gsl_matrix_int_min_index
gsl_matrix_int_minmax_index = __block.gsl_matrix_int_minmax_index
gsl_matrix_int_isnull = __block.gsl_matrix_int_isnull
gsl_matrix_int_diagonal = __block.gsl_matrix_int_diagonal
gsl_matrix_int_subdiagonal = __block.gsl_matrix_int_subdiagonal
gsl_matrix_int_superdiagonal = __block.gsl_matrix_int_superdiagonal
# Re-export GSL vector<short> routines from the compiled `__block`
# extension module.  The generated `def` wrappers were dead code (each
# immediately shadowed by its alias assignment); only the assignments
# are kept, leaving all public bindings unchanged.
gsl_vector_short_set_zero = __block.gsl_vector_short_set_zero
gsl_vector_short_set_all = __block.gsl_vector_short_set_all
gsl_vector_short_set_basis = __block.gsl_vector_short_set_basis
gsl_vector_short_fread = __block.gsl_vector_short_fread
gsl_vector_short_fwrite = __block.gsl_vector_short_fwrite
gsl_vector_short_fscanf = __block.gsl_vector_short_fscanf
gsl_vector_short_fprintf = __block.gsl_vector_short_fprintf
gsl_vector_short_reverse = __block.gsl_vector_short_reverse
gsl_vector_short_swap = __block.gsl_vector_short_swap
gsl_vector_short_swap_elements = __block.gsl_vector_short_swap_elements
gsl_vector_short_max = __block.gsl_vector_short_max
gsl_vector_short_min = __block.gsl_vector_short_min
gsl_vector_short_minmax = __block.gsl_vector_short_minmax
gsl_vector_short_max_index = __block.gsl_vector_short_max_index
gsl_vector_short_min_index = __block.gsl_vector_short_min_index
gsl_vector_short_minmax_index = __block.gsl_vector_short_minmax_index
gsl_vector_short_isnull = __block.gsl_vector_short_isnull
# Re-export GSL matrix<short> routines from the compiled `__block`
# extension module.  The generated per-function `def` wrappers never
# survived — each was rebound by the following assignment — so they are
# removed as dead code; module-level names are bound exactly as before.
gsl_matrix_short_set_zero = __block.gsl_matrix_short_set_zero
gsl_matrix_short_set_all = __block.gsl_matrix_short_set_all
gsl_matrix_short_set_identity = __block.gsl_matrix_short_set_identity
gsl_matrix_short_fread = __block.gsl_matrix_short_fread
gsl_matrix_short_fwrite = __block.gsl_matrix_short_fwrite
gsl_matrix_short_fscanf = __block.gsl_matrix_short_fscanf
gsl_matrix_short_fprintf = __block.gsl_matrix_short_fprintf
gsl_matrix_short_swap = __block.gsl_matrix_short_swap
gsl_matrix_short_swap_rows = __block.gsl_matrix_short_swap_rows
gsl_matrix_short_swap_columns = __block.gsl_matrix_short_swap_columns
gsl_matrix_short_swap_rowcol = __block.gsl_matrix_short_swap_rowcol
gsl_matrix_short_transpose = __block.gsl_matrix_short_transpose
gsl_matrix_short_max = __block.gsl_matrix_short_max
gsl_matrix_short_min = __block.gsl_matrix_short_min
gsl_matrix_short_minmax = __block.gsl_matrix_short_minmax
gsl_matrix_short_max_index = __block.gsl_matrix_short_max_index
gsl_matrix_short_min_index = __block.gsl_matrix_short_min_index
gsl_matrix_short_minmax_index = __block.gsl_matrix_short_minmax_index
gsl_matrix_short_isnull = __block.gsl_matrix_short_isnull
gsl_matrix_short_diagonal = __block.gsl_matrix_short_diagonal
gsl_matrix_short_subdiagonal = __block.gsl_matrix_short_subdiagonal
gsl_matrix_short_superdiagonal = __block.gsl_matrix_short_superdiagonal
# Re-export GSL vector<char> routines from the compiled `__block`
# extension module.  The generated `def` wrappers were dead code — each
# immediately shadowed by its alias assignment — so the assignments
# alone are kept; public bindings are unchanged.
gsl_vector_char_set_zero = __block.gsl_vector_char_set_zero
gsl_vector_char_set_all = __block.gsl_vector_char_set_all
gsl_vector_char_set_basis = __block.gsl_vector_char_set_basis
gsl_vector_char_fread = __block.gsl_vector_char_fread
gsl_vector_char_fwrite = __block.gsl_vector_char_fwrite
gsl_vector_char_fscanf = __block.gsl_vector_char_fscanf
gsl_vector_char_fprintf = __block.gsl_vector_char_fprintf
gsl_vector_char_reverse = __block.gsl_vector_char_reverse
gsl_vector_char_swap = __block.gsl_vector_char_swap
gsl_vector_char_swap_elements = __block.gsl_vector_char_swap_elements
gsl_vector_char_max = __block.gsl_vector_char_max
gsl_vector_char_min = __block.gsl_vector_char_min
gsl_vector_char_minmax = __block.gsl_vector_char_minmax
gsl_vector_char_max_index = __block.gsl_vector_char_max_index
gsl_vector_char_min_index = __block.gsl_vector_char_min_index
gsl_vector_char_minmax_index = __block.gsl_vector_char_minmax_index
gsl_vector_char_isnull = __block.gsl_vector_char_isnull
# Re-export GSL matrix<char> routines from the compiled `__block`
# extension module.  Each generated `def` wrapper was rebound by the
# assignment right after it and never callable; the defs are removed as
# dead code and the direct aliases retained, leaving every module-level
# name bound to the same `__block` callable.
gsl_matrix_char_set_zero = __block.gsl_matrix_char_set_zero
gsl_matrix_char_set_all = __block.gsl_matrix_char_set_all
gsl_matrix_char_set_identity = __block.gsl_matrix_char_set_identity
gsl_matrix_char_fread = __block.gsl_matrix_char_fread
gsl_matrix_char_fwrite = __block.gsl_matrix_char_fwrite
gsl_matrix_char_fscanf = __block.gsl_matrix_char_fscanf
gsl_matrix_char_fprintf = __block.gsl_matrix_char_fprintf
gsl_matrix_char_swap = __block.gsl_matrix_char_swap
gsl_matrix_char_swap_rows = __block.gsl_matrix_char_swap_rows
gsl_matrix_char_swap_columns = __block.gsl_matrix_char_swap_columns
gsl_matrix_char_swap_rowcol = __block.gsl_matrix_char_swap_rowcol
gsl_matrix_char_transpose = __block.gsl_matrix_char_transpose
gsl_matrix_char_max = __block.gsl_matrix_char_max
gsl_matrix_char_min = __block.gsl_matrix_char_min
gsl_matrix_char_minmax = __block.gsl_matrix_char_minmax
gsl_matrix_char_max_index = __block.gsl_matrix_char_max_index
gsl_matrix_char_min_index = __block.gsl_matrix_char_min_index
gsl_matrix_char_minmax_index = __block.gsl_matrix_char_minmax_index
gsl_matrix_char_isnull = __block.gsl_matrix_char_isnull
gsl_matrix_char_diagonal = __block.gsl_matrix_char_diagonal
gsl_matrix_char_subdiagonal = __block.gsl_matrix_char_subdiagonal
gsl_matrix_char_superdiagonal = __block.gsl_matrix_char_superdiagonal
# Re-export the gsl_vector_complex_* routines straight from the underlying
# extension module.  Each generated `def` wrapper was immediately shadowed
# by a `name = __block.name` assignment, so the defs were dead code; only
# the direct bindings remain.  Public names and bound objects are unchanged.
gsl_vector_complex_set_zero = __block.gsl_vector_complex_set_zero
gsl_vector_complex_set_all = __block.gsl_vector_complex_set_all
gsl_vector_complex_set_basis = __block.gsl_vector_complex_set_basis
gsl_vector_complex_fread = __block.gsl_vector_complex_fread
gsl_vector_complex_fwrite = __block.gsl_vector_complex_fwrite
gsl_vector_complex_fscanf = __block.gsl_vector_complex_fscanf
gsl_vector_complex_fprintf = __block.gsl_vector_complex_fprintf
gsl_vector_complex_reverse = __block.gsl_vector_complex_reverse
gsl_vector_complex_swap = __block.gsl_vector_complex_swap
gsl_vector_complex_swap_elements = __block.gsl_vector_complex_swap_elements
gsl_vector_complex_isnull = __block.gsl_vector_complex_isnull
# Re-export the gsl_matrix_complex_* routines straight from the underlying
# extension module.  Each generated `def` wrapper was immediately shadowed
# by a `name = __block.name` assignment, so the defs were dead code; only
# the direct bindings remain.  Public names and bound objects are unchanged.
gsl_matrix_complex_set_zero = __block.gsl_matrix_complex_set_zero
gsl_matrix_complex_set_all = __block.gsl_matrix_complex_set_all
gsl_matrix_complex_set_identity = __block.gsl_matrix_complex_set_identity
gsl_matrix_complex_fread = __block.gsl_matrix_complex_fread
gsl_matrix_complex_fwrite = __block.gsl_matrix_complex_fwrite
gsl_matrix_complex_fscanf = __block.gsl_matrix_complex_fscanf
gsl_matrix_complex_fprintf = __block.gsl_matrix_complex_fprintf
gsl_matrix_complex_swap = __block.gsl_matrix_complex_swap
gsl_matrix_complex_swap_rows = __block.gsl_matrix_complex_swap_rows
gsl_matrix_complex_swap_columns = __block.gsl_matrix_complex_swap_columns
gsl_matrix_complex_swap_rowcol = __block.gsl_matrix_complex_swap_rowcol
gsl_matrix_complex_transpose = __block.gsl_matrix_complex_transpose
gsl_matrix_complex_isnull = __block.gsl_matrix_complex_isnull
gsl_matrix_complex_diagonal = __block.gsl_matrix_complex_diagonal
gsl_matrix_complex_subdiagonal = __block.gsl_matrix_complex_subdiagonal
gsl_matrix_complex_superdiagonal = __block.gsl_matrix_complex_superdiagonal
# Re-export the gsl_vector_complex_float_* routines straight from the
# underlying extension module.  Each generated `def` wrapper was immediately
# shadowed by a `name = __block.name` assignment, so the defs were dead
# code; only the direct bindings remain.  Public names and bound objects
# are unchanged.
gsl_vector_complex_float_set_zero = __block.gsl_vector_complex_float_set_zero
gsl_vector_complex_float_set_all = __block.gsl_vector_complex_float_set_all
gsl_vector_complex_float_set_basis = __block.gsl_vector_complex_float_set_basis
gsl_vector_complex_float_fread = __block.gsl_vector_complex_float_fread
gsl_vector_complex_float_fwrite = __block.gsl_vector_complex_float_fwrite
gsl_vector_complex_float_fscanf = __block.gsl_vector_complex_float_fscanf
gsl_vector_complex_float_fprintf = __block.gsl_vector_complex_float_fprintf
gsl_vector_complex_float_reverse = __block.gsl_vector_complex_float_reverse
gsl_vector_complex_float_swap = __block.gsl_vector_complex_float_swap
gsl_vector_complex_float_swap_elements = __block.gsl_vector_complex_float_swap_elements
gsl_vector_complex_float_isnull = __block.gsl_vector_complex_float_isnull
# Re-export the gsl_matrix_complex_float_* routines straight from the
# underlying extension module.  Each generated `def` wrapper was immediately
# shadowed by a `name = __block.name` assignment, so the defs were dead
# code; only the direct bindings remain.  Public names and bound objects
# are unchanged.
gsl_matrix_complex_float_set_zero = __block.gsl_matrix_complex_float_set_zero
gsl_matrix_complex_float_set_all = __block.gsl_matrix_complex_float_set_all
gsl_matrix_complex_float_set_identity = __block.gsl_matrix_complex_float_set_identity
gsl_matrix_complex_float_fread = __block.gsl_matrix_complex_float_fread
gsl_matrix_complex_float_fwrite = __block.gsl_matrix_complex_float_fwrite
gsl_matrix_complex_float_fscanf = __block.gsl_matrix_complex_float_fscanf
gsl_matrix_complex_float_fprintf = __block.gsl_matrix_complex_float_fprintf
gsl_matrix_complex_float_swap = __block.gsl_matrix_complex_float_swap
gsl_matrix_complex_float_swap_rows = __block.gsl_matrix_complex_float_swap_rows
gsl_matrix_complex_float_swap_columns = __block.gsl_matrix_complex_float_swap_columns
gsl_matrix_complex_float_swap_rowcol = __block.gsl_matrix_complex_float_swap_rowcol
gsl_matrix_complex_float_transpose = __block.gsl_matrix_complex_float_transpose
gsl_matrix_complex_float_isnull = __block.gsl_matrix_complex_float_isnull
gsl_matrix_complex_float_diagonal = __block.gsl_matrix_complex_float_diagonal
gsl_matrix_complex_float_subdiagonal = __block.gsl_matrix_complex_float_subdiagonal
gsl_matrix_complex_float_superdiagonal = __block.gsl_matrix_complex_float_superdiagonal
# This file is compatible with both classic and new-style classes.
| [
"__block.gsl_vector_short_fscanf",
"__block.gsl_matrix_short_min_index",
"__block.gsl_vector_short_max_index",
"__block.gsl_matrix_int_diagonal",
"__block.gsl_matrix_fwrite",
"__block.gsl_vector_char_set_basis",
"__block.gsl_matrix_float_transpose",
"__block.gsl_vector_complex_fwrite",
"__block.gsl_... | [((2136, 2180), '__block.gsl_vector_set_zero', '__block.gsl_vector_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (2163, 2180), False, 'import __block\n'), ((2282, 2325), '__block.gsl_vector_set_all', '__block.gsl_vector_set_all', (['*args'], {}), '(*args, **kwargs)\n', (2308, 2325), False, 'import __block\n'), ((2427, 2472), '__block.gsl_vector_set_basis', '__block.gsl_vector_set_basis', (['*args'], {}), '(*args, **kwargs)\n', (2455, 2472), False, 'import __block\n'), ((2574, 2615), '__block.gsl_vector_fread', '__block.gsl_vector_fread', (['*args'], {}), '(*args, **kwargs)\n', (2598, 2615), False, 'import __block\n'), ((2710, 2752), '__block.gsl_vector_fwrite', '__block.gsl_vector_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (2735, 2752), False, 'import __block\n'), ((2849, 2891), '__block.gsl_vector_fscanf', '__block.gsl_vector_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (2874, 2891), False, 'import __block\n'), ((2989, 3032), '__block.gsl_vector_fprintf', '__block.gsl_vector_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (3015, 3032), False, 'import __block\n'), ((3132, 3175), '__block.gsl_vector_reverse', '__block.gsl_vector_reverse', (['*args'], {}), '(*args, **kwargs)\n', (3158, 3175), False, 'import __block\n'), ((3272, 3312), '__block.gsl_vector_swap', '__block.gsl_vector_swap', (['*args'], {}), '(*args, **kwargs)\n', (3295, 3312), False, 'import __block\n'), ((3412, 3461), '__block.gsl_vector_swap_elements', '__block.gsl_vector_swap_elements', (['*args'], {}), '(*args, **kwargs)\n', (3444, 3461), False, 'import __block\n'), ((3569, 3608), '__block.gsl_vector_max', '__block.gsl_vector_max', (['*args'], {}), '(*args, **kwargs)\n', (3591, 3608), False, 'import __block\n'), ((3696, 3735), '__block.gsl_vector_min', '__block.gsl_vector_min', (['*args'], {}), '(*args, **kwargs)\n', (3718, 3735), False, 'import __block\n'), ((3826, 3868), '__block.gsl_vector_minmax', '__block.gsl_vector_minmax', (['*args'], {}), '(*args, 
**kwargs)\n', (3851, 3868), False, 'import __block\n'), ((3968, 4013), '__block.gsl_vector_max_index', '__block.gsl_vector_max_index', (['*args'], {}), '(*args, **kwargs)\n', (3996, 4013), False, 'import __block\n'), ((4119, 4164), '__block.gsl_vector_min_index', '__block.gsl_vector_min_index', (['*args'], {}), '(*args, **kwargs)\n', (4147, 4164), False, 'import __block\n'), ((4273, 4321), '__block.gsl_vector_minmax_index', '__block.gsl_vector_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (4304, 4321), False, 'import __block\n'), ((4430, 4472), '__block.gsl_vector_isnull', '__block.gsl_vector_isnull', (['*args'], {}), '(*args, **kwargs)\n', (4455, 4472), False, 'import __block\n'), ((4571, 4615), '__block.gsl_matrix_set_zero', '__block.gsl_matrix_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (4598, 4615), False, 'import __block\n'), ((4717, 4760), '__block.gsl_matrix_set_all', '__block.gsl_matrix_set_all', (['*args'], {}), '(*args, **kwargs)\n', (4743, 4760), False, 'import __block\n'), ((4865, 4913), '__block.gsl_matrix_set_identity', '__block.gsl_matrix_set_identity', (['*args'], {}), '(*args, **kwargs)\n', (4896, 4913), False, 'import __block\n'), ((5021, 5062), '__block.gsl_matrix_fread', '__block.gsl_matrix_fread', (['*args'], {}), '(*args, **kwargs)\n', (5045, 5062), False, 'import __block\n'), ((5157, 5199), '__block.gsl_matrix_fwrite', '__block.gsl_matrix_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (5182, 5199), False, 'import __block\n'), ((5296, 5338), '__block.gsl_matrix_fscanf', '__block.gsl_matrix_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (5321, 5338), False, 'import __block\n'), ((5436, 5479), '__block.gsl_matrix_fprintf', '__block.gsl_matrix_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (5462, 5479), False, 'import __block\n'), ((5576, 5616), '__block.gsl_matrix_swap', '__block.gsl_matrix_swap', (['*args'], {}), '(*args, **kwargs)\n', (5599, 5616), False, 'import __block\n'), ((5712, 5757), 
'__block.gsl_matrix_swap_rows', '__block.gsl_matrix_swap_rows', (['*args'], {}), '(*args, **kwargs)\n', (5740, 5757), False, 'import __block\n'), ((5866, 5914), '__block.gsl_matrix_swap_columns', '__block.gsl_matrix_swap_columns', (['*args'], {}), '(*args, **kwargs)\n', (5897, 5914), False, 'import __block\n'), ((6028, 6075), '__block.gsl_matrix_swap_rowcol', '__block.gsl_matrix_swap_rowcol', (['*args'], {}), '(*args, **kwargs)\n', (6058, 6075), False, 'import __block\n'), ((6185, 6230), '__block.gsl_matrix_transpose', '__block.gsl_matrix_transpose', (['*args'], {}), '(*args, **kwargs)\n', (6213, 6230), False, 'import __block\n'), ((6330, 6369), '__block.gsl_matrix_max', '__block.gsl_matrix_max', (['*args'], {}), '(*args, **kwargs)\n', (6352, 6369), False, 'import __block\n'), ((6457, 6496), '__block.gsl_matrix_min', '__block.gsl_matrix_min', (['*args'], {}), '(*args, **kwargs)\n', (6479, 6496), False, 'import __block\n'), ((6587, 6629), '__block.gsl_matrix_minmax', '__block.gsl_matrix_minmax', (['*args'], {}), '(*args, **kwargs)\n', (6612, 6629), False, 'import __block\n'), ((6729, 6774), '__block.gsl_matrix_max_index', '__block.gsl_matrix_max_index', (['*args'], {}), '(*args, **kwargs)\n', (6757, 6774), False, 'import __block\n'), ((6880, 6925), '__block.gsl_matrix_min_index', '__block.gsl_matrix_min_index', (['*args'], {}), '(*args, **kwargs)\n', (6908, 6925), False, 'import __block\n'), ((7034, 7082), '__block.gsl_matrix_minmax_index', '__block.gsl_matrix_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (7065, 7082), False, 'import __block\n'), ((7191, 7233), '__block.gsl_matrix_isnull', '__block.gsl_matrix_isnull', (['*args'], {}), '(*args, **kwargs)\n', (7216, 7233), False, 'import __block\n'), ((7332, 7376), '__block.gsl_matrix_diagonal', '__block.gsl_matrix_diagonal', (['*args'], {}), '(*args, **kwargs)\n', (7359, 7376), False, 'import __block\n'), ((7482, 7529), '__block.gsl_matrix_subdiagonal', '__block.gsl_matrix_subdiagonal', (['*args'], {}), 
'(*args, **kwargs)\n', (7512, 7529), False, 'import __block\n'), ((7643, 7692), '__block.gsl_matrix_superdiagonal', '__block.gsl_matrix_superdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (7675, 7692), False, 'import __block\n'), ((7811, 7861), '__block.gsl_vector_float_set_zero', '__block.gsl_vector_float_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (7844, 7861), False, 'import __block\n'), ((7981, 8030), '__block.gsl_vector_float_set_all', '__block.gsl_vector_float_set_all', (['*args'], {}), '(*args, **kwargs)\n', (8013, 8030), False, 'import __block\n'), ((8150, 8201), '__block.gsl_vector_float_set_basis', '__block.gsl_vector_float_set_basis', (['*args'], {}), '(*args, **kwargs)\n', (8184, 8201), False, 'import __block\n'), ((8321, 8368), '__block.gsl_vector_float_fread', '__block.gsl_vector_float_fread', (['*args'], {}), '(*args, **kwargs)\n', (8351, 8368), False, 'import __block\n'), ((8481, 8529), '__block.gsl_vector_float_fwrite', '__block.gsl_vector_float_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (8512, 8529), False, 'import __block\n'), ((8644, 8692), '__block.gsl_vector_float_fscanf', '__block.gsl_vector_float_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (8675, 8692), False, 'import __block\n'), ((8808, 8857), '__block.gsl_vector_float_fprintf', '__block.gsl_vector_float_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (8840, 8857), False, 'import __block\n'), ((8975, 9024), '__block.gsl_vector_float_reverse', '__block.gsl_vector_float_reverse', (['*args'], {}), '(*args, **kwargs)\n', (9007, 9024), False, 'import __block\n'), ((9139, 9185), '__block.gsl_vector_float_swap', '__block.gsl_vector_float_swap', (['*args'], {}), '(*args, **kwargs)\n', (9168, 9185), False, 'import __block\n'), ((9303, 9358), '__block.gsl_vector_float_swap_elements', '__block.gsl_vector_float_swap_elements', (['*args'], {}), '(*args, **kwargs)\n', (9341, 9358), False, 'import __block\n'), ((9484, 9529), '__block.gsl_vector_float_max', 
'__block.gsl_vector_float_max', (['*args'], {}), '(*args, **kwargs)\n', (9512, 9529), False, 'import __block\n'), ((9635, 9680), '__block.gsl_vector_float_min', '__block.gsl_vector_float_min', (['*args'], {}), '(*args, **kwargs)\n', (9663, 9680), False, 'import __block\n'), ((9789, 9837), '__block.gsl_vector_float_minmax', '__block.gsl_vector_float_minmax', (['*args'], {}), '(*args, **kwargs)\n', (9820, 9837), False, 'import __block\n'), ((9955, 10006), '__block.gsl_vector_float_max_index', '__block.gsl_vector_float_max_index', (['*args'], {}), '(*args, **kwargs)\n', (9989, 10006), False, 'import __block\n'), ((10130, 10181), '__block.gsl_vector_float_min_index', '__block.gsl_vector_float_min_index', (['*args'], {}), '(*args, **kwargs)\n', (10164, 10181), False, 'import __block\n'), ((10308, 10362), '__block.gsl_vector_float_minmax_index', '__block.gsl_vector_float_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (10345, 10362), False, 'import __block\n'), ((10489, 10537), '__block.gsl_vector_float_isnull', '__block.gsl_vector_float_isnull', (['*args'], {}), '(*args, **kwargs)\n', (10520, 10537), False, 'import __block\n'), ((10654, 10704), '__block.gsl_matrix_float_set_zero', '__block.gsl_matrix_float_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (10687, 10704), False, 'import __block\n'), ((10824, 10873), '__block.gsl_matrix_float_set_all', '__block.gsl_matrix_float_set_all', (['*args'], {}), '(*args, **kwargs)\n', (10856, 10873), False, 'import __block\n'), ((10996, 11050), '__block.gsl_matrix_float_set_identity', '__block.gsl_matrix_float_set_identity', (['*args'], {}), '(*args, **kwargs)\n', (11033, 11050), False, 'import __block\n'), ((11176, 11223), '__block.gsl_matrix_float_fread', '__block.gsl_matrix_float_fread', (['*args'], {}), '(*args, **kwargs)\n', (11206, 11223), False, 'import __block\n'), ((11336, 11384), '__block.gsl_matrix_float_fwrite', '__block.gsl_matrix_float_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (11367, 11384), False, 
'import __block\n'), ((11499, 11547), '__block.gsl_matrix_float_fscanf', '__block.gsl_matrix_float_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (11530, 11547), False, 'import __block\n'), ((11663, 11712), '__block.gsl_matrix_float_fprintf', '__block.gsl_matrix_float_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (11695, 11712), False, 'import __block\n'), ((11827, 11873), '__block.gsl_matrix_float_swap', '__block.gsl_matrix_float_swap', (['*args'], {}), '(*args, **kwargs)\n', (11856, 11873), False, 'import __block\n'), ((11987, 12038), '__block.gsl_matrix_float_swap_rows', '__block.gsl_matrix_float_swap_rows', (['*args'], {}), '(*args, **kwargs)\n', (12021, 12038), False, 'import __block\n'), ((12165, 12219), '__block.gsl_matrix_float_swap_columns', '__block.gsl_matrix_float_swap_columns', (['*args'], {}), '(*args, **kwargs)\n', (12202, 12219), False, 'import __block\n'), ((12351, 12404), '__block.gsl_matrix_float_swap_rowcol', '__block.gsl_matrix_float_swap_rowcol', (['*args'], {}), '(*args, **kwargs)\n', (12387, 12404), False, 'import __block\n'), ((12532, 12583), '__block.gsl_matrix_float_transpose', '__block.gsl_matrix_float_transpose', (['*args'], {}), '(*args, **kwargs)\n', (12566, 12583), False, 'import __block\n'), ((12701, 12746), '__block.gsl_matrix_float_max', '__block.gsl_matrix_float_max', (['*args'], {}), '(*args, **kwargs)\n', (12729, 12746), False, 'import __block\n'), ((12852, 12897), '__block.gsl_matrix_float_min', '__block.gsl_matrix_float_min', (['*args'], {}), '(*args, **kwargs)\n', (12880, 12897), False, 'import __block\n'), ((13006, 13054), '__block.gsl_matrix_float_minmax', '__block.gsl_matrix_float_minmax', (['*args'], {}), '(*args, **kwargs)\n', (13037, 13054), False, 'import __block\n'), ((13172, 13223), '__block.gsl_matrix_float_max_index', '__block.gsl_matrix_float_max_index', (['*args'], {}), '(*args, **kwargs)\n', (13206, 13223), False, 'import __block\n'), ((13347, 13398), '__block.gsl_matrix_float_min_index', 
'__block.gsl_matrix_float_min_index', (['*args'], {}), '(*args, **kwargs)\n', (13381, 13398), False, 'import __block\n'), ((13525, 13579), '__block.gsl_matrix_float_minmax_index', '__block.gsl_matrix_float_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (13562, 13579), False, 'import __block\n'), ((13706, 13754), '__block.gsl_matrix_float_isnull', '__block.gsl_matrix_float_isnull', (['*args'], {}), '(*args, **kwargs)\n', (13737, 13754), False, 'import __block\n'), ((13871, 13921), '__block.gsl_matrix_float_diagonal', '__block.gsl_matrix_float_diagonal', (['*args'], {}), '(*args, **kwargs)\n', (13904, 13921), False, 'import __block\n'), ((14045, 14098), '__block.gsl_matrix_float_subdiagonal', '__block.gsl_matrix_float_subdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (14081, 14098), False, 'import __block\n'), ((14230, 14285), '__block.gsl_matrix_float_superdiagonal', '__block.gsl_matrix_float_superdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (14268, 14285), False, 'import __block\n'), ((14415, 14464), '__block.gsl_vector_long_set_zero', '__block.gsl_vector_long_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (14447, 14464), False, 'import __block\n'), ((14581, 14629), '__block.gsl_vector_long_set_all', '__block.gsl_vector_long_set_all', (['*args'], {}), '(*args, **kwargs)\n', (14612, 14629), False, 'import __block\n'), ((14746, 14796), '__block.gsl_vector_long_set_basis', '__block.gsl_vector_long_set_basis', (['*args'], {}), '(*args, **kwargs)\n', (14779, 14796), False, 'import __block\n'), ((14913, 14959), '__block.gsl_vector_long_fread', '__block.gsl_vector_long_fread', (['*args'], {}), '(*args, **kwargs)\n', (14942, 14959), False, 'import __block\n'), ((15069, 15116), '__block.gsl_vector_long_fwrite', '__block.gsl_vector_long_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (15099, 15116), False, 'import __block\n'), ((15228, 15275), '__block.gsl_vector_long_fscanf', '__block.gsl_vector_long_fscanf', (['*args'], {}), '(*args, **kwargs)\n', 
(15258, 15275), False, 'import __block\n'), ((15388, 15436), '__block.gsl_vector_long_fprintf', '__block.gsl_vector_long_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (15419, 15436), False, 'import __block\n'), ((15551, 15599), '__block.gsl_vector_long_reverse', '__block.gsl_vector_long_reverse', (['*args'], {}), '(*args, **kwargs)\n', (15582, 15599), False, 'import __block\n'), ((15711, 15756), '__block.gsl_vector_long_swap', '__block.gsl_vector_long_swap', (['*args'], {}), '(*args, **kwargs)\n', (15739, 15756), False, 'import __block\n'), ((15871, 15925), '__block.gsl_vector_long_swap_elements', '__block.gsl_vector_long_swap_elements', (['*args'], {}), '(*args, **kwargs)\n', (15908, 15925), False, 'import __block\n'), ((16048, 16092), '__block.gsl_vector_long_max', '__block.gsl_vector_long_max', (['*args'], {}), '(*args, **kwargs)\n', (16075, 16092), False, 'import __block\n'), ((16195, 16239), '__block.gsl_vector_long_min', '__block.gsl_vector_long_min', (['*args'], {}), '(*args, **kwargs)\n', (16222, 16239), False, 'import __block\n'), ((16345, 16392), '__block.gsl_vector_long_minmax', '__block.gsl_vector_long_minmax', (['*args'], {}), '(*args, **kwargs)\n', (16375, 16392), False, 'import __block\n'), ((16507, 16557), '__block.gsl_vector_long_max_index', '__block.gsl_vector_long_max_index', (['*args'], {}), '(*args, **kwargs)\n', (16540, 16557), False, 'import __block\n'), ((16678, 16728), '__block.gsl_vector_long_min_index', '__block.gsl_vector_long_min_index', (['*args'], {}), '(*args, **kwargs)\n', (16711, 16728), False, 'import __block\n'), ((16852, 16905), '__block.gsl_vector_long_minmax_index', '__block.gsl_vector_long_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (16888, 16905), False, 'import __block\n'), ((17029, 17076), '__block.gsl_vector_long_isnull', '__block.gsl_vector_long_isnull', (['*args'], {}), '(*args, **kwargs)\n', (17059, 17076), False, 'import __block\n'), ((17190, 17239), '__block.gsl_matrix_long_set_zero', 
'__block.gsl_matrix_long_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (17222, 17239), False, 'import __block\n'), ((17356, 17404), '__block.gsl_matrix_long_set_all', '__block.gsl_matrix_long_set_all', (['*args'], {}), '(*args, **kwargs)\n', (17387, 17404), False, 'import __block\n'), ((17524, 17577), '__block.gsl_matrix_long_set_identity', '__block.gsl_matrix_long_set_identity', (['*args'], {}), '(*args, **kwargs)\n', (17560, 17577), False, 'import __block\n'), ((17700, 17746), '__block.gsl_matrix_long_fread', '__block.gsl_matrix_long_fread', (['*args'], {}), '(*args, **kwargs)\n', (17729, 17746), False, 'import __block\n'), ((17856, 17903), '__block.gsl_matrix_long_fwrite', '__block.gsl_matrix_long_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (17886, 17903), False, 'import __block\n'), ((18015, 18062), '__block.gsl_matrix_long_fscanf', '__block.gsl_matrix_long_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (18045, 18062), False, 'import __block\n'), ((18175, 18223), '__block.gsl_matrix_long_fprintf', '__block.gsl_matrix_long_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (18206, 18223), False, 'import __block\n'), ((18335, 18380), '__block.gsl_matrix_long_swap', '__block.gsl_matrix_long_swap', (['*args'], {}), '(*args, **kwargs)\n', (18363, 18380), False, 'import __block\n'), ((18491, 18541), '__block.gsl_matrix_long_swap_rows', '__block.gsl_matrix_long_swap_rows', (['*args'], {}), '(*args, **kwargs)\n', (18524, 18541), False, 'import __block\n'), ((18665, 18718), '__block.gsl_matrix_long_swap_columns', '__block.gsl_matrix_long_swap_columns', (['*args'], {}), '(*args, **kwargs)\n', (18701, 18718), False, 'import __block\n'), ((18847, 18899), '__block.gsl_matrix_long_swap_rowcol', '__block.gsl_matrix_long_swap_rowcol', (['*args'], {}), '(*args, **kwargs)\n', (18882, 18899), False, 'import __block\n'), ((19024, 19074), '__block.gsl_matrix_long_transpose', '__block.gsl_matrix_long_transpose', (['*args'], {}), '(*args, **kwargs)\n', (19057, 19074), 
False, 'import __block\n'), ((19189, 19233), '__block.gsl_matrix_long_max', '__block.gsl_matrix_long_max', (['*args'], {}), '(*args, **kwargs)\n', (19216, 19233), False, 'import __block\n'), ((19336, 19380), '__block.gsl_matrix_long_min', '__block.gsl_matrix_long_min', (['*args'], {}), '(*args, **kwargs)\n', (19363, 19380), False, 'import __block\n'), ((19486, 19533), '__block.gsl_matrix_long_minmax', '__block.gsl_matrix_long_minmax', (['*args'], {}), '(*args, **kwargs)\n', (19516, 19533), False, 'import __block\n'), ((19648, 19698), '__block.gsl_matrix_long_max_index', '__block.gsl_matrix_long_max_index', (['*args'], {}), '(*args, **kwargs)\n', (19681, 19698), False, 'import __block\n'), ((19819, 19869), '__block.gsl_matrix_long_min_index', '__block.gsl_matrix_long_min_index', (['*args'], {}), '(*args, **kwargs)\n', (19852, 19869), False, 'import __block\n'), ((19993, 20046), '__block.gsl_matrix_long_minmax_index', '__block.gsl_matrix_long_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (20029, 20046), False, 'import __block\n'), ((20170, 20217), '__block.gsl_matrix_long_isnull', '__block.gsl_matrix_long_isnull', (['*args'], {}), '(*args, **kwargs)\n', (20200, 20217), False, 'import __block\n'), ((20331, 20380), '__block.gsl_matrix_long_diagonal', '__block.gsl_matrix_long_diagonal', (['*args'], {}), '(*args, **kwargs)\n', (20363, 20380), False, 'import __block\n'), ((20501, 20553), '__block.gsl_matrix_long_subdiagonal', '__block.gsl_matrix_long_subdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (20536, 20553), False, 'import __block\n'), ((20682, 20736), '__block.gsl_matrix_long_superdiagonal', '__block.gsl_matrix_long_superdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (20719, 20736), False, 'import __block\n'), ((20863, 20911), '__block.gsl_vector_int_set_zero', '__block.gsl_vector_int_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (20894, 20911), False, 'import __block\n'), ((21025, 21072), '__block.gsl_vector_int_set_all', 
'__block.gsl_vector_int_set_all', (['*args'], {}), '(*args, **kwargs)\n', (21055, 21072), False, 'import __block\n'), ((21186, 21235), '__block.gsl_vector_int_set_basis', '__block.gsl_vector_int_set_basis', (['*args'], {}), '(*args, **kwargs)\n', (21218, 21235), False, 'import __block\n'), ((21349, 21394), '__block.gsl_vector_int_fread', '__block.gsl_vector_int_fread', (['*args'], {}), '(*args, **kwargs)\n', (21377, 21394), False, 'import __block\n'), ((21501, 21547), '__block.gsl_vector_int_fwrite', '__block.gsl_vector_int_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (21530, 21547), False, 'import __block\n'), ((21656, 21702), '__block.gsl_vector_int_fscanf', '__block.gsl_vector_int_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (21685, 21702), False, 'import __block\n'), ((21812, 21859), '__block.gsl_vector_int_fprintf', '__block.gsl_vector_int_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (21842, 21859), False, 'import __block\n'), ((21971, 22018), '__block.gsl_vector_int_reverse', '__block.gsl_vector_int_reverse', (['*args'], {}), '(*args, **kwargs)\n', (22001, 22018), False, 'import __block\n'), ((22127, 22171), '__block.gsl_vector_int_swap', '__block.gsl_vector_int_swap', (['*args'], {}), '(*args, **kwargs)\n', (22154, 22171), False, 'import __block\n'), ((22283, 22336), '__block.gsl_vector_int_swap_elements', '__block.gsl_vector_int_swap_elements', (['*args'], {}), '(*args, **kwargs)\n', (22319, 22336), False, 'import __block\n'), ((22456, 22499), '__block.gsl_vector_int_max', '__block.gsl_vector_int_max', (['*args'], {}), '(*args, **kwargs)\n', (22482, 22499), False, 'import __block\n'), ((22599, 22642), '__block.gsl_vector_int_min', '__block.gsl_vector_int_min', (['*args'], {}), '(*args, **kwargs)\n', (22625, 22642), False, 'import __block\n'), ((22745, 22791), '__block.gsl_vector_int_minmax', '__block.gsl_vector_int_minmax', (['*args'], {}), '(*args, **kwargs)\n', (22774, 22791), False, 'import __block\n'), ((22903, 22952), 
'__block.gsl_vector_int_max_index', '__block.gsl_vector_int_max_index', (['*args'], {}), '(*args, **kwargs)\n', (22935, 22952), False, 'import __block\n'), ((23070, 23119), '__block.gsl_vector_int_min_index', '__block.gsl_vector_int_min_index', (['*args'], {}), '(*args, **kwargs)\n', (23102, 23119), False, 'import __block\n'), ((23240, 23292), '__block.gsl_vector_int_minmax_index', '__block.gsl_vector_int_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (23275, 23292), False, 'import __block\n'), ((23413, 23459), '__block.gsl_vector_int_isnull', '__block.gsl_vector_int_isnull', (['*args'], {}), '(*args, **kwargs)\n', (23442, 23459), False, 'import __block\n'), ((23570, 23618), '__block.gsl_matrix_int_set_zero', '__block.gsl_matrix_int_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (23601, 23618), False, 'import __block\n'), ((23732, 23779), '__block.gsl_matrix_int_set_all', '__block.gsl_matrix_int_set_all', (['*args'], {}), '(*args, **kwargs)\n', (23762, 23779), False, 'import __block\n'), ((23896, 23948), '__block.gsl_matrix_int_set_identity', '__block.gsl_matrix_int_set_identity', (['*args'], {}), '(*args, **kwargs)\n', (23931, 23948), False, 'import __block\n'), ((24068, 24113), '__block.gsl_matrix_int_fread', '__block.gsl_matrix_int_fread', (['*args'], {}), '(*args, **kwargs)\n', (24096, 24113), False, 'import __block\n'), ((24220, 24266), '__block.gsl_matrix_int_fwrite', '__block.gsl_matrix_int_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (24249, 24266), False, 'import __block\n'), ((24375, 24421), '__block.gsl_matrix_int_fscanf', '__block.gsl_matrix_int_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (24404, 24421), False, 'import __block\n'), ((24531, 24578), '__block.gsl_matrix_int_fprintf', '__block.gsl_matrix_int_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (24561, 24578), False, 'import __block\n'), ((24687, 24731), '__block.gsl_matrix_int_swap', '__block.gsl_matrix_int_swap', (['*args'], {}), '(*args, **kwargs)\n', (24714, 24731), 
False, 'import __block\n'), ((24839, 24888), '__block.gsl_matrix_int_swap_rows', '__block.gsl_matrix_int_swap_rows', (['*args'], {}), '(*args, **kwargs)\n', (24871, 24888), False, 'import __block\n'), ((25009, 25061), '__block.gsl_matrix_int_swap_columns', '__block.gsl_matrix_int_swap_columns', (['*args'], {}), '(*args, **kwargs)\n', (25044, 25061), False, 'import __block\n'), ((25187, 25238), '__block.gsl_matrix_int_swap_rowcol', '__block.gsl_matrix_int_swap_rowcol', (['*args'], {}), '(*args, **kwargs)\n', (25221, 25238), False, 'import __block\n'), ((25360, 25409), '__block.gsl_matrix_int_transpose', '__block.gsl_matrix_int_transpose', (['*args'], {}), '(*args, **kwargs)\n', (25392, 25409), False, 'import __block\n'), ((25521, 25564), '__block.gsl_matrix_int_max', '__block.gsl_matrix_int_max', (['*args'], {}), '(*args, **kwargs)\n', (25547, 25564), False, 'import __block\n'), ((25664, 25707), '__block.gsl_matrix_int_min', '__block.gsl_matrix_int_min', (['*args'], {}), '(*args, **kwargs)\n', (25690, 25707), False, 'import __block\n'), ((25810, 25856), '__block.gsl_matrix_int_minmax', '__block.gsl_matrix_int_minmax', (['*args'], {}), '(*args, **kwargs)\n', (25839, 25856), False, 'import __block\n'), ((25968, 26017), '__block.gsl_matrix_int_max_index', '__block.gsl_matrix_int_max_index', (['*args'], {}), '(*args, **kwargs)\n', (26000, 26017), False, 'import __block\n'), ((26135, 26184), '__block.gsl_matrix_int_min_index', '__block.gsl_matrix_int_min_index', (['*args'], {}), '(*args, **kwargs)\n', (26167, 26184), False, 'import __block\n'), ((26305, 26357), '__block.gsl_matrix_int_minmax_index', '__block.gsl_matrix_int_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (26340, 26357), False, 'import __block\n'), ((26478, 26524), '__block.gsl_matrix_int_isnull', '__block.gsl_matrix_int_isnull', (['*args'], {}), '(*args, **kwargs)\n', (26507, 26524), False, 'import __block\n'), ((26635, 26683), '__block.gsl_matrix_int_diagonal', '__block.gsl_matrix_int_diagonal', 
(['*args'], {}), '(*args, **kwargs)\n', (26666, 26683), False, 'import __block\n'), ((26801, 26852), '__block.gsl_matrix_int_subdiagonal', '__block.gsl_matrix_int_subdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (26835, 26852), False, 'import __block\n'), ((26978, 27031), '__block.gsl_matrix_int_superdiagonal', '__block.gsl_matrix_int_superdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (27014, 27031), False, 'import __block\n'), ((27158, 27208), '__block.gsl_vector_short_set_zero', '__block.gsl_vector_short_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (27191, 27208), False, 'import __block\n'), ((27328, 27377), '__block.gsl_vector_short_set_all', '__block.gsl_vector_short_set_all', (['*args'], {}), '(*args, **kwargs)\n', (27360, 27377), False, 'import __block\n'), ((27497, 27548), '__block.gsl_vector_short_set_basis', '__block.gsl_vector_short_set_basis', (['*args'], {}), '(*args, **kwargs)\n', (27531, 27548), False, 'import __block\n'), ((27668, 27715), '__block.gsl_vector_short_fread', '__block.gsl_vector_short_fread', (['*args'], {}), '(*args, **kwargs)\n', (27698, 27715), False, 'import __block\n'), ((27828, 27876), '__block.gsl_vector_short_fwrite', '__block.gsl_vector_short_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (27859, 27876), False, 'import __block\n'), ((27991, 28039), '__block.gsl_vector_short_fscanf', '__block.gsl_vector_short_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (28022, 28039), False, 'import __block\n'), ((28155, 28204), '__block.gsl_vector_short_fprintf', '__block.gsl_vector_short_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (28187, 28204), False, 'import __block\n'), ((28322, 28371), '__block.gsl_vector_short_reverse', '__block.gsl_vector_short_reverse', (['*args'], {}), '(*args, **kwargs)\n', (28354, 28371), False, 'import __block\n'), ((28486, 28532), '__block.gsl_vector_short_swap', '__block.gsl_vector_short_swap', (['*args'], {}), '(*args, **kwargs)\n', (28515, 28532), False, 'import __block\n'), ((28650, 
28705), '__block.gsl_vector_short_swap_elements', '__block.gsl_vector_short_swap_elements', (['*args'], {}), '(*args, **kwargs)\n', (28688, 28705), False, 'import __block\n'), ((28831, 28876), '__block.gsl_vector_short_max', '__block.gsl_vector_short_max', (['*args'], {}), '(*args, **kwargs)\n', (28859, 28876), False, 'import __block\n'), ((28982, 29027), '__block.gsl_vector_short_min', '__block.gsl_vector_short_min', (['*args'], {}), '(*args, **kwargs)\n', (29010, 29027), False, 'import __block\n'), ((29136, 29184), '__block.gsl_vector_short_minmax', '__block.gsl_vector_short_minmax', (['*args'], {}), '(*args, **kwargs)\n', (29167, 29184), False, 'import __block\n'), ((29302, 29353), '__block.gsl_vector_short_max_index', '__block.gsl_vector_short_max_index', (['*args'], {}), '(*args, **kwargs)\n', (29336, 29353), False, 'import __block\n'), ((29477, 29528), '__block.gsl_vector_short_min_index', '__block.gsl_vector_short_min_index', (['*args'], {}), '(*args, **kwargs)\n', (29511, 29528), False, 'import __block\n'), ((29655, 29709), '__block.gsl_vector_short_minmax_index', '__block.gsl_vector_short_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (29692, 29709), False, 'import __block\n'), ((29836, 29884), '__block.gsl_vector_short_isnull', '__block.gsl_vector_short_isnull', (['*args'], {}), '(*args, **kwargs)\n', (29867, 29884), False, 'import __block\n'), ((30001, 30051), '__block.gsl_matrix_short_set_zero', '__block.gsl_matrix_short_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (30034, 30051), False, 'import __block\n'), ((30171, 30220), '__block.gsl_matrix_short_set_all', '__block.gsl_matrix_short_set_all', (['*args'], {}), '(*args, **kwargs)\n', (30203, 30220), False, 'import __block\n'), ((30343, 30397), '__block.gsl_matrix_short_set_identity', '__block.gsl_matrix_short_set_identity', (['*args'], {}), '(*args, **kwargs)\n', (30380, 30397), False, 'import __block\n'), ((30523, 30570), '__block.gsl_matrix_short_fread', 
'__block.gsl_matrix_short_fread', (['*args'], {}), '(*args, **kwargs)\n', (30553, 30570), False, 'import __block\n'), ((30683, 30731), '__block.gsl_matrix_short_fwrite', '__block.gsl_matrix_short_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (30714, 30731), False, 'import __block\n'), ((30846, 30894), '__block.gsl_matrix_short_fscanf', '__block.gsl_matrix_short_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (30877, 30894), False, 'import __block\n'), ((31010, 31059), '__block.gsl_matrix_short_fprintf', '__block.gsl_matrix_short_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (31042, 31059), False, 'import __block\n'), ((31174, 31220), '__block.gsl_matrix_short_swap', '__block.gsl_matrix_short_swap', (['*args'], {}), '(*args, **kwargs)\n', (31203, 31220), False, 'import __block\n'), ((31334, 31385), '__block.gsl_matrix_short_swap_rows', '__block.gsl_matrix_short_swap_rows', (['*args'], {}), '(*args, **kwargs)\n', (31368, 31385), False, 'import __block\n'), ((31512, 31566), '__block.gsl_matrix_short_swap_columns', '__block.gsl_matrix_short_swap_columns', (['*args'], {}), '(*args, **kwargs)\n', (31549, 31566), False, 'import __block\n'), ((31698, 31751), '__block.gsl_matrix_short_swap_rowcol', '__block.gsl_matrix_short_swap_rowcol', (['*args'], {}), '(*args, **kwargs)\n', (31734, 31751), False, 'import __block\n'), ((31879, 31930), '__block.gsl_matrix_short_transpose', '__block.gsl_matrix_short_transpose', (['*args'], {}), '(*args, **kwargs)\n', (31913, 31930), False, 'import __block\n'), ((32048, 32093), '__block.gsl_matrix_short_max', '__block.gsl_matrix_short_max', (['*args'], {}), '(*args, **kwargs)\n', (32076, 32093), False, 'import __block\n'), ((32199, 32244), '__block.gsl_matrix_short_min', '__block.gsl_matrix_short_min', (['*args'], {}), '(*args, **kwargs)\n', (32227, 32244), False, 'import __block\n'), ((32353, 32401), '__block.gsl_matrix_short_minmax', '__block.gsl_matrix_short_minmax', (['*args'], {}), '(*args, **kwargs)\n', (32384, 32401), False, 
'import __block\n'), ((32519, 32570), '__block.gsl_matrix_short_max_index', '__block.gsl_matrix_short_max_index', (['*args'], {}), '(*args, **kwargs)\n', (32553, 32570), False, 'import __block\n'), ((32694, 32745), '__block.gsl_matrix_short_min_index', '__block.gsl_matrix_short_min_index', (['*args'], {}), '(*args, **kwargs)\n', (32728, 32745), False, 'import __block\n'), ((32872, 32926), '__block.gsl_matrix_short_minmax_index', '__block.gsl_matrix_short_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (32909, 32926), False, 'import __block\n'), ((33053, 33101), '__block.gsl_matrix_short_isnull', '__block.gsl_matrix_short_isnull', (['*args'], {}), '(*args, **kwargs)\n', (33084, 33101), False, 'import __block\n'), ((33218, 33268), '__block.gsl_matrix_short_diagonal', '__block.gsl_matrix_short_diagonal', (['*args'], {}), '(*args, **kwargs)\n', (33251, 33268), False, 'import __block\n'), ((33392, 33445), '__block.gsl_matrix_short_subdiagonal', '__block.gsl_matrix_short_subdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (33428, 33445), False, 'import __block\n'), ((33577, 33632), '__block.gsl_matrix_short_superdiagonal', '__block.gsl_matrix_short_superdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (33615, 33632), False, 'import __block\n'), ((33762, 33811), '__block.gsl_vector_char_set_zero', '__block.gsl_vector_char_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (33794, 33811), False, 'import __block\n'), ((33928, 33976), '__block.gsl_vector_char_set_all', '__block.gsl_vector_char_set_all', (['*args'], {}), '(*args, **kwargs)\n', (33959, 33976), False, 'import __block\n'), ((34093, 34143), '__block.gsl_vector_char_set_basis', '__block.gsl_vector_char_set_basis', (['*args'], {}), '(*args, **kwargs)\n', (34126, 34143), False, 'import __block\n'), ((34260, 34306), '__block.gsl_vector_char_fread', '__block.gsl_vector_char_fread', (['*args'], {}), '(*args, **kwargs)\n', (34289, 34306), False, 'import __block\n'), ((34416, 34463), 
'__block.gsl_vector_char_fwrite', '__block.gsl_vector_char_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (34446, 34463), False, 'import __block\n'), ((34575, 34622), '__block.gsl_vector_char_fscanf', '__block.gsl_vector_char_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (34605, 34622), False, 'import __block\n'), ((34735, 34783), '__block.gsl_vector_char_fprintf', '__block.gsl_vector_char_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (34766, 34783), False, 'import __block\n'), ((34898, 34946), '__block.gsl_vector_char_reverse', '__block.gsl_vector_char_reverse', (['*args'], {}), '(*args, **kwargs)\n', (34929, 34946), False, 'import __block\n'), ((35058, 35103), '__block.gsl_vector_char_swap', '__block.gsl_vector_char_swap', (['*args'], {}), '(*args, **kwargs)\n', (35086, 35103), False, 'import __block\n'), ((35218, 35272), '__block.gsl_vector_char_swap_elements', '__block.gsl_vector_char_swap_elements', (['*args'], {}), '(*args, **kwargs)\n', (35255, 35272), False, 'import __block\n'), ((35395, 35439), '__block.gsl_vector_char_max', '__block.gsl_vector_char_max', (['*args'], {}), '(*args, **kwargs)\n', (35422, 35439), False, 'import __block\n'), ((35542, 35586), '__block.gsl_vector_char_min', '__block.gsl_vector_char_min', (['*args'], {}), '(*args, **kwargs)\n', (35569, 35586), False, 'import __block\n'), ((35692, 35739), '__block.gsl_vector_char_minmax', '__block.gsl_vector_char_minmax', (['*args'], {}), '(*args, **kwargs)\n', (35722, 35739), False, 'import __block\n'), ((35854, 35904), '__block.gsl_vector_char_max_index', '__block.gsl_vector_char_max_index', (['*args'], {}), '(*args, **kwargs)\n', (35887, 35904), False, 'import __block\n'), ((36025, 36075), '__block.gsl_vector_char_min_index', '__block.gsl_vector_char_min_index', (['*args'], {}), '(*args, **kwargs)\n', (36058, 36075), False, 'import __block\n'), ((36199, 36252), '__block.gsl_vector_char_minmax_index', '__block.gsl_vector_char_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', 
(36235, 36252), False, 'import __block\n'), ((36376, 36423), '__block.gsl_vector_char_isnull', '__block.gsl_vector_char_isnull', (['*args'], {}), '(*args, **kwargs)\n', (36406, 36423), False, 'import __block\n'), ((36537, 36586), '__block.gsl_matrix_char_set_zero', '__block.gsl_matrix_char_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (36569, 36586), False, 'import __block\n'), ((36703, 36751), '__block.gsl_matrix_char_set_all', '__block.gsl_matrix_char_set_all', (['*args'], {}), '(*args, **kwargs)\n', (36734, 36751), False, 'import __block\n'), ((36871, 36924), '__block.gsl_matrix_char_set_identity', '__block.gsl_matrix_char_set_identity', (['*args'], {}), '(*args, **kwargs)\n', (36907, 36924), False, 'import __block\n'), ((37047, 37093), '__block.gsl_matrix_char_fread', '__block.gsl_matrix_char_fread', (['*args'], {}), '(*args, **kwargs)\n', (37076, 37093), False, 'import __block\n'), ((37203, 37250), '__block.gsl_matrix_char_fwrite', '__block.gsl_matrix_char_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (37233, 37250), False, 'import __block\n'), ((37362, 37409), '__block.gsl_matrix_char_fscanf', '__block.gsl_matrix_char_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (37392, 37409), False, 'import __block\n'), ((37522, 37570), '__block.gsl_matrix_char_fprintf', '__block.gsl_matrix_char_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (37553, 37570), False, 'import __block\n'), ((37682, 37727), '__block.gsl_matrix_char_swap', '__block.gsl_matrix_char_swap', (['*args'], {}), '(*args, **kwargs)\n', (37710, 37727), False, 'import __block\n'), ((37838, 37888), '__block.gsl_matrix_char_swap_rows', '__block.gsl_matrix_char_swap_rows', (['*args'], {}), '(*args, **kwargs)\n', (37871, 37888), False, 'import __block\n'), ((38012, 38065), '__block.gsl_matrix_char_swap_columns', '__block.gsl_matrix_char_swap_columns', (['*args'], {}), '(*args, **kwargs)\n', (38048, 38065), False, 'import __block\n'), ((38194, 38246), '__block.gsl_matrix_char_swap_rowcol', 
'__block.gsl_matrix_char_swap_rowcol', (['*args'], {}), '(*args, **kwargs)\n', (38229, 38246), False, 'import __block\n'), ((38371, 38421), '__block.gsl_matrix_char_transpose', '__block.gsl_matrix_char_transpose', (['*args'], {}), '(*args, **kwargs)\n', (38404, 38421), False, 'import __block\n'), ((38536, 38580), '__block.gsl_matrix_char_max', '__block.gsl_matrix_char_max', (['*args'], {}), '(*args, **kwargs)\n', (38563, 38580), False, 'import __block\n'), ((38683, 38727), '__block.gsl_matrix_char_min', '__block.gsl_matrix_char_min', (['*args'], {}), '(*args, **kwargs)\n', (38710, 38727), False, 'import __block\n'), ((38833, 38880), '__block.gsl_matrix_char_minmax', '__block.gsl_matrix_char_minmax', (['*args'], {}), '(*args, **kwargs)\n', (38863, 38880), False, 'import __block\n'), ((38995, 39045), '__block.gsl_matrix_char_max_index', '__block.gsl_matrix_char_max_index', (['*args'], {}), '(*args, **kwargs)\n', (39028, 39045), False, 'import __block\n'), ((39166, 39216), '__block.gsl_matrix_char_min_index', '__block.gsl_matrix_char_min_index', (['*args'], {}), '(*args, **kwargs)\n', (39199, 39216), False, 'import __block\n'), ((39340, 39393), '__block.gsl_matrix_char_minmax_index', '__block.gsl_matrix_char_minmax_index', (['*args'], {}), '(*args, **kwargs)\n', (39376, 39393), False, 'import __block\n'), ((39517, 39564), '__block.gsl_matrix_char_isnull', '__block.gsl_matrix_char_isnull', (['*args'], {}), '(*args, **kwargs)\n', (39547, 39564), False, 'import __block\n'), ((39678, 39727), '__block.gsl_matrix_char_diagonal', '__block.gsl_matrix_char_diagonal', (['*args'], {}), '(*args, **kwargs)\n', (39710, 39727), False, 'import __block\n'), ((39848, 39900), '__block.gsl_matrix_char_subdiagonal', '__block.gsl_matrix_char_subdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (39883, 39900), False, 'import __block\n'), ((40029, 40083), '__block.gsl_matrix_char_superdiagonal', '__block.gsl_matrix_char_superdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (40066, 40083), 
False, 'import __block\n'), ((40214, 40266), '__block.gsl_vector_complex_set_zero', '__block.gsl_vector_complex_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (40249, 40266), False, 'import __block\n'), ((40392, 40443), '__block.gsl_vector_complex_set_all', '__block.gsl_vector_complex_set_all', (['*args'], {}), '(*args, **kwargs)\n', (40426, 40443), False, 'import __block\n'), ((40569, 40622), '__block.gsl_vector_complex_set_basis', '__block.gsl_vector_complex_set_basis', (['*args'], {}), '(*args, **kwargs)\n', (40605, 40622), False, 'import __block\n'), ((40748, 40797), '__block.gsl_vector_complex_fread', '__block.gsl_vector_complex_fread', (['*args'], {}), '(*args, **kwargs)\n', (40780, 40797), False, 'import __block\n'), ((40916, 40966), '__block.gsl_vector_complex_fwrite', '__block.gsl_vector_complex_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (40949, 40966), False, 'import __block\n'), ((41087, 41137), '__block.gsl_vector_complex_fscanf', '__block.gsl_vector_complex_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (41120, 41137), False, 'import __block\n'), ((41259, 41310), '__block.gsl_vector_complex_fprintf', '__block.gsl_vector_complex_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (41293, 41310), False, 'import __block\n'), ((41434, 41485), '__block.gsl_vector_complex_reverse', '__block.gsl_vector_complex_reverse', (['*args'], {}), '(*args, **kwargs)\n', (41468, 41485), False, 'import __block\n'), ((41606, 41654), '__block.gsl_vector_complex_swap', '__block.gsl_vector_complex_swap', (['*args'], {}), '(*args, **kwargs)\n', (41637, 41654), False, 'import __block\n'), ((41778, 41835), '__block.gsl_vector_complex_swap_elements', '__block.gsl_vector_complex_swap_elements', (['*args'], {}), '(*args, **kwargs)\n', (41818, 41835), False, 'import __block\n'), ((41970, 42020), '__block.gsl_vector_complex_isnull', '__block.gsl_vector_complex_isnull', (['*args'], {}), '(*args, **kwargs)\n', (42003, 42020), False, 'import __block\n'), ((42143, 42195), 
'__block.gsl_matrix_complex_set_zero', '__block.gsl_matrix_complex_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (42178, 42195), False, 'import __block\n'), ((42321, 42372), '__block.gsl_matrix_complex_set_all', '__block.gsl_matrix_complex_set_all', (['*args'], {}), '(*args, **kwargs)\n', (42355, 42372), False, 'import __block\n'), ((42501, 42557), '__block.gsl_matrix_complex_set_identity', '__block.gsl_matrix_complex_set_identity', (['*args'], {}), '(*args, **kwargs)\n', (42540, 42557), False, 'import __block\n'), ((42689, 42738), '__block.gsl_matrix_complex_fread', '__block.gsl_matrix_complex_fread', (['*args'], {}), '(*args, **kwargs)\n', (42721, 42738), False, 'import __block\n'), ((42857, 42907), '__block.gsl_matrix_complex_fwrite', '__block.gsl_matrix_complex_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (42890, 42907), False, 'import __block\n'), ((43028, 43078), '__block.gsl_matrix_complex_fscanf', '__block.gsl_matrix_complex_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (43061, 43078), False, 'import __block\n'), ((43200, 43251), '__block.gsl_matrix_complex_fprintf', '__block.gsl_matrix_complex_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (43234, 43251), False, 'import __block\n'), ((43372, 43420), '__block.gsl_matrix_complex_swap', '__block.gsl_matrix_complex_swap', (['*args'], {}), '(*args, **kwargs)\n', (43403, 43420), False, 'import __block\n'), ((43540, 43593), '__block.gsl_matrix_complex_swap_rows', '__block.gsl_matrix_complex_swap_rows', (['*args'], {}), '(*args, **kwargs)\n', (43576, 43593), False, 'import __block\n'), ((43726, 43782), '__block.gsl_matrix_complex_swap_columns', '__block.gsl_matrix_complex_swap_columns', (['*args'], {}), '(*args, **kwargs)\n', (43765, 43782), False, 'import __block\n'), ((43920, 43975), '__block.gsl_matrix_complex_swap_rowcol', '__block.gsl_matrix_complex_swap_rowcol', (['*args'], {}), '(*args, **kwargs)\n', (43958, 43975), False, 'import __block\n'), ((44109, 44162), 
'__block.gsl_matrix_complex_transpose', '__block.gsl_matrix_complex_transpose', (['*args'], {}), '(*args, **kwargs)\n', (44145, 44162), False, 'import __block\n'), ((44289, 44339), '__block.gsl_matrix_complex_isnull', '__block.gsl_matrix_complex_isnull', (['*args'], {}), '(*args, **kwargs)\n', (44322, 44339), False, 'import __block\n'), ((44462, 44514), '__block.gsl_matrix_complex_diagonal', '__block.gsl_matrix_complex_diagonal', (['*args'], {}), '(*args, **kwargs)\n', (44497, 44514), False, 'import __block\n'), ((44644, 44699), '__block.gsl_matrix_complex_subdiagonal', '__block.gsl_matrix_complex_subdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (44682, 44699), False, 'import __block\n'), ((44837, 44894), '__block.gsl_matrix_complex_superdiagonal', '__block.gsl_matrix_complex_superdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (44877, 44894), False, 'import __block\n'), ((45037, 45095), '__block.gsl_vector_complex_float_set_zero', '__block.gsl_vector_complex_float_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (45078, 45095), False, 'import __block\n'), ((45239, 45296), '__block.gsl_vector_complex_float_set_all', '__block.gsl_vector_complex_float_set_all', (['*args'], {}), '(*args, **kwargs)\n', (45279, 45296), False, 'import __block\n'), ((45440, 45499), '__block.gsl_vector_complex_float_set_basis', '__block.gsl_vector_complex_float_set_basis', (['*args'], {}), '(*args, **kwargs)\n', (45482, 45499), False, 'import __block\n'), ((45643, 45698), '__block.gsl_vector_complex_float_fread', '__block.gsl_vector_complex_float_fread', (['*args'], {}), '(*args, **kwargs)\n', (45681, 45698), False, 'import __block\n'), ((45835, 45891), '__block.gsl_vector_complex_float_fwrite', '__block.gsl_vector_complex_float_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (45874, 45891), False, 'import __block\n'), ((46030, 46086), '__block.gsl_vector_complex_float_fscanf', '__block.gsl_vector_complex_float_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (46069, 46086), 
False, 'import __block\n'), ((46226, 46283), '__block.gsl_vector_complex_float_fprintf', '__block.gsl_vector_complex_float_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (46266, 46283), False, 'import __block\n'), ((46425, 46482), '__block.gsl_vector_complex_float_reverse', '__block.gsl_vector_complex_float_reverse', (['*args'], {}), '(*args, **kwargs)\n', (46465, 46482), False, 'import __block\n'), ((46621, 46675), '__block.gsl_vector_complex_float_swap', '__block.gsl_vector_complex_float_swap', (['*args'], {}), '(*args, **kwargs)\n', (46658, 46675), False, 'import __block\n'), ((46817, 46880), '__block.gsl_vector_complex_float_swap_elements', '__block.gsl_vector_complex_float_swap_elements', (['*args'], {}), '(*args, **kwargs)\n', (46863, 46880), False, 'import __block\n'), ((47033, 47089), '__block.gsl_vector_complex_float_isnull', '__block.gsl_vector_complex_float_isnull', (['*args'], {}), '(*args, **kwargs)\n', (47072, 47089), False, 'import __block\n'), ((47230, 47288), '__block.gsl_matrix_complex_float_set_zero', '__block.gsl_matrix_complex_float_set_zero', (['*args'], {}), '(*args, **kwargs)\n', (47271, 47288), False, 'import __block\n'), ((47432, 47489), '__block.gsl_matrix_complex_float_set_all', '__block.gsl_matrix_complex_float_set_all', (['*args'], {}), '(*args, **kwargs)\n', (47472, 47489), False, 'import __block\n'), ((47636, 47698), '__block.gsl_matrix_complex_float_set_identity', '__block.gsl_matrix_complex_float_set_identity', (['*args'], {}), '(*args, **kwargs)\n', (47681, 47698), False, 'import __block\n'), ((47848, 47903), '__block.gsl_matrix_complex_float_fread', '__block.gsl_matrix_complex_float_fread', (['*args'], {}), '(*args, **kwargs)\n', (47886, 47903), False, 'import __block\n'), ((48040, 48096), '__block.gsl_matrix_complex_float_fwrite', '__block.gsl_matrix_complex_float_fwrite', (['*args'], {}), '(*args, **kwargs)\n', (48079, 48096), False, 'import __block\n'), ((48235, 48291), '__block.gsl_matrix_complex_float_fscanf', 
'__block.gsl_matrix_complex_float_fscanf', (['*args'], {}), '(*args, **kwargs)\n', (48274, 48291), False, 'import __block\n'), ((48431, 48488), '__block.gsl_matrix_complex_float_fprintf', '__block.gsl_matrix_complex_float_fprintf', (['*args'], {}), '(*args, **kwargs)\n', (48471, 48488), False, 'import __block\n'), ((48627, 48681), '__block.gsl_matrix_complex_float_swap', '__block.gsl_matrix_complex_float_swap', (['*args'], {}), '(*args, **kwargs)\n', (48664, 48681), False, 'import __block\n'), ((48819, 48878), '__block.gsl_matrix_complex_float_swap_rows', '__block.gsl_matrix_complex_float_swap_rows', (['*args'], {}), '(*args, **kwargs)\n', (48861, 48878), False, 'import __block\n'), ((49029, 49091), '__block.gsl_matrix_complex_float_swap_columns', '__block.gsl_matrix_complex_float_swap_columns', (['*args'], {}), '(*args, **kwargs)\n', (49074, 49091), False, 'import __block\n'), ((49247, 49308), '__block.gsl_matrix_complex_float_swap_rowcol', '__block.gsl_matrix_complex_float_swap_rowcol', (['*args'], {}), '(*args, **kwargs)\n', (49291, 49308), False, 'import __block\n'), ((49460, 49519), '__block.gsl_matrix_complex_float_transpose', '__block.gsl_matrix_complex_float_transpose', (['*args'], {}), '(*args, **kwargs)\n', (49502, 49519), False, 'import __block\n'), ((49664, 49720), '__block.gsl_matrix_complex_float_isnull', '__block.gsl_matrix_complex_float_isnull', (['*args'], {}), '(*args, **kwargs)\n', (49703, 49720), False, 'import __block\n'), ((49861, 49919), '__block.gsl_matrix_complex_float_diagonal', '__block.gsl_matrix_complex_float_diagonal', (['*args'], {}), '(*args, **kwargs)\n', (49902, 49919), False, 'import __block\n'), ((50067, 50128), '__block.gsl_matrix_complex_float_subdiagonal', '__block.gsl_matrix_complex_float_subdiagonal', (['*args'], {}), '(*args, **kwargs)\n', (50111, 50128), False, 'import __block\n'), ((50284, 50347), '__block.gsl_matrix_complex_float_superdiagonal', '__block.gsl_matrix_complex_float_superdiagonal', (['*args'], {}), '(*args, 
**kwargs)\n', (50330, 50347), False, 'import __block\n'), ((618, 671), 'imp.load_module', 'imp.load_module', (['"""__block"""', 'fp', 'pathname', 'description'], {}), "('__block', fp, pathname, description)\n", (633, 671), False, 'import imp\n'), ((449, 466), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (456, 466), False, 'from os.path import dirname\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
import pytest
import logging
import json
import threading
from utils import get_random_dict
# Module-level logger; INFO level so the received-message trace emitted by the
# test's message handler is visible in the test output.
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
# TODO: add tests for various application properties
# TODO: is there a way to call send_c2d so it arrives as an object rather than a JSON string?
@pytest.mark.describe("Client C2d")
class TestReceiveC2d(object):
    @pytest.mark.it("Can receive C2D")
    @pytest.mark.quicktest_suite
    def test_sync_receive_c2d(self, client, service_helper, leak_tracker):
        """Send a C2D message through the service helper and verify the client
        receives it intact, then check that no objects leaked."""
        leak_tracker.set_initial_object_list()

        payload = json.dumps(get_random_dict())
        arrived = threading.Event()
        inbox = []  # holds the single received message

        def on_c2d(message):
            logger.info("received {}".format(message))
            inbox.append(message)
            arrived.set()

        client.on_message_received = on_c2d
        service_helper.send_c2d(payload, {})

        arrived.wait(timeout=60)
        assert arrived.is_set()
        assert inbox[0].data.decode("utf-8") == payload

        inbox.clear()  # drop the message reference so it isn't tagged as a leak
        leak_tracker.check_for_leaks()
| [
"logging.getLogger",
"pytest.mark.describe",
"utils.get_random_dict",
"threading.Event",
"pytest.mark.it"
] | [((259, 286), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'import logging\n'), ((474, 508), 'pytest.mark.describe', 'pytest.mark.describe', (['"""Client C2d"""'], {}), "('Client C2d')\n", (494, 508), False, 'import pytest\n'), ((544, 577), 'pytest.mark.it', 'pytest.mark.it', (['"""Can receive C2D"""'], {}), "('Can receive C2D')\n", (558, 577), False, 'import pytest\n'), ((834, 851), 'threading.Event', 'threading.Event', ([], {}), '()\n', (849, 851), False, 'import threading\n'), ((763, 780), 'utils.get_random_dict', 'get_random_dict', ([], {}), '()\n', (778, 780), False, 'from utils import get_random_dict\n')] |
# GPLv3 License
#
# Copyright (C) 2020 Ubisoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import unittest
from bpy import data as D # noqa
from bpy import types as T # noqa
from mixer.blender_data.bpy_data_proxy import BpyDataProxy
from mixer.blender_data.diff import BpyBlendDiff
from mixer.blender_data.filter import test_properties
def sort_renamed_item(x):
    """Sort key for (old_name, new_name) rename pairs: order by the new name."""
    _old, new = x
    return new
class TestDiff(unittest.TestCase):
    """Checks that BpyBlendDiff reports additions, removals and renames of
    bpy.data.worlds datablocks against a previously loaded BpyDataProxy."""

    def setUp(self):
        # Each test starts from an empty worlds collection so it fully
        # controls what the diff should report.
        for w in D.worlds:
            D.worlds.remove(w)
        self.proxy = BpyDataProxy()

    def test_create(self):
        # test_diff.TestDiff.test_create
        """Newly created worlds are reported as added, nothing else."""
        self.proxy.load(test_properties)
        new_worlds = ["W0", "W1"]
        new_worlds.sort()
        for w in new_worlds:
            D.worlds.new(w)
        diff = BpyBlendDiff()
        diff.diff(self.proxy, test_properties)
        for collection_name, delta in diff.collection_deltas:
            self.assertEqual(0, len(delta.items_removed), f"removed count mismatch for {collection_name}")
            self.assertEqual(0, len(delta.items_renamed), f"renamed count mismatch for {collection_name}")
            if collection_name == "worlds":
                self.assertEqual(len(new_worlds), len(delta.items_added), f"added count mismatch for {collection_name}")
                found = [datablock.name for datablock, _ in delta.items_added]
                found.sort()
                self.assertEqual(new_worlds, found, f"added name mismatch for {collection_name}")
            else:
                self.assertEqual(0, len(delta.items_added), f"added count mismatch for {collection_name}")

    def test_remove(self):
        # test_diff.TestDiff.test_remove
        """Removed worlds are reported as removed, nothing else."""
        new_worlds = ["W0", "W1", "W2"]
        new_worlds.sort()
        for w in new_worlds:
            D.worlds.new(w)
        self.proxy.load(test_properties)
        removed = ["W0", "W1"]
        removed.sort()
        for w in removed:
            D.worlds.remove(D.worlds[w])
        diff = BpyBlendDiff()
        diff.diff(self.proxy, test_properties)
        for name, delta in diff.collection_deltas:
            self.assertEqual(0, len(delta.items_added), f"added count mismatch for {name}")
            self.assertEqual(0, len(delta.items_renamed), f"renamed count mismatch for {name}")
            if name == "worlds":
                self.assertEqual(len(removed), len(delta.items_removed), f"removed count mismatch for {name}")
                items_removed = [proxy.data("name") for proxy in delta.items_removed]
                items_removed.sort()
                self.assertEqual(removed, items_removed, f"removed name mismatch for {name}")
            else:
                # Fixed: this branch previously re-checked items_added.
                self.assertEqual(0, len(delta.items_removed), f"removed count mismatch for {name}")

    def test_rename(self):
        # test_diff.TestDiff.test_rename
        """Renamed worlds are reported as renamed, nothing else."""
        new_worlds = ["W0", "W1", "W2"]
        new_worlds.sort()
        for w in new_worlds:
            D.worlds.new(w)
        self.proxy.load(test_properties)
        renamed = [("W0", "W00"), ("W2", "W22")]
        renamed.sort(key=sort_renamed_item)
        for old_name, new_name in renamed:
            D.worlds[old_name].name = new_name
        diff = BpyBlendDiff()
        diff.diff(self.proxy, test_properties)
        for name, delta in diff.collection_deltas:
            self.assertEqual(0, len(delta.items_added), f"added count mismatch for {name}")
            self.assertEqual(0, len(delta.items_removed), f"removed count mismatch for {name}")
            if name == "worlds":
                self.assertEqual(len(renamed), len(delta.items_renamed), f"renamed count mismatch for {name}")
                items_renamed = list(delta.items_renamed)
                items_renamed.sort(key=sort_renamed_item)
                items_renamed = [(proxy.data("name"), new_name) for proxy, new_name in items_renamed]
                self.assertEqual(renamed, items_renamed, f"renamed name mismatch for {name}")
            else:
                # Fixed: this branch previously re-checked items_added.
                self.assertEqual(0, len(delta.items_renamed), f"renamed count mismatch for {name}")

    def test_create_delete_rename(self):
        # test_diff.TestDiff.test_create_delete_rename
        """Mixed create/delete/rename sequences are classified correctly.

        Note the tricky cases: "W0" is renamed away and a new "W0" is
        created; "W00" (a rename target) is then deleted.
        """
        new_worlds = ["W0", "W1", "W2", "W4"]
        new_worlds.sort()
        for w in new_worlds:
            D.worlds.new(w)
        self.proxy.load(test_properties)
        renamed = [("W0", "W00"), ("W2", "W22"), ("W4", "W44")]
        renamed.sort(key=sort_renamed_item)
        for old_name, new_name in renamed:
            D.worlds[old_name].name = new_name
        added = ["W0", "W5"]
        added.sort()
        for w in added:
            D.worlds.new(w)
        removed = ["W1", "W00"]
        removed.sort()
        for w in removed:
            D.worlds.remove(D.worlds[w])
        diff = BpyBlendDiff()
        diff.diff(self.proxy, test_properties)
        for name, delta in diff.collection_deltas:
            if name == "worlds":
                items_added = [datablock.name for datablock, _ in delta.items_added]
                items_added.sort()
                self.assertEqual(items_added, ["W0", "W5"], f"added count mismatch for {name}")
                items_renamed = delta.items_renamed
                items_renamed.sort(key=sort_renamed_item)
                items_renamed = [(proxy.data("name"), new_name) for proxy, new_name in items_renamed]
                self.assertEqual(items_renamed, [("W2", "W22"), ("W4", "W44")], f"renamed count mismatch for {name}")
                items_removed = [proxy.data("name") for proxy in delta.items_removed]
                items_removed.sort()
                self.assertEqual(items_removed, ["W0", "W1"], f"removed count mismatch for {name}")
            else:
                self.assertEqual(0, len(delta.items_renamed), f"renamed count mismatch for {name}")
                self.assertEqual(0, len(delta.items_removed), f"removed count mismatch for {name}")
                self.assertEqual(0, len(delta.items_added), f"added count mismatch for {name}")
| [
"bpy.data.worlds.remove",
"mixer.blender_data.diff.BpyBlendDiff",
"bpy.data.worlds.new",
"mixer.blender_data.bpy_data_proxy.BpyDataProxy"
] | [((1115, 1129), 'mixer.blender_data.bpy_data_proxy.BpyDataProxy', 'BpyDataProxy', ([], {}), '()\n', (1127, 1129), False, 'from mixer.blender_data.bpy_data_proxy import BpyDataProxy\n'), ((1372, 1386), 'mixer.blender_data.diff.BpyBlendDiff', 'BpyBlendDiff', ([], {}), '()\n', (1384, 1386), False, 'from mixer.blender_data.diff import BpyBlendDiff\n'), ((2579, 2593), 'mixer.blender_data.diff.BpyBlendDiff', 'BpyBlendDiff', ([], {}), '()\n', (2591, 2593), False, 'from mixer.blender_data.diff import BpyBlendDiff\n'), ((3790, 3804), 'mixer.blender_data.diff.BpyBlendDiff', 'BpyBlendDiff', ([], {}), '()\n', (3802, 3804), False, 'from mixer.blender_data.diff import BpyBlendDiff\n'), ((5357, 5371), 'mixer.blender_data.diff.BpyBlendDiff', 'BpyBlendDiff', ([], {}), '()\n', (5369, 5371), False, 'from mixer.blender_data.diff import BpyBlendDiff\n'), ((1075, 1093), 'bpy.data.worlds.remove', 'D.worlds.remove', (['w'], {}), '(w)\n', (1090, 1093), True, 'from bpy import data as D\n'), ((1341, 1356), 'bpy.data.worlds.new', 'D.worlds.new', (['w'], {}), '(w)\n', (1353, 1356), True, 'from bpy import data as D\n'), ((2383, 2398), 'bpy.data.worlds.new', 'D.worlds.new', (['w'], {}), '(w)\n', (2395, 2398), True, 'from bpy import data as D\n'), ((2534, 2562), 'bpy.data.worlds.remove', 'D.worlds.remove', (['D.worlds[w]'], {}), '(D.worlds[w])\n', (2549, 2562), True, 'from bpy import data as D\n'), ((3532, 3547), 'bpy.data.worlds.new', 'D.worlds.new', (['w'], {}), '(w)\n', (3544, 3547), True, 'from bpy import data as D\n'), ((4858, 4873), 'bpy.data.worlds.new', 'D.worlds.new', (['w'], {}), '(w)\n', (4870, 4873), True, 'from bpy import data as D\n'), ((5202, 5217), 'bpy.data.worlds.new', 'D.worlds.new', (['w'], {}), '(w)\n', (5214, 5217), True, 'from bpy import data as D\n'), ((5312, 5340), 'bpy.data.worlds.remove', 'D.worlds.remove', (['D.worlds[w]'], {}), '(D.worlds[w])\n', (5327, 5340), True, 'from bpy import data as D\n')] |
import numpy as np
from plots import plots_for_predictions as pp
from utilss import distinct_colours as dc
import matplotlib.pyplot as plt
# Compare halo-mass predictions between the small (50 Mpc/h) and large
# (200 Mpc/h) simulation boxes, for raw and averaged density inputs.
c = dc.get_distinct(4)  # NOTE(review): palette `c` is never used below -- confirm
path = '/Users/luisals/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/lr5e-5/'
# Raw-density predictions / truths for the small box (sim 6, epoch 9).
p1 = np.load(path + "seed_20/predicted_sim_6_epoch_09.npy")
t1 = np.load(path + "seed_20/true_sim_6_epoch_09.npy")
# Raw-density predictions / truths for the large box (epoch 10).
p_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/raw/predicted_sim_L200_N1024_genetIC3_epoch_10.npy")
t_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/raw/true_sim_L200_N1024_genetIC3_epoch_10.npy")
path_av = "/Users/luisals/Documents/deep_halos_files/mass_range_13.4/random_20sims_200k/averaged_boxes/log_alpha_-4.3/"
# Averaged-density predictions / truths, small box (epoch 32).
p_av = np.load(path_av + "predicted_sim_6_epoch_32.npy")
t_av = np.load(path_av + "true_sim_6_epoch_32.npy")
# Averaged-density predictions / truths, large box (epoch 18).
p_av_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/avg/predicted_sim_L200_N1024_genetIC3_epoch_18.npy")
t_av_big = np.load("/Users/luisals/Projects/DLhalos/bigbox/avg/true_sim_L200_N1024_genetIC3_epoch_18.npy")
# Raw-density case
f1, a, m = pp.plot_histogram_predictions(p1, t1, radius_bins=False, particle_ids=None, errorbars=False,
                                         label=r"$L_\mathrm{box}=50 \, \mathrm{Mpc} \,/ \,h$", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_big, t_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
                                            axes=a, color="C1", label=r"$L_\mathrm{box}=200 \, \mathrm{Mpc} \,/ \,h$")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
# NOTE(review): every other path uses /Users/luisals -- this /Users/lls
# destination looks stale; confirm before relying on the output location.
plt.savefig("/Users/lls/Documents/Papers/dlhalos_paper/small_vs_large_box.pdf")
# Averaged-density case
f1, a, m = pp.plot_histogram_predictions(p_av, t_av, radius_bins=False, particle_ids=None, errorbars=False,
                                         label=r"$L_\mathrm{box}=50 \, \mathrm{Mpc} \,/ \,h$", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_av_big, t_av_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
                                            axes=a, color="C1", label=r"$L_\mathrm{box}=200 \, \mathrm{Mpc} \,/ \,h$")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/luisals/Documents/Papers/dlhalos_paper/averaged_small_vs_large_box.pdf")
# Raw vs averaged density on the large box
f1, a, m = pp.plot_histogram_predictions(p_big, t_big, radius_bins=False, particle_ids=None, errorbars=False,
                                         label="Raw density", color="C0")
f11, a1, m1 = pp.plot_histogram_predictions(p_av_big, t_av_big, radius_bins=False, particle_ids=None, errorbars=False, fig=f1,
                                            axes=a, color="C1", label="Averaged density")
a1[0].set_ylabel(r"$n_{\mathrm{particles}}$", fontsize=16)
[a.set_xlabel(r"$\log(M_{\mathrm{predicted}}/M_{\mathrm{true}})$", fontsize=16) for a in a1]
plt.savefig("/Users/luisals/Documents/Papers/dlhalos_paper/raw_vs_averaged_large_box.pdf")
| [
"plots.plots_for_predictions.plot_histogram_predictions",
"numpy.load",
"matplotlib.pyplot.savefig",
"utilss.distinct_colours.get_distinct"
] | [((144, 162), 'utilss.distinct_colours.get_distinct', 'dc.get_distinct', (['(4)'], {}), '(4)\n', (159, 162), True, 'from utilss import distinct_colours as dc\n'), ((263, 317), 'numpy.load', 'np.load', (["(path + 'seed_20/predicted_sim_6_epoch_09.npy')"], {}), "(path + 'seed_20/predicted_sim_6_epoch_09.npy')\n", (270, 317), True, 'import numpy as np\n'), ((323, 372), 'numpy.load', 'np.load', (["(path + 'seed_20/true_sim_6_epoch_09.npy')"], {}), "(path + 'seed_20/true_sim_6_epoch_09.npy')\n", (330, 372), True, 'import numpy as np\n'), ((382, 492), 'numpy.load', 'np.load', (['"""/Users/luisals/Projects/DLhalos/bigbox/raw/predicted_sim_L200_N1024_genetIC3_epoch_10.npy"""'], {}), "(\n '/Users/luisals/Projects/DLhalos/bigbox/raw/predicted_sim_L200_N1024_genetIC3_epoch_10.npy'\n )\n", (389, 492), True, 'import numpy as np\n'), ((491, 596), 'numpy.load', 'np.load', (['"""/Users/luisals/Projects/DLhalos/bigbox/raw/true_sim_L200_N1024_genetIC3_epoch_10.npy"""'], {}), "(\n '/Users/luisals/Projects/DLhalos/bigbox/raw/true_sim_L200_N1024_genetIC3_epoch_10.npy'\n )\n", (498, 596), True, 'import numpy as np\n'), ((716, 765), 'numpy.load', 'np.load', (["(path_av + 'predicted_sim_6_epoch_32.npy')"], {}), "(path_av + 'predicted_sim_6_epoch_32.npy')\n", (723, 765), True, 'import numpy as np\n'), ((773, 817), 'numpy.load', 'np.load', (["(path_av + 'true_sim_6_epoch_32.npy')"], {}), "(path_av + 'true_sim_6_epoch_32.npy')\n", (780, 817), True, 'import numpy as np\n'), ((830, 940), 'numpy.load', 'np.load', (['"""/Users/luisals/Projects/DLhalos/bigbox/avg/predicted_sim_L200_N1024_genetIC3_epoch_18.npy"""'], {}), "(\n '/Users/luisals/Projects/DLhalos/bigbox/avg/predicted_sim_L200_N1024_genetIC3_epoch_18.npy'\n )\n", (837, 940), True, 'import numpy as np\n'), ((942, 1047), 'numpy.load', 'np.load', (['"""/Users/luisals/Projects/DLhalos/bigbox/avg/true_sim_L200_N1024_genetIC3_epoch_18.npy"""'], {}), "(\n 
'/Users/luisals/Projects/DLhalos/bigbox/avg/true_sim_L200_N1024_genetIC3_epoch_18.npy'\n )\n", (949, 1047), True, 'import numpy as np\n'), ((1069, 1240), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p1', 't1'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'label': '"""$L_\\\\mathrm{box}=50 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$"""', 'color': '"""C0"""'}), "(p1, t1, radius_bins=False, particle_ids=None,\n errorbars=False, label=\n '$L_\\\\mathrm{box}=50 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$', color='C0')\n", (1098, 1240), True, 'from plots import plots_for_predictions as pp\n'), ((1283, 1478), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_big', 't_big'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'fig': 'f1', 'axes': 'a', 'color': '"""C1"""', 'label': '"""$L_\\\\mathrm{box}=200 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$"""'}), "(p_big, t_big, radius_bins=False, particle_ids\n =None, errorbars=False, fig=f1, axes=a, color='C1', label=\n '$L_\\\\mathrm{box}=200 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$')\n", (1312, 1478), True, 'from plots import plots_for_predictions as pp\n'), ((1661, 1740), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/lls/Documents/Papers/dlhalos_paper/small_vs_large_box.pdf"""'], {}), "('/Users/lls/Documents/Papers/dlhalos_paper/small_vs_large_box.pdf')\n", (1672, 1740), True, 'import matplotlib.pyplot as plt\n'), ((1779, 1955), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_av', 't_av'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'label': '"""$L_\\\\mathrm{box}=50 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$"""', 'color': '"""C0"""'}), "(p_av, t_av, radius_bins=False, particle_ids=\n None, errorbars=False, label=\n '$L_\\\\mathrm{box}=50 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$', color='C0')\n", (1808, 1955), True, 'from 
plots import plots_for_predictions as pp\n'), ((1997, 2197), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_av_big', 't_av_big'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'fig': 'f1', 'axes': 'a', 'color': '"""C1"""', 'label': '"""$L_\\\\mathrm{box}=200 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$"""'}), "(p_av_big, t_av_big, radius_bins=False,\n particle_ids=None, errorbars=False, fig=f1, axes=a, color='C1', label=\n '$L_\\\\mathrm{box}=200 \\\\, \\\\mathrm{Mpc} \\\\,/ \\\\,h$')\n", (2026, 2197), True, 'from plots import plots_for_predictions as pp\n'), ((2381, 2483), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/luisals/Documents/Papers/dlhalos_paper/averaged_small_vs_large_box.pdf"""'], {}), "(\n '/Users/luisals/Documents/Papers/dlhalos_paper/averaged_small_vs_large_box.pdf'\n )\n", (2392, 2483), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2647), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_big', 't_big'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'label': '"""Raw density"""', 'color': '"""C0"""'}), "(p_big, t_big, radius_bins=False, particle_ids\n =None, errorbars=False, label='Raw density', color='C0')\n", (2540, 2647), True, 'from plots import plots_for_predictions as pp\n'), ((2698, 2865), 'plots.plots_for_predictions.plot_histogram_predictions', 'pp.plot_histogram_predictions', (['p_av_big', 't_av_big'], {'radius_bins': '(False)', 'particle_ids': 'None', 'errorbars': '(False)', 'fig': 'f1', 'axes': 'a', 'color': '"""C1"""', 'label': '"""Averaged density"""'}), "(p_av_big, t_av_big, radius_bins=False,\n particle_ids=None, errorbars=False, fig=f1, axes=a, color='C1', label=\n 'Averaged density')\n", (2727, 2865), True, 'from plots import plots_for_predictions as pp\n'), ((3053, 3153), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""/Users/luisals/Documents/Papers/dlhalos_paper/raw_vs_averaged_large_box.pdf"""'], {}), "(\n '/Users/luisals/Documents/Papers/dlhalos_paper/raw_vs_averaged_large_box.pdf'\n )\n", (3064, 3153), True, 'import matplotlib.pyplot as plt\n')] |
from copy import deepcopy
from inspect import getfullargspec
import importlib
import json
import os
import logging
logger = logging.getLogger(__name__)
from torch.optim.optimizer import Optimizer
from paragen.optim.optimizer import Optimizer
from paragen.utils.rate_schedulers import create_rate_scheduler
from paragen.utils.runtime import Environment
from paragen.utils.registry import setup_registry
register_optim, create_optim, registry = setup_registry('optim', Optimizer, force_extend=False)
def build_optimizer(model, configs, enable_apex=False):
    """Create an Optimizer wrapper (and possibly wrap the model) from configs.

    Args:
        model: torch module whose parameters are to be optimized.
        configs: dict with at least `class` and `lr`; may contain
            `no_decay`/`weight_decay` for parameter grouping, keywords for
            the torch optimizer class, and keywords for our Optimizer wrapper.
        enable_apex: wrap model/optimizer with NVIDIA apex AMP (opt level O1).

    Returns:
        (model, optimizer) -- the model may be replaced by its apex wrapper.
    """
    configs = deepcopy(configs)
    name = configs.pop('class')
    kwargs = {}
    for k, v in configs.items():
        # Config values arrive as strings; evaluate literals such as "0.9"
        # or "(0.9, 0.98)", keeping the raw string when eval fails.
        try:
            v = eval(v)
        except Exception:
            pass
        finally:
            kwargs[k] = v
    configs = kwargs
    logger.info('Creating {} class with configs \n{}\n'.format(name,
                                                               json.dumps(configs, indent=4, sort_keys=True)))
    lr = configs.pop('lr')
    lr_scheduler = create_rate_scheduler(lr)
    lr_scheduler.build()
    # Keywords declared by our Optimizer wrapper (beyond its first four
    # positional parameters) are split off from the torch optimizer kwargs.
    args = getfullargspec(Optimizer).args[4:]
    optimizer_kwargs = {}
    for key in args:
        if key in configs:
            optimizer_kwargs[key] = configs.pop(key)
    if name.lower() in registry:
        cls = registry[name.lower()]
    else:
        # Fall back to the optimizers shipped with torch.optim.
        cls = getattr(importlib.import_module('torch.optim'), name)
    if 'no_decay' in configs:
        # Exclude matching parameters (e.g. biases, LayerNorm weights) from
        # weight decay.  named_parameters() returns a one-shot generator;
        # materialize it so both comprehensions below see all parameters
        # (previously the second group was always empty).
        named_parameters = list(model.named_parameters())
        no_decay = configs.pop('no_decay')
        weight_decay = configs.pop('weight_decay')
        grouped_parameters = [
            {'params': [p for n, p in named_parameters if not any(nd in n for nd in no_decay)],
             'weight_decay': weight_decay},
            {'params': [p for n, p in named_parameters if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0}
        ]
    else:
        grouped_parameters = model.parameters()
    optimizer = cls(grouped_parameters, lr=lr_scheduler.rate, **configs)
    env = Environment()
    if env.distributed_world > 1:
        # Distributed training: wrap with horovod and broadcast initial state.
        import horovod.torch as hvd
        hvd_kwargs = {}
        if 'update_frequency' in optimizer_kwargs:
            hvd_kwargs['backward_passes_per_step'] = optimizer_kwargs['update_frequency']
        if env.fp16 and not enable_apex:
            hvd_kwargs['compression'] = hvd.Compression.fp16
        optimizer = hvd.DistributedOptimizer(optimizer,
                                             named_parameters=model.named_parameters(),
                                             **hvd_kwargs)
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)
    if enable_apex:
        from apex import amp
        update_frequency = optimizer_kwargs.get('update_frequency', 1)
        model, optimizer = amp.initialize(model, optimizer,
                                           opt_level='O1',
                                           num_losses=update_frequency)
    optimizer_kwargs['enable_apex'] = enable_apex
    optimizer = Optimizer(model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, **optimizer_kwargs)
    return model, optimizer
modules_dir = os.path.dirname(__file__)
# Auto-import every module/subpackage in this package so that their
# @register_optim decorators run and populate the optimizer registry.
for file in os.listdir(modules_dir):
    path = os.path.join(modules_dir, file)
    if (
        not file.startswith('_')
        and not file.startswith('.')
        and (file.endswith('.py') or os.path.isdir(path))
    ):
        # "foo.py" -> "foo"; plain directories are treated as subpackages.
        module_name = file[:file.find('.py')] if file.endswith('.py') else file
        module = importlib.import_module('paragen.optim.' + module_name)
| [
"logging.getLogger",
"paragen.utils.runtime.Environment",
"horovod.torch.broadcast_optimizer_state",
"os.listdir",
"paragen.utils.registry.setup_registry",
"importlib.import_module",
"json.dumps",
"os.path.join",
"inspect.getfullargspec",
"os.path.dirname",
"apex.amp.initialize",
"os.path.isdi... | [((124, 151), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (141, 151), False, 'import logging\n'), ((446, 500), 'paragen.utils.registry.setup_registry', 'setup_registry', (['"""optim"""', 'Optimizer'], {'force_extend': '(False)'}), "('optim', Optimizer, force_extend=False)\n", (460, 500), False, 'from paragen.utils.registry import setup_registry\n'), ((3315, 3340), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3330, 3340), False, 'import os\n'), ((3353, 3376), 'os.listdir', 'os.listdir', (['modules_dir'], {}), '(modules_dir)\n', (3363, 3376), False, 'import os\n'), ((573, 590), 'copy.deepcopy', 'deepcopy', (['configs'], {}), '(configs)\n', (581, 590), False, 'from copy import deepcopy\n'), ((1035, 1060), 'paragen.utils.rate_schedulers.create_rate_scheduler', 'create_rate_scheduler', (['lr'], {}), '(lr)\n', (1056, 1060), False, 'from paragen.utils.rate_schedulers import create_rate_scheduler\n'), ((2079, 2092), 'paragen.utils.runtime.Environment', 'Environment', ([], {}), '()\n', (2090, 2092), False, 'from paragen.utils.runtime import Environment\n'), ((3180, 3275), 'paragen.optim.optimizer.Optimizer', 'Optimizer', ([], {'model': 'model', 'optimizer': 'optimizer', 'lr_scheduler': 'lr_scheduler'}), '(model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, **\n optimizer_kwargs)\n', (3189, 3275), False, 'from paragen.optim.optimizer import Optimizer\n'), ((3389, 3420), 'os.path.join', 'os.path.join', (['modules_dir', 'file'], {}), '(modules_dir, file)\n', (3401, 3420), False, 'import os\n'), ((1380, 1418), 'importlib.import_module', 'importlib.import_module', (['"""torch.optim"""'], {}), "('torch.optim')\n", (1403, 1418), False, 'import importlib\n'), ((2707, 2760), 'horovod.torch.broadcast_optimizer_state', 'hvd.broadcast_optimizer_state', (['optimizer'], {'root_rank': '(0)'}), '(optimizer, root_rank=0)\n', (2736, 2760), True, 'import horovod.torch as hvd\n'), ((2951, 3028), 
'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': '"""O1"""', 'num_losses': 'update_frequency'}), "(model, optimizer, opt_level='O1', num_losses=update_frequency)\n", (2965, 3028), False, 'from apex import amp\n'), ((3662, 3717), 'importlib.import_module', 'importlib.import_module', (["('paragen.optim.' + module_name)"], {}), "('paragen.optim.' + module_name)\n", (3685, 3717), False, 'import importlib\n'), ((940, 985), 'json.dumps', 'json.dumps', (['configs'], {'indent': '(4)', 'sort_keys': '(True)'}), '(configs, indent=4, sort_keys=True)\n', (950, 985), False, 'import json\n'), ((1098, 1123), 'inspect.getfullargspec', 'getfullargspec', (['Optimizer'], {}), '(Optimizer)\n', (1112, 1123), False, 'from inspect import getfullargspec\n'), ((3537, 3556), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3550, 3556), False, 'import os\n')] |
# State tracking for WireGuard protocol operations.
# Author: <NAME> <<EMAIL>>
# Licensed under the MIT license <http://opensource.org/licenses/MIT>.
import base64
import hashlib
import inspect
import socket
import traceback
from noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt
def calc_mac1(key, data):
    """Compute the WireGuard mac1 field over *data*.

    The MAC is BLAKE2s-128 keyed with BLAKE2s("mac1----" || key.pub).
    """
    label_hash = hashlib.blake2s(b'mac1----' + key.pub)
    mac = hashlib.blake2s(data, digest_size=16, key=label_hash.digest())
    return mac.digest()
def is_bytes(value):
    """Report whether *value* is bytes-like.

    Accepts exact ``bytes`` as well as any object exposing ``__bytes__``
    (the PublicKey / PrivateKey wrappers in this module).
    """
    if type(value) == bytes:
        return True
    return hasattr(value, '__bytes__')
def to_bytes(data, length, byteorder='big'):
    """Normalize *data* (int, base64 str, bytes-like, or falsy) to bytes.

    Falsy input is treated as the integer 0.  Integers are encoded with
    *byteorder*; a falsy *length* means "as many bytes as needed".  A
    string is base64-decoded.  A length mismatch is only warned about and
    never raised, so tests can deliberately craft malformed fields.
    """
    if not data:
        data = 0
    if type(data) == int:
        size = length or (data.bit_length() + 7) // 8
        return data.to_bytes(size, byteorder)
    if type(data) == str:
        result = base64.b64decode(data)
    elif is_bytes(data):
        result = bytes(data)
    else:
        raise RuntimeError(f'Expected bytes, got: {data!r}')
    if length and len(result) != length:
        print(f'Warning: want {length}, got length {len(result)}: {result!r}')
        traceback.print_stack()
    return result
class Storage:
    """Keeps named, de-duplicated instances of one spec class.

    Every stored object receives a ``name`` like ``<prefix>_<index>`` and
    is also published into the shared *variables* mapping.
    """

    def __init__(self, name, spec, variables):
        self.name = name
        self.spec = spec
        self.instances = []
        self.variables = variables

    def add(self, *args, **kwargs):
        """Construct a spec instance from the arguments and store it."""
        return self.add_object(self.spec(*args, **kwargs))

    def add_object(self, obj):
        """Store *obj* under the next index, reusing an equal older instance.

        Equality is judged on repr(); a duplicate keeps its original name.
        """
        obj.name = f'{self.name}_{len(self.instances)}'
        duplicate = next(
            (old for old in self.instances if repr(old) == repr(obj)), None)
        if duplicate is None:
            self.instances.append(obj)
        else:
            obj = duplicate
        self.variables[obj.name] = obj
        print(f'{obj.name} = {obj}')
        return obj

    def resolve(self, name):
        """Resolve *name* (an item name, an instance, or None for the most
        recently stored instance) to a matching stored instance."""
        if name is None:
            assert self.instances, f'No previous instance found for {self.name}'
            return self.instances[-1]
        if isinstance(name, self.spec):
            name = name.name
        assert self.instances, f'No instances found for {name}'
        # XXX maybe this could split the name and directly use it as index.
        for instance in reversed(self.instances):
            if instance.name == name:
                return instance
        raise RuntimeError(f'Instance name {name} not found')
class Base:
    """Mixin giving subclasses a debug ``repr`` built from ``self.fields``
    or, failing that, the parameter names of ``__init__``."""
    def __repr__(self):
        """Render ``Class(field=value, ...)``.

        Byte values are displayed as base64 when longer than 16 bytes,
        as hex escapes when short, and as None when all-zero.
        """
        try:
            fields = self.fields
        except AttributeError:
            # No explicit field list: use the constructor's parameters.
            fields = list(inspect.signature(self.__init__).parameters)
        params = []
        for field in fields:
            value = getattr(self, field)
            # XXX should repr dump the full values or refer to the state name?
            # (The `and False` deliberately disables the name shortcut.)
            if hasattr(value, 'name') and False:
                display = getattr(value, 'name')
            elif is_bytes(value):
                # Cannot just check type(value) because of PublicKey.
                value = bytes(value)
                if not value.replace(b'\0', b''):
                    # Simplify display
                    display = None
                elif len(value) > 16:
                    display = repr(base64.b64encode(value).decode('utf8'))
                else:
                    display = "b'%s'" % ''.join('\\x%02x' % x for x in value)
            else:
                display = repr(value)
            params.append(f'{field}={display}')
        params = ', '.join(params)
        return f'{self.__class__.__name__}({params})'
class Address(Base):
    """A remote UDP endpoint: host plus numeric port."""

    def __init__(self, host, port):
        self.host = host
        self.port = int(port)
        # Pre-built (host, port) tuple as expected by the socket API.
        self.address = (self.host, self.port)
class LocalAddress(Base):
    """A local UDP endpoint whose socket is created and bound lazily."""
    def __init__(self, host, port):
        super().__init__(host, port)
        # Created on first access of the `socket` property.
        self._socket = None
    @property
    def socket(self):
        """Bind an IPv4 UDP socket on first access and cache it."""
        if not self._socket:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self._socket.bind((self.host, self.port))
            print(f'{self.name}: Created socket {self._socket}')
        return self._socket
class PublicKey:
    """A 32-byte public key stored as little-endian bytes."""

    def __init__(self, pub):
        # Accepts bytes, a base64 string, an int, or another key wrapper.
        self.pub = to_bytes(pub, 32, byteorder='little')

    def __bytes__(self):
        return self.pub

    def __repr__(self):
        return repr(self.pub)
class PrivateKey:
    """A 32-byte private key; the matching public key is derived eagerly."""

    def __init__(self, priv):
        self.priv = to_bytes(priv, 32, byteorder='little')
        # Base-point scalar multiplication yields the public half.
        self.pub = PublicKey(crypto_scalarmult_base(self.priv))

    def __bytes__(self):
        return self.priv

    def __repr__(self):
        return repr(self.priv)
class StateI0(Base):
    """Initiator handshake state after building message 1.

    Runs the initiator's Noise operations up to (and including) the
    handshake initiation; ``enc_SpubI`` and ``enc_time`` become the
    encrypted static-key and timestamp fields of MsgType1, and the live
    handshake state is kept so StateI1 can continue with message 2.
    """
    def __init__(self, SpubR, EprivI, SprivI, time, psk):
        if not SpubR:
            raise RuntimeError('Missing SpubR')
        self.SpubR = PublicKey(SpubR)
        self.EprivI = PrivateKey(EprivI)
        self.SprivI = PrivateKey(SprivI)
        self.time = to_bytes(time, 12)  # 12-byte timestamp field
        self.psk = to_bytes(psk, 32)
        self._compute_hs()
    @property
    def EpubI(self):
        # Initiator ephemeral public key, sent in message 1.
        return self.EprivI.pub
    @property
    def SpubI(self):
        # Initiator static public key.
        return self.SprivI.pub
    def _compute_hs(self):
        """Perform the message-1 Noise operations (order is significant)."""
        hs = NoiseWG()
        # pre-message
        hs.mix_hash(self.SpubR)
        # message from initiator to responder
        hs.mix_hash(self.EpubI)
        hs.mix_key(self.EpubI)
        hs.mix_dh(self.EprivI, self.SpubR)
        self.enc_SpubI = hs.encrypt_and_hash(self.SpubI)
        hs.mix_dh(self.SprivI, self.SpubR)
        self.enc_time = hs.encrypt_and_hash(self.time)
        self.handshake_state = hs
class StateR0(Base):
    """Responder handshake state before an initiation is processed.

    Holds the responder's ephemeral/static private keys and the
    pre-shared key; consumed by StateR1 when a MsgType1 arrives.
    """
    def __init__(self, EprivR, SprivR, psk):
        self.EprivR = PrivateKey(EprivR)
        self.SprivR = PrivateKey(SprivR)
        self.psk = to_bytes(psk, 32)
    def EpubI(self):
        # NOTE(review): despite the name, this derives the *responder*
        # ephemeral public key.  Fixed to pass the raw private bytes, as
        # PrivateKey.__init__ does, instead of the PrivateKey wrapper.
        return crypto_scalarmult_base(self.EprivR.priv)
class StateI1(Base):
    """Initiator state after processing the handshake response.

    Continues StateI0's Noise state with the responder's ephemeral key
    and splits the transport keys (Tsend / Trecv).
    """
    fields = ['Tsend', 'Trecv']
    def __init__(self, StateI0, EpubR):
        if not StateI0:
            raise RuntimeError('Missing handshake initiation state')
        if not EpubR:
            raise RuntimeError('Missing handshake initiation details')
        # Work on a copy so StateI0 can be reused for other responses.
        self._compute_hs(StateI0, EpubR, StateI0.handshake_state.copy())
    def _compute_hs(self, StateI0, EpubR, hs):
        """Noise operations for message 2, responder -> initiator
        (order is significant)."""
        hs.mix_hash(EpubR)
        hs.mix_key(EpubR)
        hs.mix_dh(StateI0.EprivI, EpubR)
        hs.mix_dh(StateI0.SprivI, EpubR)
        hs.mix_key_and_hash(StateI0.psk)
        self.enc_empty = hs.encrypt_and_hash(b'')
        self.Tsend, self.Trecv = hs.split()
class StateR1(Base):
    """Responder state after processing a handshake initiation.

    Replays both handshake messages on the responder side: decrypts the
    initiator's static key and timestamp from message 1, then computes
    the message-2 fields and splits the transport keys.
    """
    # SpubI and time are not really needed by the handshake, but perhaps this
    # could serve as debugging aid.
    fields = ['SpubI', 'time', 'Tsend', 'Trecv']
    def __init__(self, StateR0, EpubI, enc_SpubI, enc_time):
        if not StateR0:
            raise RuntimeError('Missing handshake response state')
        if not EpubI or not enc_SpubI or not enc_time:
            raise RuntimeError('Missing handshake response details')
        self._compute_hs(StateR0, EpubI, enc_SpubI, enc_time)
    def _compute_hs(self, StateR0, EpubI, enc_SpubI, enc_time):
        """Run both handshake messages' Noise operations (order matters)."""
        hs = NoiseWG()
        # pre-message
        hs.mix_hash(StateR0.SprivR.pub)
        # message from initiator to responder
        hs.mix_hash(EpubI)
        hs.mix_key(EpubI)
        hs.mix_dh(StateR0.SprivR, EpubI)
        self.SpubI = PublicKey(hs.decrypt_and_hash(enc_SpubI))
        hs.mix_dh(StateR0.SprivR, self.SpubI)
        self.time = hs.decrypt_and_hash(enc_time)
        # message from responder to initiator
        self.EpubR = StateR0.EprivR.pub
        hs.mix_hash(self.EpubR)
        hs.mix_key(self.EpubR)
        hs.mix_dh(StateR0.EprivR, EpubI)
        hs.mix_dh(StateR0.EprivR, self.SpubI)
        hs.mix_key_and_hash(StateR0.psk)
        self.enc_empty = hs.encrypt_and_hash(b'')
        # Note the swapped order relative to StateI1: responder receives
        # on the initiator's send key.
        self.Trecv, self.Tsend = hs.split()
class Data(Base):
    """An arbitrary payload blob for transport-data messages."""

    def __init__(self, data):
        # Length 0 keeps whatever length the input has.
        self.data = to_bytes(data, 0)
class Field:
    """One fixed-position field of a wire message.

    *size* 0 means variable length (consumes the rest of the packet).
    *fixed* pins the on-wire value (type tags, mac placeholders) and
    hides the field from message constructors.
    """

    def __init__(self, name, size, constructor=None, fixed=None):
        self.name = name
        self.size = size
        self.fixed = fixed
        # Default parser: coerce the value to exactly `size` bytes.
        self._constructor = constructor or (lambda data: to_bytes(data, size))

    def parse_value(self, value):
        """Run the field's parser over a raw value."""
        return self._constructor(value)
class Message(Base):
    """Base for wire messages described declaratively by ``fields_desc``.

    Each Field gives a name, size and parser; ``fixed`` fields (type
    tags, mac placeholders) are serialized automatically and hidden from
    constructors.
    """
    def __init__(self, *args, **kwargs):
        # Do not expose fixed fields through the constructor.
        self.fields = [f.name for f in self.fields_desc if not f.fixed]
        # Map positional arguments onto the remaining field names.
        for i, value in enumerate(args):
            name = self.fields[i]
            assert name not in kwargs, f'Duplicate parameter: {name}'
            kwargs[name] = value
        # Parse every declared field (missing ones default to None,
        # which the field parser normalizes).
        for f in self.fields_desc:
            val = kwargs.pop(f.name, None)
            val = f.parse_value(val)
            assert not f.size or len(bytes(val)) == f.size, \
                f'Expected size {f.size} for {f.name}, got {len(val)}: {val!r}'
            setattr(self, f.name, val)
        assert not kwargs, f'Unexpected parameters: {kwargs}'
    def __bytes__(self):
        """Serialize all fields in declaration order; fixed values win."""
        bs = b''
        for f in self.fields_desc:
            val = f.fixed
            if val is None:
                val = bytes(getattr(self, f.name))
            assert not f.size or len(val) == f.size, \
                f'Expected size {f.size} for {f.name}, got {len(val)}: {val!r}'
            bs += val
        return bs
    @classmethod
    def from_bytes(cls, bs):
        """Parse raw bytes into a message; a size-0 field consumes the
        remainder of the packet."""
        min_size = sum(f.size for f in cls.fields_desc)
        assert len(bs) >= min_size, f'Missing data: {len(bs)} < {min_size}'
        fields = {}
        for fs in cls.fields_desc:
            if not fs.size:
                # No explicit size set, consume remaining data
                value, bs = bs, None
            else:
                value, bs = bs[:fs.size], bs[fs.size:]
            # Ignore values in fixed fields.
            if not fs.fixed:
                value = fs.parse_value(value)
            fields[fs.name] = value
        assert not bs, f'Trailing data: {bs}'
        return cls(**fields)
class MsgType1(Message):
    """Handshake initiation (message type 1).

    ``SpubR`` is not on the wire; it is needed to compute mac1 at
    serialization time.
    """
    fields_desc = (
        Field('type', 4, fixed=b'\1\0\0\0'),
        Field('sender', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('EpubI', 32, PublicKey),
        Field('enc_SpubI', 48),
        Field('enc_time', 28),
        Field('mac1', 16, fixed=b'\0' * 16),  # overwritten later
        Field('mac2', 16),
    )
    def __init__(self, *args, SpubR=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.SpubR = PublicKey(SpubR)
    def __bytes__(self):
        """Serialize and fill in mac1, which covers everything before
        the two trailing macs."""
        msg = super().__bytes__()
        msg = msg[:-32]
        msg += calc_mac1(self.SpubR, msg)
        msg += self.mac2
        return msg
class MsgType2(Message):
    """Handshake response (message type 2).

    ``SpubI`` is not on the wire; it is needed to compute mac1 at
    serialization time.
    """
    fields_desc = (
        Field('type', 4, fixed=b'\2\0\0\0'),
        Field('sender', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('EpubR', 32, PublicKey),
        Field('enc_empty', 16),
        Field('mac1', 16, fixed=b'\0' * 16),  # overwritten later
        Field('mac2', 16),
    )
    def __init__(self, *args, SpubI=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.SpubI = PublicKey(SpubI)
    def __bytes__(self):
        """Serialize and fill in mac1, which covers everything before
        the two trailing macs."""
        msg = super().__bytes__()
        msg = msg[:-32]
        msg += calc_mac1(self.SpubI, msg)
        msg += self.mac2
        return msg
class MsgType3(Message):
    """Cookie reply (message type 3)."""
    fields_desc = (
        Field('type', 4, fixed=b'\3\0\0\0'),
        Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('nonce', 24),
        Field('enc_cookie', 32),
    )
class MsgType4(Message):
    """Transport data (message type 4); ``enc_payload`` is variable length."""
    fields_desc = (
        Field('type', 4, fixed=b'\4\0\0\0'),
        Field('receiver', 4, lambda x: to_bytes(x, 4, 'little')),
        Field('counter', 8, lambda x: to_bytes(x, 8, 'little')),
        Field('enc_payload', 0),
    )
class State:
    def __init__(self):
        """Create one Storage per protocol object type.

        All storages share the same `variables` mapping so every named
        instance is reachable from one place.
        """
        variables = {}
        self.addrL = Storage('addrL', LocalAddress, variables)
        self.addrR = Storage('addrR', Address, variables)
        self.StateI0 = Storage('StateI0', StateI0, variables)
        self.StateI1 = Storage('StateI1', StateI1, variables)
        self.StateR0 = Storage('StateR0', StateR0, variables)
        self.StateR1 = Storage('StateR1', StateR1, variables)
        self.MsgType1 = Storage('MsgType1', MsgType1, variables)
        self.MsgType2 = Storage('MsgType2', MsgType2, variables)
        self.MsgType3 = Storage('MsgType3', MsgType3, variables)
        self.MsgType4 = Storage('MsgType4', MsgType4, variables)
        self.Data = Storage('Data', Data, variables)
        # NOTE(review): this binds a fresh empty dict instead of the shared
        # `variables` map built above -- confirm that is intended.
        self.variables = {}
    def _wait_for_message(self, what, addrL):
        """Block until a datagram of `what`'s message type arrives on addrL.

        The sender is registered as a remote address and the parsed
        message is stored; returns (message, remote_address).
        """
        addrL = self.addrL.resolve(addrL)
        msg_class = what.spec
        print(f'Wait for {msg_class.__name__} on {addrL}')
        # XXX increase this for testing data messages with higher MTU?
        data, address = addrL.socket.recvfrom(4096)
        addrR = self.addrR.add(*address)
        msg = msg_class.from_bytes(data)
        what.add_object(msg)
        return msg, addrR
def _send_message(self, what, msg, addrR, addrL):
msg = what.resolve(msg)
addrR = self.addrR.resolve(addrR)
addrL = self.addrL.resolve(addrL)
addrL.socket.sendto(bytes(msg), addrR.address)
    def set_local(self, host, port):
        """Register a local UDP endpoint (the socket binds lazily)."""
        return self.addrL.add(host, port)
    def set_remote(self, host, port):
        """Register a remote peer endpoint."""
        return self.addrR.add(host, port)
    def noise_init(self, SpubR=None, EprivI=None, SprivI=None, time=None, psk=None):
        """Create initiator handshake state (StateI0)."""
        return self.StateI0.add(SpubR, EprivI, SprivI, time, psk)
    def noise_resp(self, EprivR=None, SprivR=None, psk=None):
        """Create responder handshake state (StateR0)."""
        return self.StateR0.add(EprivR, SprivR, psk)
def make_init(self, sender=None, StateI0=None):
sender = to_bytes(sender, 4, 'little')
StateI0 = self.StateI0.resolve(StateI0)
return self.MsgType1.add(sender, StateI0.EpubI.pub, StateI0.enc_SpubI,
StateI0.enc_time, SpubR=StateI0.SpubR.pub)
def send_init(self, MsgType1=None, addrR=None, addrL=None):
self._send_message(self.MsgType1, MsgType1, addrR, addrL)
def wait_for_init(self, addrL=None):
return self._wait_for_message(self.MsgType1, addrL)
def process_init(self, MsgType1=None, StateR0=None):
MsgType1 = self.MsgType1.resolve(MsgType1)
StateR0 = self.StateR0.resolve(StateR0)
return self.StateR1.add(StateR0, MsgType1.EpubI, MsgType1.enc_SpubI,
MsgType1.enc_time)
def make_resp(self, MsgType1=None, sender=None, StateR1=None):
MsgType1 = self.MsgType1.resolve(MsgType1)
receiver = MsgType1.sender
sender = to_bytes(sender, 4, 'little')
StateR1 = self.StateR1.resolve(StateR1)
return self.MsgType2.add(sender, receiver, StateR1.EpubR.pub,
StateR1.enc_empty,
SpubI=StateR1.SpubI.pub)
def send_resp(self, MsgType2=None, addrR=None, addrL=None):
self._send_message(self.MsgType2, MsgType2, addrR, addrL)
def wait_for_resp(self, addrL=None):
return self._wait_for_message(self.MsgType2, addrL)
def process_resp(self, MsgType2=None, StateI0=None):
MsgType2 = self.MsgType2.resolve(MsgType2)
StateI0 = self.StateI0.resolve(StateI0)
return self.StateI1.add(StateI0, MsgType2.EpubR)
def _make_data(self, receiver=None, counter=None, Tsend=None, data=None):
receiver = to_bytes(receiver, 4, 'little')
counter = to_bytes(counter, 8, 'little')
assert len(Tsend) == 32
data = data or b''
nonce = int.from_bytes(counter, 'little')
enc_data = aead_encrypt(Tsend, nonce, data, b'')
return self.MsgType4.add(receiver, counter, enc_data)
def make_data_as_init(self, receiver=None, counter=None, TsendI=None, data=None):
StateI1 = self.StateI1.resolve(TsendI)
return self._make_data(receiver, counter, StateI1.Tsend, data)
def make_data_as_resp(self, receiver=None, counter=None, TsendR=None, data=None):
StateR1 = self.StateR1.resolve(TsendR)
return self._make_data(receiver, counter, StateR1.Tsend, data)
def send_data(self, MsgType4=None, addrR=None, addrL=None):
self._send_message(self.MsgType4, MsgType4, addrR, addrL)
def wait_for_data(self, addrL=None):
return self._wait_for_message(self.MsgType4, addrL)
def _process_data(self, MsgType4=None, Trecv=None):
assert len(Trecv) == 32
MsgType4 = self.MsgType4.resolve(MsgType4)
nonce = int.from_bytes(MsgType4.counter, 'little')
data = aead_decrypt(Trecv, nonce, MsgType4.enc_payload, b'')
return self.Data.add(data)
def process_data_as_init(self, MsgType4=None, TrecvI=None):
StateI1 = self.StateI1.resolve(TrecvI)
return self._process_data(MsgType4, StateI1.Trecv)
def process_data_as_resp(self, MsgType4=None, TrecvR=None):
StateR1 = self.StateR1.resolve(TrecvR)
return self._process_data(MsgType4, StateR1.Trecv)
| [
"noise_wg.NoiseWG",
"hashlib.blake2s",
"socket.socket",
"traceback.print_stack",
"base64.b64encode",
"inspect.signature",
"base64.b64decode",
"noise_wg.aead_encrypt",
"noise_wg.crypto_scalarmult_base",
"noise_wg.aead_decrypt"
] | [((935, 957), 'base64.b64decode', 'base64.b64decode', (['data'], {}), '(data)\n', (951, 957), False, 'import base64\n'), ((1207, 1230), 'traceback.print_stack', 'traceback.print_stack', ([], {}), '()\n', (1228, 1230), False, 'import traceback\n'), ((5328, 5337), 'noise_wg.NoiseWG', 'NoiseWG', ([], {}), '()\n', (5335, 5337), False, 'from noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt\n'), ((5957, 5992), 'noise_wg.crypto_scalarmult_base', 'crypto_scalarmult_base', (['self.EprivR'], {}), '(self.EprivR)\n', (5979, 5992), False, 'from noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt\n'), ((7269, 7278), 'noise_wg.NoiseWG', 'NoiseWG', ([], {}), '()\n', (7276, 7278), False, 'from noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt\n'), ((15946, 15983), 'noise_wg.aead_encrypt', 'aead_encrypt', (['Tsend', 'nonce', 'data', "b''"], {}), "(Tsend, nonce, data, b'')\n", (15958, 15983), False, 'from noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt\n'), ((16903, 16956), 'noise_wg.aead_decrypt', 'aead_decrypt', (['Trecv', 'nonce', 'MsgType4.enc_payload', "b''"], {}), "(Trecv, nonce, MsgType4.enc_payload, b'')\n", (16915, 16956), False, 'from noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt\n'), ((351, 389), 'hashlib.blake2s', 'hashlib.blake2s', (["(b'mac1----' + key.pub)"], {}), "(b'mac1----' + key.pub)\n", (366, 389), False, 'import hashlib\n'), ((410, 461), 'hashlib.blake2s', 'hashlib.blake2s', (['data'], {'digest_size': '(16)', 'key': 'mac1_key'}), '(data, digest_size=16, key=mac1_key)\n', (425, 461), False, 'import hashlib\n'), ((4093, 4141), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (4106, 4141), False, 'import socket\n'), ((4637, 4670), 'noise_wg.crypto_scalarmult_base', 'crypto_scalarmult_base', (['self.priv'], {}), '(self.priv)\n', (4659, 4670), False, 'from 
noise_wg import NoiseWG, crypto_scalarmult_base, aead_encrypt, aead_decrypt\n'), ((2725, 2757), 'inspect.signature', 'inspect.signature', (['self.__init__'], {}), '(self.__init__)\n', (2742, 2757), False, 'import inspect\n'), ((3375, 3398), 'base64.b64encode', 'base64.b64encode', (['value'], {}), '(value)\n', (3391, 3398), False, 'import base64\n')] |
from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata
from base64 import b64decode
from string import printable
class dendroid(AndroidParseModule):
    """BAMF parse module that extracts the C2 URI and password from
    Dendroid Android RAT samples."""

    def __init__(self):
        md = ModuleMetadata(
            module_name="dendroid",
            bot_name="Dendroid",
            description="Android RAT",
            authors=["<NAME> (@botnet_hunter)"],
            version="1.0.0",
            date="August 18, 2014",
            references=[]
        )
        AndroidParseModule.__init__(self, md)
        self.yara_rules = None  # lazily loaded by _generate_yara_rules()

    def _generate_yara_rules(self):
        """Load (once) and return the yara rules identifying Dendroid."""
        if self.yara_rules is None:
            self.yara_rules = load_yara_rules("dendroid.yara")
        return self.yara_rules

    def get_bot_information(self, file_data):
        """Scan base64-looking strings in the sample: the first decodable
        http(s) URI is taken as the C2, the next printable string as the
        password.

        :param file_data: raw bytes of the sample.
        :return: dict possibly containing 'c2_uri' and 'password'.
        """
        results = {}
        uri = None
        password = None
        # Fix: the charset previously contained a stray space between "x"
        # and "yz"; a space is not part of the base64 alphabet (RFC 4648)
        # and allowed space-separated runs to merge into one candidate.
        for s in data_strings(file_data, charset="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="):
            try:
                line = b64decode(s)
            except TypeError:
                # Not valid base64 -- skip this candidate.
                continue
            if len(line) == 0:
                continue
            # Discard decoded blobs containing non-printable characters.
            if not all(c in printable for c in line):
                continue
            if line.lower().startswith(("https://", "http://")):
                uri = line
                continue
            if uri is not None:
                # First printable string after the URI is the password.
                password = line
                break
        if uri is not None:
            results["c2_uri"] = uri
        if password is not None:
            try:
                password.decode("utf8")
                results["password"] = password
            except UnicodeDecodeError:
                # Not valid UTF-8: store an "h"-prefixed hex representation
                # instead (Python 2 str.encode("hex")).
                results["password"] = "h" + password.encode("hex")
        return results
Modules.list.append(dendroid()) | [
"common.load_yara_rules",
"common.data_strings",
"common.AndroidParseModule.__init__",
"common.ModuleMetadata",
"base64.b64decode"
] | [((227, 415), 'common.ModuleMetadata', 'ModuleMetadata', ([], {'module_name': '"""dendroid"""', 'bot_name': '"""Dendroid"""', 'description': '"""Android RAT"""', 'authors': "['<NAME> (@botnet_hunter)']", 'version': '"""1.0.0"""', 'date': '"""August 18, 2014"""', 'references': '[]'}), "(module_name='dendroid', bot_name='Dendroid', description=\n 'Android RAT', authors=['<NAME> (@botnet_hunter)'], version='1.0.0',\n date='August 18, 2014', references=[])\n", (241, 415), False, 'from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata\n'), ((509, 546), 'common.AndroidParseModule.__init__', 'AndroidParseModule.__init__', (['self', 'md'], {}), '(self, md)\n', (536, 546), False, 'from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata\n'), ((886, 992), 'common.data_strings', 'data_strings', (['file_data'], {'charset': '"""ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwx yz0123456789+/="""'}), "(file_data, charset=\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwx yz0123456789+/=')\n", (898, 992), False, 'from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata\n'), ((694, 726), 'common.load_yara_rules', 'load_yara_rules', (['"""dendroid.yara"""'], {}), "('dendroid.yara')\n", (709, 726), False, 'from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata\n'), ((1029, 1041), 'base64.b64decode', 'b64decode', (['s'], {}), '(s)\n', (1038, 1041), False, 'from base64 import b64decode\n')] |
all
import tweepy, config, users, re, groupy
from tweepy import OAuthHandler
from tweepy import API
print(tweepy.__version__)
# OAuth1 user-context authentication against the Twitter API.
auth = OAuthHandler(config.consumer_key, config.consumer_secret)
auth.set_access_token(config.access_token,config.access_token_secret)
api = tweepy.API(auth)
from groupy.client import Client
# GroupMe client used by messenger() to post alerts.
client = Client.from_token(config.groupme_token)
def messenger(tickr):
    """Post a mention alert for *tickr* to the 'COMMonMENTions' GroupMe group."""
    alert = "(<50 Tweets) Mentioned by @ripster47, @pharmdca and @mrzackmorris: " + str(tickr)
    for grp in client.groups.list():
        if grp.name == "COMMonMENTions":
            grp.post(text=alert)
# Regex for cashtags: "$" followed by 3-4 capital letters.
exp = r'\$([A-Z]{3,4})'


def _unique_tickers(user_id):
    """Return the unique $TICKER symbols found in the user's last 100
    original (non-retweet) tweets, in first-seen order."""
    found = []
    tweets = api.user_timeline(screen_name=user_id, count=100,
                              include_rts=False, tweet_mode='extended')
    for info in tweets:
        for ticker in re.findall(exp, info.full_text):
            if ticker not in found:
                found.append(ticker)
    return found


# Collect mentions for the first three tracked users (mrzackmorris,
# pharmdca, ripster47 -- see users.list ordering). The three copy-pasted
# loops of the original are folded into one; `all` no longer shadows the
# builtin.
per_user = [[], [], []]
for idx, user in enumerate(users.list[:3]):
    per_user[idx] = _unique_tickers(user)
    print(user, "mentioned", per_user[idx])
one, two, three = per_user

# Alert only when all three users mentioned the same ticker(s).
overlap = set(one) & set(two) & set(three)
if overlap:
    matched = [overlap]  # keep the original list-of-set payload shape
    print("All 3 mentioned ", matched)
    messenger(matched)
else:
    print("Nothing Notable")
| [
"re.findall",
"tweepy.API",
"groupy.client.Client.from_token",
"tweepy.OAuthHandler"
] | [((133, 190), 'tweepy.OAuthHandler', 'OAuthHandler', (['config.consumer_key', 'config.consumer_secret'], {}), '(config.consumer_key, config.consumer_secret)\n', (145, 190), False, 'from tweepy import OAuthHandler\n'), ((267, 283), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (277, 283), False, 'import tweepy, config, users, re, groupy\n'), ((326, 365), 'groupy.client.Client.from_token', 'Client.from_token', (['config.groupme_token'], {}), '(config.groupme_token)\n', (343, 365), False, 'from groupy.client import Client\n'), ((962, 993), 're.findall', 're.findall', (['exp', 'info.full_text'], {}), '(exp, info.full_text)\n', (972, 993), False, 'import tweepy, config, users, re, groupy\n'), ((1434, 1465), 're.findall', 're.findall', (['exp', 'info.full_text'], {}), '(exp, info.full_text)\n', (1444, 1465), False, 'import tweepy, config, users, re, groupy\n'), ((1020, 1051), 're.findall', 're.findall', (['exp', 'info.full_text'], {}), '(exp, info.full_text)\n', (1030, 1051), False, 'import tweepy, config, users, re, groupy\n'), ((1492, 1523), 're.findall', 're.findall', (['exp', 'info.full_text'], {}), '(exp, info.full_text)\n', (1502, 1523), False, 'import tweepy, config, users, re, groupy\n'), ((1931, 1962), 're.findall', 're.findall', (['exp', 'info.full_text'], {}), '(exp, info.full_text)\n', (1941, 1962), False, 'import tweepy, config, users, re, groupy\n'), ((1993, 2024), 're.findall', 're.findall', (['exp', 'info.full_text'], {}), '(exp, info.full_text)\n', (2003, 2024), False, 'import tweepy, config, users, re, groupy\n')] |
"""
---OK---
"""
from collections import OrderedDict
import copy
import numpy as np
from crystalpy.examples.Values import Interval
class PlotData1D(object):
    """
    Represents a 1D plot. The graph data together with related information.
    """
    def __init__(self, title, title_x_axis, title_y_axis):
        """
        Constructor.
        :param title: Plot title.
        :param title_x_axis: X axis' title.
        :param title_y_axis: Y axis' title.
        """
        # Set titles.
        self.title = title
        self.title_x_axis = title_x_axis
        self.title_y_axis = title_y_axis
        # Initialize X and Y ranges.
        self.x_min = None
        self.x_max = None
        self.y_min = None
        self.y_max = None
        # Initialize X and Y data.
        self.x = None
        self.y = None
        # Initialize plot information to empty ordered dictionary.
        self._plot_info = OrderedDict()
    def set_x_min(self, x_min):
        """
        Sets x range minimum.
        :param x_min: X range minimum.
        """
        self.x_min = x_min
    def set_x_max(self, x_max):
        """
        Sets X range maximum.
        :param x_max: X range maximum.
        """
        self.x_max = x_max
    def set_y_min(self, y_min):
        """
        Sets Y range minimum.
        :param y_min: Y range minimum.
        """
        self.y_min = y_min
    def set_y_max(self, y_max):
        """
        Sets Y range maximum.
        :param y_max: Y range maximum.
        """
        self.y_max = y_max
    def set_x(self, x):
        """
        Sets X data.
        :param x: x data.
        """
        self.x = x
    def set_y(self, y):
        """
        Sets Y data.
        :param y: y data.
        """
        self.y = y
    def _set_interval_to_zero(self, indices, lower=True, upper=True):
        """
        Sets the y's to zero in certain intervals of x's (extrema included).
        :param indices: pair with the two extrema of the x interval.
        :param lower: if True include the lower end of the interval.
        :param upper: if True include the upper end of the interval.
        """
        # NOTE(review): the TypeError guard below presumably covers
        # None/invalid `indices` -- confirm the intended failure mode.
        try:
            inf_index = indices.inf
            sup_index = indices.sup
            # adjust the indices according to the lower and upper parameters.
            if not lower:
                inf_index += 1
            if not upper:
                sup_index -= 1
            # in the index range defined by inf_index and sup_index, set the y's to zero.
            for i in range(inf_index, sup_index + 1):
                self.y[i] = 0
        except TypeError:
            print("\nERROR: could not set the values to zero in the specified intervals.\n")
    def _unwrap_interval(self, indices, deg, lower=True, upper=True):
        """
        Unwraps the y data vector in a certain interval.
        :param indices: indices determining the interval to unwrap.
        :param deg: True if values are in degrees. False if radians.
        :param lower: if True include the lower end of the interval.
        :param upper: if True include the upper end of the interval.
        """
        inf_index = indices.inf
        sup_index = indices.sup
        # adjust the indices according to the lower and upper parameters.
        if not lower:
            inf_index += 1
        if not upper:
            sup_index -= 1
        # numpy.unwrap works on data in radians, so if the data is in degrees, it needs to be converted.
        if deg:
            self.y = np.deg2rad(self.y)
            # cut out the part to unwrap and then stitch it back on.
            temp = self.y[inf_index:sup_index + 1]
            self.y[inf_index:sup_index + 1] = np.unwrap(temp)
            # convert back to degrees.
            self.y = np.rad2deg(self.y)
            return
        # Radians input: unwrap in place without unit conversion.
        # cut out the part to unwrap and then stitch it back on.
        temp = self.y[inf_index:sup_index + 1]
        self.y[inf_index:sup_index + 1] = np.unwrap(temp)
    def _optimize_interval(self, indices, phase_limits):
        """
        Takes an interval and restricts it so that the extrema match the points where the phase
        becomes bigger(smaller) than some upper(lower) limit.
        :param indices: indices corresponding to the interval to be optimized.
        :param phase_limits: the limits of the phase to be used for the optimization, [min, max].
        :return: indices of the optimized interval.
        """
        inf = indices.inf
        sup = indices.sup
        # check the intervals.
        if (self.y[inf] > phase_limits[1] or
                self.y[inf] < phase_limits[0]):
            print("\nERROR in PlotData1D._optimize_interval: First value in the interval exceeds limitations.")
            return indices
        if (self.y[sup] > phase_limits[1] or
                self.y[sup] < phase_limits[0]):
            print("\nERROR in PlotData1D._optimize_interval: Last value in the interval exceeds limitations.")
            return indices
        # starting from the lower end.
        # Walk forward until the phase first leaves the (min, max) band.
        i = inf  # counter initialization.
        while phase_limits[0] < self.y[i] < phase_limits[1]:
            i += 1
        # if the conditions are not satisfied for index i:
        new_inf = i - 1
        # starting from the upper end.
        # Walk backward until the phase first leaves the (min, max) band.
        i = sup  # counter initialization.
        while phase_limits[0] < self.y[i] < phase_limits[1]:
            i -= 1
        # if the conditions are not satisfied for index i:
        new_sup = i + 1
        new_indices = Interval(new_inf, new_sup)
        # check that the inf is smaller than (or equal to) the sup.
        if not new_indices.check_extrema():
            print("\nERROR in PlotData1D._optimize_interval: The phase might be undersampled.")
            return indices
        return new_indices
    def smart_unwrap(self, intervals, intervals_number, phase_limits, deg):
        """
        Unwraps data correctly by avoiding discontinuities.
        :param intervals: list of pairs. Each element is a pair with the two extrema of the x interval.
        :param phase_limits: min and max tolerable values for the phase plot, [min, max].
        :param intervals_number: number of intervals to set to zero.
        :param deg: True if values are in degrees. False if radians.
        """
        # With no intervals to zero out, a plain numpy.unwrap suffices.
        if intervals_number == 0:
            if deg:
                self.y = np.deg2rad(self.y)  # unwrap works with radians.
                self.y = np.unwrap(self.y)
                self.y = np.rad2deg(self.y)  # convert back to degrees.
                return
            self.y = np.unwrap(self.y)
            return
        # transform self.x into a numpy.ndarray object.
        x = np.asarray(self.x)
        # careful! only works with monotonic sequences.
        temp_index = x.argmin()
        # Alternate: unwrap the stretch before each interval, then zero the
        # (optimized) interval itself; temp_index tracks where we left off.
        for interval in intervals:
            inf = interval.inf
            sup = interval.sup
            # find the indices of the y array corresponding to inf and sup.
            inf_index = abs(x - inf).argmin()
            sup_index = abs(x - sup).argmin()
            # optimize the interval.
            indices = Interval(inf_index, sup_index)
            new_indices = self._optimize_interval(indices, phase_limits)
            # unwrap the data before the interval.
            indices_to_unwrap = Interval(temp_index, new_indices.inf)
            self._unwrap_interval(indices_to_unwrap, deg, lower=True, upper=False)
            # set the interval to zero.
            indices_to_set = new_indices
            self._set_interval_to_zero(indices_to_set, lower=True, upper=False)
            temp_index = new_indices.sup
        # careful! only works with monotonic sequences.
        indices_to_unwrap = Interval(temp_index, x.argmax())
        self._unwrap_interval(indices_to_unwrap, deg, lower=True, upper=True)
    def add_xy_point(self, x_point, y_point):
        """
        Adds an x-y point.
        :param x_point: x coordinate.
        :param y_point: y coordinate.
        """
        self.x.append(x_point)
        self.y.append(y_point)
    def add_plot_info(self, name, info):
        """
        Adds a plot info.
        :param name: Name of the info.
        :param info: The info.
        """
        self._plot_info[name] = info
    def plot_info(self):
        """
        Returns the plot info copy.
        :return: The plot info.
        """
        return copy.deepcopy(self._plot_info)
| [
"collections.OrderedDict",
"numpy.unwrap",
"numpy.asarray",
"numpy.deg2rad",
"copy.deepcopy",
"crystalpy.examples.Values.Interval",
"numpy.rad2deg"
] | [((926, 939), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (937, 939), False, 'from collections import OrderedDict\n'), ((3999, 4014), 'numpy.unwrap', 'np.unwrap', (['temp'], {}), '(temp)\n', (4008, 4014), True, 'import numpy as np\n'), ((5549, 5575), 'crystalpy.examples.Values.Interval', 'Interval', (['new_inf', 'new_sup'], {}), '(new_inf, new_sup)\n', (5557, 5575), False, 'from crystalpy.examples.Values import Interval\n'), ((6727, 6745), 'numpy.asarray', 'np.asarray', (['self.x'], {}), '(self.x)\n', (6737, 6745), True, 'import numpy as np\n'), ((8455, 8485), 'copy.deepcopy', 'copy.deepcopy', (['self._plot_info'], {}), '(self._plot_info)\n', (8468, 8485), False, 'import copy\n'), ((3543, 3561), 'numpy.deg2rad', 'np.deg2rad', (['self.y'], {}), '(self.y)\n', (3553, 3561), True, 'import numpy as np\n'), ((3729, 3744), 'numpy.unwrap', 'np.unwrap', (['temp'], {}), '(temp)\n', (3738, 3744), True, 'import numpy as np\n'), ((3806, 3824), 'numpy.rad2deg', 'np.rad2deg', (['self.y'], {}), '(self.y)\n', (3816, 3824), True, 'import numpy as np\n'), ((6621, 6638), 'numpy.unwrap', 'np.unwrap', (['self.y'], {}), '(self.y)\n', (6630, 6638), True, 'import numpy as np\n'), ((7162, 7192), 'crystalpy.examples.Values.Interval', 'Interval', (['inf_index', 'sup_index'], {}), '(inf_index, sup_index)\n', (7170, 7192), False, 'from crystalpy.examples.Values import Interval\n'), ((7350, 7387), 'crystalpy.examples.Values.Interval', 'Interval', (['temp_index', 'new_indices.inf'], {}), '(temp_index, new_indices.inf)\n', (7358, 7387), False, 'from crystalpy.examples.Values import Interval\n'), ((6412, 6430), 'numpy.deg2rad', 'np.deg2rad', (['self.y'], {}), '(self.y)\n', (6422, 6430), True, 'import numpy as np\n'), ((6486, 6503), 'numpy.unwrap', 'np.unwrap', (['self.y'], {}), '(self.y)\n', (6495, 6503), True, 'import numpy as np\n'), ((6529, 6547), 'numpy.rad2deg', 'np.rad2deg', (['self.y'], {}), '(self.y)\n', (6539, 6547), True, 'import numpy as np\n')] |
import unittest
from iptree import IPNode
class TestIPNode(unittest.TestCase):
    """Behavioral tests for iptree.IPNode: membership, aggregation,
    iteration and user data."""

    def test_node_ipv4(self):
        tree = IPNode('0.0.0.0/0')
        tree.add(IPNode('127.0.0.1/32'))
        assert '127.0.0.1/32' in tree
        assert '192.0.2.1/32' not in tree

    def test_node_ipv6(self):
        tree = IPNode('::/0')
        tree.add(IPNode('::1/128'))
        assert '::1/128' in tree
        assert '2001:db8::1/128' not in tree

    def test_node_aggregate(self):
        root = IPNode('::/0')
        subnet = IPNode('2001:db8::/32')
        for leaf_net in ('2001:db8:cafe::1', '2001:db8:cafe::2'):
            subnet.add(IPNode(leaf_net))
        root.add(subnet)
        leafs = list(root.aggregate())
        # Aggregation detaches and empties the intermediate node,
        # yielding the two leaf networks.
        assert root.children == {}
        assert subnet.parent is None
        assert subnet.children == {}
        assert len(leafs) == 2

    def test_node_iter_does_not_empty(self):
        root = IPNode('::/0')
        root.add(IPNode('2001:db8::1'))
        expected = ['2001:db8::1']
        assert [node.network for node in root] == expected
        # Iterating a second time proves __iter__ leaves children intact.
        assert [node.network for node in root] == expected

    def test_user_data(self):
        payload = {
            'user': 'data',
        }
        node = IPNode('::/0', data=payload)
        assert node.data['user'] == 'data'
| [
"iptree.IPNode"
] | [((127, 146), 'iptree.IPNode', 'IPNode', (['"""0.0.0.0/0"""'], {}), "('0.0.0.0/0')\n", (133, 146), False, 'from iptree import IPNode\n'), ((314, 328), 'iptree.IPNode', 'IPNode', (['"""::/0"""'], {}), "('::/0')\n", (320, 328), False, 'from iptree import IPNode\n'), ((494, 508), 'iptree.IPNode', 'IPNode', (['"""::/0"""'], {}), "('::/0')\n", (500, 508), False, 'from iptree import IPNode\n'), ((525, 548), 'iptree.IPNode', 'IPNode', (['"""2001:db8::/32"""'], {}), "('2001:db8::/32')\n", (531, 548), False, 'from iptree import IPNode\n'), ((904, 918), 'iptree.IPNode', 'IPNode', (['"""::/0"""'], {}), "('::/0')\n", (910, 918), False, 'from iptree import IPNode\n'), ((1244, 1269), 'iptree.IPNode', 'IPNode', (['"""::/0"""'], {'data': 'data'}), "('::/0', data=data)\n", (1250, 1269), False, 'from iptree import IPNode\n'), ((164, 186), 'iptree.IPNode', 'IPNode', (['"""127.0.0.1/32"""'], {}), "('127.0.0.1/32')\n", (170, 186), False, 'from iptree import IPNode\n'), ((346, 363), 'iptree.IPNode', 'IPNode', (['"""::1/128"""'], {}), "('::1/128')\n", (352, 363), False, 'from iptree import IPNode\n'), ((567, 593), 'iptree.IPNode', 'IPNode', (['"""2001:db8:cafe::1"""'], {}), "('2001:db8:cafe::1')\n", (573, 593), False, 'from iptree import IPNode\n'), ((613, 639), 'iptree.IPNode', 'IPNode', (['"""2001:db8:cafe::2"""'], {}), "('2001:db8:cafe::2')\n", (619, 639), False, 'from iptree import IPNode\n'), ((936, 957), 'iptree.IPNode', 'IPNode', (['"""2001:db8::1"""'], {}), "('2001:db8::1')\n", (942, 957), False, 'from iptree import IPNode\n')] |
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module provides types definitions."""
import dataclasses
import enum
import json
import time
from typing import Any, Dict, List, Optional, Union
import uuid
import linear_algebra
import marshmallow
import marshmallow_dataclass
import marshmallow_enum
#####################################
# Utility functions #
#####################################
def decode(
    schema: marshmallow.Schema, data: str, **kwargs
) -> dataclasses.dataclass:
    """Deserializes a JSON string into a dataclass via the given schema.

    Args:
        schema: Schema used for deserialization.
        data: JSON encoded payload.
        **kwargs: Forwarded verbatim to `marshmallow.Schema.loads`.

    Returns:
        The deserialized `dataclasses.dataclass` instance.
    """
    deserialized = schema.loads(data, **kwargs)
    return deserialized
def encode(
    schema: marshmallow.Schema, data: dataclasses.dataclass, **kwargs
) -> str:
    """Serializes a dataclass into a compact JSON string via the given schema.

    Args:
        schema: Schema used for serialization.
        data: Dataclass instance to serialize.
        **kwargs: Forwarded verbatim to `marshmallow.Schema.dumps`.

    Returns:
        The JSON encoded representation.
    """
    # Compact separators keep the serialized payload small.
    return schema.dumps(data, separators=(",", ":"), **kwargs)
#####################################
#           Types aliases           #
#####################################
# A list of observables; each entry is a linear_algebra ProbBasisAxisSum operator.
OperatorsType = List[linear_algebra.ops.ProbBasisAxisSum]
#####################################
#        marshmallow helpers        #
#####################################
# Dict/list shapes produced by parsing `linear_algebra.to_json` output back with
# `json.loads` (see the serialization helpers below).
_SerializedLinearAlgebraObject = Dict[str, Any]
_SerializedProbBasisAxisSums = List[List[Dict[str, Any]]]
# `linear_algebra` offers only functions to dump and load objects from the JSON encoded
# string, and does not support builtin dict objects. When we call json.dumps()
# over already JSON encoded string, all quotation marks and brackets are
# prefixed with the backslash. Instead, we can convert JSON object to the dict
# type and reduce serialized object size.
def _deserialize_linear_algebra_object(data: _SerializedLinearAlgebraObject) -> Any:
    """Rebuilds a linear_algebra object from its dict representation.

    `linear_algebra` can only parse JSON encoded strings, not builtin dicts,
    so the dict is first re-encoded with `json.dumps` and then handed to
    `linear_algebra.read_json`.

    Args:
        data: Dict encoded linear_algebra object.

    Returns:
        Deserialized linear_algebra object.
    """
    encoded = json.dumps(data)
    return linear_algebra.read_json(json_text=encoded)
def _serialize_linear_algebra_object(obj: Any) -> _SerializedLinearAlgebraObject:
    """Converts a linear_algebra object into its dict representation.

    `linear_algebra` can only dump objects to JSON encoded strings, not builtin
    dicts, so the object is dumped with `linear_algebra.to_json` first and the
    resulting string is parsed back with `json.loads`.

    Args:
        obj: linear_algebra object to be encoded.

    Returns:
        Serialized linear_algebra object as a dict.
    """
    dumped = linear_algebra.to_json(obj)
    return json.loads(dumped)
class _LinearAlgebraField(marshmallow.fields.Field):
    """`marshmallow.fields.Field` that serializes and deserializes `linear_algebra` type
    object."""
    def _serialize(
        self, value: Any, *_args, **_kwargs
    ) -> _SerializedLinearAlgebraObject:
        """See base class documentation."""
        return _serialize_linear_algebra_object(value)
    def _deserialize(
        self, value: _SerializedLinearAlgebraObject, *_args, **_kwargs
    ) -> Any:
        """See base class documentation."""
        try:
            return _deserialize_linear_algebra_object(value)
        except json.JSONDecodeError as ex:
            # Surface malformed payloads as a marshmallow validation error
            # so schema loading reports a field-level failure.
            raise marshmallow.ValidationError("Not a JSON object") from ex
class _OperatorsField(marshmallow.fields.Field):
    """`marshmallow.fields.Field` that serializes and deserializes
    `linear_algebra.ProbBasisAxisSum` operators."""

    def _serialize(
        self, value: OperatorsType, _attr, _obj, **kwargs
    ) -> _SerializedProbBasisAxisSums:
        """See base class documentation."""
        # Accept a single operator as well as a list of operators.
        operators = value if isinstance(value, list) else [value]
        serialized = []
        for operator in operators:
            serialized.append(
                [_serialize_linear_algebra_object(term) for term in operator]
            )
        return serialized

    def _deserialize(
        self, value: _SerializedProbBasisAxisSums, _attr, _obj, **kwargs
    ) -> OperatorsType:
        """See base class documentation."""
        try:
            operators = []
            for operator in value:
                # Each operator is rebuilt as the sum of its terms.
                terms = [_deserialize_linear_algebra_object(term) for term in operator]
                operators.append(sum(terms))
            return operators
        except json.JSONDecodeError as ex:
            raise marshmallow.ValidationError("Not a JSON object") from ex
# marshmallow-aware dataclass field types wrapping the corresponding
# `linear_algebra` objects; `field=` selects the custom (de)serializer above.
Graph = marshmallow_dataclass.NewType(
    "Graph", linear_algebra.Graph, field=_LinearAlgebraField
)
Operators = marshmallow_dataclass.NewType(
    "Operators", OperatorsType, field=_OperatorsField
)
ParamResolver = marshmallow_dataclass.NewType(
    "ParamResolver", linear_algebra.ParamResolver, field=_LinearAlgebraField
)
Result = marshmallow_dataclass.NewType("Result", linear_algebra.Result, field=_LinearAlgebraField)
Sweepable = marshmallow_dataclass.NewType(
    "Sweepable", linear_algebra.study.Sweepable, field=_LinearAlgebraField
)
#####################################
# Server side events #
#####################################
@dataclasses.dataclass
class ServerSideEvent:
    """Base class for server side events.

    The `event` and `timestamp` fields are auto-populated when left at
    their defaults: `event` becomes the concrete class name and
    `timestamp` the current time.

    Attributes:
        id: Event unique id.
        data: Event payload.
        event: Event name.
        timestamp: Event timestamp (in UNIX seconds).
    """
    id: uuid.UUID  # pylint: disable=invalid-name
    data: Any
    event: str = dataclasses.field(default="")  # "" -> replaced by class name
    timestamp: int = dataclasses.field(default=0)  # 0 -> replaced by now
    def __post_init__(self) -> None:
        # Fill the sentinel defaults declared above.
        if self.event == "":
            self.event = type(self).__name__
        if self.timestamp == 0:
            self.timestamp = int(time.time())
@dataclasses.dataclass
class StreamTimeoutEvent(ServerSideEvent):
    """Server side event signalling that the stream connection reached the
    maximum timeout (10 minutes)."""
    data: Optional[Any] = dataclasses.field(default=None)  # timeouts carry no payload
#####################################
# API relevant types #
#####################################
@dataclasses.dataclass
class APIError:
    """API error response.
    Payload returned to the client when a request fails.
    Attributes:
        code: HTTP error code.
        message: Error details.
    """
    code: int
    message: str
#####################################
# Jobs relevant types #
#####################################
@dataclasses.dataclass
class BatchJobContext:
    """Simulation batch job context.
    Attributes:
        acyclic_graphs (List[linear_algebra.Graph]): List of acyclic_graphs to be run as a batch.
        params (List[linear_algebra.study.Sweepable]): List of parameters to be used
            with acyclic_graphs, same size as list of acyclic_graphs.
    """
    acyclic_graphs: List[Graph]
    params: List[Sweepable]
    def __post_init__(self) -> None:
        # Each graph needs exactly one matching sweep-parameters entry.
        if len(self.acyclic_graphs) != len(self.params):
            raise ValueError(
                "Number of sweeps parameters has to match number of acyclic_graphs"
            )
@dataclasses.dataclass
class JobContext:
    """Simulation job context.
    Attributes:
        acyclic_graph (linear_algebra.Graph): Graph to be run.
        param_resolver (linear_algebra.ParamResolver): ParamResolver to be used with the
            acyclic_graph.
    """
    acyclic_graph: Graph
    param_resolver: ParamResolver
@dataclasses.dataclass
class SweepJobContext:
    """Simulation sweep job context.
    Attributes:
        acyclic_graph (linear_algebra.Graph): Graph to be run.
        params (linear_algebra.study.Sweepable): Parameters to be used with the
            acyclic_graph.
    """
    acyclic_graph: Graph
    params: Sweepable
class JobStatus(enum.IntEnum):
    """Current job status.
    Attributes:
        NOT_STARTED: The job was added to the queue.
        IN_PROGRESS: The job is being processed.
        COMPLETE: Simulation has been completed successfully.
        ERROR: Simulation has failed.
    """
    NOT_STARTED = 0
    IN_PROGRESS = 1
    COMPLETE = 2
    ERROR = 3
@dataclasses.dataclass
class JobProgress:
    """Job computation progress.
    Attributes:
        completed: Number of completed work units.
        total: Total number of work units.
    """
    completed: int = dataclasses.field(default=0)
    total: int = dataclasses.field(default=1)
    def __post_init__(self) -> None:
        # Validate the invariant 0 <= completed <= total (total >= 1);
        # the first failing check wins, preserving the original order.
        checks = (
            (self.completed < 0,
             "Current work unit cannot be less than zero"),
            (self.total < 1,
             "Total number of work units cannot be less than 1"),
            (self.completed > self.total,
             "Current work unit cannot be greater than total work units"),
        )
        for failed, message in checks:
            if failed:
                raise ValueError(message)
@dataclasses.dataclass
class JobResult:
    """Simulation job result.

    Attributes:
        id: Unique job id.
        status: Current job status.
        error_message: Optional error message explaining why the computation
            failed, only set if the `status` is
            :attr:`parallel_accel.client.schemas.JobStatus.ERROR`.
        progress: Optional computation progress, only set if the `status` is
            :attr:`parallel_accel.client.schemas.JobStatus.IN_PROGRESS`.
        result: Optional simulation job result, only set if the `status` is
            :attr:`parallel_accel.client.schemas.JobStatus.COMPLETE`.
    """
    id: uuid.UUID  # pylint: disable=invalid-name
    status: JobStatus = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                JobStatus, by_value=True
            )
        }
    )
    error_message: Optional[str] = dataclasses.field(default=None)
    progress: Optional[JobProgress] = dataclasses.field(default=None)
    result: Optional[Any] = dataclasses.field(default=None)
    def __post_init__(self) -> None:
        """Validate that the optional fields are consistent with `status`."""
        if self.status == JobStatus.IN_PROGRESS and self.progress is None:
            raise ValueError("Missing job progress")
        if self.status == JobStatus.ERROR:
            if not self.error_message:
                # Typo fix: the original message read "messsage".
                raise ValueError("Missing error message")
            if self.result:
                raise ValueError("Failed job cannot have result field")
        if self.status == JobStatus.COMPLETE:
            if not self.result:
                raise ValueError("Missing job result")
            if self.error_message:
                raise ValueError(
                    "Completed job cannot have error_message field"
                )
            # A completed job that carries a progress object must be fully done.
            if (
                self.progress is not None
                and self.progress.total != self.progress.completed
            ):
                raise ValueError("Not all work units are marked as completed")
@dataclasses.dataclass
class JobStatusEvent(ServerSideEvent):
    """Job status changed event.

    Attributes:
        data: Simulation job result.
    """
    # Narrows the event payload to a typed JobResult.
    data: JobResult
@dataclasses.dataclass
class JobSubmitted:
    """Acknowledgement payload for a submitted job.

    Attributes:
        id: Unique job id.
    """
    id: uuid.UUID  # pylint: disable=invalid-name
#####################################
# Expectation job relevant types #
#####################################
@dataclasses.dataclass
class ExpectationBatchJobContext(BatchJobContext):
    """Expectation values batch job context.

    Attributes:
        operators (List[List[linear_algebra.ops.ProbBasisAxisSum]]): List of list of
            `linear_algebra.ops.ProbBasisAxisSum` operators, same size as list of acyclic_graphs.
    """
    operators: List[Operators]
    def __post_init__(self) -> None:
        """Check the parent invariant, then match operators to acyclic_graphs."""
        super().__post_init__()
        expected = len(self.acyclic_graphs)
        if len(self.operators) != expected:
            raise ValueError(
                "Number of operators has to match number of acyclic_graphs"
            )
@dataclasses.dataclass
class ExpectationBatchJobResult(JobResult):
    """Expectation values batch job result.

    Attributes:
        result: List of expectation values list, same size as number of
            acyclic_graphs. Each element has the outer size of input sweep parameters
            and the inner size of input operators size.
    """
    result: Optional[List[List[List[float]]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationJobContext(JobContext):
    """Expectation values job context.

    Attributes:
        operators (linear_algebra.ops.ProbBasisAxisSum): List of `linear_algebra.ops.ProbBasisAxisSum` operators.
    """
    operators: Operators
@dataclasses.dataclass
class ExpectationJobResult(JobResult):
    """Expectation values job result.

    Attributes:
        result: List of floats, same size as input operators size.
    """
    result: Optional[List[float]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationSweepJobContext(SweepJobContext):
    """Expectation values sweep job context.

    Attributes:
        operators (List[linear_algebra.ops.ProbBasisAxisSum]): List of `linear_algebra.ops.ProbBasisAxisSum`
            operators, same size as list of acyclic_graphs.
    """
    operators: Operators
@dataclasses.dataclass
class ExpectationSweepJobResult(JobResult):
    """Expectation values sweep job result.

    Attributes:
        result: List of expectation values list. The outer size is the same as
            input sweep size, the inner size is the same size as input operators
            size.
    """
    result: Optional[List[List[float]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class ExpectationJobStatusEvent(JobStatusEvent):
    """Expectation job status changed event.

    Attributes:
        data: Expectation job result (single, batch or sweep variant).
    """
    data: Union[
        ExpectationJobResult,
        ExpectationBatchJobResult,
        ExpectationSweepJobResult,
    ]
########################################
# Noisy expectation job relevant types #
########################################
@dataclasses.dataclass
class NoisyExpectationJobContext(ExpectationJobContext):
    """Noisy expectation job context.

    Attributes:
        num_samples: Number of times the operators will run. Can be specified as
            a single value or list of same size as input operators.
    """
    # We cannot set default field value for Union type
    num_samples: Union[int, List[int]]
    def __post_init__(self) -> None:
        """Validate per-operator sample counts when given as a list."""
        samples = self.num_samples
        if not isinstance(samples, list):
            return
        if len(samples) != len(self.operators):
            raise ValueError(
                "Number of num_samples has to match number of operators"
            )
@dataclasses.dataclass
class NoisyExpectationJobResult(ExpectationJobResult):
    """Noisy expectation job result.

    Inherits the `result` field (list of floats) from ExpectationJobResult.
    """
@dataclasses.dataclass
class NoisyExpectationJobStatusEvent(JobStatusEvent):
    """Noisy expectation job status changed event.

    Attributes:
        data: Noisy expectation job result.
    """
    data: NoisyExpectationJobResult
#####################################
# Sample job relevant types #
#####################################
@dataclasses.dataclass
class SampleBatchJobContext(BatchJobContext):
    """Sample batch job context.

    Attributes:
        repetitions: Number of times the acyclic_graphs will run. Can be specified as
            a single value or list of same size as input acyclic_graphs.
    """
    class RepetitionsValidator(
        marshmallow.validate.Validator
    ):  # pylint: disable=too-few-public-methods
        """Marshmallow validator rejecting non-positive repetition counts."""
        def __call__(
            self, value: Union[int, List[int]]
        ) -> Union[int, List[int]]:
            """Return `value` unchanged, raising if any count is below 1."""
            if isinstance(value, list):
                if not all(x > 0 for x in value):
                    raise marshmallow.ValidationError(
                        "All elements must be greater than or equal to 1"
                    )
                return value
            if isinstance(value, int) and not value > 0:
                raise marshmallow.ValidationError(
                    "Must be greater than or equal to 1"
                )
            return value
    # We cannot set default field value for Union type
    repetitions: Union[int, List[int]] = dataclasses.field(
        metadata={"validate": RepetitionsValidator()}
    )
    def __post_init__(self) -> None:
        """Check parent invariants, then per-acyclic_graph repetition counts."""
        super().__post_init__()
        reps = self.repetitions
        if isinstance(reps, list) and len(reps) != len(self.acyclic_graphs):
            raise ValueError(
                "Number of repetitions has to match number of acyclic_graphs"
            )
@dataclasses.dataclass
class SampleBatchJobResult(JobResult):
    """Sample batch job result.

    Attributes:
        result (Optional[List[List[linear_algebra.Result]]]): Output from running the
            acyclic_graph.
    """
    result: Optional[List[List[Result]]] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleJobContext(JobContext):
    """Sample job context.

    Attributes:
        repetitions: Number of times the acyclic_graph will run.
    """
    # Must be >= 1; enforced by the marshmallow Range validator.
    repetitions: int = dataclasses.field(
        default=1, metadata={"validate": marshmallow.validate.Range(min=1)}
    )
@dataclasses.dataclass
class SampleJobResult(JobResult):
    """Sample job result.

    Attributes:
        result: Output from running the acyclic_graph.
    """
    result: Optional[Result] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleSweepJobContext(SweepJobContext):
    """Sample sweep job context.

    Attributes:
        repetitions: Number of times the acyclic_graph will run.
    """
    # Must be >= 1; enforced by the marshmallow Range validator.
    repetitions: int = dataclasses.field(
        default=1, metadata={"validate": marshmallow.validate.Range(min=1)}
    )
@dataclasses.dataclass
class SampleSweepJobResult(JobResult):
    """Sample sweep job result.

    Attributes:
        result: Output from running the acyclic_graph, one entry per sweep.
    """
    result: Optional[List[Result]] = dataclasses.field(default=None)
@dataclasses.dataclass
class SampleJobStatusEvent(JobStatusEvent):
    """Sample job status changed event.

    Attributes:
        data: Sample job result (single, batch or sweep variant).
    """
    data: Union[SampleJobResult, SampleBatchJobResult, SampleSweepJobResult]
#####################################
# Jobs queue relevant types #
#####################################
class JobType(enum.IntEnum):
    """Simulation job type.

    Attributes:
        SAMPLE: Sampling.
        EXPECTATION: Expectation values.
        NOISY_EXPECTATION: Noisy expectation values.
    """
    SAMPLE = 0
    EXPECTATION = 1
    NOISY_EXPECTATION = 2
@dataclasses.dataclass
class JobsQueue:
    """Current status of jobs queue.

    Attributes:
        ids: List of pending jobs ids.
    """
    # Bug fix: default_factory must be a zero-argument callable; the original
    # passed a list literal ([]), which raised "TypeError: 'list' object is
    # not callable" whenever JobsQueue() was constructed without ids.
    ids: List[uuid.UUID] = dataclasses.field(default_factory=list)
@dataclasses.dataclass
class PendingJob:
    """Queued job details.

    Attributes:
        id: Unique job id.
        status: Current job status.
        type: Job type.
    """
    id: uuid.UUID  # pylint: disable=invalid-name
    status: JobStatus = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                JobStatus,
                by_value=True,
            )
        }
    )
    type: JobType = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                JobType, by_value=True
            )
        }
    )
    def __post_init__(self) -> None:
        """Reject terminal states: a pending job is, by definition, not done."""
        terminal_states = (JobStatus.COMPLETE, JobStatus.ERROR)
        if self.status in terminal_states:
            raise ValueError(
                f"PendingJob cannot have {self.status.name} status"
            )
#####################################
# Tasks relevant types #
#####################################
class TaskState(enum.IntEnum):
    """Current task state.

    Attributes:
        PENDING: Task is scheduled for execution.
        RUNNING: Task is running.
        DONE: Task is finished.
    """
    PENDING = 0
    RUNNING = 1
    DONE = 2
@dataclasses.dataclass
class TaskStatus:
    """Current task status.

    Attributes:
        state: Current task state.
        error: Optional error message explaining why the task failed, only set
            if the state is :attr:`parallel_accel.client.schemas.TaskState.DONE` and the
            `success` flag is False.
        success: Optional flag indicating whether task finished successfully,
            only set if the task state is
            :attr:`parallel_accel.client.schemas.TaskState.DONE`.
    """
    state: TaskState = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                TaskState, by_value=True
            )
        }
    )
    error: Optional[str] = dataclasses.field(default=None)
    success: Optional[bool] = dataclasses.field(default=None)
    def __post_init__(self) -> None:
        """Reject error/success values on a task that has not finished."""
        if self.state == TaskState.DONE:
            return
        if self.error is None and self.success is None:
            return
        field = "error" if self.error is not None else "success"
        raise ValueError(f"Unfinished task cannot have {field} field.")
@dataclasses.dataclass
class TaskSubmitted:
    """Acknowledgement payload for a submitted task.

    Attributes:
        id: Unique task id.
    """
    id: uuid.UUID  # pylint: disable=invalid-name
@dataclasses.dataclass
class TaskStatusEvent(ServerSideEvent):
    """Task status changed event.

    Attributes:
        data: Task status.
    """
    data: TaskStatus
#####################################
# Worker relevant types #
#####################################
class WorkerState(enum.IntEnum):
    """ASIC worker state.

    Attributes:
        BOOTING: Worker is booting.
        ERROR: Worker encountered an error.
        IDLE: Worker is idling.
        OFFLINE: Worker is offline.
        PROCESSING_JOB: Worker is processing a job.
        SHUTTING_DOWN: Worker is shutting down.
    """
    OFFLINE = 0
    BOOTING = 1
    SHUTTING_DOWN = 2
    IDLE = 3
    PROCESSING_JOB = 4
    ERROR = 5
@dataclasses.dataclass
class Worker:
    """Current status of the ASIC worker.

    Attributes:
        state: Current worker state.
        error: Optional error message explaining problem with the worker, only
            set when the `state` is
            :attr:`parallel_accel.client.schemas.WorkerState.ERROR`.
        job_id: Currently processed job id, only set when the `state` is
            :obj:`parallel_accel.client.schemas.WorkerState.PROCESSING_JOB`.
    """
    state: WorkerState = dataclasses.field(
        metadata={
            "marshmallow_field": marshmallow_enum.EnumField(
                WorkerState, by_value=True
            )
        }
    )
    error: Optional[str] = dataclasses.field(default=None)
    job_id: Optional[uuid.UUID] = dataclasses.field(default=None)
    def __post_init__(self) -> None:
        """Validate that `error` and `job_id` are consistent with `state`."""
        if (
            self.state
            not in (
                WorkerState.PROCESSING_JOB,
                WorkerState.ERROR,
            )
            and ((self.error is not None) or (self.job_id is not None))
        ):
            raise ValueError(
                "Cannot have extra properties for the worker status "
                f"{self.state.name}"
            )
        if self.state == WorkerState.ERROR:
            if not self.error:
                # Typo fix: the original message read "messsage".
                raise ValueError("Missing error message")
            if self.job_id:
                raise ValueError("Cannot have job_id field for the ERROR state")
        if self.state == WorkerState.PROCESSING_JOB:
            if not self.job_id:
                raise ValueError("Missing job id")
            if self.error:
                # Bug fix: this branch handles PROCESSING_JOB, but the original
                # message blamed the IDLE state.
                raise ValueError(
                    "Cannot have error field for the PROCESSING_JOB state"
                )
#####################################
# marshmallow schemas #
#####################################
class _SSERenderer:
"""A helper class for serializing and deserializing objects to server side
events message format.
The server side event message is UTF-8 text data separated by a pair of
newline characters.
"""
@staticmethod
def dumps(obj: Dict[str, Any], *_args, **_kwargs) -> str:
r"""Encodes input object into text string.
Args:
obj: Object to be serialized.
Returns:
Text string in format:
{key}: {value}\n
...
\n
"""
result = ""
for key in ("event", "id", "timestamp", "data"):
value = obj.get(key, None)
if not value:
continue
if key == "data":
value = json.dumps(value, separators=(",", ":"))
result += f"{key}: {value}\n"
result += "\n"
return result
@staticmethod
def loads( # pylint: disable=invalid-name
s: str, *_args, **_kwargs
) -> Dict[str, Any]:
"""Decodes input text string into dict object.
Args:
s: Text string to be decoded.
Returns:
Dict object.
"""
obj = {}
for line in s.split("\n"):
line = line.strip()
if not line:
continue
key, value = line.split(": ")
if key == "data":
value = json.loads(value)
obj[key] = value
return obj
class _BaseSchema(marshmallow.Schema):
    """Base `marshmallow.schema.Schema` for ParallelAccel related schemas.

    This is a helper schema that provides a custom `marshmallow.post_dump` hook
    that excludes all None fields from the final serialization result.
    """
    @marshmallow.post_dump
    def remove_empty_fields(  # pylint: disable=no-self-use
        self, data: Dict, **_kwargs
    ) -> Dict[str, Any]:
        """Removes all None fields from the input data.

        Args:
            data: Input data dictionary object.

        Returns:
            Filtered dictionary object.
        """
        return {k: v for k, v in data.items() if v is not None}
class _SSEBaseSchema(_BaseSchema):
    """Base `marshmallow.schema.Schema` for ParallelAccel service server side events."""
    class Meta:  # pylint: disable=too-few-public-methods
        """Metadata passed to the `marshmallow.schemas.Schema` constructor."""
        # Serialize/deserialize via the SSE text format instead of JSON.
        render_module = _SSERenderer
# Module-level singleton schema instances: one marshmallow schema per
# dataclass, built via marshmallow_dataclass.class_schema and instantiated
# once. The (dataclass, base_schema) pairs select: None for plain request
# types, _BaseSchema for responses that drop None fields on dump, and
# _SSEBaseSchema for server side event payloads rendered in SSE format.
(
    APIErrorSchema,
    ExpectationBatchJobContextSchema,
    ExpectationBatchJobResultSchema,
    ExpectationJobContextSchema,
    ExpectationJobResultSchema,
    ExpectationJobStatusEventSchema,
    ExpectationSweepJobContextSchema,
    ExpectationSweepJobResultSchema,
    JobProgressSchema,
    JobResultSchema,
    JobStatusEventSchema,
    JobSubmittedSchema,
    JobsQueueSchema,
    NoisyExpectationJobContextSchema,
    NoisyExpectationJobResultSchema,
    NoisyExpectationJobStatusEventSchema,
    PendingJobSchema,
    SampleBatchJobContextSchema,
    SampleBatchJobResultSchema,
    SampleJobContextSchema,
    SampleJobResultSchema,
    SampleJobStatusEventSchema,
    SampleSweepJobContextSchema,
    SampleSweepJobResultSchema,
    ServerSideEventSchema,
    StreamTimeoutEventSchema,
    TaskStatusEventSchema,
    TaskStatusSchema,
    TaskSubmittedSchema,
    WorkerSchema,
) = tuple(
    marshmallow_dataclass.class_schema(x, base_schema=y)()
    for x, y in (
        (APIError, None),
        (ExpectationBatchJobContext, None),
        (ExpectationBatchJobResult, _BaseSchema),
        (ExpectationJobContext, None),
        (ExpectationJobResult, _BaseSchema),
        (ExpectationJobStatusEvent, _SSEBaseSchema),
        (ExpectationSweepJobContext, None),
        (ExpectationSweepJobResult, _BaseSchema),
        (JobProgress, None),
        (JobResult, _BaseSchema),
        (JobStatusEvent, _SSEBaseSchema),
        (JobSubmitted, None),
        (JobsQueue, None),
        (NoisyExpectationJobContext, None),
        (NoisyExpectationJobResult, _BaseSchema),
        (NoisyExpectationJobStatusEvent, _SSEBaseSchema),
        (PendingJob, None),
        (SampleBatchJobContext, None),
        (SampleBatchJobResult, _BaseSchema),
        (SampleJobContext, None),
        (SampleJobResult, _BaseSchema),
        (SampleJobStatusEvent, _SSEBaseSchema),
        (SampleSweepJobContext, None),
        (SampleSweepJobResult, _BaseSchema),
        (ServerSideEvent, _SSEBaseSchema),
        (StreamTimeoutEvent, _SSEBaseSchema),
        (TaskStatusEvent, _SSEBaseSchema),
        (TaskStatus, _BaseSchema),
        (TaskSubmitted, None),
        (Worker, _BaseSchema),
    )
)
| [
"json.loads",
"marshmallow.ValidationError",
"marshmallow_enum.EnumField",
"marshmallow_dataclass.class_schema",
"linear_algebra.to_json",
"marshmallow.validate.Range",
"json.dumps",
"marshmallow_dataclass.NewType",
"time.time",
"dataclasses.field"
] | [((5580, 5672), 'marshmallow_dataclass.NewType', 'marshmallow_dataclass.NewType', (['"""Graph"""', 'linear_algebra.Graph'], {'field': '_LinearAlgebraField'}), "('Graph', linear_algebra.Graph, field=\n _LinearAlgebraField)\n", (5609, 5672), False, 'import marshmallow_dataclass\n'), ((5686, 5771), 'marshmallow_dataclass.NewType', 'marshmallow_dataclass.NewType', (['"""Operators"""', 'OperatorsType'], {'field': '_OperatorsField'}), "('Operators', OperatorsType, field=_OperatorsField\n )\n", (5715, 5771), False, 'import marshmallow_dataclass\n'), ((5789, 5896), 'marshmallow_dataclass.NewType', 'marshmallow_dataclass.NewType', (['"""ParamResolver"""', 'linear_algebra.ParamResolver'], {'field': '_LinearAlgebraField'}), "('ParamResolver', linear_algebra.ParamResolver,\n field=_LinearAlgebraField)\n", (5818, 5896), False, 'import marshmallow_dataclass\n'), ((5908, 6002), 'marshmallow_dataclass.NewType', 'marshmallow_dataclass.NewType', (['"""Result"""', 'linear_algebra.Result'], {'field': '_LinearAlgebraField'}), "('Result', linear_algebra.Result, field=\n _LinearAlgebraField)\n", (5937, 6002), False, 'import marshmallow_dataclass\n'), ((6010, 6115), 'marshmallow_dataclass.NewType', 'marshmallow_dataclass.NewType', (['"""Sweepable"""', 'linear_algebra.study.Sweepable'], {'field': '_LinearAlgebraField'}), "('Sweepable', linear_algebra.study.Sweepable,\n field=_LinearAlgebraField)\n", (6039, 6115), False, 'import marshmallow_dataclass\n'), ((6743, 6772), 'dataclasses.field', 'dataclasses.field', ([], {'default': '""""""'}), "(default='')\n", (6760, 6772), False, 'import dataclasses\n'), ((6794, 6822), 'dataclasses.field', 'dataclasses.field', ([], {'default': '(0)'}), '(default=0)\n', (6811, 6822), False, 'import dataclasses\n'), ((7224, 7255), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (7241, 7255), False, 'import dataclasses\n'), ((9564, 9592), 'dataclasses.field', 'dataclasses.field', ([], {'default': '(0)'}), 
'(default=0)\n', (9581, 9592), False, 'import dataclasses\n'), ((9610, 9638), 'dataclasses.field', 'dataclasses.field', ([], {'default': '(1)'}), '(default=1)\n', (9627, 9638), False, 'import dataclasses\n'), ((10981, 11012), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (10998, 11012), False, 'import dataclasses\n'), ((11051, 11082), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (11068, 11082), False, 'import dataclasses\n'), ((11111, 11142), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (11128, 11142), False, 'import dataclasses\n'), ((13538, 13569), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (13555, 13569), False, 'import dataclasses\n'), ((14071, 14102), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (14088, 14102), False, 'import dataclasses\n'), ((14803, 14834), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (14820, 14834), False, 'import dataclasses\n'), ((18178, 18209), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (18195, 18209), False, 'import dataclasses\n'), ((18710, 18741), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (18727, 18741), False, 'import dataclasses\n'), ((19275, 19306), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (19292, 19306), False, 'import dataclasses\n'), ((20105, 20142), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': '[]'}), '(default_factory=[])\n', (20122, 20142), False, 'import dataclasses\n'), ((22096, 22127), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (22113, 22127), False, 'import dataclasses\n'), ((22158, 22189), 'dataclasses.field', 'dataclasses.field', ([], {'default': 
'None'}), '(default=None)\n', (22175, 22189), False, 'import dataclasses\n'), ((24144, 24175), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (24161, 24175), False, 'import dataclasses\n'), ((24210, 24241), 'dataclasses.field', 'dataclasses.field', ([], {'default': 'None'}), '(default=None)\n', (24227, 24241), False, 'import dataclasses\n'), ((3902, 3929), 'linear_algebra.to_json', 'linear_algebra.to_json', (['obj'], {}), '(obj)\n', (3924, 3929), False, 'import linear_algebra\n'), ((3359, 3375), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3369, 3375), False, 'import json\n'), ((28712, 28764), 'marshmallow_dataclass.class_schema', 'marshmallow_dataclass.class_schema', (['x'], {'base_schema': 'y'}), '(x, base_schema=y)\n', (28746, 28764), False, 'import marshmallow_dataclass\n'), ((4582, 4630), 'marshmallow.ValidationError', 'marshmallow.ValidationError', (['"""Not a JSON object"""'], {}), "('Not a JSON object')\n", (4609, 4630), False, 'import marshmallow\n'), ((5513, 5561), 'marshmallow.ValidationError', 'marshmallow.ValidationError', (['"""Not a JSON object"""'], {}), "('Not a JSON object')\n", (5540, 5561), False, 'import marshmallow\n'), ((7005, 7016), 'time.time', 'time.time', ([], {}), '()\n', (7014, 7016), False, 'import time\n'), ((10847, 10899), 'marshmallow_enum.EnumField', 'marshmallow_enum.EnumField', (['JobStatus'], {'by_value': '(True)'}), '(JobStatus, by_value=True)\n', (10873, 10899), False, 'import marshmallow_enum\n'), ((17080, 17158), 'marshmallow.ValidationError', 'marshmallow.ValidationError', (['"""All elements must be greater than or equal to 1"""'], {}), "('All elements must be greater than or equal to 1')\n", (17107, 17158), False, 'import marshmallow\n'), ((17277, 17342), 'marshmallow.ValidationError', 'marshmallow.ValidationError', (['"""Must be greater than or equal to 1"""'], {}), "('Must be greater than or equal to 1')\n", (17304, 17342), False, 'import marshmallow\n'), ((18472, 
18505), 'marshmallow.validate.Range', 'marshmallow.validate.Range', ([], {'min': '(1)'}), '(min=1)\n', (18498, 18505), False, 'import marshmallow\n'), ((19020, 19053), 'marshmallow.validate.Range', 'marshmallow.validate.Range', ([], {'min': '(1)'}), '(min=1)\n', (19046, 19053), False, 'import marshmallow\n'), ((20471, 20523), 'marshmallow_enum.EnumField', 'marshmallow_enum.EnumField', (['JobStatus'], {'by_value': '(True)'}), '(JobStatus, by_value=True)\n', (20497, 20523), False, 'import marshmallow_enum\n'), ((20678, 20728), 'marshmallow_enum.EnumField', 'marshmallow_enum.EnumField', (['JobType'], {'by_value': '(True)'}), '(JobType, by_value=True)\n', (20704, 20728), False, 'import marshmallow_enum\n'), ((21970, 22022), 'marshmallow_enum.EnumField', 'marshmallow_enum.EnumField', (['TaskState'], {'by_value': '(True)'}), '(TaskState, by_value=True)\n', (21996, 22022), False, 'import marshmallow_enum\n'), ((24016, 24070), 'marshmallow_enum.EnumField', 'marshmallow_enum.EnumField', (['WorkerState'], {'by_value': '(True)'}), '(WorkerState, by_value=True)\n', (24042, 24070), False, 'import marshmallow_enum\n'), ((26096, 26136), 'json.dumps', 'json.dumps', (['value'], {'separators': "(',', ':')"}), "(value, separators=(',', ':'))\n", (26106, 26136), False, 'import json\n'), ((26749, 26766), 'json.loads', 'json.loads', (['value'], {}), '(value)\n', (26759, 26766), False, 'import json\n')] |
import collections
def cal(num):
    """Cache the grid-compatible divisors of num and return num unchanged.

    Only the smaller divisor of each pair (candidate, num // candidate) is
    stored in the global ``factor`` dict; callers derive the complement.
    A divisor is kept only when both halves fit inside max(n, m).
    """
    divisors = factor[num]
    limit = max(n, m)
    candidate = 1
    while candidate * candidate <= num:
        if num % candidate == 0 and candidate <= limit and num // candidate <= limit:
            divisors.append(candidate)
        candidate += 1
    return num
def dfs(i, j):
    """Return True if a factor path from cell (i, j) reaches (m-1, n-1).

    Each cell value is factored into pairs (p, num // p); every pair proposes
    the two next cells (p-1, nj-1) and (nj-1, p-1). A value already present
    in ``factor`` marks an already-expanded cell and is pruned.
    """
    if i == m - 1 and j == n - 1:
        return True
    # Bug fix: the original guard used `and`, so a position with only ONE
    # index out of range fell through to grid[i][j] and raised IndexError.
    # Either index being out of bounds must terminate this branch.
    if i >= m or j >= n or grid[i][j] in factor:
        return False
    num = cal(grid[i][j])
    for p in factor[num]:
        nj = num // p
        if dfs(p - 1, nj - 1) or dfs(nj - 1, p - 1):
            return True
    return False
# Read grid dimensions: m rows, then n columns.
m=int(input())
n=int(input())
# Read the m x n grid of integers, one whitespace-separated row per line.
grid=[]
for i in range(m):
    grid.append(list(map(int,input().split())))
# Divisor cache keyed by cell value; also acts as the visited set for dfs.
factor=collections.defaultdict(list)
# Search for a factor path from the top-left cell to the bottom-right cell.
print('yes' if dfs(0, 0) else 'no')
| [
"collections.defaultdict"
] | [((615, 644), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (638, 644), False, 'import collections\n')] |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\tunable_utils\create_object.py
# Compiled at: 2020-05-07 00:26:47
# Size of source mod 2**32: 4106 bytes
from crafting.crafting_tunable import CraftingTuning
from objects.components.state import TunableStateValueReference, CommodityBasedObjectStateValue
from objects.system import create_object
from sims4.random import weighted_random_item
from sims4.tuning.tunable import TunableReference, TunableTuple, TunableList, TunableRange, AutoFactoryInit, HasTunableSingletonFactory, TunableFactory
import crafting, services, sims4
# Module-level logger used by the tunable verification callback below.
logger = sims4.log.Logger('CreateObject')
class ObjectCreator(HasTunableSingletonFactory, AutoFactoryInit):
    """Tunable factory that creates an object instance from a catalog definition."""
    @TunableFactory.factory_option
    def get_definition(pack_safe):
        # Factory option variant letting callers opt into pack-safe references.
        return {'definition': TunableReference(description='\n The definition of the object to be created.\n ',
          manager=(services.definition_manager()),
          pack_safe=pack_safe)}
    FACTORY_TUNABLES = {'definition': TunableReference(description='\n The definition of the object to be created.\n ',
      manager=(services.definition_manager()))}
    def __call__(self, **kwargs):
        # Instantiate the tuned definition; kwargs are forwarded to create_object.
        return create_object((self.definition), **kwargs)
    def get_object_definition(self):
        return self.definition
    def get_footprint(self):
        return self.definition.get_footprint()
    @property
    def id(self):
        # The catalog definition's id, not an object instance id.
        return self.definition.id
def _verify_tunable_quality_value_callback(instance_class, tunable_name, source, quality, weight):
    """Tuning verification hook: log an error for qualities outside CraftingTuning.QUALITY_STATE."""
    valid_qualities = CraftingTuning.QUALITY_STATE.values
    if quality in valid_qualities:
        return
    logger.error('A TunableRecipeCreator {} specifies an invalid quality {}.', source, quality)
class RecipeCreator(HasTunableSingletonFactory, AutoFactoryInit):
    """Tunable factory that produces a craftable object from a recipe,
    optionally at a weighted-random quality level."""
    FACTORY_TUNABLES = {'recipe':TunableReference(description='\n Recipe to produce an object with.\n ',
      manager=services.get_instance_manager(sims4.resources.Types.RECIPE)),
     'weighted_quality':TunableList(description='\n A list of weighted quality in which the object will be created.\n \n If empty, it will apply a default quality.\n ',
      tunable=TunableTuple(description='\n A possible level of quality for this item that will be generated.\n This will be randomly chosen based off weight against other items in the list.\n ',
      weight=TunableRange(tunable_type=int,
      default=1,
      minimum=1),
      quality=TunableStateValueReference(class_restrictions=CommodityBasedObjectStateValue),
      verify_tunable_callback=_verify_tunable_quality_value_callback))}
    def __call__(self, crafter_sim=None, post_add=None, **kwargs):
        # Draw a quality by weighted random choice; None means default quality.
        choices = [(quality.weight, quality.quality) for quality in self.weighted_quality]
        quality = weighted_random_item(choices) if choices else None
        return crafting.crafting_interactions.create_craftable((self.recipe), crafter_sim, quality=quality, post_add=post_add)
    def get_object_definition(self):
        return self.recipe.final_product.definition
"services.get_instance_manager",
"sims4.log.Logger",
"services.definition_manager",
"objects.system.create_object",
"sims4.tuning.tunable.TunableRange",
"objects.components.state.TunableStateValueReference",
"sims4.random.weighted_random_item",
"crafting.crafting_interactions.create_craftable"
] | [((755, 787), 'sims4.log.Logger', 'sims4.log.Logger', (['"""CreateObject"""'], {}), "('CreateObject')\n", (771, 787), False, 'import crafting, services, sims4\n'), ((1437, 1477), 'objects.system.create_object', 'create_object', (['self.definition'], {}), '(self.definition, **kwargs)\n', (1450, 1477), False, 'from objects.system import create_object\n'), ((3172, 3285), 'crafting.crafting_interactions.create_craftable', 'crafting.crafting_interactions.create_craftable', (['self.recipe', 'crafter_sim'], {'quality': 'quality', 'post_add': 'post_add'}), '(self.recipe, crafter_sim,\n quality=quality, post_add=post_add)\n', (3219, 3285), False, 'import crafting, services, sims4\n'), ((3106, 3135), 'sims4.random.weighted_random_item', 'weighted_random_item', (['choices'], {}), '(choices)\n', (3126, 3135), False, 'from sims4.random import weighted_random_item\n'), ((1354, 1383), 'services.definition_manager', 'services.definition_manager', ([], {}), '()\n', (1381, 1383), False, 'import crafting, services, sims4\n'), ((2163, 2222), 'services.get_instance_manager', 'services.get_instance_manager', (['sims4.resources.Types.RECIPE'], {}), '(sims4.resources.Types.RECIPE)\n', (2192, 2222), False, 'import crafting, services, sims4\n'), ((1102, 1131), 'services.definition_manager', 'services.definition_manager', ([], {}), '()\n', (1129, 1131), False, 'import crafting, services, sims4\n'), ((2694, 2746), 'sims4.tuning.tunable.TunableRange', 'TunableRange', ([], {'tunable_type': 'int', 'default': '(1)', 'minimum': '(1)'}), '(tunable_type=int, default=1, minimum=1)\n', (2706, 2746), False, 'from sims4.tuning.tunable import TunableReference, TunableTuple, TunableList, TunableRange, AutoFactoryInit, HasTunableSingletonFactory, TunableFactory\n'), ((2777, 2854), 'objects.components.state.TunableStateValueReference', 'TunableStateValueReference', ([], {'class_restrictions': 'CommodityBasedObjectStateValue'}), '(class_restrictions=CommodityBasedObjectStateValue)\n', (2803, 2854), 
False, 'from objects.components.state import TunableStateValueReference, CommodityBasedObjectStateValue\n')] |
import Sofa
import random
from cmath import *
############################################################################################
# this is a PythonScriptController example script
############################################################################################
############################################################################################
# following defs are used later in the script
############################################################################################
# utility methods
# Initial downward (z) velocity applied to spawned objects; 0 = at rest.
falling_speed = 0
# Distance between the two endpoint particles of a single capsule.
capsule_height = 5
# Spacing (and spring rest length) between consecutive chain particles.
capsule_chain_height = 5
def createRigidCapsule(parentNode,name,x,y,z,*args):
    """Create a rigid-body capsule node under parentNode.

    The capsule is one Rigid frame at (x, y, z) plus a mapped two-particle
    'Surf' child whose segment carries the capsule collision model. An
    optional trailing argument fixes the radius; otherwise a random radius
    in [1, 3] is drawn.
    """
    node = parentNode.createChild(name)
    # Radius: explicit when provided, random in [1, 3] otherwise.
    if len(args) == 0:
        radius = random.uniform(1, 3)
    else:
        radius = args[0]
    rigid_pose = str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1'
    rigid_velocity = '0 0 '+str(falling_speed)+' 0 0 0 1'
    node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=rigid_pose,velocity=rigid_velocity)
    node.createObject('UniformMass',name='mass',totalMass=1)
    # Random tilt of the capsule axis; z_rand is drawn (keeping the RNG
    # sequence consistent with sibling helpers) but not used here.
    x_rand = random.uniform(-0.5, 0.5)
    y_rand = random.uniform(-0.5, 0.5)
    z_rand = random.uniform(-0.5, 0.5)
    surf_node = node.createChild('Surf')
    endpoints = (str(x_rand)+' '+str(y_rand)+' '+str(capsule_height/2)+' '
                 +str(-x_rand)+' '+str(-y_rand)+' '+str(- capsule_height/2))
    surf_node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=endpoints)
    surf_node.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
    surf_node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
    surf_node.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
    return node
def createFlexCapsule(parentNode,name,x,y,z,*args):
radius=0
if len(args)==0:
radius = random.uniform(1,3)
else:
radius = args[0]
node = parentNode.createChild(name)
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x + x_rand)+' '+str(y + y_rand)+' '+str(z + z_rand + capsule_height)+' '+str(x - x_rand)+' '+str(y - y_rand)+' '+str(z - z_rand),velocity='0 0 '+str(falling_speed))
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges='0 1',drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return node
def createCapsuleChain(parentNode,name,length,x,y,z):
node = parentNode.createChild(name)
#radius=random.uniform(1,3)
radius=0.5
height=5
x_rand=random.uniform(-0.5,0.5)
y_rand=random.uniform(-0.5,0.5)
z_rand=random.uniform(-0.5,0.5)
node = node.createChild('Surf')
ray = 3.0
t = 0.0
delta_t = 0.7
topo_edges=''
particles=''
velocities = ''
springs=''
for i in range(0,length):
particles += str(x + (ray * cos(t)).real)+' '+str(y + (ray * sin(t)).real)+' '+str(z + i*capsule_chain_height)+' '
t += delta_t
if i < length -1:
topo_edges += str(i)+' '+str(i + 1)+' '
springs += str(i)+' '+str(i + 1)+' 10 1 '+str(capsule_chain_height)+' '
velocities+='0 0 '+str(falling_speed)+' '
topo_edges += str(length - 2)+' '+str(length -1)
springs += str(length - 2)+' '+str(length -1)+' 10 1 '+str(capsule_chain_height)
node.createObject('MechanicalObject',template='Vec3d',name='falling_particles',position=particles,velocity=velocities)
node.createObject('StiffSpringForceField',template='Vec3d',name='springforcefield',stiffness='100',damping='1',spring=springs)
mass = node.createObject('UniformMass',name='mass')
node.createObject('MeshTopology', name='meshTopology34',edges=topo_edges,drawEdges='1')
node.createObject('TCapsuleModel',template='Vec3d',name='capsule_model',defaultRadius=str(radius))
return node
def createOBB(parentNode,name,x,y,z,*args):
a=0
b=0
c=0
if len(args)==0:
a=random.uniform(0.5,1.5)
b=random.uniform(0.5,1.5)
c=random.uniform(0.5,1.5)
else:
a=args[0]
b=args[1]
c=args[2]
node = parentNode.createChild(name)
meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
mass = node.createObject('UniformMass',name='mass',totalMass=1)
node.createObject('TOBBModel',template='Rigid',name='OBB_model',extents=str(a)+' '+str(b)+' '+str(c))
return node
def createCapsule(parentNode,name,x,y,z):
if random.randint(0,1) == 0:
return createRigidCapsule(parentNode,name,x,y,z)
else:
return createFlexCapsule(parentNode,name,x,y,z)
def createCapsule(parentNode,name,x,y,z):
if random.randint(0,1) == 0:
return createRigidCapsule(parentNode,name,x,y,z)
else:
return createFlexCapsule(parentNode,name,x,y,z)
def createSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Vec3d',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z),velocity='0 0 '+str(falling_speed))
node.createObject('TSphereModel',template='Vec3d',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return node
def createRigidSphere(parentNode,name,x,y,z,*args):
node = parentNode.createChild(name)
r = 0
if len(args) == 0:
r=random.uniform(1,4)
else:
r = args[0]
#meca = node.createObject('MechanicalObject',name='rigidDOF',template='Rigid',position=str(x)+' '+str(y)+' '+
# str(z)+' 0 0 0 1')
#SurfNode = node.createChild('Surf')
node.createObject('MechanicalObject',template='Rigid',name='falling_particle',position=str(x)+' '+str(y)+' '+str(z)+' 0 0 0 1',velocity='0 0 '+str(falling_speed)+' 0 0 0 1')
node.createObject('TSphereModel',template='Rigid',name='sphere_model',radius=str(r))
node.createObject('UniformMass',name='mass',totalMass=1)
#SurfNode.createObject('RigidMapping',template='Rigid,Vec3d',name='rigid_mapping',input='@../rigidDOF',output='@falling_particle')
return node
| [
"random.uniform",
"random.randint"
] | [((1025, 1050), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1039, 1050), False, 'import random\n'), ((1058, 1083), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1072, 1083), False, 'import random\n'), ((1091, 1116), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1105, 1116), False, 'import random\n'), ((1886, 1911), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1900, 1911), False, 'import random\n'), ((1919, 1944), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1933, 1944), False, 'import random\n'), ((1952, 1977), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1966, 1977), False, 'import random\n'), ((2673, 2698), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (2687, 2698), False, 'import random\n'), ((2706, 2731), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (2720, 2731), False, 'import random\n'), ((2739, 2764), 'random.uniform', 'random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (2753, 2764), False, 'import random\n'), ((730, 750), 'random.uniform', 'random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (744, 750), False, 'import random\n'), ((1793, 1813), 'random.uniform', 'random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (1807, 1813), False, 'import random\n'), ((3958, 3982), 'random.uniform', 'random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (3972, 3982), False, 'import random\n'), ((3986, 4010), 'random.uniform', 'random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (4000, 4010), False, 'import random\n'), ((4014, 4038), 'random.uniform', 'random.uniform', (['(0.5)', '(1.5)'], {}), '(0.5, 1.5)\n', (4028, 4038), False, 'import random\n'), ((4524, 4544), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4538, 4544), False, 'import 
random\n'), ((4707, 4727), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4721, 4727), False, 'import random\n'), ((4960, 4980), 'random.uniform', 'random.uniform', (['(1)', '(4)'], {}), '(1, 4)\n', (4974, 4980), False, 'import random\n'), ((5764, 5784), 'random.uniform', 'random.uniform', (['(1)', '(4)'], {}), '(1, 4)\n', (5778, 5784), False, 'import random\n')] |
import pytest
import rasterio as rio
from rasterio.io import DatasetWriter
from cog_worker import Manager
from rasterio import MemoryFile, crs
TEST_COG = "tests/roads_cog.tif"
@pytest.fixture
def molleweide_manager():
return Manager(
proj="+proj=moll",
scale=50000,
)
@pytest.fixture
def sample_function():
def myfunc(worker):
return worker.read(TEST_COG)
return myfunc
def test_preview(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.preview(sample_function, max_size=123)
assert max(arr.shape) == 123, "Expected maximum array dimension to be 123px"
def test_tile(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.tile(sample_function, x=1, y=2, z=3)
assert arr.shape == (1, 256, 256), "Expected 256x256 tile"
def test_chunk_execute(molleweide_manager, sample_function):
chunks = list(molleweide_manager.chunk_execute(sample_function, chunksize=123))
for arr, bbox in chunks:
assert max(arr.shape) <= 123, "Max chunk size should be 123px"
def test_chunk_params(molleweide_manager):
chunks = list(molleweide_manager.chunk_params(chunksize=123))
assert len(chunks) == 18, "Expected ~18 chunks for 123px tiles at 50km scale"
def test__open_writer(molleweide_manager):
with MemoryFile() as memfile:
with molleweide_manager._open_writer(memfile, 1, rio.ubyte) as writer:
assert isinstance(writer, DatasetWriter)
def test_chunk_save(molleweide_manager, sample_function):
full_arr = molleweide_manager.execute(sample_function)[0]
with MemoryFile() as memfile:
molleweide_manager.chunk_save(memfile, sample_function)
memfile.seek(0)
with rio.open(memfile) as src:
assert src.profile["crs"] == crs.CRS.from_string("+proj=moll")
assert src.profile["transform"][0] == 50000
arr = src.read()
assert arr.shape == full_arr.shape
assert (
abs(arr.sum() / full_arr.data.sum() - 1) < 0.002
), "Error should be less than 0.2%"
def test__write_chunk(molleweide_manager, sample_function):
with MemoryFile() as memfile:
arr, bbox = molleweide_manager.execute(sample_function)
print(arr.mask.sum())
with molleweide_manager._open_writer(memfile, 1, rio.ubyte) as writer:
molleweide_manager._write_chunk(writer, arr, bbox)
memfile.seek(0)
with rio.open(memfile) as src:
written = src.read(masked=True)
assert (written == arr).all()
assert (written.mask == arr.mask).all()
def test__chunk_bounds(molleweide_manager):
chunk = molleweide_manager._chunk_bounds(0, 0, 123)
assert chunk == (
-18040095.696147293,
2674978.852256801,
-11890095.696147293,
8824978.852256801,
)
def test__num_chunks(molleweide_manager):
assert molleweide_manager._num_chunks(123) == (6, 3)
| [
"cog_worker.Manager",
"rasterio.open",
"rasterio.MemoryFile",
"rasterio.crs.CRS.from_string"
] | [((232, 271), 'cog_worker.Manager', 'Manager', ([], {'proj': '"""+proj=moll"""', 'scale': '(50000)'}), "(proj='+proj=moll', scale=50000)\n", (239, 271), False, 'from cog_worker import Manager\n'), ((1311, 1323), 'rasterio.MemoryFile', 'MemoryFile', ([], {}), '()\n', (1321, 1323), False, 'from rasterio import MemoryFile, crs\n'), ((1599, 1611), 'rasterio.MemoryFile', 'MemoryFile', ([], {}), '()\n', (1609, 1611), False, 'from rasterio import MemoryFile, crs\n'), ((2163, 2175), 'rasterio.MemoryFile', 'MemoryFile', ([], {}), '()\n', (2173, 2175), False, 'from rasterio import MemoryFile, crs\n'), ((1725, 1742), 'rasterio.open', 'rio.open', (['memfile'], {}), '(memfile)\n', (1733, 1742), True, 'import rasterio as rio\n'), ((2461, 2478), 'rasterio.open', 'rio.open', (['memfile'], {}), '(memfile)\n', (2469, 2478), True, 'import rasterio as rio\n'), ((1792, 1825), 'rasterio.crs.CRS.from_string', 'crs.CRS.from_string', (['"""+proj=moll"""'], {}), "('+proj=moll')\n", (1811, 1825), False, 'from rasterio import MemoryFile, crs\n')] |
from train import train_model
from utils import *
import os
import sys
pwd = os.environ.get('CLIP_DIR')
DATA_DIR = "%s/data/processed/" % pwd
exp_name = "non_multilabel"
run_name = "sentence_structurel_with_crf"
train_file_name = "MIMIC_train_binary.csv"
dev_file_name = "MIMIC_val_binary.csv"
test_file_name = "test_binary.csv"
exp_name = "outputs_binary"
train = read_sentence_structure(os.path.join(DATA_DIR, train_file_name))
dev = read_sentence_structure(os.path.join(DATA_DIR, dev_file_name))
test = read_sentence_structure(os.path.join(DATA_DIR, test_file_name))
run_name = "binary"
def main(args):
train_model(
train,
dev,
test,
args[0],
exp_name,
use_crf=True,
learning_rate=float(args[1]),
epochs=int(args[2]),
writer_preds_freq=10,
embeddings_type="BioWord",
list_of_possible_tags=["followup"],
embeddings_path="%s/CLIP/experiments/tagger/embeddings" % pwd,
)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"os.path.join",
"os.environ.get"
] | [((78, 104), 'os.environ.get', 'os.environ.get', (['"""CLIP_DIR"""'], {}), "('CLIP_DIR')\n", (92, 104), False, 'import os\n'), ((391, 430), 'os.path.join', 'os.path.join', (['DATA_DIR', 'train_file_name'], {}), '(DATA_DIR, train_file_name)\n', (403, 430), False, 'import os\n'), ((462, 499), 'os.path.join', 'os.path.join', (['DATA_DIR', 'dev_file_name'], {}), '(DATA_DIR, dev_file_name)\n', (474, 499), False, 'import os\n'), ((532, 570), 'os.path.join', 'os.path.join', (['DATA_DIR', 'test_file_name'], {}), '(DATA_DIR, test_file_name)\n', (544, 570), False, 'import os\n')] |
from django.urls import path
from . import views
app_name = 'persons'
urlpatterns = [
path('', views.PersonsTableView.as_view(),name='persons_list'),
path('persons_details/<int:pk>',views.PersonsUpdateView.as_view(),name='persons_details_edit'),
path('persons_details/create',views.PersonsCreateView.as_view(),name='persons_details_add'),
path('persons_details/<int:pk>/delete',views.PersonsDeleteView.as_view(),name="persons_details_delete"),
path('persons_details/sort',views.event_gate, name='sort'),
]
| [
"django.urls.path"
] | [((466, 525), 'django.urls.path', 'path', (['"""persons_details/sort"""', 'views.event_gate'], {'name': '"""sort"""'}), "('persons_details/sort', views.event_gate, name='sort')\n", (470, 525), False, 'from django.urls import path\n')] |
from hytra.pluginsystem import transition_feature_vector_construction_plugin
import numpy as np
from compiler.ast import flatten
class TransitionFeaturesSubtraction(
transition_feature_vector_construction_plugin.TransitionFeatureVectorConstructionPlugin
):
"""
Computes the subtraction of features in the feature vector
"""
def constructFeatureVector(
self, featureDictObjectA, featureDictObjectB, selectedFeatures
):
assert "Global<Maximum >" not in selectedFeatures
assert "Global<Minimum >" not in selectedFeatures
assert "Histrogram" not in selectedFeatures
assert "Polygon" not in selectedFeatures
features = []
for key in selectedFeatures:
if key == "RegionCenter":
continue
else:
if (
not isinstance(featureDictObjectA[key], np.ndarray)
or featureDictObjectA[key].size == 1
):
features.append(
float(featureDictObjectA[key]) - float(featureDictObjectB[key])
)
else:
features.extend(
flatten(
(
featureDictObjectA[key].astype("float32")
- featureDictObjectB[key].astype("float32")
).tolist()
)
)
# there should be no nans or infs
assert np.all(np.isfinite(np.array(features)))
return features
def getFeatureNames(self, featureDictObjectA, featureDictObjectB, selectedFeatures):
assert "Global<Maximum >" not in selectedFeatures
assert "Global<Minimum >" not in selectedFeatures
assert "Histrogram" not in selectedFeatures
assert "Polygon" not in selectedFeatures
featuresNames = []
for key in selectedFeatures:
if key == "RegionCenter":
continue
else:
if (
not isinstance(featureDictObjectA[key], np.ndarray)
or featureDictObjectA[key].size == 1
):
featuresNames.append("A[{key}]-B[{key}]".format(key=key))
else:
featuresNames.extend(
[
"A[{key}][{i}]-B[{key}][{i}]".format(key=key, i=i)
for i in range(
len(
(
featureDictObjectA[key]
- featureDictObjectB[key]
).tolist()
)
)
]
)
return featuresNames
| [
"numpy.array"
] | [((1564, 1582), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1572, 1582), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
# Create your models here.
class IP_Address(models.Model):
ip = models.GenericIPAddressField(verbose_name=u"IP地址")
gateway = models.GenericIPAddressField(verbose_name=u"网关")
network = models.GenericIPAddressField(verbose_name=u"网络号")
netmask = models.CharField(max_length=20,default='',null=True,blank='',verbose_name=u"掩码")
system = models.CharField(max_length=64,default='',null=True,blank='',verbose_name=u"应用系统")
apply_person = models.CharField(max_length=64,default='',null=True,blank='',verbose_name=u"申请人")
state = models.CharField(max_length=20,choices=((u"已分配",u"已分配"),(u"未分配",u"未分配")),verbose_name=u"状态")
apply_time = models.DateField(default=datetime.now(),verbose_name=u"申请时间")
class IP_Range(models.Model):
start_ip = models.GenericIPAddressField(verbose_name=u"开始IP")
end_ip = models.GenericIPAddressField(verbose_name=u"结束IP")
network = models.GenericIPAddressField(verbose_name=u"网络号")
netmask = models.CharField(max_length=20,default='',verbose_name=u"掩码")
use_ip = models.CharField(max_length=20,default='',null=True,blank='',verbose_name=u"已使用IP数")
left_ip = models.CharField(max_length=20,default='',null=True,blank='',verbose_name=u"未使用IP数")
create_time = models.DateField(default=datetime.now(),verbose_name=u"创建时间")
des = models.CharField(max_length=20,default='',null=True,blank='',verbose_name=u"描述") | [
"django.db.models.GenericIPAddressField",
"datetime.datetime.now",
"django.db.models.CharField"
] | [((151, 201), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'verbose_name': 'u"""IP地址"""'}), "(verbose_name=u'IP地址')\n", (179, 201), False, 'from django.db import models\n'), ((216, 264), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'verbose_name': 'u"""网关"""'}), "(verbose_name=u'网关')\n", (244, 264), False, 'from django.db import models\n'), ((279, 328), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'verbose_name': 'u"""网络号"""'}), "(verbose_name=u'网络号')\n", (307, 328), False, 'from django.db import models\n'), ((343, 431), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '""""""', 'null': '(True)', 'blank': '""""""', 'verbose_name': 'u"""掩码"""'}), "(max_length=20, default='', null=True, blank='',\n verbose_name=u'掩码')\n", (359, 431), False, 'from django.db import models\n'), ((437, 527), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'default': '""""""', 'null': '(True)', 'blank': '""""""', 'verbose_name': 'u"""应用系统"""'}), "(max_length=64, default='', null=True, blank='',\n verbose_name=u'应用系统')\n", (453, 527), False, 'from django.db import models\n'), ((539, 628), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'default': '""""""', 'null': '(True)', 'blank': '""""""', 'verbose_name': 'u"""申请人"""'}), "(max_length=64, default='', null=True, blank='',\n verbose_name=u'申请人')\n", (555, 628), False, 'from django.db import models\n'), ((633, 735), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'choices': "((u'已分配', u'已分配'), (u'未分配', u'未分配'))", 'verbose_name': 'u"""状态"""'}), "(max_length=20, choices=((u'已分配', u'已分配'), (u'未分配', u'未分配')\n ), verbose_name=u'状态')\n", (649, 735), False, 'from django.db import models\n'), ((851, 901), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'verbose_name': 
'u"""开始IP"""'}), "(verbose_name=u'开始IP')\n", (879, 901), False, 'from django.db import models\n'), ((915, 965), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'verbose_name': 'u"""结束IP"""'}), "(verbose_name=u'结束IP')\n", (943, 965), False, 'from django.db import models\n'), ((980, 1029), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'verbose_name': 'u"""网络号"""'}), "(verbose_name=u'网络号')\n", (1008, 1029), False, 'from django.db import models\n'), ((1044, 1107), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '""""""', 'verbose_name': 'u"""掩码"""'}), "(max_length=20, default='', verbose_name=u'掩码')\n", (1060, 1107), False, 'from django.db import models\n'), ((1119, 1211), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '""""""', 'null': '(True)', 'blank': '""""""', 'verbose_name': 'u"""已使用IP数"""'}), "(max_length=20, default='', null=True, blank='',\n verbose_name=u'已使用IP数')\n", (1135, 1211), False, 'from django.db import models\n'), ((1218, 1310), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '""""""', 'null': '(True)', 'blank': '""""""', 'verbose_name': 'u"""未使用IP数"""'}), "(max_length=20, default='', null=True, blank='',\n verbose_name=u'未使用IP数')\n", (1234, 1310), False, 'from django.db import models\n'), ((1393, 1481), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '""""""', 'null': '(True)', 'blank': '""""""', 'verbose_name': 'u"""描述"""'}), "(max_length=20, default='', null=True, blank='',\n verbose_name=u'描述')\n", (1409, 1481), False, 'from django.db import models\n'), ((768, 782), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (780, 782), False, 'from datetime import datetime\n'), ((1346, 1360), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1358, 1360), False, 'from datetime import datetime\n')] |
import time
import logging
try:
from pypylon import pylon
except:
pylon = None
from . input import Input
log = logging.getLogger(__name__)
# writes the framenumber to the 8-11 bytes of the image as a big-endian set of octets
def encode_framenumber(np_image, n):
for i in range(4):
np_image[0][i+7] = n & 0xFF
n>>=8
# converts time from a float in seconds to an int64 in microseconds
# writes the time to the first 7 bytes of the image as a big-endian set of octets
def encode_timestamp(np_image, timestamp):
t = int(timestamp*1e6)
for i in range(7):
np_image[0][i] = t & 0xFF
t>>=8
class CameraPylon(Input):
'''
Camera that interfaces with pylon/basler cameras.
Args:
id (int): Id of the pylon camera.
config (dict): Configuration dictionary. Accepted keywords:
pfs (str): path to a pfs file.
encode_metadata (bool): whether to bake in timestamps/frame number into the frame.
'''
def __init__(self, id=0, config={}):
if pylon is None:
raise ImportError('Pylon failed to import. Pylon camera initialization failed.')
defaults = {
'pfs': None,
'encode_metadata': False,
'format': 'rawvideo',
}
Input.__init__(self, id=id, config=config, defaults=defaults)
self.read_count = 0
def configure(self):
'''
Pylon camera configuration. Requires the pylon camera to have been opened already.
The order of these statements is important.
Populates self.config with set values.
Logs camera start.
'''
if self.config.get('pfs', None):
pylon.FeaturePersistence.Load(self.config.get('pfs'), self.input.GetNodeMap())
self.config['pixel_format'] = self.input.PixelFormat.Value
self.config['gain'] = self.input.Gain.Value
self.config['exposure_time'] = self.input.ExposureTime.Value
self.config['res'] = (self.input.Width.Value, self.input.Height.Value)
self.config['width'] = self.input.Width.Value
self.config['height'] = self.input.Height.Value
self.config['fps'] = self.input.ResultingFrameRate.GetValue()
def open(self):
self.read_count = 0
devices = pylon.TlFactory.GetInstance().EnumerateDevices()
self.input = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateDevice(devices[self.id]))
self.input.Open()
self.configure()
self.input.StopGrabbing()
self.input.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
def read(self):
frame = None
now = None
if self.input:
try:
ret = self.input.RetrieveResult(100, pylon.TimeoutHandling_ThrowException)
if ret.IsValid():
frame = ret.GetArray()
now = time.time()
if self.config.get('encode_metadata'):
encode_timestamp(frame,now)
encode_framenumber(frame,self.read_count)
self.read_count+=1
except TypeError as e:
log.error(f"{str(self)} read error: {e}")
raise
finally:
ret.Release()
return frame, now
def close(self):
self.read_count = 0
if self.input and self.input.IsOpen():
self.input.Close()
self.input = None
| [
"logging.getLogger",
"pypylon.pylon.TlFactory.GetInstance",
"time.time"
] | [((121, 148), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (138, 148), False, 'import logging\n'), ((2296, 2325), 'pypylon.pylon.TlFactory.GetInstance', 'pylon.TlFactory.GetInstance', ([], {}), '()\n', (2323, 2325), False, 'from pypylon import pylon\n'), ((2894, 2905), 'time.time', 'time.time', ([], {}), '()\n', (2903, 2905), False, 'import time\n'), ((2386, 2415), 'pypylon.pylon.TlFactory.GetInstance', 'pylon.TlFactory.GetInstance', ([], {}), '()\n', (2413, 2415), False, 'from pypylon import pylon\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from phonopy.interface.calculator import read_crystal_structure
from phonopy.structure.atoms import PhonopyAtoms
from vise.util.phonopy.phonopy_input import structure_to_phonopy_atoms
import numpy as np
def assert_same_phonopy_atoms(actual: PhonopyAtoms,
expected: PhonopyAtoms):
assert (actual.get_cell() == expected.get_cell()).all()
assert (actual.get_scaled_positions()
== expected.get_scaled_positions()).all()
assert actual.symbols == expected.symbols
def test_phonopy_atoms_behavior(sc_structure, tmpdir):
print(tmpdir)
tmpdir.chdir()
# actual = structure_to_phonopy_atoms(sc_structure)
sc_structure.to(fmt="poscar", filename="POSCAR")
a, _ = read_crystal_structure("POSCAR")
b = PhonopyAtoms(atoms=a)
print(type(a.get_cell()))
print(a.get_atomic_numbers())
assert_same_phonopy_atoms(a, b)
def test_structure_to_phonopy_atoms(sc_structure):
actual = structure_to_phonopy_atoms(sc_structure)
expected = PhonopyAtoms(symbols=["H"],
cell=np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]),
scaled_positions=np.array([[0.0, 0.0, 0.0]]))
assert_same_phonopy_atoms(actual, expected)
#
# def test_make_phonopy_input(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure,
# supercell_matrix=np.eye(3).tolist(),
# conventional_base=True)
# supercell_matrix = [[ 1., 1., 0.],
# [-1., 1., 0.],
# [ 0., 0., 1.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure)
# supercell_matrix = [[ 2., 2., 0.],
# [-2., 2., 0.],
# [ 0., 0., 2.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default_hexa():
# structure = Structure(Lattice.hexagonal(1.0, 2.0), species=["H"],
# coords=[[0.0]*3])
# actual = make_phonopy_input(unitcell=structure)
# supercell_matrix = [[2, -1, 0], [2, 1, 0], [0, 0, 2]]
# supercell = structure * supercell_matrix
# expected = PhonopyInput(unitcell=structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
| [
"numpy.array",
"phonopy.interface.calculator.read_crystal_structure",
"phonopy.structure.atoms.PhonopyAtoms",
"vise.util.phonopy.phonopy_input.structure_to_phonopy_atoms"
] | [((822, 854), 'phonopy.interface.calculator.read_crystal_structure', 'read_crystal_structure', (['"""POSCAR"""'], {}), "('POSCAR')\n", (844, 854), False, 'from phonopy.interface.calculator import read_crystal_structure\n'), ((863, 884), 'phonopy.structure.atoms.PhonopyAtoms', 'PhonopyAtoms', ([], {'atoms': 'a'}), '(atoms=a)\n', (875, 884), False, 'from phonopy.structure.atoms import PhonopyAtoms\n'), ((1051, 1091), 'vise.util.phonopy.phonopy_input.structure_to_phonopy_atoms', 'structure_to_phonopy_atoms', (['sc_structure'], {}), '(sc_structure)\n', (1077, 1091), False, 'from vise.util.phonopy.phonopy_input import structure_to_phonopy_atoms\n'), ((1168, 1229), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (1176, 1229), True, 'import numpy as np\n'), ((1362, 1389), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (1370, 1389), True, 'import numpy as np\n')] |
"""
2020 Day 15
https://adventofcode.com/2020/day/15
"""
from collections import deque
from typing import Dict, Iterable, Optional
import aocd # type: ignore
class ElfMemoryGame:
def __init__(self, starting_numbers: Iterable[int]):
self.appearances: Dict[int, deque[int]] = {}
self.length = 0
for number in starting_numbers:
self.add(number)
def __len__(self) -> int:
return self.length
def next_number(self, previous: Optional[int] = None) -> int:
previous = previous or self.latest
appeared = self.appearances[previous]
return abs(appeared[1] - appeared[0])
def extend(self, length: int) -> None:
while self.length < length:
self.add(self.next_number())
def add(self, number: int) -> None:
if number in self.appearances:
self.appearances[number].append(self.length)
else:
self.appearances[number] = deque([self.length, self.length], maxlen=2)
self.length += 1
self.latest = number
def main() -> None:
"""
Calculate and output the solutions based on the real puzzle input.
"""
data = aocd.get_data(year=2020, day=15)
emg = ElfMemoryGame(map(int, data.split(",")))
emg.extend(2020)
print(f"Part 1: {emg.latest}")
emg.extend(30_000_000)
print(f"Part 2: {emg.latest}")
if __name__ == "__main__":
main()
| [
"collections.deque",
"aocd.get_data"
] | [((1175, 1207), 'aocd.get_data', 'aocd.get_data', ([], {'year': '(2020)', 'day': '(15)'}), '(year=2020, day=15)\n', (1188, 1207), False, 'import aocd\n'), ((957, 1000), 'collections.deque', 'deque', (['[self.length, self.length]'], {'maxlen': '(2)'}), '([self.length, self.length], maxlen=2)\n', (962, 1000), False, 'from collections import deque\n')] |
# -*- coding: utf-8 -*-
# Copyright (C) 2019, QuantStack
# SPDX-License-Identifier: BSD-3-Clause
from conda.base.constants import DepsModifier, UpdateModifier
from conda._vendor.boltons.setutils import IndexedSet
from conda.core.prefix_data import PrefixData
from conda.models.prefix_graph import PrefixGraph
from conda._vendor.toolz import concatv
from conda.models.match_spec import MatchSpec
def post_solve_handling(context, prefix_data, final_precs, specs_to_add, specs_to_remove):
# Special case handling for various DepsModifier flags.
if context.deps_modifier == DepsModifier.NO_DEPS:
# In the NO_DEPS case, we need to start with the original list of packages in the
# environment, and then only modify packages that match specs_to_add or
# specs_to_remove.
#
# Help information notes that use of NO_DEPS is expected to lead to broken
# environments.
_no_deps_solution = IndexedSet(prefix_data.iter_records())
only_remove_these = set(prec
for spec in specs_to_remove
for prec in _no_deps_solution
if spec.match(prec))
_no_deps_solution -= only_remove_these
only_add_these = set(prec
for spec in specs_to_add
for prec in final_precs
if spec.match(prec))
remove_before_adding_back = set(prec.name for prec in only_add_these)
_no_deps_solution = IndexedSet(prec for prec in _no_deps_solution
if prec.name not in remove_before_adding_back)
_no_deps_solution |= only_add_these
# ssc.solution_precs = _no_deps_solution
solution_precs = _no_deps_solution
return solution_precs, specs_to_add, specs_to_remove
# TODO: check if solution is satisfiable, and emit warning if it's not
elif (context.deps_modifier == DepsModifier.ONLY_DEPS
and context.update_modifier != UpdateModifier.UPDATE_DEPS):
# Using a special instance of PrefixGraph to remove youngest child nodes that match
# the original specs_to_add. It's important to remove only the *youngest* child nodes,
# because a typical use might be `conda install --only-deps python=2 flask`, and in
# that case we'd want to keep python.
#
# What are we supposed to do if flask was already in the environment?
# We can't be removing stuff here that's already in the environment.
#
# What should be recorded for the user-requested specs in this case? Probably all
# direct dependencies of flask.
graph = PrefixGraph(final_precs, specs_to_add)
removed_nodes = graph.remove_youngest_descendant_nodes_with_specs()
specs_to_add = set(specs_to_add)
specs_to_add_names = set((s.name for s in specs_to_add))
for prec in removed_nodes:
for dep in prec.depends:
dep = MatchSpec(dep)
if dep.name not in specs_to_add_names:
specs_to_add.add(dep)
# unfreeze
specs_to_add = frozenset(specs_to_add)
# Add back packages that are already in the prefix.
specs_to_remove_names = set(spec.name for spec in specs_to_remove)
add_back = tuple(prefix_data.get(node.name, None) for node in removed_nodes
if node.name not in specs_to_remove_names)
solution_precs = tuple(
PrefixGraph(concatv(graph.graph, filter(None, add_back))).graph
)
return solution_precs, specs_to_add, specs_to_remove
return final_precs, specs_to_add, specs_to_remove
# # TODO: check if solution is satisfiable, and emit warning if it's not
# elif ssc.update_modifier == UpdateModifier.UPDATE_DEPS:
# # Here we have to SAT solve again :( It's only now that we know the dependency
# # chain of specs_to_add.
# #
# # UPDATE_DEPS is effectively making each spec in the dependency chain a user-requested
# # spec. We don't modify pinned_specs, track_features_specs, or specs_to_add. For
# # all other specs, we drop all information but name, drop target, and add them to
# # the specs_to_add that gets recorded in the history file.
# #
# # It's like UPDATE_ALL, but only for certain dependency chains.
# graph = PrefixGraph(ssc.solution_precs)
# update_names = set()
# for spec in specs_to_add:
# node = graph.get_node_by_name(spec.name)
# update_names.update(ancest_rec.name for ancest_rec in graph.all_ancestors(node))
# specs_map = {name: MatchSpec(name) for name in update_names}
# # Remove pinned_specs and any python spec (due to major-minor pinning business rule).
# # Add in the original specs_to_add on top.
# for spec in ssc.pinned_specs:
# specs_map.pop(spec.name, None)
# if "python" in specs_map:
# python_rec = prefix_data.get("python")
# py_ver = ".".join(python_rec.version.split(".")[:2]) + ".*"
# specs_map["python"] = MatchSpec(name="python", version=py_ver)
# specs_map.update({spec.name: spec for spec in specs_to_add})
# new_specs_to_add = tuple(itervalues(specs_map))
# # It feels wrong/unsafe to modify this instance, but I guess let's go with it for now.
# specs_to_add = new_specs_to_add
# ssc.solution_precs = self.solve_final_state(
# update_modifier=UpdateModifier.UPDATE_SPECS,
# deps_modifier=ssc.deps_modifier,
# prune=ssc.prune,
# ignore_pinned=ssc.ignore_pinned,
# force_remove=ssc.force_remove
# )
# ssc.prune = False
# if ssc.prune:
# graph = PrefixGraph(ssc.solution_precs, final_environment_specs)
# graph.prune()
# ssc.solution_precs = tuple(graph.graph)
# return ssc
| [
"conda._vendor.boltons.setutils.IndexedSet",
"conda.models.match_spec.MatchSpec",
"conda.models.prefix_graph.PrefixGraph"
] | [((1540, 1636), 'conda._vendor.boltons.setutils.IndexedSet', 'IndexedSet', (['(prec for prec in _no_deps_solution if prec.name not in\n remove_before_adding_back)'], {}), '(prec for prec in _no_deps_solution if prec.name not in\n remove_before_adding_back)\n', (1550, 1636), False, 'from conda._vendor.boltons.setutils import IndexedSet\n'), ((2724, 2762), 'conda.models.prefix_graph.PrefixGraph', 'PrefixGraph', (['final_precs', 'specs_to_add'], {}), '(final_precs, specs_to_add)\n', (2735, 2762), False, 'from conda.models.prefix_graph import PrefixGraph\n'), ((3040, 3054), 'conda.models.match_spec.MatchSpec', 'MatchSpec', (['dep'], {}), '(dep)\n', (3049, 3054), False, 'from conda.models.match_spec import MatchSpec\n')] |
#!/usr/bin/env python
import unittest
import logging
import importlib
import copy
import os
from mock import patch
from nose.tools import raises
logging.disable(logging.CRITICAL)
ciftify_recon_all = importlib.import_module('ciftify.bin.ciftify_recon_all')
class ConvertFreesurferSurface(unittest.TestCase):
meshes = ciftify_recon_all.define_meshes('/somewhere/hcp/subject_1',
"164", ["32"], '/tmp/temp_dir', False)
@patch('ciftify.bin.ciftify_recon_all.run')
def test_secondary_type_option_adds_to_set_structure_command(self, mock_run):
secondary_type = 'GRAY_WHITE'
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
surface_secondary_type=secondary_type)
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
set_structure_present = False
for item in arg_list:
args = item[0][0]
if '-set-structure' in args:
set_structure_present = True
assert '-surface-secondary-type' in args
assert secondary_type in args
# If this fails the wb_command -set-structure call is not being made
# at all. Is expected at least once regardless of secondary-type option
assert set_structure_present
@patch('ciftify.bin.ciftify_recon_all.run')
def test_secondary_type_not_set_if_option_not_used(self, mock_run):
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'])
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
set_structure_present = False
for item in arg_list:
args = item[0][0]
if '-set-structure' in args:
set_structure_present = True
assert '-surface-secondary-type' not in args
# If this fails the wb_command -set-structure call is not being made
# at all. Is expected at least once regardless of secondary-type option
assert set_structure_present
@patch('ciftify.bin.ciftify_recon_all.run')
def test_wbcommand_surface_apply_affine_called_when_cras_option_set(self,
mock_run):
cras_file = '/somewhere/cras.mat'
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
cras_mat=cras_file)
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
surface_apply_calls = 0
for item in arg_list:
args = item[0][0]
if '-surface-apply-affine' in args and cras_file in args:
surface_apply_calls += 1
# The wb_command -surface-apply-affine command should be run once for
# each hemisphere
assert surface_apply_calls == 2
@patch('ciftify.bin.ciftify_recon_all.run')
def test_no_wbcommand_added_when_cras_option_not_set(self, mock_run):
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'])
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
surface_apply_calls = 0
for item in arg_list:
args = item[0][0]
if '-surface-apply-affine' in args:
surface_apply_calls += 1
assert surface_apply_calls == 0
@patch('ciftify.bin.ciftify_recon_all.run')
def test_add_to_spec_option_adds_wbcommand_call(self, mock_run):
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
add_to_spec=True)
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
spec_added_calls = 0
for item in arg_list:
args = item[0][0]
if '-add-to-spec-file' in args:
spec_added_calls += 1
# Should add one call for each hemisphere
assert spec_added_calls == 2
@patch('ciftify.bin.ciftify_recon_all.run')
def test_add_to_spec_option_not_present_when_option_not_set(self, mock_run):
ciftify_recon_all.convert_freesurfer_surface('subject_1', 'white', 'ANATOMICAL',
'/somewhere/freesurfer/subject_1', self.meshes['T1wNative'],
add_to_spec=False)
assert mock_run.call_count >= 1
arg_list = mock_run.call_args_list
spec_added_calls = 0
for item in arg_list:
args = item[0][0]
if '-add-to-spec-file' in args:
spec_added_calls += 1
assert spec_added_calls == 0
class CreateRegSphere(unittest.TestCase):
@patch('ciftify.bin.ciftify_recon_all.run_MSMSulc_registration')
@patch('ciftify.bin.ciftify_recon_all.run_fs_reg_LR')
def test_reg_sphere_is_not_set_to_none_for_any_mode(self, mock_fs_reg,
mock_msm_reg):
"""
Should fail if MSMSulc registration is implemented without supplying a
value for reg_sphere
"""
# settings stub, to allow tests to be written.
class Settings(object):
def __init__(self, name):
self.high_res = 999
self.reg_name = name
self.ciftify_data_dir = '/somedir/'
self.msm_config = None
# Test reg_sphere set when in FS mode
settings = Settings('FS')
meshes = {'AtlasSpaceNative' : ''}
subject_id = 'some_id'
reg_sphere = ciftify_recon_all.create_reg_sphere(settings, subject_id, meshes)
assert reg_sphere is not None
# Test reg_sphere set when in MSMSulc mode
settings = Settings('MSMSulc')
reg_sphere = ciftify_recon_all.create_reg_sphere(settings, subject_id, meshes)
assert reg_sphere is not None
class CopyAtlasRoiFromTemplate(unittest.TestCase):
@patch('ciftify.bin.ciftify_recon_all.link_to_template_file')
def test_does_nothing_when_roi_src_does_not_exist(self, mock_link):
hcp_dir = '/somepath/hcp'
hcp_templates_dir = '/someotherpath/ciftify/data'
mesh_settings = {'meshname' : 'some_mesh'}
subject_id = 'some_id'
ciftify_recon_all.copy_atlas_roi_from_template(hcp_dir, hcp_templates_dir,
subject_id, mesh_settings)
assert mock_link.call_count == 0
class DilateAndMaskMetric(unittest.TestCase):
@patch('ciftify.bin.ciftify_recon_all.run')
def test_does_nothing_when_dscalars_map_doesnt_mask_medial_wall(self,
mock_run):
# Stubs to allow testing
dscalars = {'some_map' : {'mask_medialwall' : False}}
mesh = {'tmpdir' : '/tmp/temp_dir',
'meshname' : 'some_mesh'}
ciftify_recon_all.dilate_and_mask_metric('some_id', mesh, dscalars)
assert mock_run.call_count == 0
class TestSettings(unittest.TestCase):
arguments = {'--hcp-data-dir' : '/somepath/pipelines/hcp',
'--fs-subjects-dir' : '/somepath/pipelines/freesurfer',
'--resample-LowRestoNative' : False,
'<Subject>' : 'STUDY_SITE_ID_01',
'--settings-yaml' : None,
'--T2': False,
'--MSMSulc': False,
'--MSM-config': None}
yaml_config = {'high_res' : "164",
'low_res' : ["32"],
'grayord_res' : [2],
'dscalars' : {},
'registration' : {'src_dir' : 'T1w',
'dest_dir' : 'MNINonLinear',
'xfms_dir' : 'MNINonLinear/xfms'},
'FSL_fnirt' : {'2mm' : {'FNIRTConfig' : 'etc/flirtsch/T1_2_MNI152_2mm.cnf'}}}
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_fs_root_dir_set_to_user_value_when_given(self, mock_ciftify,
mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to the mock directories not
# existing.
mock_exists.return_value = True
settings = ciftify_recon_all.Settings(self.arguments)
assert settings.fs_root_dir == self.arguments['--fs-subjects-dir']
@raises(SystemExit)
@patch('ciftify.config.find_freesurfer_data')
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_when_no_fs_dir_given_and_cannot_find_shell_value(self,
mock_ciftify, mock_fsl, mock_exists, mock_fs):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to the mock directories not
# existing.
mock_exists.return_value = True
# work with a deep copy of arguments to avoid modifications having any
# effect on later tests
args_copy = copy.deepcopy(self.arguments)
args_copy['--fs-subjects-dir'] = None
# Just in case the shell environment has the variable set...
mock_fs.return_value = None
settings = ciftify_recon_all.Settings(args_copy)
# Should never reach this line
assert False
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_fsl_dir_cannot_be_found(self, mock_ciftify,
mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
# This is to avoid sys.exit calls due to the mock directories not
# existing.
mock_exists.return_value = True
mock_fsl.return_value = None
settings = ciftify_recon_all.Settings(self.arguments)
# Should never reach this line
assert False
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_ciftify_data_dir_not_found(self, mock_ciftify,
mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to the mock directories not
# existing.
mock_exists.return_value = True
mock_ciftify.return_value = None
settings = ciftify_recon_all.Settings(self.arguments)
assert False
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_ciftify_data_dir_doesnt_exist(self,
mock_ciftify, mock_fsl, mock_exists):
ciftify_data = '/somepath/ciftify/data'
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = ciftify_data
mock_fsl.return_value = '/somepath/FSL'
mock_exists.side_effect = lambda path : False if path == ciftify_data else True
settings = ciftify_recon_all.Settings(self.arguments)
assert False
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_default_config_read_when_no_config_yaml_given(self,
mock_ciftify, mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
settings = ciftify_recon_all.Settings(self.arguments)
config = settings._Settings__config
assert config is not None
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_yaml_config_file_doesnt_exist(self,
mock_ciftify, mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
yaml_file = '/somepath/fake_config.yaml'
mock_exists.side_effect = lambda path: False if path == yaml_file else True
# work with a deep copy of arguments to avoid modifications having any
# effect on later tests
args_copy = copy.deepcopy(self.arguments)
args_copy['--settings-yaml'] = yaml_file
settings = ciftify_recon_all.Settings(args_copy)
assert False
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_dscalars_doesnt_contain_msmsulc_settings_when_reg_name_is_FS(
self, mock_ciftify, mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
settings = ciftify_recon_all.Settings(self.arguments)
if settings.reg_name == 'FS':
assert 'ArealDistortion_MSMSulc' not in settings.dscalars.keys()
else:
assert True
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_msm_config_set_to_none_in_fs_mode(self, mock_ciftify, mock_fsl,
mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
settings = ciftify_recon_all.Settings(self.arguments)
assert settings.msm_config is None
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_msm_config_set_to_default_when_user_config_not_given(self,
mock_ciftify, mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
# Modify copy of arguments, so changes dont effect other tests
args = copy.deepcopy(self.arguments)
args['--MSMSulc'] = True
args['--MSM-config'] = None
settings = ciftify_recon_all.Settings(args)
assert settings.msm_config is not None
@raises(SystemExit)
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_sys_exit_raised_when_user_msm_config_doesnt_exist(self, mock_ciftify,
mock_fsl, mock_exists):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
user_config = "/some/path/nonexistent_config"
mock_exists.side_effect = lambda path: False if path == user_config else True
args = copy.deepcopy(self.arguments)
args['--MSMSulc'] = True
args['--MSM-config'] = user_config
settings = ciftify_recon_all.Settings(args)
# Test should never reach this line
assert False
@raises(SystemExit)
@patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_expected_registration_path_missing(self,
mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
# Use copy to avoid side effects in other tests
yaml_copy = copy.deepcopy(self.yaml_config)
del yaml_copy['registration']['src_dir']
mock_yaml_settings.return_value = yaml_copy
settings = ciftify_recon_all.Settings(self.arguments)
assert False
@raises(SystemExit)
@patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_resolution_not_defined_for_given_method(self,
mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = '/somepath/FSL'
# This is to avoid sys.exit calls due to mock directories not
# existing.
mock_exists.return_value = True
# Use copy to avoid side effects in other tests
yaml_copy = copy.deepcopy(self.yaml_config)
del yaml_copy['FSL_fnirt']['2mm']
mock_yaml_settings.return_value = yaml_copy
settings = ciftify_recon_all.Settings(self.arguments)
assert False
@raises(SystemExit)
@patch('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')
@patch('os.path.exists')
@patch('ciftify.config.find_fsl')
@patch('ciftify.config.find_ciftify_global')
def test_exits_gracefully_when_registration_resolution_file_doesnt_exist(self,
mock_ciftify, mock_fsl, mock_exists, mock_yaml_settings):
fsl_dir = '/somepath/FSL'
# This is to avoid test failure if shell environment changes
mock_ciftify.return_value = '/somepath/ciftify/data'
mock_fsl.return_value = fsl_dir
mock_yaml_settings.return_value = self.yaml_config
required_file = os.path.join(os.path.dirname(fsl_dir),
self.yaml_config['FSL_fnirt']['2mm']['FNIRTConfig'])
mock_exists.side_effect = lambda x: False if x == required_file else True
settings = ciftify_recon_all.Settings(self.arguments)
assert False
| [
"mock.patch",
"importlib.import_module",
"os.path.dirname",
"nose.tools.raises",
"copy.deepcopy",
"logging.disable"
] | [((147, 180), 'logging.disable', 'logging.disable', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (162, 180), False, 'import logging\n'), ((202, 258), 'importlib.import_module', 'importlib.import_module', (['"""ciftify.bin.ciftify_recon_all"""'], {}), "('ciftify.bin.ciftify_recon_all')\n", (225, 258), False, 'import importlib\n'), ((441, 483), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run"""'], {}), "('ciftify.bin.ciftify_recon_all.run')\n", (446, 483), False, 'from mock import patch\n'), ((1397, 1439), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run"""'], {}), "('ciftify.bin.ciftify_recon_all.run')\n", (1402, 1439), False, 'from mock import patch\n'), ((2208, 2250), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run"""'], {}), "('ciftify.bin.ciftify_recon_all.run')\n", (2213, 2250), False, 'from mock import patch\n'), ((3034, 3076), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run"""'], {}), "('ciftify.bin.ciftify_recon_all.run')\n", (3039, 3076), False, 'from mock import patch\n'), ((3630, 3672), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run"""'], {}), "('ciftify.bin.ciftify_recon_all.run')\n", (3635, 3672), False, 'from mock import patch\n'), ((4291, 4333), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run"""'], {}), "('ciftify.bin.ciftify_recon_all.run')\n", (4296, 4333), False, 'from mock import patch\n'), ((4957, 5020), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run_MSMSulc_registration"""'], {}), "('ciftify.bin.ciftify_recon_all.run_MSMSulc_registration')\n", (4962, 5020), False, 'from mock import patch\n'), ((5026, 5078), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run_fs_reg_LR"""'], {}), "('ciftify.bin.ciftify_recon_all.run_fs_reg_LR')\n", (5031, 5078), False, 'from mock import patch\n'), ((6157, 6217), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.link_to_template_file"""'], {}), 
"('ciftify.bin.ciftify_recon_all.link_to_template_file')\n", (6162, 6217), False, 'from mock import patch\n'), ((6685, 6727), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.run"""'], {}), "('ciftify.bin.ciftify_recon_all.run')\n", (6690, 6727), False, 'from mock import patch\n'), ((7958, 7981), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (7963, 7981), False, 'from mock import patch\n'), ((7987, 8019), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (7992, 8019), False, 'from mock import patch\n'), ((8025, 8068), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (8030, 8068), False, 'from mock import patch\n'), ((8636, 8654), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (8642, 8654), False, 'from nose.tools import raises\n'), ((8660, 8704), 'mock.patch', 'patch', (['"""ciftify.config.find_freesurfer_data"""'], {}), "('ciftify.config.find_freesurfer_data')\n", (8665, 8704), False, 'from mock import patch\n'), ((8710, 8733), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (8715, 8733), False, 'from mock import patch\n'), ((8739, 8771), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (8744, 8771), False, 'from mock import patch\n'), ((8777, 8820), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (8782, 8820), False, 'from mock import patch\n'), ((9703, 9721), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (9709, 9721), False, 'from nose.tools import raises\n'), ((9727, 9750), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (9732, 9750), False, 'from mock import patch\n'), ((9756, 9788), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), 
"('ciftify.config.find_fsl')\n", (9761, 9788), False, 'from mock import patch\n'), ((9794, 9837), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (9799, 9837), False, 'from mock import patch\n'), ((10383, 10401), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (10389, 10401), False, 'from nose.tools import raises\n'), ((10407, 10430), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (10412, 10430), False, 'from mock import patch\n'), ((10436, 10468), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (10441, 10468), False, 'from mock import patch\n'), ((10474, 10517), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (10479, 10517), False, 'from mock import patch\n'), ((11018, 11036), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (11024, 11036), False, 'from nose.tools import raises\n'), ((11042, 11065), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (11047, 11065), False, 'from mock import patch\n'), ((11071, 11103), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (11076, 11103), False, 'from mock import patch\n'), ((11109, 11152), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (11114, 11152), False, 'from mock import patch\n'), ((11668, 11691), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (11673, 11691), False, 'from mock import patch\n'), ((11697, 11729), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (11702, 11729), False, 'from mock import patch\n'), ((11735, 11778), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), 
"('ciftify.config.find_ciftify_global')\n", (11740, 11778), False, 'from mock import patch\n'), ((12350, 12368), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (12356, 12368), False, 'from nose.tools import raises\n'), ((12374, 12397), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (12379, 12397), False, 'from mock import patch\n'), ((12403, 12435), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (12408, 12435), False, 'from mock import patch\n'), ((12441, 12484), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (12446, 12484), False, 'from mock import patch\n'), ((13213, 13236), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (13218, 13236), False, 'from mock import patch\n'), ((13242, 13274), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (13247, 13274), False, 'from mock import patch\n'), ((13280, 13323), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (13285, 13323), False, 'from mock import patch\n'), ((13986, 14009), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (13991, 14009), False, 'from mock import patch\n'), ((14015, 14047), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (14020, 14047), False, 'from mock import patch\n'), ((14053, 14096), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (14058, 14096), False, 'from mock import patch\n'), ((14621, 14644), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (14626, 14644), False, 'from mock import patch\n'), ((14650, 14682), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), 
"('ciftify.config.find_fsl')\n", (14655, 14682), False, 'from mock import patch\n'), ((14688, 14731), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (14693, 14731), False, 'from mock import patch\n'), ((15454, 15472), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (15460, 15472), False, 'from nose.tools import raises\n'), ((15478, 15501), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (15483, 15501), False, 'from mock import patch\n'), ((15507, 15539), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (15512, 15539), False, 'from mock import patch\n'), ((15545, 15588), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (15550, 15588), False, 'from mock import patch\n'), ((16274, 16292), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (16280, 16292), False, 'from nose.tools import raises\n'), ((16298, 16370), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings"""'], {}), "('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')\n", (16303, 16370), False, 'from mock import patch\n'), ((16376, 16399), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (16381, 16399), False, 'from mock import patch\n'), ((16405, 16437), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (16410, 16437), False, 'from mock import patch\n'), ((16443, 16486), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (16448, 16486), False, 'from mock import patch\n'), ((17241, 17259), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (17247, 17259), False, 'from nose.tools import raises\n'), ((17265, 17337), 'mock.patch', 
'patch', (['"""ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings"""'], {}), "('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')\n", (17270, 17337), False, 'from mock import patch\n'), ((17343, 17366), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (17348, 17366), False, 'from mock import patch\n'), ((17372, 17404), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (17377, 17404), False, 'from mock import patch\n'), ((17410, 17453), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (17415, 17453), False, 'from mock import patch\n'), ((18206, 18224), 'nose.tools.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (18212, 18224), False, 'from nose.tools import raises\n'), ((18230, 18302), 'mock.patch', 'patch', (['"""ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings"""'], {}), "('ciftify.bin.ciftify_recon_all.Settings._Settings__read_settings')\n", (18235, 18302), False, 'from mock import patch\n'), ((18308, 18331), 'mock.patch', 'patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (18313, 18331), False, 'from mock import patch\n'), ((18337, 18369), 'mock.patch', 'patch', (['"""ciftify.config.find_fsl"""'], {}), "('ciftify.config.find_fsl')\n", (18342, 18369), False, 'from mock import patch\n'), ((18375, 18418), 'mock.patch', 'patch', (['"""ciftify.config.find_ciftify_global"""'], {}), "('ciftify.config.find_ciftify_global')\n", (18380, 18418), False, 'from mock import patch\n'), ((9398, 9427), 'copy.deepcopy', 'copy.deepcopy', (['self.arguments'], {}), '(self.arguments)\n', (9411, 9427), False, 'import copy\n'), ((13049, 13078), 'copy.deepcopy', 'copy.deepcopy', (['self.arguments'], {}), '(self.arguments)\n', (13062, 13078), False, 'import copy\n'), ((15249, 15278), 'copy.deepcopy', 'copy.deepcopy', (['self.arguments'], {}), '(self.arguments)\n', (15262, 
15278), False, 'import copy\n'), ((16044, 16073), 'copy.deepcopy', 'copy.deepcopy', (['self.arguments'], {}), '(self.arguments)\n', (16057, 16073), False, 'import copy\n'), ((17018, 17049), 'copy.deepcopy', 'copy.deepcopy', (['self.yaml_config'], {}), '(self.yaml_config)\n', (17031, 17049), False, 'import copy\n'), ((17990, 18021), 'copy.deepcopy', 'copy.deepcopy', (['self.yaml_config'], {}), '(self.yaml_config)\n', (18003, 18021), False, 'import copy\n'), ((18873, 18897), 'os.path.dirname', 'os.path.dirname', (['fsl_dir'], {}), '(fsl_dir)\n', (18888, 18897), False, 'import os\n')] |
from django.db import models
"""
ShipmentModels have a one to many relationship with boxes and aliquot
Aliquot and Box foreign keys to a ShipmentModel determine manifest contents
for shipping purposes (resolved in schema return for manifest view)
"""
class ShipmentModel(models.Model):
carrier = models.ForeignKey('CarrierModel',
on_delete=models.SET_NULL,
blank=True,
null=True)
shipment_number = models.CharField(max_length=255, blank=True, null=True)
# TODO What should we do if a destination is removed?
destination = models.ForeignKey('DestinationModel',
on_delete=models.SET_NULL,
blank=True,
null=True)
sent_date = models.DateTimeField(blank=True, null=True)
received_date = models.DateTimeField(blank=True, null=True)
notes = models.CharField(max_length=255, blank=True, null=True)
class DestinationModel(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class CarrierModel(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
| [
"django.db.models.DateTimeField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((315, 402), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""CarrierModel"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)'}), "('CarrierModel', on_delete=models.SET_NULL, blank=True,\n null=True)\n", (332, 402), False, 'from django.db import models\n'), ((517, 572), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (533, 572), False, 'from django.db import models\n'), ((649, 740), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""DestinationModel"""'], {'on_delete': 'models.SET_NULL', 'blank': '(True)', 'null': '(True)'}), "('DestinationModel', on_delete=models.SET_NULL, blank=True,\n null=True)\n", (666, 740), False, 'from django.db import models\n'), ((861, 904), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (881, 904), False, 'from django.db import models\n'), ((925, 968), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (945, 968), False, 'from django.db import models\n'), ((981, 1036), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (997, 1036), False, 'from django.db import models\n'), ((1087, 1119), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1103, 1119), False, 'from django.db import models\n'), ((1216, 1248), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1232, 1248), False, 'from django.db import models\n')] |
"""
Consuming Iterator manually
"""
from collections import namedtuple
def cast(data_type, value):
"""Cast the value into a correct data type"""
if data_type == 'DOUBLE':
return float(value)
elif data_type == 'STRING':
return str(value)
elif data_type == 'INT':
return int(value)
def cast_row(data_types1, data_row):
    """Cast every value in *data_row* using the matching type name in *data_types1*."""
    casted = []
    for data_type, value in zip(data_types1, data_row):
        casted.append(cast(data_type, value))
    return casted
# cars = []
# with open('cars.csv') as file:
# row_index = 0
# for line in file:
# if row_index == 0:
# # Header row
# headers = line.strip('\n').split(';')
# Car = namedtuple('Car', headers)
# elif row_index == 1:
# data_types = line.strip('\n').split(';')
# # print('types', data_types)
# else:
# # data row
# data = line.strip('\n').split(';')
# data = cast_row(data_types, data)
# car = Car(*data)
# cars.append(car)
# # print(data)
# row_index += 1
# with open('cars.csv') as file:
# file_iter = iter(file)
# headers = next(file_iter).strip('\n').split(';')
# Car = namedtuple('Car', headers)
# data_types = next(file_iter).strip('\n').split(';')
# for line in file_iter:
# data = line.strip('\n').split(';')
# data = cast_row(data_types, data)
# car = Car(*data)
# cars.append(car)
with open('cars.csv') as file:
    # Consume the file through an explicit iterator so the header rows can be
    # pulled off with next() before the list comprehension reads the rest.
    file_iter = iter(file)
    # Row 1: column names -> namedtuple fields.
    headers = next(file_iter).strip('\n').split(';')
    Car = namedtuple('Car', headers)
    # Row 2: per-column type names (e.g. DOUBLE / STRING / INT) used by cast_row.
    data_types = next(file_iter).strip('\n').split(';')
    # Remaining rows: one Car per line, with every field cast to its declared type.
    cars = [Car(*cast_row(
        data_types,
        line.strip('\n').split(';')
    ))
        for line in file_iter]
print(cars)
| [
"collections.namedtuple"
] | [((1608, 1634), 'collections.namedtuple', 'namedtuple', (['"""Car"""', 'headers'], {}), "('Car', headers)\n", (1618, 1634), False, 'from collections import namedtuple\n')] |
import numpy as np
import math
import logging
from termcolor import colored
# Check a matrix for: negative eigenvalues, asymmetry and negative diagonal values
def positive_definite(M, epsilon=0.000001, verbose=False):
    """Check whether the symmetric part of M is positive definite.

    The matrix is first symmetrized as (M + M^T) / 2, then rejected when any
    eigenvalue is <= epsilon or any diagonal entry is negative.

    Args:
        M: square numpy array.
        epsilon: strictness margin — eigenvalues must exceed this value.
        verbose: when True, log the reason for rejection.

    Returns:
        1 if the symmetrized matrix is positive definite, 0 otherwise.
    """
    # Symmetrization: only the symmetric part matters for the quadratic form.
    M = (M + np.transpose(M)) / 2
    # eigvalsh is the right tool for symmetric matrices: faster than the
    # general eigvals and guaranteed to return real eigenvalues.
    eigenvalues = np.linalg.eigvalsh(M)
    if np.any(eigenvalues <= epsilon):
        if verbose:
            logging.error("Negative eigenvalues")
        return 0
    # Redundant for a truly PD matrix (its diagonal is positive), but kept as
    # an explicit cheap sanity check, matching the original behaviour.
    if np.any(np.diag(M) < 0):
        if verbose:
            logging.error("Negative value in diagonal")
        return 0
    return 1
| [
"numpy.linalg.eigvals",
"numpy.transpose",
"logging.error"
] | [((249, 264), 'numpy.transpose', 'np.transpose', (['M'], {}), '(M)\n', (261, 264), True, 'import numpy as np\n'), ((302, 322), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['M'], {}), '(M)\n', (319, 322), True, 'import numpy as np\n'), ((439, 476), 'logging.error', 'logging.error', (['"""Negative eigenvalues"""'], {}), "('Negative eigenvalues')\n", (452, 476), False, 'import logging\n'), ((594, 637), 'logging.error', 'logging.error', (['"""Negative value in diagonal"""'], {}), "('Negative value in diagonal')\n", (607, 637), False, 'import logging\n')] |
"""This module contains a function for validating a scratch config entry."""
import re
from idact.detail.config.validation.validation_error_message import \
validation_error_message
VALID_SCRATCH_DESCRIPTION = 'Non-empty absolute path, or environment' \
                            ' variable name.'

VALID_SCRATCH_REGEX = r"^(/.*)|(\$[A-Za-z][A-Za-z0-9]*)$"  # noqa, pylint: disable=line-too-long

__COMPILED = re.compile(pattern=VALID_SCRATCH_REGEX)


def validate_scratch(scratch) -> str:
    """Validate a scratch config entry and return it unchanged.

    A valid entry is a non-empty absolute path, or an environment variable
    name of the form ``$NAME``.

    :param scratch: Object to validate.

    :raises TypeError: On wrong type.

    :raises ValueError: On regex mismatch.
    """
    def _error_message():
        # Both failure modes share the same formatted message.
        return validation_error_message(
            label='scratch',
            value=scratch,
            expected=VALID_SCRATCH_DESCRIPTION,
            regex=VALID_SCRATCH_REGEX)

    if not isinstance(scratch, str):
        raise TypeError(_error_message())
    if __COMPILED.match(scratch) is None:
        raise ValueError(_error_message())
    return scratch
| [
"idact.detail.config.validation.validation_error_message.validation_error_message",
"re.compile"
] | [((419, 458), 're.compile', 're.compile', ([], {'pattern': 'VALID_SCRATCH_REGEX'}), '(pattern=VALID_SCRATCH_REGEX)\n', (429, 458), False, 'import re\n'), ((860, 984), 'idact.detail.config.validation.validation_error_message.validation_error_message', 'validation_error_message', ([], {'label': '"""scratch"""', 'value': 'scratch', 'expected': 'VALID_SCRATCH_DESCRIPTION', 'regex': 'VALID_SCRATCH_REGEX'}), "(label='scratch', value=scratch, expected=\n VALID_SCRATCH_DESCRIPTION, regex=VALID_SCRATCH_REGEX)\n", (884, 984), False, 'from idact.detail.config.validation.validation_error_message import validation_error_message\n'), ((1094, 1218), 'idact.detail.config.validation.validation_error_message.validation_error_message', 'validation_error_message', ([], {'label': '"""scratch"""', 'value': 'scratch', 'expected': 'VALID_SCRATCH_DESCRIPTION', 'regex': 'VALID_SCRATCH_REGEX'}), "(label='scratch', value=scratch, expected=\n VALID_SCRATCH_DESCRIPTION, regex=VALID_SCRATCH_REGEX)\n", (1118, 1218), False, 'from idact.detail.config.validation.validation_error_message import validation_error_message\n')] |
import warnings
from typing import Callable, List, Optional, Union
import mpmath
import numpy as np
import paramak
import sympy as sp
from paramak import RotateMixedShape, diff_between_angles
from paramak.parametric_components.tokamak_plasma_plasmaboundaries import \
PlasmaBoundaries
from scipy.interpolate import interp1d
class BlanketFP(RotateMixedShape):
    """A blanket volume created from plasma parameters.

    Args:
        thickness (float or [float] or callable or [(float), (float)]):
            the thickness of the blanket (cm). If the thickness is a float then
            this produces a blanket of constant thickness. If the thickness is
            a tuple of floats, blanket thickness will vary linearly between the
            two values. If thickness is callable, then the blanket thickness
            will be a function of poloidal angle (in degrees). If thickness is
            a list of two lists (thicknesses and angles) then these will be
            used together with linear interpolation.
        start_angle: the angle in degrees to start the blanket, measured anti
            clockwise from 3 o'clock.
        stop_angle: the angle in degrees to stop the blanket, measured anti
            clockwise from 3 o'clock.
        plasma: If not None, the parameters of the plasma Object will be used.
        minor_radius: the minor radius of the plasma (cm).
        major_radius: the major radius of the plasma (cm).
        triangularity: the triangularity of the plasma.
        elongation: the elongation of the plasma.
        vertical_displacement: the vertical_displacement of the plasma (cm).
        offset_from_plasma: the distance between the plasma and the blanket
            (cm). If float, constant offset. If list of floats, offset will
            vary linearly between the values. If callable, offset will be a
            function of poloidal angle (in degrees). If a list of two lists
            (angles and offsets) then these will be used together with linear
            interpolation.
        num_points: number of points that will describe the shape.
    """
    def __init__(self,
                 thickness,
                 start_angle: float,
                 stop_angle: float,
                 plasma: Optional[Union[paramak.Plasma,
                                        paramak.PlasmaBoundaries,
                                        paramak.PlasmaFromPoints]] = None,
                 minor_radius: Optional[float] = 150.0,
                 major_radius: Optional[float] = 450.0,
                 triangularity: Optional[float] = 0.55,
                 elongation: Optional[float] = 2.0,
                 vertical_displacement: Optional[float] = 0.0,
                 offset_from_plasma: Optional[float] = 0.0,
                 num_points: Optional[int] = 50,
                 **kwargs):
        super().__init__(**kwargs)
        self.thickness = thickness
        # Pre-initialise both attributes so the property setters below can run
        # without either one being missing.
        self.start_angle, self.stop_angle = None, None
        self.start_angle = start_angle
        self.stop_angle = stop_angle
        self.plasma = plasma
        self.vertical_displacement = vertical_displacement
        if plasma is None:
            self.minor_radius = minor_radius
            self.major_radius = major_radius
            self.triangularity = triangularity
            self.elongation = elongation
        else:  # if plasma object is given, use its parameters
            self.minor_radius = plasma.minor_radius
            self.major_radius = plasma.major_radius
            self.triangularity = plasma.triangularity
            self.elongation = plasma.elongation
        self.offset_from_plasma = offset_from_plasma
        self.num_points = num_points
    # NOTE(review): the following property pairs are trivial pass-throughs;
    # presumably kept as explicit properties so paramak can hook/override the
    # setters — confirm before simplifying.
    @property
    def start_angle(self):
        return self._start_angle
    @start_angle.setter
    def start_angle(self, value):
        self._start_angle = value
    @property
    def stop_angle(self):
        return self._stop_angle
    @stop_angle.setter
    def stop_angle(self, value):
        self._stop_angle = value
    @property
    def minor_radius(self):
        return self._minor_radius
    @minor_radius.setter
    def minor_radius(self, minor_radius):
        self._minor_radius = minor_radius
    @property
    def thickness(self):
        return self._thickness
    @thickness.setter
    def thickness(self, thickness):
        self._thickness = thickness
    @property
    def inner_points(self):
        # Side-effectful getter: recomputes all points before returning.
        self.find_points()
        return self._inner_points
    @inner_points.setter
    def inner_points(self, value):
        self._inner_points = value
    @property
    def outer_points(self):
        # Side-effectful getter: recomputes all points before returning.
        self.find_points()
        return self._outer_points
    @outer_points.setter
    def outer_points(self, value):
        self._outer_points = value
    def make_callable(self, attribute):
        """This function transforms an attribute (thickness or offset) into a
        callable function of theta (poloidal angle in degrees).
        """
        # if the attribute is a list, create a interpolated object of the
        # values
        if isinstance(attribute, (tuple, list)):
            if isinstance(attribute[0], (tuple, list)) and \
                    isinstance(attribute[1], (tuple, list)) and \
                    len(attribute) == 2:
                # attribute is a list of 2 lists: [angles, values]
                if len(attribute[0]) != len(attribute[1]):
                    raise ValueError('The length of angles list must equal \
                            the length of values list')
                list_of_angles = np.array(attribute[0])
                offset_values = attribute[1]
            else:
                # no list of angles is given: spread the values evenly over
                # [start_angle, stop_angle]
                offset_values = attribute
                list_of_angles = np.linspace(
                    self.start_angle,
                    self.stop_angle,
                    len(offset_values),
                    endpoint=True)
            interpolated_values = interp1d(list_of_angles, offset_values)
        def fun(theta):
            if callable(attribute):
                return attribute(theta)
            elif isinstance(attribute, (tuple, list)):
                return interpolated_values(theta)
            else:
                # scalar attribute: constant in theta
                return attribute
        return fun
    def find_points(self, angles=None):
        """Compute inner and outer point lists and assemble self.points.

        Args:
            angles: optional explicit array of poloidal angles (degrees);
                when None, num_points angles spanning
                [start_angle, stop_angle] are used.
        """
        self._overlapping_shape = False
        # create array of angles theta
        if angles is None:
            thetas = np.linspace(
                self.start_angle,
                self.stop_angle,
                num=self.num_points,
                endpoint=True,
            )
        else:
            thetas = angles
        # create inner points
        inner_offset = self.make_callable(self.offset_from_plasma)
        inner_points = self.create_offset_points(thetas, inner_offset)
        inner_points[-1][2] = "straight"
        self.inner_points = inner_points
        # create outer points (outer surface = inner offset + blanket thickness)
        thickness = self.make_callable(self.thickness)
        def outer_offset(theta):
            return inner_offset(theta) + thickness(theta)
        # thetas flipped so inner + outer form one continuous closed contour
        outer_points = self.create_offset_points(np.flip(thetas), outer_offset)
        outer_points[-1][2] = "straight"
        self.outer_points = outer_points
        # assemble
        points = inner_points + outer_points
        if self._overlapping_shape:
            # set by create_offset_points when a point fell at R <= 0
            msg = ("BlanketFP: Some points with negative R coordinate have "
                   "been ignored.")
            warnings.warn(msg)
        self.points = points
        return points
    def create_offset_points(self, thetas, offset):
        """generates a list of points following parametric equations with an
        offset
        Args:
            thetas (np.array): the angles in degrees.
            offset (callable): offset value (cm). offset=0 will follow the
                parametric equations.
        Returns:
            list: list of points [[R1, Z1, connection1], [R2, Z2, connection2],
                ...]
        """
        # create sympy objects and derivatives of the plasma boundary curve
        theta_sp = sp.Symbol("theta")
        R_sp, Z_sp = self.distribution(theta_sp, pkg=sp)
        R_derivative = sp.diff(R_sp, theta_sp)
        Z_derivative = sp.diff(Z_sp, theta_sp)
        points = []
        for theta in thetas:
            # get local value of derivatives
            val_R_derivative = float(R_derivative.subs("theta", theta))
            val_Z_derivative = float(Z_derivative.subs("theta", theta))
            # get normal vector components (perpendicular to the tangent
            # (dR/dtheta, dZ/dtheta))
            nx = val_Z_derivative
            ny = -val_R_derivative
            # normalise normal vector
            normal_vector_norm = (nx ** 2 + ny ** 2) ** 0.5
            nx /= normal_vector_norm
            ny /= normal_vector_norm
            # calculate outer points: boundary point displaced along the
            # normal by offset(theta)
            val_R_outer = self.distribution(theta)[0] + offset(theta) * nx
            val_Z_outer = self.distribution(theta)[1] + offset(theta) * ny
            if float(val_R_outer) > 0:
                points.append(
                    [float(val_R_outer), float(val_Z_outer), "spline"])
            else:
                # point would sit at or past the machine axis; drop it and
                # remember so find_points can warn once
                self._overlapping_shape = True
        return points
    def distribution(self, theta, pkg=np):
        """Plasma distribution theta in degrees
        Args:
            theta (float or np.array or sp.Symbol): the angle(s) in degrees.
            pkg (module, optional): Module to use in the function. If sp, a
                sympy object will be returned. If np, a np.array or a float
                will be returned. Defaults to np.
        Returns:
            (float, float) or (sympy.Add, sympy.Mul) or
            (numpy.array, numpy.array): The R and Z coordinates of the
                point with angle theta
        """
        if pkg == np:
            theta = np.radians(theta)
        else:
            # sympy path: mpmath.radians keeps the expression symbolic-safe
            theta = mpmath.radians(theta)
        # standard D-shaped plasma boundary parametrisation
        R = self.major_radius + self.minor_radius * pkg.cos(
            theta + self.triangularity * pkg.sin(theta)
        )
        Z = (
            self.elongation * self.minor_radius * pkg.sin(theta)
            + self.vertical_displacement
        )
        return R, Z
| [
"numpy.radians",
"numpy.flip",
"sympy.Symbol",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.linspace",
"mpmath.radians",
"sympy.diff",
"warnings.warn"
] | [((8042, 8060), 'sympy.Symbol', 'sp.Symbol', (['"""theta"""'], {}), "('theta')\n", (8051, 8060), True, 'import sympy as sp\n'), ((8142, 8165), 'sympy.diff', 'sp.diff', (['R_sp', 'theta_sp'], {}), '(R_sp, theta_sp)\n', (8149, 8165), True, 'import sympy as sp\n'), ((8189, 8212), 'sympy.diff', 'sp.diff', (['Z_sp', 'theta_sp'], {}), '(Z_sp, theta_sp)\n', (8196, 8212), True, 'import sympy as sp\n'), ((5945, 5984), 'scipy.interpolate.interp1d', 'interp1d', (['list_of_angles', 'offset_values'], {}), '(list_of_angles, offset_values)\n', (5953, 5984), False, 'from scipy.interpolate import interp1d\n'), ((6429, 6515), 'numpy.linspace', 'np.linspace', (['self.start_angle', 'self.stop_angle'], {'num': 'self.num_points', 'endpoint': '(True)'}), '(self.start_angle, self.stop_angle, num=self.num_points,\n endpoint=True)\n', (6440, 6515), True, 'import numpy as np\n'), ((7112, 7127), 'numpy.flip', 'np.flip', (['thetas'], {}), '(thetas)\n', (7119, 7127), True, 'import numpy as np\n'), ((7451, 7469), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (7464, 7469), False, 'import warnings\n'), ((9784, 9801), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (9794, 9801), True, 'import numpy as np\n'), ((9836, 9857), 'mpmath.radians', 'mpmath.radians', (['theta'], {}), '(theta)\n', (9850, 9857), False, 'import mpmath\n'), ((5542, 5564), 'numpy.array', 'np.array', (['attribute[0]'], {}), '(attribute[0])\n', (5550, 5564), True, 'import numpy as np\n')] |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import datetime
import json
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple, Union
import plaid
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from plaid.api import plaid_api
from plaid.model.accounts_balance_get_request import AccountsBalanceGetRequest
from plaid.model.transactions_get_request import TransactionsGetRequest
# Map the connector's spec-level environment name to the Plaid SDK host.
SPEC_ENV_TO_PLAID_ENV = {
    "production": plaid.Environment.Production,
    "development": plaid.Environment.Development,
    "sandbox": plaid.Environment.Sandbox,
}


class PlaidStream(Stream):
    """Base stream: holds a configured Plaid API client and the access token."""

    def __init__(self, config: Mapping[str, Any]):
        configuration = plaid.Configuration(
            host=SPEC_ENV_TO_PLAID_ENV[config["plaid_env"]], api_key={"clientId": config["client_id"], "secret": config["api_key"]}
        )
        self.client = plaid_api.PlaidApi(plaid.ApiClient(configuration))
        self.access_token = config["access_token"]
class BalanceStream(PlaidStream):
    """Full-refresh stream of per-account balances."""

    @property
    def name(self):
        return "balance"

    @property
    def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
        return "account_id"

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Fetch balances and emit one record per account."""
        request = AccountsBalanceGetRequest(access_token=self.access_token)
        response = self.client.accounts_balance_get(request)
        for account in response["accounts"]:
            record = account["balances"].to_dict()
            record["account_id"] = account["account_id"]
            yield record
class IncrementalTransactionStream(PlaidStream):
    """Incremental stream of Plaid transactions, cursored on the transaction date."""

    @property
    def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
        return "transaction_id"

    @property
    def name(self):
        return "transaction"

    @property
    def source_defined_cursor(self) -> bool:
        return True

    @property
    def cursor_field(self) -> Union[str, List[str]]:
        return "date"

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
        """Advance the cursor to the date of the latest emitted record."""
        return {"date": latest_record.get("date")}

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        stream_state = stream_state or {}
        date = stream_state.get("date")
        if not date:
            # Start from the Unix epoch explicitly. The previous
            # datetime.date.fromtimestamp(0) interpreted timestamp 0 in the
            # *local* timezone, which can yield 1969-12-31 on hosts west of
            # UTC while the comparisons below use utcnow().
            date = datetime.date(1970, 1, 1)
        else:
            date = datetime.date.fromisoformat(date)
        # Hoisted: the same "today in UTC" value is used for both the early
        # return and the request end_date, so they can no longer disagree
        # across a midnight boundary.
        today = datetime.datetime.utcnow().date()
        if date >= today:
            return
        transaction_response = self.client.transactions_get(
            TransactionsGetRequest(access_token=self.access_token, start_date=date, end_date=today)
        )
        # Emit oldest-first so the incremental cursor only moves forward.
        yield from map(lambda x: x.to_dict(), sorted(transaction_response["transactions"], key=lambda t: t["date"]))
class SourcePlaid(AbstractSource):
    """Airbyte source connector for the Plaid API."""

    def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Optional[Any]]:
        """Validate the credentials by attempting a balance fetch."""
        try:
            configuration = plaid.Configuration(
                host=SPEC_ENV_TO_PLAID_ENV[config["plaid_env"]], api_key={"clientId": config["client_id"], "secret": config["api_key"]}
            )
            client = plaid_api.PlaidApi(plaid.ApiClient(configuration))
            # Inner try: an API rejection carries a structured JSON body worth
            # surfacing; anything else falls through to the generic handler.
            try:
                client.accounts_balance_get(AccountsBalanceGetRequest(access_token=config["access_token"]))
                return True, None
            except plaid.ApiException as e:
                return False, json.loads(e.body)
        except Exception as error:
            return False, error

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Return the streams this connector exposes."""
        return [BalanceStream(config), IncrementalTransactionStream(config)]
| [
"plaid.model.accounts_balance_get_request.AccountsBalanceGetRequest",
"plaid.api.plaid_api.PlaidApi",
"json.loads",
"datetime.datetime.utcnow",
"datetime.date.fromtimestamp",
"plaid.Configuration",
"datetime.date.fromisoformat",
"plaid.ApiClient"
] | [((826, 970), 'plaid.Configuration', 'plaid.Configuration', ([], {'host': "SPEC_ENV_TO_PLAID_ENV[config['plaid_env']]", 'api_key': "{'clientId': config['client_id'], 'secret': config['api_key']}"}), "(host=SPEC_ENV_TO_PLAID_ENV[config['plaid_env']],\n api_key={'clientId': config['client_id'], 'secret': config['api_key']})\n", (845, 970), False, 'import plaid\n'), ((1010, 1039), 'plaid.ApiClient', 'plaid.ApiClient', (['plaid_config'], {}), '(plaid_config)\n', (1025, 1039), False, 'import plaid\n'), ((1062, 1092), 'plaid.api.plaid_api.PlaidApi', 'plaid_api.PlaidApi', (['api_client'], {}), '(api_client)\n', (1080, 1092), False, 'from plaid.api import plaid_api\n'), ((1661, 1718), 'plaid.model.accounts_balance_get_request.AccountsBalanceGetRequest', 'AccountsBalanceGetRequest', ([], {'access_token': 'self.access_token'}), '(access_token=self.access_token)\n', (1686, 1718), False, 'from plaid.model.accounts_balance_get_request import AccountsBalanceGetRequest\n'), ((2863, 2893), 'datetime.date.fromtimestamp', 'datetime.date.fromtimestamp', (['(0)'], {}), '(0)\n', (2890, 2893), False, 'import datetime\n'), ((2927, 2960), 'datetime.date.fromisoformat', 'datetime.date.fromisoformat', (['date'], {}), '(date)\n', (2954, 2960), False, 'import datetime\n'), ((3541, 3685), 'plaid.Configuration', 'plaid.Configuration', ([], {'host': "SPEC_ENV_TO_PLAID_ENV[config['plaid_env']]", 'api_key': "{'clientId': config['client_id'], 'secret': config['api_key']}"}), "(host=SPEC_ENV_TO_PLAID_ENV[config['plaid_env']],\n api_key={'clientId': config['client_id'], 'secret': config['api_key']})\n", (3560, 3685), False, 'import plaid\n'), ((3737, 3766), 'plaid.ApiClient', 'plaid.ApiClient', (['plaid_config'], {}), '(plaid_config)\n', (3752, 3766), False, 'import plaid\n'), ((3788, 3818), 'plaid.api.plaid_api.PlaidApi', 'plaid_api.PlaidApi', (['api_client'], {}), '(api_client)\n', (3806, 3818), False, 'from plaid.api import plaid_api\n'), ((3862, 3924), 
'plaid.model.accounts_balance_get_request.AccountsBalanceGetRequest', 'AccountsBalanceGetRequest', ([], {'access_token': "config['access_token']"}), "(access_token=config['access_token'])\n", (3887, 3924), False, 'from plaid.model.accounts_balance_get_request import AccountsBalanceGetRequest\n'), ((2980, 3006), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3004, 3006), False, 'import datetime\n'), ((4083, 4101), 'json.loads', 'json.loads', (['e.body'], {}), '(e.body)\n', (4093, 4101), False, 'import json\n'), ((3189, 3215), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3213, 3215), False, 'import datetime\n')] |
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import os
import math
from utils import logger
use_cuda = torch.cuda.is_available()


# utility
def to_var(x, dtype=None):
    """Wrap *x* in an autograd Variable, converting and moving to GPU as needed.

    Args:
        x: a torch tensor, a numpy array, or a plain Python list.
        dtype: numpy dtype used only when *x* is a list.

    Returns:
        Variable on GPU when CUDA is available, otherwise on CPU.
    """
    # isinstance instead of `type(x) is ...`: idiomatic and also accepts
    # subclasses of ndarray / list.
    if isinstance(x, np.ndarray):
        x = torch.from_numpy(x)
    elif isinstance(x, list):
        x = torch.from_numpy(np.array(x, dtype=dtype))
    if use_cuda:
        x = x.cuda()
    return Variable(x)
# optimization
# reference: http://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#ReduceLROnPlateau
def adjusting_learning_rate(optimizer, factor=.5, min_lr=0.00001):
    """Scale every param group's learning rate by *factor*, bounded below by *min_lr*."""
    for param_group in optimizer.param_groups:
        previous_lr = float(param_group['lr'])
        adjusted_lr = max(previous_lr * factor, min_lr)
        param_group['lr'] = adjusted_lr
        logger.info('adjusting learning rate from %.6f to %.6f' % (previous_lr, adjusted_lr))
def lr_annealing_function(step, start=0, end=1, r=0.9999, type="exp"):
    """Exponentially anneal a learning rate from *start* towards *end*.

    Args:
        step: current training step (>= 0).
        start: value at step 0.
        end: asymptotic value as step -> infinity.
        r: decay base in (0, 1); larger means slower annealing.
        type: annealing schedule name; only "exp" is implemented.
            (Parameter name shadows the builtin but is kept for
            backward compatibility with keyword callers.)

    Returns:
        The annealed learning rate.

    Raises:
        ValueError: for an unsupported *type* (previously the function
            printed a message and then crashed with UnboundLocalError).
    """
    if type == "exp":
        return start - (start - end) * (1 - math.pow(r, step))
    raise ValueError("not available %s annealing" % type)
def update_lr(optimizer, new_lr):
    """Set every param group's learning rate to *new_lr*."""
    # Read kept from the original: also acts as a sanity check that at least
    # one param group exists (IndexError otherwise).
    old_lr = optimizer.param_groups[0]['lr']
    # logger.info("adjusting learning rate from %.6f to %.6f" % (old_lr, new_lr))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def transformer_learning_rate(optimizer, model_dim, step_num, warmup_steps=4000):
    """Apply the Transformer ("Attention Is All You Need") LR schedule.

    lr = model_dim^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)

    Args:
        optimizer: torch optimizer whose param groups are updated in place.
        model_dim: model hidden size d_model.
        step_num: current step (must be >= 1, otherwise step^-0.5 diverges).
        warmup_steps: number of linear warmup steps.
    """
    # The new rate does not depend on the param group, so compute it once
    # instead of on every loop iteration (hoisted loop-invariant).
    new_lr = model_dim**(-0.5) * min(step_num**(-0.5), step_num*warmup_steps**(-1.5))
    for param_group in optimizer.param_groups:
        old_lr = float(param_group['lr'])
        param_group['lr'] = new_lr
        logger.info('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))
# model save and loading
def load_model(asset_path, model, optimizer, restore_epoch=0):
    """Restore model/optimizer state from a checkpoint file, if one exists.

    Returns:
        (model, optimizer, current_step) — current_step is 0 when no
        checkpoint is found for *restore_epoch*.
    """
    checkpoint_path = os.path.join(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch)
    if os.path.isfile(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        current_step = checkpoint['current_step']
        logger.info("restore model with %d epoch" % restore_epoch)
    else:
        logger.info("no checkpoint with %d epoch" % restore_epoch)
        current_step = 0
    return model, optimizer, current_step
# class weighted_BCELoss(Module):
# def __init__(self, mode):
# self.mode = mode
#
# def forward(self, input, target, weight=10):
# if not (input.size() == target.size()):
# raise ValueError("Target and input must have the same size. target size ({}) "
# "!= input size ({})".format(target.size(), input.size()))
# loss_matrix = - (torch.mul(target, input.log()) + torch.mul(1 - target, (1 - input).log()))
# one_matrix = Variable(torch.ones(input.size()))
# if use_cuda:
# one_matrix = one_matrix.cuda()
# if self.mode == 'one':
# weight_matrix = (weight - 1) * target + one_matrix
# elif self.mode == 'pitch':
#
# weighted_loss_matrix = torch.mul(loss_matrix, weight_matrix)
# return torch.mean(weighted_loss_matrix)
# loss
def weighted_binary_cross_entropy(output, target, weights=None, eps=1e-12):
    """Binary cross entropy with optional per-class weights.

    Args:
        output: predicted probabilities in [0, 1].
        target: binary targets (same shape as output).
        weights: optional pair [negative_weight, positive_weight].
        eps: numerical floor inside the logs.

    Returns:
        Scalar mean loss (negated log-likelihood).
    """
    log_p = torch.log(output + eps)
    log_not_p = torch.log(1 - output + eps)
    if weights is None:
        loss = target * log_p + (1 - target) * log_not_p
    else:
        assert len(weights) == 2
        loss = weights[1] * (target * log_p) + \
               weights[0] * ((1 - target) * log_not_p)
    return torch.neg(torch.mean(loss))
def kl_divergence(mu, sig, num_latent_group=0, freebits_ratio=2., p_mu=None, p_sigma=None, eps=1e-8):
    """KL divergence between N(mu, sig) and a Gaussian prior.

    With p_mu/p_sigma omitted the prior is the standard normal. When
    num_latent_group > 0, latent dimensions are split into groups and each
    group's KL is clamped below at freebits_ratio (free-bits trick).

    Args:
        mu, sig: posterior mean / std, shape (batch, latent).
        num_latent_group: number of free-bits groups (0 disables grouping).
        freebits_ratio: per-group KL floor.
        p_mu, p_sigma: optional prior mean / std, same shape as mu.
        eps: numerical floor inside logs.

    Returns:
        Scalar KL term (averaged over the batch).
    """
    batch_size = mu.size(0)
    latent_size = mu.size(1)
    variance = sig * sig
    if p_mu is None:
        # closed form against the standard normal prior:
        # 0.5 * (mu^2 + sig^2 - log sig^2 - 1)
        kl = 0.5 * (mu * mu + variance - torch.log(variance + eps) - 1)
    else:
        prior_variance = p_sigma * p_sigma
        mean_diff_sq = (mu - p_mu) * (mu - p_mu)
        kl = (variance + mean_diff_sq) / (2 * prior_variance)
        kl += torch.log(p_sigma / sig + eps)
        kl -= 0.5
    if num_latent_group == 0:
        return torch.sum(kl) / batch_size
    group_size = latent_size // num_latent_group
    kl = kl.mean(0)                      # mean along batch dimension
    kl = kl.view(-1, group_size).sum(1)  # sum within each latent group
    kl = torch.clamp(kl, min=freebits_ratio)  # free-bits floor per group
    return kl.sum()
def vae_loss(target, prediction, mu, sig,
             num_latent_group=0, freebits_ratio=2., kl_ratio=1., p_mu=None, p_sigma=None):
    """ELBO-style VAE loss: reconstruction BCE plus a weighted KL term.

    Returns:
        (total_loss, reconstruction_loss, kl_loss)
    """
    reconstruction = F.binary_cross_entropy(prediction, target)
    kl = kl_divergence(mu, sig, num_latent_group, freebits_ratio, p_mu, p_sigma)
    total = reconstruction + kl_ratio * kl
    return total, reconstruction, kl
| [
"torch.log",
"torch.mean",
"math.pow",
"torch.nn.functional.binary_cross_entropy",
"os.path.join",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"utils.logger.info",
"torch.sum",
"torch.autograd.Variable",
"torch.clamp"
] | [((160, 185), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (183, 185), False, 'import torch\n'), ((419, 430), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (427, 430), False, 'from torch.autograd import Variable\n'), ((4962, 5004), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['prediction', 'target'], {}), '(prediction, target)\n', (4984, 5004), True, 'import torch.nn.functional as F\n'), ((267, 286), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (283, 286), False, 'import torch\n'), ((806, 881), 'utils.logger.info', 'logger.info', (["('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))"], {}), "('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))\n", (817, 881), False, 'from utils import logger\n'), ((1739, 1815), 'utils.logger.info', 'logger.info', (["('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))"], {}), "('adjusting learning rate from %.6f to %.6f' % (old_lr, new_lr))\n", (1750, 1815), False, 'from utils import logger\n'), ((1928, 2002), 'os.path.join', 'os.path.join', (['asset_path', '"""model"""', "('checkpoint_%d.pth.tar' % restore_epoch)"], {}), "(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch)\n", (1940, 2002), False, 'import os\n'), ((2281, 2339), 'utils.logger.info', 'logger.info', (["('restore model with %d epoch' % restore_epoch)"], {}), "('restore model with %d epoch' % restore_epoch)\n", (2292, 2339), False, 'from utils import logger\n'), ((2358, 2416), 'utils.logger.info', 'logger.info', (["('no checkpoint with %d epoch' % restore_epoch)"], {}), "('no checkpoint with %d epoch' % restore_epoch)\n", (2369, 2416), False, 'from utils import logger\n'), ((3762, 3778), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (3772, 3778), False, 'import torch\n'), ((4390, 4420), 'torch.log', 'torch.log', (['(p_sigma / sig + eps)'], {}), '(p_sigma / sig + eps)\n', (4399, 4420), False, 'import torch\n'), ((4717, 
4752), 'torch.clamp', 'torch.clamp', (['kl'], {'min': 'freebits_ratio'}), '(kl, min=freebits_ratio)\n', (4728, 4752), False, 'import torch\n'), ((2037, 2111), 'os.path.join', 'os.path.join', (['asset_path', '"""model"""', "('checkpoint_%d.pth.tar' % restore_epoch)"], {}), "(asset_path, 'model', 'checkpoint_%d.pth.tar' % restore_epoch)\n", (2049, 2111), False, 'import os\n'), ((4481, 4494), 'torch.sum', 'torch.sum', (['kl'], {}), '(kl)\n', (4490, 4494), False, 'import torch\n'), ((342, 366), 'numpy.array', 'np.array', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (350, 366), True, 'import numpy as np\n'), ((3671, 3694), 'torch.log', 'torch.log', (['(output + eps)'], {}), '(output + eps)\n', (3680, 3694), False, 'import torch\n'), ((3712, 3739), 'torch.log', 'torch.log', (['(1 - output + eps)'], {}), '(1 - output + eps)\n', (3721, 3739), False, 'import torch\n'), ((1019, 1036), 'math.pow', 'math.pow', (['r', 'step'], {}), '(r, step)\n', (1027, 1036), False, 'import math\n'), ((3535, 3558), 'torch.log', 'torch.log', (['(output + eps)'], {}), '(output + eps)\n', (3544, 3558), False, 'import torch\n'), ((3608, 3635), 'torch.log', 'torch.log', (['(1 - output + eps)'], {}), '(1 - output + eps)\n', (3617, 3635), False, 'import torch\n'), ((4177, 4204), 'torch.log', 'torch.log', (['(sig_square + eps)'], {}), '(sig_square + eps)\n', (4186, 4204), False, 'import torch\n')] |
from dataclasses import field
from marshmallow import Schema, ValidationError, post_load, schema
from marshmallow.validate import OneOf, Length
from marshmallow.fields import Bool, Str, List, Nested, Email
from flask_rebar import ResponseSchema, RequestSchema, errors
from ecosante.inscription.models import Inscription
from ecosante.utils.custom_fields import TempList
from ecosante.api.schemas.commune import CommuneSchema
from ecosante.extensions import celery
from indice_pollution.history.models import Commune as CommuneModel
from flask import request
def list_str(choices, max_length=None, temp=False, **kwargs):
    """Build an optional list-of-strings field restricted to *choices*.

    Args:
        choices: allowed string values for each element.
        max_length: optional cap on the number of elements.
        temp: use the project's TempList field instead of a plain List.
        **kwargs: forwarded to the field constructor.
    """
    field_cls = TempList if temp else List
    length_check = Length(min=0, max=max_length) if max_length else None
    return field_cls(
        Str(validate=OneOf(choices=choices)),
        required=False,
        allow_none=True,
        validate=length_check,
        **kwargs
    )
class User(Schema):
    """Marshmallow schema describing a subscriber profile."""
    # Commune of residence, serialised through its own nested schema.
    commune = Nested(CommuneSchema, required=False, allow_none=True)
    # Server-generated identifier; exposed on dump, never accepted on load.
    uid = Str(dump_only=True)
    mail = Email(required=True)
    # Questionnaire answers: each field is a list of strings restricted to
    # the declared choices (see list_str above).
    deplacement = list_str(["velo", "tec", "voiture", "aucun"])
    activites = list_str(["jardinage", "bricolage", "menage", "sport", "aucun"])
    enfants = list_str(["oui", "non", "aucun"], temp=True)
    chauffage = list_str(["bois", "chaudiere", "appoint", "aucun"])
    animaux_domestiques = list_str(["chat", "chien", "aucun"])
    connaissance_produit = list_str(["medecin", "association", "reseaux_sociaux", "publicite", "ami", "autrement"])
    population = list_str(["pathologie_respiratoire", "allergie_pollens", "aucun"])
    # Subscription preferences: which indicators to receive, how often, and
    # on which media (single-element lists where max_length=1).
    indicateurs = list_str(["indice_atmo", "raep", "indice_uv", "vigilance_meteorologique"])
    indicateurs_frequence = list_str(["quotidien", "hebdomadaire", "alerte"], 1)
    indicateurs_media = list_str(["mail", "notifications_web"])
    recommandations = list_str(["oui", "non"], 1, attribute='recommandations_actives')
    recommandations_frequence = list_str(["quotidien", "hebdomadaire", "pollution"], 1)
    recommandations_media = list_str(["mail", "notifications_web"])
    # Raw web-push subscription payload; accepted on load only.
    webpush_subscriptions_info = Str(required=False, allow_none=True, load_only=True)
class Response(User, ResponseSchema):
    """User schema plus fields only exposed in API responses."""
    is_active = Bool(attribute='is_active')
class RequestPOST(User, RequestSchema):
    """Request schema for creating a new subscription."""

    @post_load
    def make_inscription(self, data, **kwargs):
        """Reject duplicate mails (case-insensitive), then build the Inscription."""
        existing = Inscription.query.filter(Inscription.mail.ilike(data['mail'])).first()
        if existing is not None:
            raise ValidationError('mail already used', field_name='mail')
        return Inscription(**data)
class RequestPOSTID(User, RequestSchema):
    """Request schema for partially updating an existing subscription by uid."""

    def __init__(self, **kwargs):
        # 'mail' is optional on update unless the caller overrides `partial`.
        kwargs_copy = dict(kwargs)
        partial_arg = kwargs_copy.pop('partial', ['mail'])
        super(RequestPOSTID, self).__init__(partial=partial_arg, **kwargs_copy)

    @post_load
    def make_inscription(self, data, **kwargs):
        """Load the Inscription named by the URL uid and apply the submitted fields."""
        uid = request.view_args.get('uid')
        if not uid:
            raise ValidationError('uid is required')
        inscription = Inscription.query.filter_by(uid=uid).first()
        if not inscription:
            raise errors.NotFound('uid unknown')
        if 'mail' in data:
            # A changed mail must not collide with another subscriber.
            duplicate = Inscription.query.filter(
                Inscription.uid != uid,
                Inscription.mail == data['mail']
            ).first()
            if duplicate:
                raise errors.Conflict('user with this mail already exists')
        for attribute, value in data.items():
            setattr(inscription, attribute, value)
        return inscription
class RequestUpdateProfile(Schema):
    """Request payload carrying the new mail address for a profile update.

    FIX: the field line was corrupted by a trailing extraction artifact
    ('| [') that made it a syntax error; restore the plain declaration.
    """
    mail = Email(required=True)
"flask_rebar.errors.NotFound",
"marshmallow.ValidationError",
"flask.request.view_args.get",
"marshmallow.fields.Nested",
"marshmallow.fields.Str",
"ecosante.inscription.models.Inscription",
"flask_rebar.errors.Conflict",
"marshmallow.validate.OneOf",
"marshmallow.fields.Bool",
"ecosante.inscripti... | [((896, 950), 'marshmallow.fields.Nested', 'Nested', (['CommuneSchema'], {'required': '(False)', 'allow_none': '(True)'}), '(CommuneSchema, required=False, allow_none=True)\n', (902, 950), False, 'from marshmallow.fields import Bool, Str, List, Nested, Email\n'), ((961, 980), 'marshmallow.fields.Str', 'Str', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (964, 980), False, 'from marshmallow.fields import Bool, Str, List, Nested, Email\n'), ((992, 1012), 'marshmallow.fields.Email', 'Email', ([], {'required': '(True)'}), '(required=True)\n', (997, 1012), False, 'from marshmallow.fields import Bool, Str, List, Nested, Email\n'), ((2062, 2114), 'marshmallow.fields.Str', 'Str', ([], {'required': '(False)', 'allow_none': '(True)', 'load_only': '(True)'}), '(required=False, allow_none=True, load_only=True)\n', (2065, 2114), False, 'from marshmallow.fields import Bool, Str, List, Nested, Email\n'), ((2171, 2198), 'marshmallow.fields.Bool', 'Bool', ([], {'attribute': '"""is_active"""'}), "(attribute='is_active')\n", (2175, 2198), False, 'from marshmallow.fields import Bool, Str, List, Nested, Email\n'), ((3610, 3630), 'marshmallow.fields.Email', 'Email', ([], {'required': '(True)'}), '(required=True)\n', (3615, 3630), False, 'from marshmallow.fields import Bool, Str, List, Nested, Email\n'), ((2521, 2540), 'ecosante.inscription.models.Inscription', 'Inscription', ([], {}), '(**data)\n', (2532, 2540), False, 'from ecosante.inscription.models import Inscription\n'), ((2901, 2929), 'flask.request.view_args.get', 'request.view_args.get', (['"""uid"""'], {}), "('uid')\n", (2922, 2929), False, 'from flask import request\n'), ((2443, 2498), 'marshmallow.ValidationError', 'ValidationError', (['"""mail already used"""'], {'field_name': '"""mail"""'}), "('mail already used', field_name='mail')\n", (2458, 2498), False, 'from marshmallow import Schema, ValidationError, post_load, schema\n'), ((2968, 3002), 'marshmallow.ValidationError', 'ValidationError', 
(['"""uid is required"""'], {}), "('uid is required')\n", (2983, 3002), False, 'from marshmallow import Schema, ValidationError, post_load, schema\n'), ((3116, 3146), 'flask_rebar.errors.NotFound', 'errors.NotFound', (['"""uid unknown"""'], {}), "('uid unknown')\n", (3131, 3146), False, 'from flask_rebar import ResponseSchema, RequestSchema, errors\n'), ((692, 714), 'marshmallow.validate.OneOf', 'OneOf', ([], {'choices': 'choices'}), '(choices=choices)\n', (697, 714), False, 'from marshmallow.validate import OneOf, Length\n'), ((783, 812), 'marshmallow.validate.Length', 'Length', ([], {'min': '(0)', 'max': 'max_length'}), '(min=0, max=max_length)\n', (789, 812), False, 'from marshmallow.validate import OneOf, Length\n'), ((3025, 3061), 'ecosante.inscription.models.Inscription.query.filter_by', 'Inscription.query.filter_by', ([], {'uid': 'uid'}), '(uid=uid)\n', (3052, 3061), False, 'from ecosante.inscription.models import Inscription\n'), ((3407, 3460), 'flask_rebar.errors.Conflict', 'errors.Conflict', (['"""user with this mail already exists"""'], {}), "('user with this mail already exists')\n", (3422, 3460), False, 'from flask_rebar import ResponseSchema, RequestSchema, errors\n'), ((2355, 2391), 'ecosante.inscription.models.Inscription.mail.ilike', 'Inscription.mail.ilike', (["data['mail']"], {}), "(data['mail'])\n", (2377, 2391), False, 'from ecosante.inscription.models import Inscription\n'), ((3210, 3297), 'ecosante.inscription.models.Inscription.query.filter', 'Inscription.query.filter', (['(Inscription.uid != uid)', "(Inscription.mail == data['mail'])"], {}), "(Inscription.uid != uid, Inscription.mail == data[\n 'mail'])\n", (3234, 3297), False, 'from ecosante.inscription.models import Inscription\n')] |
import logging
from thespian.actors import *
from eventsourcing.application.process import ProcessApplication, Prompt
from eventsourcing.application.system import System, SystemRunner
from eventsourcing.domain.model.events import subscribe, unsubscribe
from eventsourcing.interface.notificationlog import RecordManagerNotificationLog
# Module-level logger (root logger: getLogger() is called with no name).
logger = logging.getLogger()
# Todo: Send timer message to run slave every so often (in master or slave?).
# Default logging configuration handed to the thespian ActorSystem.
# Handlers/loggers are intentionally empty (quiet); the commented entries
# show how to enable file logging.
DEFAULT_ACTORS_LOGCFG = {
    'version': 1,
    'formatters': {
        'normal': {
            'format': '%(levelname)-8s %(message)s'
        }
    },
    'handlers': {
        # 'h': {
        #     'class': 'logging.FileHandler',
        #     'filename': 'hello.log',
        #     'formatter': 'normal',
        #     'level': logging.INFO
        # }
    },
    'loggers': {
        # '': {'handlers': ['h'], 'level': logging.DEBUG}
    }
}
def start_actor_system(system_base=None, logcfg=DEFAULT_ACTORS_LOGCFG):
    """Initialise the global thespian actor system.

    :param system_base: thespian system base name (e.g. 'multiprocTCPBase'),
        or None for the default simple base.
    :param logcfg: logging config dict passed to the actor system.
    """
    ActorSystem(systemBase=system_base, logDefs=logcfg)
def shutdown_actor_system():
    """Tear down the global thespian actor system."""
    actor_system = ActorSystem()  # ActorSystem() returns the existing singleton
    actor_system.shutdown()
def start_multiproc_tcp_base_system():
    """Start the actor system on the multiprocess TCP system base."""
    start_actor_system('multiprocTCPBase')
# def start_multiproc_udp_base_system():
# start_actor_system(system_base='multiprocUDPBase')
#
#
# def start_multiproc_queue_base_system():
# start_actor_system(system_base='multiprocQueueBase')
class ActorModelRunner(SystemRunner):
    """
    Uses actor model framework to run a system of process applications.
    """
    def __init__(self, system: System, pipeline_ids, system_actor_name='system', shutdown_on_close=False, **kwargs):
        """Create (or attach to) the singleton system actor for this runner.

        :param system: the eventsourcing System of process applications.
        :param pipeline_ids: pipeline ids this runner will operate.
        :param system_actor_name: global actor name used for the singleton.
        :param shutdown_on_close: if True, close() also stops the actors.
        """
        super(ActorModelRunner, self).__init__(system=system, **kwargs)
        self.pipeline_ids = list(pipeline_ids)
        self.pipeline_actors = {}
        self.system_actor_name = system_actor_name
        # Create the system actor (singleton).
        self.system_actor = self.actor_system.createActor(
            actorClass=SystemActor,
            globalName=self.system_actor_name
        )
        self.shutdown_on_close = shutdown_on_close
    @property
    def actor_system(self):
        # ActorSystem() returns the process-wide singleton handle.
        return ActorSystem()
    def start(self):
        """
        Starts all the actors to run a system of process applications.
        """
        # Subscribe to broadcast prompts published by a process
        # application in the parent operating system process.
        subscribe(handler=self.forward_prompt, predicate=self.is_prompt)
        # Initialise the system actor.
        msg = SystemInitRequest(
            self.system.process_classes,
            self.infrastructure_class,
            self.system.followings,
            self.pipeline_ids
        )
        # ask() blocks until the system actor replies with the addresses.
        response = self.actor_system.ask(self.system_actor, msg)
        # Keep the pipeline actor addresses, to send prompts directly.
        assert isinstance(response, SystemInitResponse), type(response)
        assert list(response.pipeline_actors.keys()) == self.pipeline_ids, (
            "Configured pipeline IDs mismatch initialised system {} {}").format(
            list(self.pipeline_actors.keys()), self.pipeline_ids
        )
        self.pipeline_actors = response.pipeline_actors
        # Todo: Somehow know when to get a new address from the system actor.
        # Todo: Command and response messages to system actor to get new pipeline address.
    @staticmethod
    def is_prompt(event):
        # Predicate used to filter published events down to Prompt objects.
        return isinstance(event, Prompt)
    def forward_prompt(self, prompt):
        # Relay a locally published prompt to the pipeline actor that owns
        # the prompt's pipeline; prompts for unknown pipelines are dropped.
        if prompt.pipeline_id in self.pipeline_actors:
            pipeline_actor = self.pipeline_actors[prompt.pipeline_id]
            self.actor_system.tell(pipeline_actor, prompt)
        # else:
        #     msg = "Pipeline {} is not running.".format(prompt.pipeline_id)
        #     raise ValueError(msg)
    def close(self):
        """Stops all the actors running a system of process applications."""
        super(ActorModelRunner, self).close()
        unsubscribe(handler=self.forward_prompt, predicate=self.is_prompt)
        if self.shutdown_on_close:
            self.shutdown()
    def shutdown(self):
        # Recursively exit the system actor and everything it created.
        msg = ActorExitRequest(recursive=True)
        self.actor_system.tell(self.system_actor, msg)
class SystemActor(Actor):
    """Singleton actor that creates one PipelineActor per pipeline id and
    replies to SystemInitRequest with the pipeline address map."""
    def __init__(self):
        super(SystemActor, self).__init__()
        # pipeline_id -> PipelineActor address.
        self.pipeline_actors = {}
        self.is_initialised = False
    def receiveMessage(self, msg, sender):
        # Only SystemInitRequest is handled; initialisation happens once,
        # but every request gets a response with the current address map.
        if isinstance(msg, SystemInitRequest):
            if not self.is_initialised:
                self.init_pipelines(msg)
                self.is_initialised = True
            msg = SystemInitResponse(self.pipeline_actors.copy())
            self.send(sender, msg)
    def init_pipelines(self, msg):
        # Record system configuration, then create and initialise one
        # pipeline actor per configured pipeline id.
        self.process_classes = msg.process_classes
        self.infrastructure_class = msg.infrastructure_class
        self.system_followings = msg.system_followings
        for pipeline_id in msg.pipeline_ids:
            pipeline_actor = self.createActor(PipelineActor)
            self.pipeline_actors[pipeline_id] = pipeline_actor
            msg = PipelineInitRequest(
                self.process_classes,
                self.infrastructure_class,
                self.system_followings,
                pipeline_id
            )
            self.send(pipeline_actor, msg)
class PipelineActor(Actor):
    """Actor owning one pipeline: creates a ProcessMaster per process
    application and forwards prompts to the followers of the prompting app."""
    def __init__(self):
        super(PipelineActor, self).__init__()
        self.system = None
        # process name (lowercase class name) -> ProcessMaster address.
        self.process_actors = {}
        self.pipeline_id = None
    def receiveMessage(self, msg, sender):
        if isinstance(msg, PipelineInitRequest):
            # logger.info("pipeline received init: {}".format(msg))
            self.init_pipeline(msg)
        elif isinstance(msg, Prompt):
            # logger.info("pipeline received prompt: {}".format(msg))
            self.forward_prompt(msg)
    def init_pipeline(self, msg):
        self.pipeline_id = msg.pipeline_id
        self.process_classes = msg.process_classes
        self.infrastructure_class = msg.infrastructure_class
        self.system_followings = msg.system_followings
        # Invert the followings map: for each upstream process name, the
        # list of downstream class names that follow it.
        self.followers = {}
        for process_class_name, upstream_class_names in self.system_followings.items():
            for upstream_class_name in upstream_class_names:
                process_name = upstream_class_name.lower()
                if process_name not in self.followers:
                    self.followers[process_name] = []
                downstream_class_names = self.followers[process_name]
                if process_class_name not in downstream_class_names:
                    downstream_class_names.append(process_class_name)
        # First create all master actors, so each one's downstream
        # addresses are available when the init messages are built below.
        process_class_names = self.system_followings.keys()
        for process_class_name in process_class_names:
            process_actor = self.createActor(ProcessMaster)
            process_name = process_class_name.lower()
            self.process_actors[process_name] = process_actor
        for process_class_name in process_class_names:
            process_name = process_class_name.lower()
            upstream_application_names = [c.lower() for c in self.system_followings[process_class_name]]
            downstream_actors = {}
            for downstream_class_name in self.followers[process_name]:
                downstream_name = downstream_class_name.lower()
                # logger.warning("sending prompt to process application {}".format(downstream_name))
                process_actor = self.process_actors[downstream_name]
                downstream_actors[downstream_name] = process_actor
            process_class = self.process_classes[process_class_name]
            msg = ProcessInitRequest(
                process_class,
                self.infrastructure_class,
                self.pipeline_id,
                upstream_application_names,
                downstream_actors,
                self.myAddress
            )
            self.send(self.process_actors[process_name], msg)
    def forward_prompt(self, msg):
        # Deliver the prompt to every process actor that follows the
        # prompting application.
        for downstream_class_name in self.followers[msg.process_name]:
            downstream_name = downstream_class_name.lower()
            process_actor = self.process_actors[downstream_name]
            self.send(process_actor, msg)
class ProcessMaster(Actor):
    """Front actor for one process application: buffers prompts while its
    ProcessSlave is busy, so the slave is never told to run concurrently."""
    def __init__(self):
        super(ProcessMaster, self).__init__()
        self.is_slave_running = False
        # process name -> most recent prompt received while slave busy.
        self.last_prompts = {}
        self.slave_actor = None
    def receiveMessage(self, msg, sender):
        if isinstance(msg, ProcessInitRequest):
            self.init_process(msg)
        elif isinstance(msg, Prompt):
            # logger.warning("{} master received prompt: {}".format(self.process_application_class.__name__, msg))
            self.consume_prompt(prompt=msg)
        elif isinstance(msg, SlaveRunResponse):
            # logger.info("process application master received slave finished run: {}".format(msg))
            self.handle_slave_run_response()
    def init_process(self, msg):
        self.process_application_class = msg.process_application_class
        self.infrastructure_class = msg.infrastructure_class
        # Create the slave, forward the init, then kick off a first run.
        self.slave_actor = self.createActor(ProcessSlave)
        self.send(self.slave_actor, msg)
        self.run_slave()
    def consume_prompt(self, prompt):
        # Keep only the latest prompt per upstream process name.
        self.last_prompts[prompt.process_name] = prompt
        self.run_slave()
    def handle_slave_run_response(self):
        # Slave finished; if prompts arrived meanwhile, run it again.
        self.is_slave_running = False
        if self.last_prompts:
            self.run_slave()
    def run_slave(self):
        # Don't send to slave if we think it's running, or we'll
        # probably get blocked while sending the message and have
        # to wait until the slave runs its loop (thespian design).
        if self.slave_actor and not self.is_slave_running:
            self.send(self.slave_actor, SlaveRunRequest(self.last_prompts, self.myAddress))
            self.is_slave_running = True
            self.last_prompts = {}
class ProcessSlave(Actor):
    """Worker actor hosting the actual process application instance; runs it
    on request and forwards its prompts to downstream process actors."""
    def __init__(self):
        super(ProcessSlave, self).__init__()
        self.process = None
    def receiveMessage(self, msg, sender):
        if isinstance(msg, ProcessInitRequest):
            # logger.info("process application slave received init: {}".format(msg))
            self.init_process(msg)
        elif isinstance(msg, SlaveRunRequest):
            # logger.info("{} process application slave received last prompts: {}".format(self.process.name, msg))
            self.run_process(msg)
        elif isinstance(msg, ActorExitRequest):
            # logger.info("{} process application slave received exit request: {}".format(self.process.name, msg))
            self.close()
    def init_process(self, msg):
        """Construct the process application and wire up prompt forwarding."""
        self.pipeline_actor = msg.pipeline_actor
        self.downstream_actors = msg.downstream_actors
        self.pipeline_id = msg.pipeline_id
        self.upstream_application_names = msg.upstream_application_names
        # Construct the process application class.
        process_class = msg.process_application_class
        if msg.infrastructure_class:
            process_class = process_class.mixin(msg.infrastructure_class)
        # Reset the database connection (for Django).
        process_class.reset_connection_after_forking()
        # Construct the process application.
        self.process = process_class(
            pipeline_id=self.pipeline_id,
        )
        assert isinstance(self.process, ProcessApplication)
        # Subscribe the slave actor's send_prompt() method.
        # - the process application will call publish_prompt()
        # and the actor will receive the prompt and send it
        # as a message.
        subscribe(
            predicate=self.is_my_prompt,
            handler=self.send_prompt
        )
        # Close the process application persistence policy.
        # - slave actor process application doesn't publish
        # events, so we don't need this
        self.process.persistence_policy.close()
        # Unsubscribe process application's publish_prompt().
        # - slave actor process application doesn't publish
        # events, so we don't need this
        unsubscribe(
            predicate=self.process.persistence_policy.is_event,
            handler=self.process.publish_prompt
        )
        # Construct and follow upstream notification logs.
        for upstream_application_name in self.upstream_application_names:
            record_manager = self.process.event_store.record_manager
            # assert isinstance(record_manager, ACIDRecordManager), type(record_manager)
            notification_log = RecordManagerNotificationLog(
                record_manager=record_manager.clone(
                    application_name=upstream_application_name,
                    pipeline_id=self.pipeline_id
                ),
                section_size=self.process.notification_log_section_size
            )
            self.process.follow(upstream_application_name, notification_log)
    def run_process(self, msg):
        """Run the process application once, then either loop or report back."""
        notification_count = 0
        # Just process one notification so prompts are dispatched promptly, sent
        # messages only dispatched from actor after receive_message() returns.
        advance_by = 1
        if msg.last_prompts:
            for prompt in msg.last_prompts.values():
                notification_count += self.process.run(prompt, advance_by=advance_by)
        else:
            notification_count += self.process.run(advance_by=advance_by)
        if notification_count:
            # Run again, until nothing was done.
            self.send(self.myAddress, SlaveRunRequest(last_prompts={}, master=msg.master))
        else:
            # Report back to master.
            self.send(msg.master, SlaveRunResponse())
    def close(self):
        # Detach from the event bus before closing the application.
        unsubscribe(
            predicate=self.is_my_prompt,
            handler=self.send_prompt
        )
        self.process.close()
    def is_my_prompt(self, prompt):
        # True only for prompts from this slave's own application/pipeline.
        return (
            isinstance(prompt, Prompt)
            and prompt.process_name == self.process.name
            and prompt.pipeline_id == self.pipeline_id
        )
    def send_prompt(self, prompt):
        # Fan the prompt out to every downstream process master.
        for downstream_name, downstream_actor in self.downstream_actors.items():
            self.send(downstream_actor, prompt)
class SystemInitRequest(object):
    """Message asking the system actor to initialise its pipelines."""

    def __init__(self, process_classes, infrastructure_class, system_followings, pipeline_ids):
        # Plain data carrier: just record the constructor arguments.
        self.pipeline_ids = pipeline_ids
        self.system_followings = system_followings
        self.infrastructure_class = infrastructure_class
        self.process_classes = process_classes
class SystemInitResponse(object):
    """Reply from the system actor carrying pipeline-id -> actor address."""

    def __init__(self, pipeline_actors):
        self.pipeline_actors = pipeline_actors
class PipelineInitRequest(object):
    """Message asking a pipeline actor to set up its process actors."""

    def __init__(self, process_classes, infrastructure_class, system_followings, pipeline_id):
        # Plain data carrier: just record the constructor arguments.
        self.pipeline_id = pipeline_id
        self.system_followings = system_followings
        self.infrastructure_class = infrastructure_class
        self.process_classes = process_classes
class ProcessInitRequest(object):
    """Message initialising a process master/slave actor for one pipeline."""

    def __init__(self, process_application_class, infrastructure_class, pipeline_id,
                 upstream_application_names, downstream_actors, pipeline_actor):
        # Plain data carrier: just record the constructor arguments.
        self.pipeline_actor = pipeline_actor
        self.downstream_actors = downstream_actors
        self.upstream_application_names = upstream_application_names
        self.pipeline_id = pipeline_id
        self.infrastructure_class = infrastructure_class
        self.process_application_class = process_application_class
class SlaveRunRequest(object):
    """Message asking a slave to run, carrying prompts buffered by the master."""

    def __init__(self, last_prompts, master):
        self.master = master
        self.last_prompts = last_prompts
class SlaveRunResponse(object):
    """Empty acknowledgement sent by a slave when a run completes."""
| [
"logging.getLogger",
"eventsourcing.domain.model.events.unsubscribe",
"eventsourcing.domain.model.events.subscribe"
] | [((346, 365), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (363, 365), False, 'import logging\n'), ((2431, 2495), 'eventsourcing.domain.model.events.subscribe', 'subscribe', ([], {'handler': 'self.forward_prompt', 'predicate': 'self.is_prompt'}), '(handler=self.forward_prompt, predicate=self.is_prompt)\n', (2440, 2495), False, 'from eventsourcing.domain.model.events import subscribe, unsubscribe\n'), ((3986, 4052), 'eventsourcing.domain.model.events.unsubscribe', 'unsubscribe', ([], {'handler': 'self.forward_prompt', 'predicate': 'self.is_prompt'}), '(handler=self.forward_prompt, predicate=self.is_prompt)\n', (3997, 4052), False, 'from eventsourcing.domain.model.events import subscribe, unsubscribe\n'), ((11705, 11769), 'eventsourcing.domain.model.events.subscribe', 'subscribe', ([], {'predicate': 'self.is_my_prompt', 'handler': 'self.send_prompt'}), '(predicate=self.is_my_prompt, handler=self.send_prompt)\n', (11714, 11769), False, 'from eventsourcing.domain.model.events import subscribe, unsubscribe\n'), ((12192, 12297), 'eventsourcing.domain.model.events.unsubscribe', 'unsubscribe', ([], {'predicate': 'self.process.persistence_policy.is_event', 'handler': 'self.process.publish_prompt'}), '(predicate=self.process.persistence_policy.is_event, handler=\n self.process.publish_prompt)\n', (12203, 12297), False, 'from eventsourcing.domain.model.events import subscribe, unsubscribe\n'), ((13839, 13905), 'eventsourcing.domain.model.events.unsubscribe', 'unsubscribe', ([], {'predicate': 'self.is_my_prompt', 'handler': 'self.send_prompt'}), '(predicate=self.is_my_prompt, handler=self.send_prompt)\n', (13850, 13905), False, 'from eventsourcing.domain.model.events import subscribe, unsubscribe\n')] |
from rest_framework import mixins, viewsets, status
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from apps.transmissions.models import Transmission
from apps.transmissions.serializers import TransmissionModelSerializer, CommentModelserializer
from django_filters import rest_framework as filters
class TransmissionsViewSet(mixins.RetrieveModelMixin,
                            mixins.ListModelMixin,
                            mixins.UpdateModelMixin,
                            viewsets.GenericViewSet):
    """Read/list/update endpoint for transmissions, looked up by uuid.

    Ordering by 'is_yt_stream' puts non-YouTube streams first (False < True).
    """
    queryset = Transmission.objects.all().order_by('is_yt_stream')
    serializer_class = TransmissionModelSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    lookup_field = 'uuid'
    def get_permissions(self):
        # Reads are public; updates require an authenticated user.
        if self.action in ['retrieve', 'list']:
            permissions = [AllowAny]
        else:
            permissions = [IsAuthenticated]
        return [p() for p in permissions]
    class TransmissionFilter(filters.FilterSet):
        # Query-string filters exposed by the list endpoint.
        class Meta:
            model = Transmission
            fields = {
                'category':['exact'],
                'is_live': ['exact'] ,
                'required_auth': ['exact'],
                'broadcast_date': ['exact', 'contains']
            }
    filterset_class = TransmissionFilter
| [
"apps.transmissions.models.Transmission.objects.all"
] | [((540, 566), 'apps.transmissions.models.Transmission.objects.all', 'Transmission.objects.all', ([], {}), '()\n', (564, 566), False, 'from apps.transmissions.models import Transmission\n')] |
import h5py
from ont_fast5_api.conversion_tools import multi_to_single_fast5
from ont_fast5_api import fast5_interface
import SequenceGenerator.align as align
import SignalExtractor.Nanopolish as events
from testFiles.test_commands import *
import os, sys
import subprocess
#todo get basecall data
def basecall_test(fastPath):
    """Ensure a basecalled reads file exists in Data/basecall, running scrappie if needed.

    :param fastPath: path to the fast5 input (file or directory) given to scrappie.
    Side effects: may run external tools, convert multi-fast5 to single-read
    files, or terminate the process via sys.exit() when the input is missing.
    """
    # Reuse any existing, non-trivially-sized basecall file.
    files = os.listdir("Data/basecall")
    for f in files:
        if f.endswith((".fasta", ".fa", ".fastq", ".fq")):
            if os.stat("Data/basecall/" + f).st_size > 1000:
                return
    print("missing basecall file****/creating basecall file")
    bcCmd = "scrappie raw " + fastPath + " > " + os.getcwd() + "/Data/basecall/reads.fa"
    # Create basecall file.
    try:
        # BUG FIX: the command uses shell redirection ('>'), so it must run
        # through a shell. Previously the whole command string was passed as a
        # single argv element ([bcCmd]) without shell=True, which raised
        # FileNotFoundError on every call and always hit the sys.exit() branch.
        subprocess.run(bcCmd, shell=True, check=True)
    # Scrappie failed: maybe not on PATH, or input is multi-read fast5.
    except subprocess.CalledProcessError:
        export_scrappie_path()
        print("got error / process error")
        # Export scrappie cmd (might not be exported correctly).
        export_scrappie_path()
        # Checking if already in single directory.
        if 'single' in fastPath:
            print("|||\/|| Already in single folder")
            # todo insert flappie
        # Convert multi fast5 to single fast5 and move files into single directory.
        elif 'single' not in os.listdir(fastPath):
            print("converting fast5 to single fast5")
            convert_fast5_type(fastPath)
            scrappie_basecall_single(fastPath)
    # If path doesn't exist or no files.
    except FileNotFoundError:
        print("got error / no file found ")
        sys.exit()
    # Any other error: export scrappie and try again.
    except Exception:
        export_scrappie_path()
        scrappie_basecall(fastPath)
    # Check if basecall created successfully.
    # NOTE(review): this stat raises FileNotFoundError if scrappie never
    # produced reads.fa — confirm whether a guarded check is wanted.
    if os.stat("Data/basecall/reads.fa").st_size > 0:
        print("created basecall file****")
    else:
        print("Couldn't create basecall file")
#test to check if required files are created
def file_test(bed_file, ref_file, sam_file):
    """Validate/prepare the bed, reference and sam files; returns the trio.

    Downloads a default GRCh38 reference when none is given, and aligns the
    basecalled reads to produce a sam file when none is given.
    Raises FileNotFoundError on any missing prerequisite.
    """
    if bed_file == None:
        print("bed file test failed****")
        raise FileNotFoundError
    #set ref file
    if ref_file != None:
        #fasta input
        fastfile = os.getcwd() + "/Data/basecall/"
        # Pick a fasta/fastq file from the basecall directory (no break:
        # the last matching file in listdir order wins).
        for ffile in os.listdir(fastfile):
            if ffile.endswith(".fastq") or ffile.endswith(".fasta") or ffile.endswith(".fa"):
                #check if fasta files exist in directory
                fastfile = os.getcwd() + "/Data/basecall/" + ffile
        #check if you found a fasta/fastq file in directory
        if fastfile.endswith(".fastq") != True and fastfile.endswith(".fasta") != True and fastfile.endswith(".fa") != True:
            print("basecall test failed****")
            raise FileNotFoundError
    #download reference file
    else:
        #use default ref files
        refFlag = False
        #defaultReferenceFile = "Homo_sapiens.GRCh38.dna.alt.fa"
        #defaultReferenceFile = "refgenome"
        defaultReferenceFile = "grch38.fna"
        #defaultReferenceFile = "coli-ref.fa"
        downloadedFlag = False
        #check if default reference file exists
        for f in os.listdir(os.getcwd()):
            if f == defaultReferenceFile:
                print("reference downloaded already****")
                downloadedFlag = True
        #download reference file
        if downloadedFlag != True:
            #os.system("wget -O refgenome.tar.gz ftp://igenome:G3nom3s4u@ussd-ftp.illumina.com/Homo_sapiens/Ensembl/GRCh37/Homo_sapiens_Ensembl_GRCh37.tar.gz")
            #os.system("wget -O refgenome.gz ftp://ftp.ncbi.nlm.nih.gov/refseq/H_sapiens/annotation/GRCh37_latest/refseq_identifiers/GRCh37_latest_genomic.fna.gz")
            os.system("wget -O grch38.fna.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/001/405/GCA_000001405.15_GRCh38/GCA_000001405.15_GRCh38_genomic.fna.gz")
            #os.system("wget -O ftp://ftp.ensembl.org/pub/release-100/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.alt.fa.gz")
            #os.system("tar -xzf refgenome.tar.gz")
            #os.system("gunzip refgenome.gz")
            os.system("gzip -d grch38.fna.gz")
            print("gunzipping reference genome****")
            #os.system("gunzip -v Homo_sapiens.GRCh38.dna.alt.fa.gz")
            # Verify the download produced one of the expected names.
            for f in os.listdir(os.getcwd()):
                if f == "Homo_sapiens" or f == defaultReferenceFile or f == "refgenome":
                    refFlag = True
                    break
            ref_file = defaultReferenceFile
            #if file download wasn't successful
            if refFlag == False and downloadedFlag != True:
                print("ref file test failed****")
                raise FileNotFoundError
    #get basecalled file
    fastfile = os.getcwd() + "/Data/basecall/"
    for ffile in os.listdir(fastfile):
        if ffile.endswith(".fastq") or ffile.endswith(".fasta") or ffile.endswith(".fa"):
            #check if fast files exist in directory
            fastfile += ffile
            break
    #if no fasta/fastq file found
    if fastfile == os.getcwd() + "/Data/basecall/":
        print("basecall file test failed****")
        raise FileNotFoundError
    if sam_file == None:
        #ref file exists so align here
        sam_file = get_sam_file(fastfile, ref_file)
    # NOTE(review): this elif repeats the same condition as the if above,
    # so it can never run — the 'sam file test failed' path is dead code.
    elif sam_file == None:
        print("sam file test failed****")
        raise FileNotFoundError
    if bed_file != None:
        print("\nbed file test passed****")
    if sam_file != None:
        print("sam file test passed****")
    return bed_file, ref_file, sam_file
def id_file_test():
    """Report whether the fast5 id listing exists under ./Data/."""
    if "Fast5_ids.txt" in os.listdir("./Data/"):
        print("id test passed****")
        return
def get_sam_file(fastfile, ref_file):
    """Return a sam alignment path, aligning with minimap unless the user
    chooses to reuse an existing Data/Alignment.sam (interactive prompt)."""
    #check if sam file exists on our directory
    if "Alignment.sam" in os.listdir("Data"):
        #prompt to create new sam file
        choice = input("Do you want to create a new sam file?(y/n)")
        if choice == 'y':
            sam_file = align.minimapAligner(fastfile, ref_file)
        else:
            # Reuse the existing alignment.
            return "Data/Alignment.sam"
    else:
        sam_file = align.minimapAligner(fastfile, ref_file)
    return sam_file
return sam_file
#create event info file for machine learning models
def event_check(fpath=None, filename=None, ref=None, NanopolishOnly=True):
    """Return the path of the nanopolish event-align file, creating it if needed.

    Either `fpath` (a directory) or `filename` (a single file) identifies the
    input; `ref` is the reference genome and is required.
    Raises FileNotFoundError when `ref` is None.
    """
    #check if event info already exists
    if "reads-ref.eventalign.txt" in os.listdir("Data") and os.stat("Data/reads-ref.eventalign.txt").st_size > 1000:
        return "Data/reads-ref.eventalign.txt"
    #no events
    if ref != None:
        #todo fix this bug
        if event_align_check() == None:
            print("Creating Event Align file****")
            #create events(nanopolish code goes here)
            #is it a single file or path
            if fpath == None:
                event_file = events.nanopolish_events(filename, "Data/basecall/", referenceFile=ref)
            else:
                event_file = events.nanopolish_events(fpath, "Data/basecall/", referenceFile=ref)
            print("event file ", event_file)
            show_penguin()
            return event_file
        else:
            show_penguin()
            return "Data/reads-ref.eventalign.txt"
    else:
        print("reference file test failed")
        raise FileNotFoundError
def show_penguin():
    """Print the NanoSequin ASCII-art banner to stdout."""
    penguin = """
    =============================================================
                                 **-..L```|
                                  \     |
      *       \     |```| |````  |\ | |```| | |  ``|``  |\ |
      |   |   \      |___| |___   | \ | |___  | | | | \ |
     /*\  |    \     |     |      | \| |     | | | | | \|
    |***\ |     |    |     |____  |  | |___| \|/ _|_ |  |
     \****\   \     |                    |
      \***/    \   /                     |
       \*/      /
       /___/_____\
    =============================================================
    """
    print(penguin)
def sequence_check():
    """Placeholder: sequence-level validation is not implemented yet."""
    return None
def event_align_check():
    """Return the event-align file path if it exists and is >1000 bytes,
    else None; prints a pass/fail message either way."""
    target = "reads-ref.eventalign.txt"
    path = "Data/" + target
    if target in os.listdir("Data") and os.stat(path).st_size > 1000:
        print("Event Align Test Passed****")
        return path
    print("Event Align Test Failed****")
    return None
def convert_fast5_type(directory):
    """Walk `directory` and convert every multi-read fast5 file found into
    single-read fast5 files under a 'single' subdirectory."""
    #go through fast5 files and check if the files is multi or single fast5 file
    #we need a single fast5 file
    for root, dirs, files in os.walk(directory):
        for name in files:
            if name.endswith(".fast5"):
                fobj = fast5_interface.get_fast5_file(os.path.join(root, name))
                if fast5_interface.check_file_type(fobj) == "multi-read":
                    #convert file to single fast5
                    print("converting fast5 file****")
                    multi_to_single_fast5.convert_multi_to_single(os.path.join(root, name), directory, "single")
| [
"os.listdir",
"subprocess.run",
"os.path.join",
"os.getcwd",
"SignalExtractor.Nanopolish.nanopolish_events",
"SequenceGenerator.align.minimapAligner",
"ont_fast5_api.fast5_interface.check_file_type",
"sys.exit",
"os.stat",
"os.system",
"os.walk"
] | [((355, 382), 'os.listdir', 'os.listdir', (['"""Data/basecall"""'], {}), "('Data/basecall')\n", (365, 382), False, 'import os, sys\n'), ((6056, 6077), 'os.listdir', 'os.listdir', (['"""./Data/"""'], {}), "('./Data/')\n", (6066, 6077), False, 'import os, sys\n'), ((8588, 8606), 'os.listdir', 'os.listdir', (['"""Data"""'], {}), "('Data')\n", (8598, 8606), False, 'import os, sys\n'), ((9067, 9085), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (9074, 9085), False, 'import os, sys\n'), ((838, 873), 'subprocess.run', 'subprocess.run', (['[bcCmd]'], {'check': '(True)'}), '([bcCmd], check=True)\n', (852, 873), False, 'import subprocess\n'), ((2574, 2594), 'os.listdir', 'os.listdir', (['fastfile'], {}), '(fastfile)\n', (2584, 2594), False, 'import os, sys\n'), ((5168, 5188), 'os.listdir', 'os.listdir', (['fastfile'], {}), '(fastfile)\n', (5178, 5188), False, 'import os, sys\n'), ((6290, 6308), 'os.listdir', 'os.listdir', (['"""Data"""'], {}), "('Data')\n", (6300, 6308), False, 'import os, sys\n'), ((6601, 6641), 'SequenceGenerator.align.minimapAligner', 'align.minimapAligner', (['fastfile', 'ref_file'], {}), '(fastfile, ref_file)\n', (6621, 6641), True, 'import SequenceGenerator.align as align\n'), ((751, 762), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (760, 762), False, 'import os, sys\n'), ((1853, 1863), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1861, 1863), False, 'import os, sys\n'), ((2072, 2105), 'os.stat', 'os.stat', (['"""Data/basecall/reads.fa"""'], {}), "('Data/basecall/reads.fa')\n", (2079, 2105), False, 'import os, sys\n'), ((2520, 2531), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2529, 2531), False, 'import os, sys\n'), ((3505, 3516), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3514, 3516), False, 'import os, sys\n'), ((4069, 4231), 'os.system', 'os.system', (['"""wget -O grch38.fna.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/001/405/GCA_000001405.15_GRCh38/GCA_000001405.15_GRCh38_genomic.fna.gz"""'], {}), "(\n 'wget -O 
grch38.fna.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/001/405/GCA_000001405.15_GRCh38/GCA_000001405.15_GRCh38_genomic.fna.gz'\n )\n", (4078, 4231), False, 'import os, sys\n'), ((4465, 4499), 'os.system', 'os.system', (['"""gzip -d grch38.fna.gz"""'], {}), "('gzip -d grch38.fna.gz')\n", (4474, 4499), False, 'import os, sys\n'), ((5114, 5125), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5123, 5125), False, 'import os, sys\n'), ((6471, 6511), 'SequenceGenerator.align.minimapAligner', 'align.minimapAligner', (['fastfile', 'ref_file'], {}), '(fastfile, ref_file)\n', (6491, 6511), True, 'import SequenceGenerator.align as align\n'), ((6875, 6893), 'os.listdir', 'os.listdir', (['"""Data"""'], {}), "('Data')\n", (6885, 6893), False, 'import os, sys\n'), ((4658, 4669), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4667, 4669), False, 'import os, sys\n'), ((5465, 5476), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5474, 5476), False, 'import os, sys\n'), ((6898, 6938), 'os.stat', 'os.stat', (['"""Data/reads-ref.eventalign.txt"""'], {}), "('Data/reads-ref.eventalign.txt')\n", (6905, 6938), False, 'import os, sys\n'), ((7325, 7396), 'SignalExtractor.Nanopolish.nanopolish_events', 'events.nanopolish_events', (['filename', '"""Data/basecall/"""'], {'referenceFile': 'ref'}), "(filename, 'Data/basecall/', referenceFile=ref)\n", (7349, 7396), True, 'import SignalExtractor.Nanopolish as events\n'), ((7446, 7514), 'SignalExtractor.Nanopolish.nanopolish_events', 'events.nanopolish_events', (['fpath', '"""Data/basecall/"""'], {'referenceFile': 'ref'}), "(fpath, 'Data/basecall/', referenceFile=ref)\n", (7470, 7514), True, 'import SignalExtractor.Nanopolish as events\n'), ((564, 593), 'os.stat', 'os.stat', (["('Data/basecall/' + f)"], {}), "('Data/basecall/' + f)\n", (571, 593), False, 'import os, sys\n'), ((1481, 1501), 'os.listdir', 'os.listdir', (['fastPath'], {}), '(fastPath)\n', (1491, 1501), False, 'import os, sys\n'), ((8659, 8699), 'os.stat', 'os.stat', 
(['"""Data/reads-ref.eventalign.txt"""'], {}), "('Data/reads-ref.eventalign.txt')\n", (8666, 8699), False, 'import os, sys\n'), ((9211, 9235), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (9223, 9235), False, 'import os, sys\n'), ((9257, 9294), 'ont_fast5_api.fast5_interface.check_file_type', 'fast5_interface.check_file_type', (['fobj'], {}), '(fobj)\n', (9288, 9294), False, 'from ont_fast5_api import fast5_interface\n'), ((2777, 2788), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2786, 2788), False, 'import os, sys\n'), ((9486, 9510), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (9498, 9510), False, 'import os, sys\n')] |
from setuptools import setup
# Packaging metadata for the potnanny REST API service.
# Runtime dependencies, pinned only where the companion core package
# requires an exact version.
_REQUIREMENTS = [
    'requests',
    'passlib',
    'sqlalchemy',
    'marshmallow',
    'flask',
    'flask-restful',
    'flask-jwt-extended',
    'flask-wtf',
    'potnanny-core==0.2.9',
]

setup(
    name='potnanny-api',
    version='0.2.6',
    packages=['potnanny_api'],
    include_package_data=True,
    description='Part of the Potnanny greenhouse controller application. Contains Flask REST API and basic web interface.',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/jeffleary00/potnanny-api',
    install_requires=_REQUIREMENTS,
)
| [
"setuptools.setup"
] | [((30, 528), 'setuptools.setup', 'setup', ([], {'name': '"""potnanny-api"""', 'version': '"""0.2.6"""', 'packages': "['potnanny_api']", 'include_package_data': '(True)', 'description': '"""Part of the Potnanny greenhouse controller application. Contains Flask REST API and basic web interface."""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/jeffleary00/potnanny-api"""', 'install_requires': "['requests', 'passlib', 'sqlalchemy', 'marshmallow', 'flask',\n 'flask-restful', 'flask-jwt-extended', 'flask-wtf', 'potnanny-core==0.2.9']"}), "(name='potnanny-api', version='0.2.6', packages=['potnanny_api'],\n include_package_data=True, description=\n 'Part of the Potnanny greenhouse controller application. Contains Flask REST API and basic web interface.'\n , author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/jeffleary00/potnanny-api', install_requires=[\n 'requests', 'passlib', 'sqlalchemy', 'marshmallow', 'flask',\n 'flask-restful', 'flask-jwt-extended', 'flask-wtf', 'potnanny-core==0.2.9']\n )\n", (35, 528), False, 'from setuptools import setup\n')] |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.0
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# Refuse to run on interpreters older than the baseline SWIG 4.0 targets.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
    raise RuntimeError('Python 2.7 or later required')
# Import the low-level C/C++ module
# (package-relative when this wrapper lives inside a package, plain otherwise).
if __package__ or '.' in __name__:
    from . import _envcpp
else:
    import _envcpp
# Expose the builtins module under the Python-2 name used by the rest of
# this generated file.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Set attribute *name* on a SWIG proxy instance.

    Resolution order: the special "thisown"/"this" SWIG slots, then the
    class's ``__swig_setmethods__`` setter table; otherwise the attribute
    is only created when *static* is falsy, else AttributeError is raised.
    """
    if name == "thisown":
        # Ownership flag is stored on the underlying SwigPyObject.
        return self.this.own(value)
    if name == "this":
        if type(value).__name__ == 'SwigPyObject':
            # Rebind the C-pointer wrapper directly in the instance dict.
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        # Delegate to the generated C-level setter for this attribute.
        return method(self, value)
    if not static:
        # Dynamic proxies may grow new Python-side attributes.
        object.__setattr__(self, name, value)
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown names become ordinary instance attributes.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """Get attribute *name* via the class's ``__swig_getmethods__`` table."""
    if name == "thisown":
        # Ownership flag lives on the underlying SwigPyObject.
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    """repr() for proxies: class identity plus the wrapped SwigPyObject."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        # No underlying C object yet (e.g. construction failed/abstract).
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    """Wrap setter *set* so instances reject brand-new attribute names.

    Writes are allowed only for the SWIG "this"/"thisown" slots and for
    names that already exist as properties on the type.
    """
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            # Only writes that go through a declared property are accepted.
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    """Wrap setter *set* so classes reject new (non-property) class attributes."""
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            # Updating an existing, non-property class attribute is allowed.
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    def wrapper(cls):
        # Re-create the class under the given metaclass, preserving its dict.
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Route class-level attribute writes through the guarded setter above.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class SwigPyIterator(object):
    """Abstract proxy over SWIG's C++ iterator; all work is delegated to _envcpp."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Abstract base: concrete iterators are created on the C++ side.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _envcpp.delete_SwigPyIterator
    def value(self):
        return _envcpp.SwigPyIterator_value(self)
    def incr(self, n=1):
        return _envcpp.SwigPyIterator_incr(self, n)
    def decr(self, n=1):
        return _envcpp.SwigPyIterator_decr(self, n)
    def distance(self, x):
        return _envcpp.SwigPyIterator_distance(self, x)
    def equal(self, x):
        return _envcpp.SwigPyIterator_equal(self, x)
    def copy(self):
        return _envcpp.SwigPyIterator_copy(self)
    def next(self):
        return _envcpp.SwigPyIterator_next(self)
    def __next__(self):
        return _envcpp.SwigPyIterator___next__(self)
    def previous(self):
        return _envcpp.SwigPyIterator_previous(self)
    def advance(self, n):
        return _envcpp.SwigPyIterator_advance(self, n)
    def __eq__(self, x):
        return _envcpp.SwigPyIterator___eq__(self, x)
    def __ne__(self, x):
        return _envcpp.SwigPyIterator___ne__(self, x)
    def __iadd__(self, n):
        return _envcpp.SwigPyIterator___iadd__(self, n)
    def __isub__(self, n):
        return _envcpp.SwigPyIterator___isub__(self, n)
    def __add__(self, n):
        return _envcpp.SwigPyIterator___add__(self, n)
    def __sub__(self, *args):
        return _envcpp.SwigPyIterator___sub__(self, *args)
    def __iter__(self):
        return self
# Register SwigPyIterator in _envcpp:
_envcpp.SwigPyIterator_swigregister(SwigPyIterator)
class vectori(object):
    """SWIG proxy for a C++ std::vector (vectori — presumably std::vector<int>;
    confirm against the .i interface file). Every method delegates to _envcpp."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def iterator(self):
        return _envcpp.vectori_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        return _envcpp.vectori___nonzero__(self)
    def __bool__(self):
        return _envcpp.vectori___bool__(self)
    def __len__(self):
        return _envcpp.vectori___len__(self)
    def __getslice__(self, i, j):
        return _envcpp.vectori___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _envcpp.vectori___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _envcpp.vectori___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _envcpp.vectori___delitem__(self, *args)
    def __getitem__(self, *args):
        return _envcpp.vectori___getitem__(self, *args)
    def __setitem__(self, *args):
        return _envcpp.vectori___setitem__(self, *args)
    def pop(self):
        return _envcpp.vectori_pop(self)
    def append(self, x):
        return _envcpp.vectori_append(self, x)
    def empty(self):
        return _envcpp.vectori_empty(self)
    def size(self):
        return _envcpp.vectori_size(self)
    def swap(self, v):
        return _envcpp.vectori_swap(self, v)
    def begin(self):
        return _envcpp.vectori_begin(self)
    def end(self):
        return _envcpp.vectori_end(self)
    def rbegin(self):
        return _envcpp.vectori_rbegin(self)
    def rend(self):
        return _envcpp.vectori_rend(self)
    def clear(self):
        return _envcpp.vectori_clear(self)
    def get_allocator(self):
        return _envcpp.vectori_get_allocator(self)
    def pop_back(self):
        return _envcpp.vectori_pop_back(self)
    def erase(self, *args):
        return _envcpp.vectori_erase(self, *args)
    def __init__(self, *args):
        # Construct the C++ vector and bind it to this proxy instance.
        _envcpp.vectori_swiginit(self, _envcpp.new_vectori(*args))
    def push_back(self, x):
        return _envcpp.vectori_push_back(self, x)
    def front(self):
        return _envcpp.vectori_front(self)
    def back(self):
        return _envcpp.vectori_back(self)
    def assign(self, n, x):
        return _envcpp.vectori_assign(self, n, x)
    def resize(self, *args):
        return _envcpp.vectori_resize(self, *args)
    def insert(self, *args):
        return _envcpp.vectori_insert(self, *args)
    def reserve(self, n):
        return _envcpp.vectori_reserve(self, n)
    def capacity(self):
        return _envcpp.vectori_capacity(self)
    __swig_destroy__ = _envcpp.delete_vectori
# Register vectori in _envcpp:
_envcpp.vectori_swigregister(vectori)
class vectord(object):
    """SWIG proxy for a C++ std::vector (vectord — presumably std::vector<double>;
    confirm against the .i interface file). Every method delegates to _envcpp."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def iterator(self):
        return _envcpp.vectord_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        return _envcpp.vectord___nonzero__(self)
    def __bool__(self):
        return _envcpp.vectord___bool__(self)
    def __len__(self):
        return _envcpp.vectord___len__(self)
    def __getslice__(self, i, j):
        return _envcpp.vectord___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _envcpp.vectord___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _envcpp.vectord___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _envcpp.vectord___delitem__(self, *args)
    def __getitem__(self, *args):
        return _envcpp.vectord___getitem__(self, *args)
    def __setitem__(self, *args):
        return _envcpp.vectord___setitem__(self, *args)
    def pop(self):
        return _envcpp.vectord_pop(self)
    def append(self, x):
        return _envcpp.vectord_append(self, x)
    def empty(self):
        return _envcpp.vectord_empty(self)
    def size(self):
        return _envcpp.vectord_size(self)
    def swap(self, v):
        return _envcpp.vectord_swap(self, v)
    def begin(self):
        return _envcpp.vectord_begin(self)
    def end(self):
        return _envcpp.vectord_end(self)
    def rbegin(self):
        return _envcpp.vectord_rbegin(self)
    def rend(self):
        return _envcpp.vectord_rend(self)
    def clear(self):
        return _envcpp.vectord_clear(self)
    def get_allocator(self):
        return _envcpp.vectord_get_allocator(self)
    def pop_back(self):
        return _envcpp.vectord_pop_back(self)
    def erase(self, *args):
        return _envcpp.vectord_erase(self, *args)
    def __init__(self, *args):
        # Construct the C++ vector and bind it to this proxy instance.
        _envcpp.vectord_swiginit(self, _envcpp.new_vectord(*args))
    def push_back(self, x):
        return _envcpp.vectord_push_back(self, x)
    def front(self):
        return _envcpp.vectord_front(self)
    def back(self):
        return _envcpp.vectord_back(self)
    def assign(self, n, x):
        return _envcpp.vectord_assign(self, n, x)
    def resize(self, *args):
        return _envcpp.vectord_resize(self, *args)
    def insert(self, *args):
        return _envcpp.vectord_insert(self, *args)
    def reserve(self, n):
        return _envcpp.vectord_reserve(self, n)
    def capacity(self):
        return _envcpp.vectord_capacity(self)
    __swig_destroy__ = _envcpp.delete_vectord
# Register vectord in _envcpp:
_envcpp.vectord_swigregister(vectord)
class vectors(object):
    """SWIG proxy for a C++ std::vector (vectors — presumably std::vector<std::string>;
    confirm against the .i interface file). Every method delegates to _envcpp."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def iterator(self):
        return _envcpp.vectors_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        return _envcpp.vectors___nonzero__(self)
    def __bool__(self):
        return _envcpp.vectors___bool__(self)
    def __len__(self):
        return _envcpp.vectors___len__(self)
    def __getslice__(self, i, j):
        return _envcpp.vectors___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _envcpp.vectors___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _envcpp.vectors___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _envcpp.vectors___delitem__(self, *args)
    def __getitem__(self, *args):
        return _envcpp.vectors___getitem__(self, *args)
    def __setitem__(self, *args):
        return _envcpp.vectors___setitem__(self, *args)
    def pop(self):
        return _envcpp.vectors_pop(self)
    def append(self, x):
        return _envcpp.vectors_append(self, x)
    def empty(self):
        return _envcpp.vectors_empty(self)
    def size(self):
        return _envcpp.vectors_size(self)
    def swap(self, v):
        return _envcpp.vectors_swap(self, v)
    def begin(self):
        return _envcpp.vectors_begin(self)
    def end(self):
        return _envcpp.vectors_end(self)
    def rbegin(self):
        return _envcpp.vectors_rbegin(self)
    def rend(self):
        return _envcpp.vectors_rend(self)
    def clear(self):
        return _envcpp.vectors_clear(self)
    def get_allocator(self):
        return _envcpp.vectors_get_allocator(self)
    def pop_back(self):
        return _envcpp.vectors_pop_back(self)
    def erase(self, *args):
        return _envcpp.vectors_erase(self, *args)
    def __init__(self, *args):
        # Construct the C++ vector and bind it to this proxy instance.
        _envcpp.vectors_swiginit(self, _envcpp.new_vectors(*args))
    def push_back(self, x):
        return _envcpp.vectors_push_back(self, x)
    def front(self):
        return _envcpp.vectors_front(self)
    def back(self):
        return _envcpp.vectors_back(self)
    def assign(self, n, x):
        return _envcpp.vectors_assign(self, n, x)
    def resize(self, *args):
        return _envcpp.vectors_resize(self, *args)
    def insert(self, *args):
        return _envcpp.vectors_insert(self, *args)
    def reserve(self, n):
        return _envcpp.vectors_reserve(self, n)
    def capacity(self):
        return _envcpp.vectors_capacity(self)
    __swig_destroy__ = _envcpp.delete_vectors
# Register vectors in _envcpp:
_envcpp.vectors_swigregister(vectors)
class Environment(object):
    """SWIG proxy for the C++ Environment class.

    Judging by the attribute names (video chunks, vmaf, buffer, mahimahi
    traces) this appears to model adaptive-bitrate video streaming over
    recorded network traces — the actual semantics live in the C++ side;
    confirm against the .i interface / C++ sources.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, filedir):
        # filedir is forwarded verbatim to the C++ constructor.
        _envcpp.Environment_swiginit(self, _envcpp.new_Environment(filedir))
    __swig_destroy__ = _envcpp.delete_Environment
    def get_download_time(self, video_chunk_size):
        return _envcpp.Environment_get_download_time(self, video_chunk_size)
    def reset_download_time(self):
        return _envcpp.Environment_reset_download_time(self)
    def get_video_chunk(self, quality):
        return _envcpp.Environment_get_video_chunk(self, quality)
    def get_optimal(self, last_video_vmaf):
        return _envcpp.Environment_get_optimal(self, last_video_vmaf)
    # The properties below are direct views onto C++ member variables.
    optimal = property(_envcpp.Environment_optimal_get, _envcpp.Environment_optimal_set)
    delay0 = property(_envcpp.Environment_delay0_get, _envcpp.Environment_delay0_set)
    sleep_time0 = property(_envcpp.Environment_sleep_time0_get, _envcpp.Environment_sleep_time0_set)
    return_buffer_size0 = property(_envcpp.Environment_return_buffer_size0_get, _envcpp.Environment_return_buffer_size0_set)
    rebuf0 = property(_envcpp.Environment_rebuf0_get, _envcpp.Environment_rebuf0_set)
    video_chunk_size0 = property(_envcpp.Environment_video_chunk_size0_get, _envcpp.Environment_video_chunk_size0_set)
    end_of_video0 = property(_envcpp.Environment_end_of_video0_get, _envcpp.Environment_end_of_video0_set)
    video_chunk_remain0 = property(_envcpp.Environment_video_chunk_remain0_get, _envcpp.Environment_video_chunk_remain0_set)
    video_chunk_vmaf0 = property(_envcpp.Environment_video_chunk_vmaf0_get, _envcpp.Environment_video_chunk_vmaf0_set)
    all_cooked_bw = property(_envcpp.Environment_all_cooked_bw_get, _envcpp.Environment_all_cooked_bw_set)
    all_cooked_time = property(_envcpp.Environment_all_cooked_time_get, _envcpp.Environment_all_cooked_time_set)
    CHUNK_COMBO_OPTIONS = property(_envcpp.Environment_CHUNK_COMBO_OPTIONS_get, _envcpp.Environment_CHUNK_COMBO_OPTIONS_set)
    all_file_names = property(_envcpp.Environment_all_file_names_get, _envcpp.Environment_all_file_names_set)
    video_chunk_counter = property(_envcpp.Environment_video_chunk_counter_get, _envcpp.Environment_video_chunk_counter_set)
    buffer_size = property(_envcpp.Environment_buffer_size_get, _envcpp.Environment_buffer_size_set)
    trace_idx = property(_envcpp.Environment_trace_idx_get, _envcpp.Environment_trace_idx_set)
    cooked_time = property(_envcpp.Environment_cooked_time_get, _envcpp.Environment_cooked_time_set)
    cooked_bw = property(_envcpp.Environment_cooked_bw_get, _envcpp.Environment_cooked_bw_set)
    mahimahi_start_ptr = property(_envcpp.Environment_mahimahi_start_ptr_get, _envcpp.Environment_mahimahi_start_ptr_set)
    mahimahi_ptr = property(_envcpp.Environment_mahimahi_ptr_get, _envcpp.Environment_mahimahi_ptr_set)
    last_mahimahi_time = property(_envcpp.Environment_last_mahimahi_time_get, _envcpp.Environment_last_mahimahi_time_set)
    virtual_mahimahi_ptr = property(_envcpp.Environment_virtual_mahimahi_ptr_get, _envcpp.Environment_virtual_mahimahi_ptr_set)
    virtual_last_mahimahi_time = property(_envcpp.Environment_virtual_last_mahimahi_time_get, _envcpp.Environment_virtual_last_mahimahi_time_set)
# Register Environment in _envcpp:
_envcpp.Environment_swigregister(Environment)
| [
"_envcpp.vectord_begin",
"_envcpp.vectori_end",
"_envcpp.vectors_capacity",
"_envcpp.SwigPyIterator_swigregister",
"_envcpp.SwigPyIterator_incr",
"_envcpp.vectori___getitem__",
"_envcpp.vectors_empty",
"_envcpp.vectors_back",
"_envcpp.vectors_size",
"_envcpp.vectord_resize",
"_envcpp.SwigPyItera... | [((4694, 4745), '_envcpp.SwigPyIterator_swigregister', '_envcpp.SwigPyIterator_swigregister', (['SwigPyIterator'], {}), '(SwigPyIterator)\n', (4729, 4745), False, 'import _envcpp\n'), ((7443, 7480), '_envcpp.vectori_swigregister', '_envcpp.vectori_swigregister', (['vectori'], {}), '(vectori)\n', (7471, 7480), False, 'import _envcpp\n'), ((10178, 10215), '_envcpp.vectord_swigregister', '_envcpp.vectord_swigregister', (['vectord'], {}), '(vectord)\n', (10206, 10215), False, 'import _envcpp\n'), ((12913, 12950), '_envcpp.vectors_swigregister', '_envcpp.vectors_swigregister', (['vectors'], {}), '(vectors)\n', (12941, 12950), False, 'import _envcpp\n'), ((16303, 16348), '_envcpp.Environment_swigregister', '_envcpp.Environment_swigregister', (['Environment'], {}), '(Environment)\n', (16335, 16348), False, 'import _envcpp\n'), ((3380, 3414), '_envcpp.SwigPyIterator_value', '_envcpp.SwigPyIterator_value', (['self'], {}), '(self)\n', (3408, 3414), False, 'import _envcpp\n'), ((3456, 3492), '_envcpp.SwigPyIterator_incr', '_envcpp.SwigPyIterator_incr', (['self', 'n'], {}), '(self, n)\n', (3483, 3492), False, 'import _envcpp\n'), ((3534, 3570), '_envcpp.SwigPyIterator_decr', '_envcpp.SwigPyIterator_decr', (['self', 'n'], {}), '(self, n)\n', (3561, 3570), False, 'import _envcpp\n'), ((3614, 3654), '_envcpp.SwigPyIterator_distance', '_envcpp.SwigPyIterator_distance', (['self', 'x'], {}), '(self, x)\n', (3645, 3654), False, 'import _envcpp\n'), ((3695, 3732), '_envcpp.SwigPyIterator_equal', '_envcpp.SwigPyIterator_equal', (['self', 'x'], {}), '(self, x)\n', (3723, 3732), False, 'import _envcpp\n'), ((3769, 3802), '_envcpp.SwigPyIterator_copy', '_envcpp.SwigPyIterator_copy', (['self'], {}), '(self)\n', (3796, 3802), False, 'import _envcpp\n'), ((3839, 3872), '_envcpp.SwigPyIterator_next', '_envcpp.SwigPyIterator_next', (['self'], {}), '(self)\n', (3866, 3872), False, 'import _envcpp\n'), ((3913, 3950), '_envcpp.SwigPyIterator___next__', 
'_envcpp.SwigPyIterator___next__', (['self'], {}), '(self)\n', (3944, 3950), False, 'import _envcpp\n'), ((3991, 4028), '_envcpp.SwigPyIterator_previous', '_envcpp.SwigPyIterator_previous', (['self'], {}), '(self)\n', (4022, 4028), False, 'import _envcpp\n'), ((4071, 4110), '_envcpp.SwigPyIterator_advance', '_envcpp.SwigPyIterator_advance', (['self', 'n'], {}), '(self, n)\n', (4101, 4110), False, 'import _envcpp\n'), ((4152, 4190), '_envcpp.SwigPyIterator___eq__', '_envcpp.SwigPyIterator___eq__', (['self', 'x'], {}), '(self, x)\n', (4181, 4190), False, 'import _envcpp\n'), ((4232, 4270), '_envcpp.SwigPyIterator___ne__', '_envcpp.SwigPyIterator___ne__', (['self', 'x'], {}), '(self, x)\n', (4261, 4270), False, 'import _envcpp\n'), ((4314, 4354), '_envcpp.SwigPyIterator___iadd__', '_envcpp.SwigPyIterator___iadd__', (['self', 'n'], {}), '(self, n)\n', (4345, 4354), False, 'import _envcpp\n'), ((4398, 4438), '_envcpp.SwigPyIterator___isub__', '_envcpp.SwigPyIterator___isub__', (['self', 'n'], {}), '(self, n)\n', (4429, 4438), False, 'import _envcpp\n'), ((4481, 4520), '_envcpp.SwigPyIterator___add__', '_envcpp.SwigPyIterator___add__', (['self', 'n'], {}), '(self, n)\n', (4511, 4520), False, 'import _envcpp\n'), ((4567, 4610), '_envcpp.SwigPyIterator___sub__', '_envcpp.SwigPyIterator___sub__', (['self', '*args'], {}), '(self, *args)\n', (4597, 4610), False, 'import _envcpp\n'), ((4938, 4968), '_envcpp.vectori_iterator', '_envcpp.vectori_iterator', (['self'], {}), '(self)\n', (4962, 4968), False, 'import _envcpp\n'), ((5067, 5100), '_envcpp.vectori___nonzero__', '_envcpp.vectori___nonzero__', (['self'], {}), '(self)\n', (5094, 5100), False, 'import _envcpp\n'), ((5141, 5171), '_envcpp.vectori___bool__', '_envcpp.vectori___bool__', (['self'], {}), '(self)\n', (5165, 5171), False, 'import _envcpp\n'), ((5211, 5240), '_envcpp.vectori___len__', '_envcpp.vectori___len__', (['self'], {}), '(self)\n', (5234, 5240), False, 'import _envcpp\n'), ((5291, 5331), 
'_envcpp.vectori___getslice__', '_envcpp.vectori___getslice__', (['self', 'i', 'j'], {}), '(self, i, j)\n', (5319, 5331), False, 'import _envcpp\n'), ((5383, 5424), '_envcpp.vectori___setslice__', '_envcpp.vectori___setslice__', (['self', '*args'], {}), '(self, *args)\n', (5411, 5424), False, 'import _envcpp\n'), ((5475, 5515), '_envcpp.vectori___delslice__', '_envcpp.vectori___delslice__', (['self', 'i', 'j'], {}), '(self, i, j)\n', (5503, 5515), False, 'import _envcpp\n'), ((5566, 5606), '_envcpp.vectori___delitem__', '_envcpp.vectori___delitem__', (['self', '*args'], {}), '(self, *args)\n', (5593, 5606), False, 'import _envcpp\n'), ((5657, 5697), '_envcpp.vectori___getitem__', '_envcpp.vectori___getitem__', (['self', '*args'], {}), '(self, *args)\n', (5684, 5697), False, 'import _envcpp\n'), ((5748, 5788), '_envcpp.vectori___setitem__', '_envcpp.vectori___setitem__', (['self', '*args'], {}), '(self, *args)\n', (5775, 5788), False, 'import _envcpp\n'), ((5824, 5849), '_envcpp.vectori_pop', '_envcpp.vectori_pop', (['self'], {}), '(self)\n', (5843, 5849), False, 'import _envcpp\n'), ((5891, 5922), '_envcpp.vectori_append', '_envcpp.vectori_append', (['self', 'x'], {}), '(self, x)\n', (5913, 5922), False, 'import _envcpp\n'), ((5960, 5987), '_envcpp.vectori_empty', '_envcpp.vectori_empty', (['self'], {}), '(self)\n', (5981, 5987), False, 'import _envcpp\n'), ((6024, 6050), '_envcpp.vectori_size', '_envcpp.vectori_size', (['self'], {}), '(self)\n', (6044, 6050), False, 'import _envcpp\n'), ((6090, 6119), '_envcpp.vectori_swap', '_envcpp.vectori_swap', (['self', 'v'], {}), '(self, v)\n', (6110, 6119), False, 'import _envcpp\n'), ((6157, 6184), '_envcpp.vectori_begin', '_envcpp.vectori_begin', (['self'], {}), '(self)\n', (6178, 6184), False, 'import _envcpp\n'), ((6220, 6245), '_envcpp.vectori_end', '_envcpp.vectori_end', (['self'], {}), '(self)\n', (6239, 6245), False, 'import _envcpp\n'), ((6284, 6312), '_envcpp.vectori_rbegin', '_envcpp.vectori_rbegin', (['self'], 
{}), '(self)\n', (6306, 6312), False, 'import _envcpp\n'), ((6349, 6375), '_envcpp.vectori_rend', '_envcpp.vectori_rend', (['self'], {}), '(self)\n', (6369, 6375), False, 'import _envcpp\n'), ((6413, 6440), '_envcpp.vectori_clear', '_envcpp.vectori_clear', (['self'], {}), '(self)\n', (6434, 6440), False, 'import _envcpp\n'), ((6486, 6521), '_envcpp.vectori_get_allocator', '_envcpp.vectori_get_allocator', (['self'], {}), '(self)\n', (6515, 6521), False, 'import _envcpp\n'), ((6562, 6592), '_envcpp.vectori_pop_back', '_envcpp.vectori_pop_back', (['self'], {}), '(self)\n', (6586, 6592), False, 'import _envcpp\n'), ((6637, 6671), '_envcpp.vectori_erase', '_envcpp.vectori_erase', (['self', '*args'], {}), '(self, *args)\n', (6658, 6671), False, 'import _envcpp\n'), ((6815, 6849), '_envcpp.vectori_push_back', '_envcpp.vectori_push_back', (['self', 'x'], {}), '(self, x)\n', (6840, 6849), False, 'import _envcpp\n'), ((6887, 6914), '_envcpp.vectori_front', '_envcpp.vectori_front', (['self'], {}), '(self)\n', (6908, 6914), False, 'import _envcpp\n'), ((6951, 6977), '_envcpp.vectori_back', '_envcpp.vectori_back', (['self'], {}), '(self)\n', (6971, 6977), False, 'import _envcpp\n'), ((7022, 7056), '_envcpp.vectori_assign', '_envcpp.vectori_assign', (['self', 'n', 'x'], {}), '(self, n, x)\n', (7044, 7056), False, 'import _envcpp\n'), ((7102, 7137), '_envcpp.vectori_resize', '_envcpp.vectori_resize', (['self', '*args'], {}), '(self, *args)\n', (7124, 7137), False, 'import _envcpp\n'), ((7183, 7218), '_envcpp.vectori_insert', '_envcpp.vectori_insert', (['self', '*args'], {}), '(self, *args)\n', (7205, 7218), False, 'import _envcpp\n'), ((7261, 7293), '_envcpp.vectori_reserve', '_envcpp.vectori_reserve', (['self', 'n'], {}), '(self, n)\n', (7284, 7293), False, 'import _envcpp\n'), ((7334, 7364), '_envcpp.vectori_capacity', '_envcpp.vectori_capacity', (['self'], {}), '(self)\n', (7358, 7364), False, 'import _envcpp\n'), ((7673, 7703), '_envcpp.vectord_iterator', 
'_envcpp.vectord_iterator', (['self'], {}), '(self)\n', (7697, 7703), False, 'import _envcpp\n'), ((7802, 7835), '_envcpp.vectord___nonzero__', '_envcpp.vectord___nonzero__', (['self'], {}), '(self)\n', (7829, 7835), False, 'import _envcpp\n'), ((7876, 7906), '_envcpp.vectord___bool__', '_envcpp.vectord___bool__', (['self'], {}), '(self)\n', (7900, 7906), False, 'import _envcpp\n'), ((7946, 7975), '_envcpp.vectord___len__', '_envcpp.vectord___len__', (['self'], {}), '(self)\n', (7969, 7975), False, 'import _envcpp\n'), ((8026, 8066), '_envcpp.vectord___getslice__', '_envcpp.vectord___getslice__', (['self', 'i', 'j'], {}), '(self, i, j)\n', (8054, 8066), False, 'import _envcpp\n'), ((8118, 8159), '_envcpp.vectord___setslice__', '_envcpp.vectord___setslice__', (['self', '*args'], {}), '(self, *args)\n', (8146, 8159), False, 'import _envcpp\n'), ((8210, 8250), '_envcpp.vectord___delslice__', '_envcpp.vectord___delslice__', (['self', 'i', 'j'], {}), '(self, i, j)\n', (8238, 8250), False, 'import _envcpp\n'), ((8301, 8341), '_envcpp.vectord___delitem__', '_envcpp.vectord___delitem__', (['self', '*args'], {}), '(self, *args)\n', (8328, 8341), False, 'import _envcpp\n'), ((8392, 8432), '_envcpp.vectord___getitem__', '_envcpp.vectord___getitem__', (['self', '*args'], {}), '(self, *args)\n', (8419, 8432), False, 'import _envcpp\n'), ((8483, 8523), '_envcpp.vectord___setitem__', '_envcpp.vectord___setitem__', (['self', '*args'], {}), '(self, *args)\n', (8510, 8523), False, 'import _envcpp\n'), ((8559, 8584), '_envcpp.vectord_pop', '_envcpp.vectord_pop', (['self'], {}), '(self)\n', (8578, 8584), False, 'import _envcpp\n'), ((8626, 8657), '_envcpp.vectord_append', '_envcpp.vectord_append', (['self', 'x'], {}), '(self, x)\n', (8648, 8657), False, 'import _envcpp\n'), ((8695, 8722), '_envcpp.vectord_empty', '_envcpp.vectord_empty', (['self'], {}), '(self)\n', (8716, 8722), False, 'import _envcpp\n'), ((8759, 8785), '_envcpp.vectord_size', '_envcpp.vectord_size', (['self'], {}), 
'(self)\n', (8779, 8785), False, 'import _envcpp\n'), ((8825, 8854), '_envcpp.vectord_swap', '_envcpp.vectord_swap', (['self', 'v'], {}), '(self, v)\n', (8845, 8854), False, 'import _envcpp\n'), ((8892, 8919), '_envcpp.vectord_begin', '_envcpp.vectord_begin', (['self'], {}), '(self)\n', (8913, 8919), False, 'import _envcpp\n'), ((8955, 8980), '_envcpp.vectord_end', '_envcpp.vectord_end', (['self'], {}), '(self)\n', (8974, 8980), False, 'import _envcpp\n'), ((9019, 9047), '_envcpp.vectord_rbegin', '_envcpp.vectord_rbegin', (['self'], {}), '(self)\n', (9041, 9047), False, 'import _envcpp\n'), ((9084, 9110), '_envcpp.vectord_rend', '_envcpp.vectord_rend', (['self'], {}), '(self)\n', (9104, 9110), False, 'import _envcpp\n'), ((9148, 9175), '_envcpp.vectord_clear', '_envcpp.vectord_clear', (['self'], {}), '(self)\n', (9169, 9175), False, 'import _envcpp\n'), ((9221, 9256), '_envcpp.vectord_get_allocator', '_envcpp.vectord_get_allocator', (['self'], {}), '(self)\n', (9250, 9256), False, 'import _envcpp\n'), ((9297, 9327), '_envcpp.vectord_pop_back', '_envcpp.vectord_pop_back', (['self'], {}), '(self)\n', (9321, 9327), False, 'import _envcpp\n'), ((9372, 9406), '_envcpp.vectord_erase', '_envcpp.vectord_erase', (['self', '*args'], {}), '(self, *args)\n', (9393, 9406), False, 'import _envcpp\n'), ((9550, 9584), '_envcpp.vectord_push_back', '_envcpp.vectord_push_back', (['self', 'x'], {}), '(self, x)\n', (9575, 9584), False, 'import _envcpp\n'), ((9622, 9649), '_envcpp.vectord_front', '_envcpp.vectord_front', (['self'], {}), '(self)\n', (9643, 9649), False, 'import _envcpp\n'), ((9686, 9712), '_envcpp.vectord_back', '_envcpp.vectord_back', (['self'], {}), '(self)\n', (9706, 9712), False, 'import _envcpp\n'), ((9757, 9791), '_envcpp.vectord_assign', '_envcpp.vectord_assign', (['self', 'n', 'x'], {}), '(self, n, x)\n', (9779, 9791), False, 'import _envcpp\n'), ((9837, 9872), '_envcpp.vectord_resize', '_envcpp.vectord_resize', (['self', '*args'], {}), '(self, *args)\n', (9859, 
9872), False, 'import _envcpp\n'), ((9918, 9953), '_envcpp.vectord_insert', '_envcpp.vectord_insert', (['self', '*args'], {}), '(self, *args)\n', (9940, 9953), False, 'import _envcpp\n'), ((9996, 10028), '_envcpp.vectord_reserve', '_envcpp.vectord_reserve', (['self', 'n'], {}), '(self, n)\n', (10019, 10028), False, 'import _envcpp\n'), ((10069, 10099), '_envcpp.vectord_capacity', '_envcpp.vectord_capacity', (['self'], {}), '(self)\n', (10093, 10099), False, 'import _envcpp\n'), ((10408, 10438), '_envcpp.vectors_iterator', '_envcpp.vectors_iterator', (['self'], {}), '(self)\n', (10432, 10438), False, 'import _envcpp\n'), ((10537, 10570), '_envcpp.vectors___nonzero__', '_envcpp.vectors___nonzero__', (['self'], {}), '(self)\n', (10564, 10570), False, 'import _envcpp\n'), ((10611, 10641), '_envcpp.vectors___bool__', '_envcpp.vectors___bool__', (['self'], {}), '(self)\n', (10635, 10641), False, 'import _envcpp\n'), ((10681, 10710), '_envcpp.vectors___len__', '_envcpp.vectors___len__', (['self'], {}), '(self)\n', (10704, 10710), False, 'import _envcpp\n'), ((10761, 10801), '_envcpp.vectors___getslice__', '_envcpp.vectors___getslice__', (['self', 'i', 'j'], {}), '(self, i, j)\n', (10789, 10801), False, 'import _envcpp\n'), ((10853, 10894), '_envcpp.vectors___setslice__', '_envcpp.vectors___setslice__', (['self', '*args'], {}), '(self, *args)\n', (10881, 10894), False, 'import _envcpp\n'), ((10945, 10985), '_envcpp.vectors___delslice__', '_envcpp.vectors___delslice__', (['self', 'i', 'j'], {}), '(self, i, j)\n', (10973, 10985), False, 'import _envcpp\n'), ((11036, 11076), '_envcpp.vectors___delitem__', '_envcpp.vectors___delitem__', (['self', '*args'], {}), '(self, *args)\n', (11063, 11076), False, 'import _envcpp\n'), ((11127, 11167), '_envcpp.vectors___getitem__', '_envcpp.vectors___getitem__', (['self', '*args'], {}), '(self, *args)\n', (11154, 11167), False, 'import _envcpp\n'), ((11218, 11258), '_envcpp.vectors___setitem__', '_envcpp.vectors___setitem__', (['self', 
'*args'], {}), '(self, *args)\n', (11245, 11258), False, 'import _envcpp\n'), ((11294, 11319), '_envcpp.vectors_pop', '_envcpp.vectors_pop', (['self'], {}), '(self)\n', (11313, 11319), False, 'import _envcpp\n'), ((11361, 11392), '_envcpp.vectors_append', '_envcpp.vectors_append', (['self', 'x'], {}), '(self, x)\n', (11383, 11392), False, 'import _envcpp\n'), ((11430, 11457), '_envcpp.vectors_empty', '_envcpp.vectors_empty', (['self'], {}), '(self)\n', (11451, 11457), False, 'import _envcpp\n'), ((11494, 11520), '_envcpp.vectors_size', '_envcpp.vectors_size', (['self'], {}), '(self)\n', (11514, 11520), False, 'import _envcpp\n'), ((11560, 11589), '_envcpp.vectors_swap', '_envcpp.vectors_swap', (['self', 'v'], {}), '(self, v)\n', (11580, 11589), False, 'import _envcpp\n'), ((11627, 11654), '_envcpp.vectors_begin', '_envcpp.vectors_begin', (['self'], {}), '(self)\n', (11648, 11654), False, 'import _envcpp\n'), ((11690, 11715), '_envcpp.vectors_end', '_envcpp.vectors_end', (['self'], {}), '(self)\n', (11709, 11715), False, 'import _envcpp\n'), ((11754, 11782), '_envcpp.vectors_rbegin', '_envcpp.vectors_rbegin', (['self'], {}), '(self)\n', (11776, 11782), False, 'import _envcpp\n'), ((11819, 11845), '_envcpp.vectors_rend', '_envcpp.vectors_rend', (['self'], {}), '(self)\n', (11839, 11845), False, 'import _envcpp\n'), ((11883, 11910), '_envcpp.vectors_clear', '_envcpp.vectors_clear', (['self'], {}), '(self)\n', (11904, 11910), False, 'import _envcpp\n'), ((11956, 11991), '_envcpp.vectors_get_allocator', '_envcpp.vectors_get_allocator', (['self'], {}), '(self)\n', (11985, 11991), False, 'import _envcpp\n'), ((12032, 12062), '_envcpp.vectors_pop_back', '_envcpp.vectors_pop_back', (['self'], {}), '(self)\n', (12056, 12062), False, 'import _envcpp\n'), ((12107, 12141), '_envcpp.vectors_erase', '_envcpp.vectors_erase', (['self', '*args'], {}), '(self, *args)\n', (12128, 12141), False, 'import _envcpp\n'), ((12285, 12319), '_envcpp.vectors_push_back', 
'_envcpp.vectors_push_back', (['self', 'x'], {}), '(self, x)\n', (12310, 12319), False, 'import _envcpp\n'), ((12357, 12384), '_envcpp.vectors_front', '_envcpp.vectors_front', (['self'], {}), '(self)\n', (12378, 12384), False, 'import _envcpp\n'), ((12421, 12447), '_envcpp.vectors_back', '_envcpp.vectors_back', (['self'], {}), '(self)\n', (12441, 12447), False, 'import _envcpp\n'), ((12492, 12526), '_envcpp.vectors_assign', '_envcpp.vectors_assign', (['self', 'n', 'x'], {}), '(self, n, x)\n', (12514, 12526), False, 'import _envcpp\n'), ((12572, 12607), '_envcpp.vectors_resize', '_envcpp.vectors_resize', (['self', '*args'], {}), '(self, *args)\n', (12594, 12607), False, 'import _envcpp\n'), ((12653, 12688), '_envcpp.vectors_insert', '_envcpp.vectors_insert', (['self', '*args'], {}), '(self, *args)\n', (12675, 12688), False, 'import _envcpp\n'), ((12731, 12763), '_envcpp.vectors_reserve', '_envcpp.vectors_reserve', (['self', 'n'], {}), '(self, n)\n', (12754, 12763), False, 'import _envcpp\n'), ((12804, 12834), '_envcpp.vectors_capacity', '_envcpp.vectors_capacity', (['self'], {}), '(self)\n', (12828, 12834), False, 'import _envcpp\n'), ((13335, 13396), '_envcpp.Environment_get_download_time', '_envcpp.Environment_get_download_time', (['self', 'video_chunk_size'], {}), '(self, video_chunk_size)\n', (13372, 13396), False, 'import _envcpp\n'), ((13448, 13493), '_envcpp.Environment_reset_download_time', '_envcpp.Environment_reset_download_time', (['self'], {}), '(self)\n', (13487, 13493), False, 'import _envcpp\n'), ((13550, 13600), '_envcpp.Environment_get_video_chunk', '_envcpp.Environment_get_video_chunk', (['self', 'quality'], {}), '(self, quality)\n', (13585, 13600), False, 'import _envcpp\n'), ((13661, 13715), '_envcpp.Environment_get_optimal', '_envcpp.Environment_get_optimal', (['self', 'last_video_vmaf'], {}), '(self, last_video_vmaf)\n', (13692, 13715), False, 'import _envcpp\n'), ((6743, 6769), '_envcpp.new_vectori', '_envcpp.new_vectori', (['*args'], {}), 
'(*args)\n', (6762, 6769), False, 'import _envcpp\n'), ((9478, 9504), '_envcpp.new_vectord', '_envcpp.new_vectord', (['*args'], {}), '(*args)\n', (9497, 9504), False, 'import _envcpp\n'), ((12213, 12239), '_envcpp.new_vectors', '_envcpp.new_vectors', (['*args'], {}), '(*args)\n', (12232, 12239), False, 'import _envcpp\n'), ((13184, 13216), '_envcpp.new_Environment', '_envcpp.new_Environment', (['filedir'], {}), '(filedir)\n', (13207, 13216), False, 'import _envcpp\n')] |
import click
import aiohttp
import asyncio
import re
import json
from typing import Optional, Tuple, Iterable, Union, List
from blspy import G2Element, AugSchemeMPL
from chia.cmds.wallet_funcs import get_wallet
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.config import load_config
from chia.util.ints import uint16
from chia.util.byte_types import hexstr_to_bytes
from chia.types.blockchain_format.program import Program
from clvm_tools.clvmc import compile_clvm_text
from clvm_tools.binutils import assemble
from chia.types.spend_bundle import SpendBundle
from chia.wallet.cc_wallet.cc_utils import (
construct_cc_puzzle,
CC_MOD,
SpendableCC,
unsigned_spend_bundle_for_spendable_ccs,
)
from chia.util.bech32m import decode_puzzle_hash
# Loading the client requires the standard chia root directory configuration that all of the chia commands rely on
# Loading the client requires the standard chia root directory configuration that all of the chia commands rely on
async def get_client() -> Optional[WalletRpcClient]:
    """Create a WalletRpcClient from the standard chia root configuration.

    Returns the connected client on success, or ``None`` (after printing a
    diagnostic) if the config cannot be read or the wallet RPC service is
    unreachable.
    """
    # Defined up front so the error reporting below can never hit an unbound name,
    # even if load_config/config lookup fails before the port is read.
    wallet_rpc_port = None
    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        wallet_rpc_port = config["wallet"]["rpc_port"]
        return await WalletRpcClient.create(
            self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config
        )
    except aiohttp.ClientConnectorError:
        # This client talks to the wallet RPC (the original message wrongly
        # blamed the full node / "harvester").
        print(f"Connection error. Check if the wallet is running at {wallet_rpc_port}")
        return None
    except Exception as e:
        print(f"Exception while creating wallet RPC client: {e}")
        return None
async def get_signed_tx(fingerprint, ph, amt, fee):
    """Ask the wallet to create and sign a transaction paying ``amt`` mojos to
    puzzle hash ``ph`` with fee ``fee``, using the wallet selected by
    ``fingerprint``.

    Raises:
        RuntimeError: if the wallet RPC service cannot be reached.
    """
    wallet_client: Optional[WalletRpcClient] = None
    try:
        wallet_client = await get_client()
        if wallet_client is None:
            # get_client() returns None on failure; without this guard the
            # code below would crash with an AttributeError.
            raise RuntimeError("Could not connect to the wallet RPC service")
        wallet_client_f, _ = await get_wallet(wallet_client, fingerprint)
        return await wallet_client.create_signed_transaction(
            [{"puzzle_hash": ph, "amount": amt}], fee=fee
        )
    finally:
        # Only clean up when a client was actually obtained; otherwise the
        # original NameError/AttributeError raised here would mask the real
        # failure from the try block.
        if wallet_client is not None:
            wallet_client.close()
            await wallet_client.await_closed()
# The clvm loaders in this library automatically search for includable files in the directory './include'
def append_include(search_paths: Iterable[str]) -> List[str]:
    """Return *search_paths* as a list with './include' appended.

    An empty or falsy value (e.g. ``None`` or ``[]``) yields just
    ``['./include']``.
    """
    if not search_paths:
        return ["./include"]
    return [*search_paths, "./include"]
def parse_program(program: Union[str, Program], include: Iterable = ()) -> Program:
    """Coerce *program* into a ``Program``.

    Accepts, in order of detection:
      * an existing ``Program`` (returned as-is),
      * raw CLVM source (contains ``(``),
      * a hex-serialized byte string (no ``.``, so it cannot be a filename),
      * a file path whose contents are Chialisp, raw CLVM, or serialized CLVM.

    ``include`` lists extra include directories for Chialisp compilation.
    NOTE: the default was changed from the mutable ``[]`` to ``()`` — both are
    falsy and the argument is never mutated, so behavior is unchanged.
    """
    if isinstance(program, Program):
        return program
    if "(" in program:  # raw clvm source
        return Program.to(assemble(program))
    if "." not in program:  # hex byte string (a path would contain an extension dot)
        return Program.from_bytes(hexstr_to_bytes(program))
    # Otherwise treat it as a file path
    with open(program, "r") as file:
        filestring: str = file.read()
    if "(" in filestring:  # not compiled
        # TODO: This should probably be more robust
        if re.compile(r"\(mod\s").search(filestring):  # Chialisp source
            return Program.to(
                compile_clvm_text(filestring, append_include(include))
            )
        return Program.to(assemble(filestring))  # plain CLVM source
    # Serialized CLVM
    return Program.from_bytes(hexstr_to_bytes(filestring))
# NOTE(review): CONTEXT_SETTINGS is defined but is not passed to
# click.command() below — it appears to be unused; confirm before removing.
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.command()
@click.pass_context
@click.option(
    "-l",
    "--tail",
    required=True,
    help="The TAIL program to launch this CAT with",
)
@click.option(
    "-c",
    "--curry",
    multiple=True,
    help="An argument to curry into the TAIL",
)
@click.option(
    "-s",
    "--solution",
    required=True,
    default="()",
    show_default=True,
    help="The solution to the TAIL program",
)
@click.option(
    "-t",
    "--send-to",
    required=True,
    help="The address these CATs will appear at once they are issued",
)
@click.option(
    "-a",
    "--amount",
    required=True,
    type=int,
    help="The amount to issue in mojos (regular XCH will be used to fund this)",
)
@click.option(
    "-m",
    "--fee",
    required=True,
    default=0,
    show_default=True,
    help="The XCH fee to use for this issuance",
)
@click.option(
    "-f",
    "--fingerprint",
    type=int,
    help="The wallet fingerprint to use as funds",
)
@click.option(
    "-sig",
    "--signature",
    multiple=True,
    help="A signature to aggregate with the transaction",
)
@click.option(
    "-as",
    "--spend",
    multiple=True,
    help="An additional spend to aggregate with the transaction",
)
@click.option(
    "-b",
    "--as-bytes",
    is_flag=True,
    help="Output the spend bundle as a sequence of bytes instead of JSON",
)
@click.option(
    "-sc",
    "--select-coin",
    is_flag=True,
    help="Stop the process once a coin from the wallet has been selected and return the coin",
)
def cli(
    ctx: click.Context,
    tail: str,
    curry: Tuple[str],
    solution: str,
    send_to: str,
    amount: int,
    fee: int,
    fingerprint: int,
    signature: Tuple[str],
    spend: Tuple[str],
    as_bytes: bool,
    select_coin: bool,
):
    """Issue a new CAT: build the TAIL, fund an eve coin from the wallet,
    wrap it in the CAT puzzle, and print the aggregated spend bundle
    (or, with --select-coin, just print the selected funding coin)."""
    ctx.ensure_object(dict)
    # Parse inputs; note `tail` and `solution` are rebound from str to Program here.
    tail = parse_program(tail)
    curried_args = [assemble(arg) for arg in curry]
    solution = parse_program(solution)
    address = decode_puzzle_hash(send_to)
    # Fold any extra signatures into one aggregate.
    aggregated_signature = G2Element()
    for sig in signature:
        aggregated_signature = AugSchemeMPL.aggregate(
            [aggregated_signature, G2Element.from_bytes(hexstr_to_bytes(sig))]
        )
    # Fold any extra spend bundles into one aggregate.
    aggregated_spend = SpendBundle([], G2Element())
    for bundle in spend:
        aggregated_spend = SpendBundle.aggregate(
            [aggregated_spend, SpendBundle.from_bytes(hexstr_to_bytes(bundle))]
        )
    # Construct the TAIL
    if len(curried_args) > 0:
        curried_tail = tail.curry(*curried_args)
    else:
        curried_tail = tail
    # Construct the intermediate puzzle: condition 51 with magic amount -113
    # reveals the TAIL; the second condition 51 creates the CAT for `address`.
    p2_puzzle = Program.to(
        (1, [[51, 0, -113, curried_tail, solution], [51, address, amount, [address]]])
    )
    # Wrap the intermediate puzzle in a CAT wrapper
    cat_puzzle = construct_cc_puzzle(CC_MOD, curried_tail.get_tree_hash(), p2_puzzle)
    cat_ph = cat_puzzle.get_tree_hash()
    # Get a signed transaction from the wallet
    signed_tx = asyncio.get_event_loop().run_until_complete(
        get_signed_tx(fingerprint, cat_ph, amount, fee)
    )
    # The eve coin is the addition the wallet created at the CAT puzzle hash.
    eve_coin = list(
        filter(lambda c: c.puzzle_hash == cat_ph, signed_tx.spend_bundle.additions())
    )[0]
    # This is where we exit if we're only looking for the selected coin
    if select_coin:
        primary_coin = list(
            filter(lambda c: c.name() == eve_coin.parent_coin_info, signed_tx.spend_bundle.removals())
        )[0]
        print(json.dumps(primary_coin.to_json_dict(), sort_keys=True, indent=4))
        print(f"Name: {primary_coin.name()}")
        return
    # Create the CAT spend
    spendable_eve = SpendableCC(
        eve_coin,
        curried_tail.get_tree_hash(),
        p2_puzzle,
        Program.to([]),
        limitations_solution=solution,
        limitations_program_reveal=curried_tail,
    )
    eve_spend = unsigned_spend_bundle_for_spendable_ccs(CC_MOD, [spendable_eve])
    # Aggregate everything together
    final_bundle = SpendBundle.aggregate(
        [
            signed_tx.spend_bundle,
            eve_spend,
            aggregated_spend,
            SpendBundle([], aggregated_signature),
        ]
    )
    # Emit either raw hex or pretty JSON, as requested.
    if as_bytes:
        final_bundle = bytes(final_bundle).hex()
    else:
        final_bundle = json.dumps(final_bundle.to_json_dict(), sort_keys=True, indent=4)
    print(f"Asset ID: {curried_tail.get_tree_hash()}")
    print(f"Spend Bundle: {final_bundle}")
def main():
    # Entry point: dispatch to the click command defined above.
    cli()
if __name__ == "__main__":
    main()
| [
"chia.wallet.cc_wallet.cc_utils.unsigned_spend_bundle_for_spendable_ccs",
"chia.util.bech32m.decode_puzzle_hash",
"re.compile",
"click.option",
"asyncio.get_event_loop",
"chia.util.config.load_config",
"clvm_tools.binutils.assemble",
"chia.cmds.wallet_funcs.get_wallet",
"chia.util.byte_types.hexstr_... | [((3581, 3596), 'click.command', 'click.command', ([], {}), '()\n', (3594, 3596), False, 'import click\n'), ((3618, 3715), 'click.option', 'click.option', (['"""-l"""', '"""--tail"""'], {'required': '(True)', 'help': '"""The TAIL program to launch this CAT with"""'}), "('-l', '--tail', required=True, help=\n 'The TAIL program to launch this CAT with')\n", (3630, 3715), False, 'import click\n'), ((3731, 3823), 'click.option', 'click.option', (['"""-c"""', '"""--curry"""'], {'multiple': '(True)', 'help': '"""An argument to curry into the TAIL"""'}), "('-c', '--curry', multiple=True, help=\n 'An argument to curry into the TAIL')\n", (3743, 3823), False, 'import click\n'), ((3839, 3965), 'click.option', 'click.option', (['"""-s"""', '"""--solution"""'], {'required': '(True)', 'default': '"""()"""', 'show_default': '(True)', 'help': '"""The solution to the TAIL program"""'}), "('-s', '--solution', required=True, default='()', show_default=\n True, help='The solution to the TAIL program')\n", (3851, 3965), False, 'import click\n'), ((3989, 4107), 'click.option', 'click.option', (['"""-t"""', '"""--send-to"""'], {'required': '(True)', 'help': '"""The address these CATs will appear at once they are issued"""'}), "('-t', '--send-to', required=True, help=\n 'The address these CATs will appear at once they are issued')\n", (4001, 4107), False, 'import click\n'), ((4123, 4260), 'click.option', 'click.option', (['"""-a"""', '"""--amount"""'], {'required': '(True)', 'type': 'int', 'help': '"""The amount to issue in mojos (regular XCH will be used to fund this)"""'}), "('-a', '--amount', required=True, type=int, help=\n 'The amount to issue in mojos (regular XCH will be used to fund this)')\n", (4135, 4260), False, 'import click\n'), ((4280, 4401), 'click.option', 'click.option', (['"""-m"""', '"""--fee"""'], {'required': '(True)', 'default': '(0)', 'show_default': '(True)', 'help': '"""The XCH fee to use for this issuance"""'}), "('-m', 
'--fee', required=True, default=0, show_default=True,\n help='The XCH fee to use for this issuance')\n", (4292, 4401), False, 'import click\n'), ((4426, 4523), 'click.option', 'click.option', (['"""-f"""', '"""--fingerprint"""'], {'type': 'int', 'help': '"""The wallet fingerprint to use as funds"""'}), "('-f', '--fingerprint', type=int, help=\n 'The wallet fingerprint to use as funds')\n", (4438, 4523), False, 'import click\n'), ((4539, 4648), 'click.option', 'click.option', (['"""-sig"""', '"""--signature"""'], {'multiple': '(True)', 'help': '"""A signature to aggregate with the transaction"""'}), "('-sig', '--signature', multiple=True, help=\n 'A signature to aggregate with the transaction')\n", (4551, 4648), False, 'import click\n'), ((4664, 4776), 'click.option', 'click.option', (['"""-as"""', '"""--spend"""'], {'multiple': '(True)', 'help': '"""An additional spend to aggregate with the transaction"""'}), "('-as', '--spend', multiple=True, help=\n 'An additional spend to aggregate with the transaction')\n", (4676, 4776), False, 'import click\n'), ((4792, 4914), 'click.option', 'click.option', (['"""-b"""', '"""--as-bytes"""'], {'is_flag': '(True)', 'help': '"""Output the spend bundle as a sequence of bytes instead of JSON"""'}), "('-b', '--as-bytes', is_flag=True, help=\n 'Output the spend bundle as a sequence of bytes instead of JSON')\n", (4804, 4914), False, 'import click\n'), ((4930, 5081), 'click.option', 'click.option', (['"""-sc"""', '"""--select-coin"""'], {'is_flag': '(True)', 'help': '"""Stop the process once a coin from the wallet has been selected and return the coin"""'}), "('-sc', '--select-coin', is_flag=True, help=\n 'Stop the process once a coin from the wallet has been selected and return the coin'\n )\n", (4942, 5081), False, 'import click\n'), ((5513, 5540), 'chia.util.bech32m.decode_puzzle_hash', 'decode_puzzle_hash', (['send_to'], {}), '(send_to)\n', (5531, 5540), False, 'from chia.util.bech32m import decode_puzzle_hash\n'), ((5569, 5580), 
'blspy.G2Element', 'G2Element', ([], {}), '()\n', (5578, 5580), False, 'from blspy import G2Element, AugSchemeMPL\n'), ((6169, 6263), 'chia.types.blockchain_format.program.Program.to', 'Program.to', (['(1, [[51, 0, -113, curried_tail, solution], [51, address, amount, [address]]])'], {}), '((1, [[51, 0, -113, curried_tail, solution], [51, address, amount,\n [address]]]))\n', (6179, 6263), False, 'from chia.types.blockchain_format.program import Program\n'), ((7391, 7455), 'chia.wallet.cc_wallet.cc_utils.unsigned_spend_bundle_for_spendable_ccs', 'unsigned_spend_bundle_for_spendable_ccs', (['CC_MOD', '[spendable_eve]'], {}), '(CC_MOD, [spendable_eve])\n', (7430, 7455), False, 'from chia.wallet.cc_wallet.cc_utils import construct_cc_puzzle, CC_MOD, SpendableCC, unsigned_spend_bundle_for_spendable_ccs\n'), ((1028, 1073), 'chia.util.config.load_config', 'load_config', (['DEFAULT_ROOT_PATH', '"""config.yaml"""'], {}), "(DEFAULT_ROOT_PATH, 'config.yaml')\n", (1039, 1073), False, 'from chia.util.config import load_config\n'), ((5428, 5441), 'clvm_tools.binutils.assemble', 'assemble', (['arg'], {}), '(arg)\n', (5436, 5441), False, 'from clvm_tools.binutils import assemble\n'), ((5791, 5802), 'blspy.G2Element', 'G2Element', ([], {}), '()\n', (5800, 5802), False, 'from blspy import G2Element, AugSchemeMPL\n'), ((7265, 7279), 'chia.types.blockchain_format.program.Program.to', 'Program.to', (['[]'], {}), '([])\n', (7275, 7279), False, 'from chia.types.blockchain_format.program import Program\n'), ((1812, 1850), 'chia.cmds.wallet_funcs.get_wallet', 'get_wallet', (['wallet_client', 'fingerprint'], {}), '(wallet_client, fingerprint)\n', (1822, 1850), False, 'from chia.cmds.wallet_funcs import get_wallet\n'), ((6517, 6541), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6539, 6541), False, 'import asyncio\n'), ((7646, 7683), 'chia.types.spend_bundle.SpendBundle', 'SpendBundle', (['[]', 'aggregated_signature'], {}), '([], aggregated_signature)\n', (7657, 
7683), False, 'from chia.types.spend_bundle import SpendBundle\n'), ((1264, 1290), 'chia.util.ints.uint16', 'uint16', (['full_node_rpc_port'], {}), '(full_node_rpc_port)\n', (1270, 1290), False, 'from chia.util.ints import uint16\n'), ((2638, 2655), 'clvm_tools.binutils.assemble', 'assemble', (['program'], {}), '(program)\n', (2646, 2655), False, 'from clvm_tools.binutils import assemble\n'), ((2753, 2777), 'chia.util.byte_types.hexstr_to_bytes', 'hexstr_to_bytes', (['program'], {}), '(program)\n', (2768, 2777), False, 'from chia.util.byte_types import hexstr_to_bytes\n'), ((5718, 5738), 'chia.util.byte_types.hexstr_to_bytes', 'hexstr_to_bytes', (['sig'], {}), '(sig)\n', (5733, 5738), False, 'from chia.util.byte_types import hexstr_to_bytes\n'), ((5933, 5956), 'chia.util.byte_types.hexstr_to_bytes', 'hexstr_to_bytes', (['bundle'], {}), '(bundle)\n', (5948, 5956), False, 'from chia.util.byte_types import hexstr_to_bytes\n'), ((3467, 3494), 'chia.util.byte_types.hexstr_to_bytes', 'hexstr_to_bytes', (['filestring'], {}), '(filestring)\n', (3482, 3494), False, 'from chia.util.byte_types import hexstr_to_bytes\n'), ((3051, 3074), 're.compile', 're.compile', (['"""\\\\(mod\\\\s"""'], {}), "('\\\\(mod\\\\s')\n", (3061, 3074), False, 'import re\n'), ((3350, 3370), 'clvm_tools.binutils.assemble', 'assemble', (['filestring'], {}), '(filestring)\n', (3358, 3370), False, 'from clvm_tools.binutils import assemble\n')] |
# -*- coding: utf-8 -*-
# Author: <NAME>
# Date: 2019-4-28
import tensorflow as tf
from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer
from tensorflow.nn import dropout
import numpy as np
# Model dimensions (consumed by TagNet below)
n_z = 3584  # input feature dimension (placeholder z)
n_y = 300   # output tag dimension (placeholder y)

# Dataset paths — presumably filled in elsewhere; TODO confirm they are used.
MSVD_PATH = None
MSRVTT_PATH = None
MSVD_GT_PATH = None
MSRVTT_GT_PATH = None

# Training hyperparameters
max_epochs = 1000
lr = 0.0002
keep_prob = 1.0
# batch_size was assigned twice in the original (128, then 64); only the
# final value was ever effective, so the dead first assignment is removed.
batch_size = 64
class TagNet():
    """Three-layer fully connected network (TF1 graph) mapping an n_z feature
    vector to n_y sigmoid tag scores, trained with per-tag binary
    cross-entropy."""
    def __init__(self):
        # Build everything inside a private graph so the model is self-contained.
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Inputs: y = tag labels (presumably multi-hot — TODO confirm),
            # z = input feature vector, keep_prob = dropout keep probability.
            self.y = placeholder(tf.float32, [None, n_y])
            self.z = placeholder(tf.float32, [None, n_z])
            self.keep_prob = placeholder(tf.float32, [])
            # Weights/biases for three dense layers: n_z -> 512 -> 512 -> n_y.
            self.Wy1 = tf.get_variable('Wy1', [n_z, 512], tf.float32, glorot_normal_initializer())
            self.by1 = tf.get_variable('by1', [512], tf.float32, zeros_initializer())
            self.Wy2 = tf.get_variable('Wy2', [512, 512], tf.float32, glorot_normal_initializer())
            self.by2 = tf.get_variable('by2', [512], tf.float32, zeros_initializer())
            self.Wy3 = tf.get_variable('Wy3', [512, n_y], tf.float32, glorot_normal_initializer())
            self.by3 = tf.get_variable('by3', [n_y], tf.float32, zeros_initializer())
            # Forward pass: dropout is applied to the input and after each
            # hidden ReLU layer; the output layer uses a sigmoid per tag.
            z = dropout(self.z, self.keep_prob)
            h = tf.nn.relu(tf.matmul(z, self.Wy1) + self.by1)
            h = dropout(h, self.keep_prob)
            h = tf.nn.relu(tf.matmul(h, self.Wy2) + self.by2)
            h = dropout(h, self.keep_prob)
            self.pred = tf.sigmoid(tf.matmul(h, self.Wy3) + self.by3)
            # Elementwise binary cross-entropy; 1e-6 guards against log(0).
            cost = -self.y * tf.log(self.pred + 1e-6) - (1. - self.y) * tf.log(1. - self.pred + 1e-6)
            # Sum over tags, mean over the batch.
            self.cost = tf.reduce_mean(tf.reduce_sum(cost, 1))
            # Accuracy: threshold predictions at 0.5 and compare elementwise
            # with the (cast) labels, then average over all tags and samples.
            self.pred_mask = tf.cast(self.pred >= 0.5, tf.int32)
            self.tmp = tf.cast(self.y, tf.int32)
            self.acc_mask = tf.cast(tf.equal(self.tmp, self.pred_mask), tf.float32)
            self.acc = tf.reduce_mean(self.acc_mask)
| [
"tensorflow.Graph",
"tensorflow.equal",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.zeros_initializer",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.log",
"tensorflow.glorot_normal_initializer"
] | [((461, 471), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (469, 471), True, 'import tensorflow as tf\n'), ((531, 567), 'tensorflow.placeholder', 'placeholder', (['tf.float32', '[None, n_y]'], {}), '(tf.float32, [None, n_y])\n', (542, 567), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((589, 625), 'tensorflow.placeholder', 'placeholder', (['tf.float32', '[None, n_z]'], {}), '(tf.float32, [None, n_z])\n', (600, 625), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((655, 682), 'tensorflow.placeholder', 'placeholder', (['tf.float32', '[]'], {}), '(tf.float32, [])\n', (666, 682), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((1255, 1286), 'tensorflow.nn.dropout', 'dropout', (['self.z', 'self.keep_prob'], {}), '(self.z, self.keep_prob)\n', (1262, 1286), False, 'from tensorflow.nn import dropout\n'), ((1365, 1391), 'tensorflow.nn.dropout', 'dropout', (['h', 'self.keep_prob'], {}), '(h, self.keep_prob)\n', (1372, 1391), False, 'from tensorflow.nn import dropout\n'), ((1470, 1496), 'tensorflow.nn.dropout', 'dropout', (['h', 'self.keep_prob'], {}), '(h, self.keep_prob)\n', (1477, 1496), False, 'from tensorflow.nn import dropout\n'), ((1764, 1799), 'tensorflow.cast', 'tf.cast', (['(self.pred >= 0.5)', 'tf.int32'], {}), '(self.pred >= 0.5, tf.int32)\n', (1771, 1799), True, 'import tensorflow as tf\n'), ((1823, 1848), 'tensorflow.cast', 'tf.cast', (['self.y', 'tf.int32'], {}), '(self.y, tf.int32)\n', (1830, 1848), True, 'import tensorflow as tf\n'), ((1956, 1985), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.acc_mask'], {}), '(self.acc_mask)\n', (1970, 1985), True, 'import tensorflow as tf\n'), ((753, 780), 'tensorflow.glorot_normal_initializer', 'glorot_normal_initializer', ([], {}), '()\n', (778, 780), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((847, 866), 
'tensorflow.zeros_initializer', 'zeros_initializer', ([], {}), '()\n', (864, 866), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((938, 965), 'tensorflow.glorot_normal_initializer', 'glorot_normal_initializer', ([], {}), '()\n', (963, 965), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((1032, 1051), 'tensorflow.zeros_initializer', 'zeros_initializer', ([], {}), '()\n', (1049, 1051), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((1123, 1150), 'tensorflow.glorot_normal_initializer', 'glorot_normal_initializer', ([], {}), '()\n', (1148, 1150), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((1217, 1236), 'tensorflow.zeros_initializer', 'zeros_initializer', ([], {}), '()\n', (1234, 1236), False, 'from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer\n'), ((1710, 1732), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cost', '(1)'], {}), '(cost, 1)\n', (1723, 1732), True, 'import tensorflow as tf\n'), ((1885, 1919), 'tensorflow.equal', 'tf.equal', (['self.tmp', 'self.pred_mask'], {}), '(self.tmp, self.pred_mask)\n', (1893, 1919), True, 'import tensorflow as tf\n'), ((1314, 1336), 'tensorflow.matmul', 'tf.matmul', (['z', 'self.Wy1'], {}), '(z, self.Wy1)\n', (1323, 1336), True, 'import tensorflow as tf\n'), ((1419, 1441), 'tensorflow.matmul', 'tf.matmul', (['h', 'self.Wy2'], {}), '(h, self.Wy2)\n', (1428, 1441), True, 'import tensorflow as tf\n'), ((1533, 1555), 'tensorflow.matmul', 'tf.matmul', (['h', 'self.Wy3'], {}), '(h, self.Wy3)\n', (1542, 1555), True, 'import tensorflow as tf\n'), ((1598, 1623), 'tensorflow.log', 'tf.log', (['(self.pred + 1e-06)'], {}), '(self.pred + 1e-06)\n', (1604, 1623), True, 'import tensorflow as tf\n'), ((1641, 1672), 'tensorflow.log', 'tf.log', (['(1.0 - self.pred + 1e-06)'], {}), '(1.0 - self.pred + 1e-06)\n', (1647, 1672), 
True, 'import tensorflow as tf\n')] |
"""
Explore raw composites based on indices from predicted testing data and
showing all the difference OHC levels for OBSERVATIONS
Author : <NAME>
Date : 21 September 2021
Version : 2 (mostly for testing)
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import numpy as np
import calc_Utilities as UT
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_dataFunctions as df
import calc_Stats as dSS
from netCDF4 import Dataset
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries — these module-level globals are read throughout the
### script (including inside the dataset-reader functions below).
modelGCMs = ['CESM2le']
dataset_obs = 'ERA5'
allDataLabels = modelGCMs
monthlychoiceq = ['annual']
variables = ['T2M']
vari_predict = ['SST','OHC100','OHC300','OHC700']  # predictor fields composited below
reg_name = 'SMILEGlobe'
level = 'surface'
###############################################################################
###############################################################################
randomalso = False
timeper = 'hiatus'
shuffletype = 'GAUSS'
###############################################################################
###############################################################################
land_only = False
ocean_only = False
###############################################################################
###############################################################################
baseline = np.arange(1951,1980+1,1)
###############################################################################
###############################################################################
### window == 0 disables the rolling-window preprocessing flags below.
window = 0
if window == 0:
    rm_standard_dev = False
    ravel_modelens = False
    ravelmodeltime = False
else:
    rm_standard_dev = True
    ravelmodeltime = False
    ravel_modelens = True
yearsall = np.arange(1979+window,2099+1,1)
yearsobs = np.arange(1979+window,2020+1,1)
###############################################################################
###############################################################################
numOfEns = 40
lentime = len(yearsall)
###############################################################################
###############################################################################
lat_bounds,lon_bounds = UT.regions(reg_name)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
lensalso = True
###############################################################################
###############################################################################
### Remove ensemble mean
rm_ensemble_mean = True
###############################################################################
###############################################################################
### Accuracy for composites. `accurate` may be a bool or one of the sentinel
### strings 'WRONG'/'HIATUS', so the explicit `== True` comparisons below are
### deliberate (plain truthiness would match the strings too).
### NOTE(review): there is no final else, so an unrecognized value would leave
### `typemodel` undefined; `typemodel` also appears unused in the visible code.
accurate = True
if accurate == True:
    typemodel = 'correcthiatus_obs'
elif accurate == False:
    typemodel = 'extrahiatus_obs'
elif accurate == 'WRONG':
    typemodel = 'wronghiatus_obs'
elif accurate == 'HIATUS':
    typemodel = 'allhiatus_obs'
###############################################################################
###############################################################################
### Call functions
trendlength = 10
AGWstart = 1990
# NOTE(review): years_newmodel / years_newobs are not referenced in the
# visible code below — confirm before removing.
years_newmodel = np.arange(AGWstart,yearsall[-1]-8,1)
years_newobs = np.arange(AGWstart,yearsobs[-1]-8,1)
vv = 0
mo = 0
variq = variables[vv]
monthlychoice = monthlychoiceq[mo]
directoryfigure = '/Users/zlabe/Desktop/GmstTrendPrediction/ANN_v2/Obs/'
saveData = monthlychoice + '_' + variq + '_' + reg_name + '_' + dataset_obs
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
### Function to read in predictor variables (SST/OHC)
def read_primary_dataset(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
    """Read one model dataset via calc_dataFunctions and subset it to the
    requested lat/lon region. Returns (regional_data, lats, lons)."""
    raw, lat_grid, lon_grid = df.readFiles(
        variq, dataset, monthlychoice, numOfEns, lensalso, randomalso,
        ravelyearsbinary, ravelbinary, shuffletype, timeper)
    regional, lat_grid, lon_grid = df.getRegion(
        raw, lat_grid, lon_grid, lat_bounds, lon_bounds)
    # Note: the shape printed is the full (pre-region) array, as in the original.
    print('\nOur dataset: ', dataset, ' is shaped', raw.shape)
    return regional, lat_grid, lon_grid
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
    """Read the observational dataset and subset it to the requested region.

    NOTE: relies on the module-level globals ``monthlychoice`` and ``timeper``.
    Returns (regional_data, lats, lons).
    """
    fields, grid_lat, grid_lon = df.readFiles(
        variq, dataset_obs, monthlychoice, numOfEns, lensalso, randomalso,
        ravelyearsbinary, ravelbinary, shuffletype, timeper)
    fields, grid_lat, grid_lon = df.getRegion(
        fields, grid_lat, grid_lon, lat_bounds, lon_bounds)
    # Unlike read_primary_dataset, the shape printed here is post-region.
    print('our OBS dataset: ', dataset_obs, ' is shaped', fields.shape)
    return fields, grid_lat, grid_lon
###############################################################################
###############################################################################
### Build a (variable, lat, lon) composite of hiatus years in observations,
### one slice per predictor field in vari_predict. 92x144 is the grid size.
ohcHIATUS = np.empty((len(vari_predict),92,144))
for vvv in range(len(vari_predict)):
    ### Read the predictor field (SST from ERA5, or the OHC dataset).
    models_var = []
    for i in range(len(modelGCMs)):  # modelGCMs has one entry, so this runs once
        if vari_predict[vvv][:3] == 'OHC':
            obs_predict = 'OHC'
        else:
            obs_predict = 'ERA5'
        obsq_var,lats,lons = read_obs_dataset(vari_predict[vvv],obs_predict,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)
        ### Save predictor
        models_var.append(obsq_var)
    models_var = np.asarray(models_var).squeeze()
    ### Remove the observed linear trend (rm_ensemble_mean is True above)
    if rm_ensemble_mean == True:
        models_var = dSS.remove_trend_obs(models_var,'surface')
        print('\n*Removed observational linear trend*')
    ### Standardize each grid point over the observational years
    models_varravel = models_var.squeeze().reshape(yearsobs.shape[0],lats.shape[0]*lons.shape[0])
    meanvar = np.nanmean(models_varravel,axis=0)
    stdvar = np.nanstd(models_varravel,axis=0)
    modelsstd_varravel = (models_varravel-meanvar)/stdvar
    models_var = modelsstd_varravel.reshape(yearsobs.shape[0],lats.shape[0],lons.shape[0])
    ### Keep only years from AGWstart (1990) onward
    yearsq_m = np.where((yearsobs >= AGWstart))[0]
    models_slice = models_var[yearsq_m,:,:]
    ### ANN hyperparameters used only to reconstruct `savename` below.
    ### NOTE(review): these rebind module-level names (variq, batch_size, ...).
    if rm_ensemble_mean == False:
        variq = 'T2M'
        fac = 0.7
        random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
        random_network_seed = 87750
        hidden = [20,20]
        n_epochs = 500
        batch_size = 128
        lr_here = 0.001
        ridgePenalty = 0.05
        actFun = 'relu'
        fractWeight = 0.5
    elif rm_ensemble_mean == True:
        variq = 'T2M'
        fac = 0.7
        random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
        random_network_seed = 87750
        hidden = [30,30]
        n_epochs = 500
        batch_size = 128
        lr_here = 0.001
        ridgePenalty = 0.5
        actFun = 'relu'
        fractWeight = 0.5
    else:
        print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
        sys.exit()
    ### Naming conventions for files.
    ### NOTE(review): 'OHC100' is hard-coded, so the same OHC100-trained
    ### network's labels are used for every composited variable — presumably
    ### intentional (the label files do not depend on vvv); confirm.
    directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
    savename = 'ANNv2_'+'OHC100'+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
    if(rm_ensemble_mean==True):
        savename = savename + '_EnsembleMeanRemoved'
    ### Directories to save files
    directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
    ###########################################################################
    ### Read in the network's observed predictions and the actual hiatus labels
    actual_test = np.genfromtxt(directorydata + 'obsActualLabels_' + savename + '.txt')
    predict_test = np.genfromtxt(directorydata + 'obsLabels_' + savename+ '.txt')
    ### Labels are one value per year (no ensemble dimension for obs)
    act_re = actual_test
    pre_re = predict_test
    ### Slice ensembles for testing data
    ohcready = models_slice[:,:,:].squeeze()
    ### Collect the years matching the chosen `accurate` mode
    if accurate == True: ### correct predictions (predicted hiatus AND actual hiatus)
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (pre_re[yr]) == 1 and (act_re[yr] == 1):
                ohc_allenscomp.append(ohcready[yr,:,:])
    elif accurate == False: ### picks all hiatus predictions
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if pre_re[yr] == 1:
                ohc_allenscomp.append(ohcready[yr,:,:])
    elif accurate == 'WRONG': ### picks hiatus predictions that are wrong
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (pre_re[yr]) == 1 and (act_re[yr] == 0):
                ohc_allenscomp.append(ohcready[yr,:,:])
    elif accurate == 'HIATUS': ### all actual hiatus years, regardless of prediction
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (act_re[yr] == 1):
                ohc_allenscomp.append(ohcready[yr,:,:])
    else:
        print(ValueError('SOMETHING IS WRONG WITH ACCURACY COMPOSITES!'))
        sys.exit()
    ### Composite (mean over all selected years) for this predictor variable
    ohcHIATUS[vvv,:,:] = np.nanmean(np.asarray(ohc_allenscomp),axis=0)
###############################################################################
###############################################################################
### Loop through to read all the variables
# Composite the OHC/SST predictor fields around predicted hiatus onsets,
# averaged over years lag1..lag2 after each onset.
lag1 = 3
lag2 = 7
lag = lag2-lag1
# One lat x lon composite per predictor variable.
# NOTE(review): assumes a 92 x 144 (lat x lon) grid — confirm against the data.
ohcHIATUSlag = np.empty((len(vari_predict),92,144))
for vvv in range(len(vari_predict)):
    ### Function to read in predictor variables (SST/OHC)
    models_var = []
    for i in range(len(modelGCMs)):
        # OHC fields come from the OHC product; everything else from ERA5.
        if vari_predict[vvv][:3] == 'OHC':
            obs_predict = 'OHC'
        else:
            obs_predict = 'ERA5'
        obsq_var,lats,lons = read_obs_dataset(vari_predict[vvv],obs_predict,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)
        ### Save predictor
        models_var.append(obsq_var)
    models_var = np.asarray(models_var).squeeze()
    ### Remove ensemble mean
    if rm_ensemble_mean == True:
        models_var = dSS.remove_trend_obs(models_var,'surface')
        print('\n*Removed observational linear trend*')
    ### Standardize (z-score each grid point over the observed record)
    models_varravel = models_var.squeeze().reshape(yearsobs.shape[0],lats.shape[0]*lons.shape[0])
    meanvar = np.nanmean(models_varravel,axis=0)
    stdvar = np.nanstd(models_varravel,axis=0)
    modelsstd_varravel = (models_varravel-meanvar)/stdvar
    models_var = modelsstd_varravel.reshape(yearsobs.shape[0],lats.shape[0],lons.shape[0])
    ### Slice for number of years
    yearsq_m = np.where((yearsobs >= AGWstart))[0]
    models_slice = models_var[yearsq_m,:,:]
    # Hyperparameters below only reconstruct the saved-model file name; they
    # must match the settings the network was trained with.
    if rm_ensemble_mean == False:
        variq = 'T2M'
        fac = 0.7
        random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
        random_network_seed = 87750
        hidden = [20,20]
        n_epochs = 500
        batch_size = 128
        lr_here = 0.001
        ridgePenalty = 0.05
        actFun = 'relu'
        fractWeight = 0.5
    elif rm_ensemble_mean == True:
        variq = 'T2M'
        fac = 0.7
        random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
        random_network_seed = 87750
        hidden = [30,30]
        n_epochs = 500
        batch_size = 128
        lr_here = 0.001
        ridgePenalty = 0.5
        actFun = 'relu'
        fractWeight = 0.5
    else:
        print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
        sys.exit()
    ### Naming conventions for files
    directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
    savename = 'ANNv2_'+'OHC100'+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
    if(rm_ensemble_mean==True):
        savename = savename + '_EnsembleMeanRemoved'
    ### Directories to save files
    directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
    ###############################################################################
    ###############################################################################
    ###############################################################################
    ### Read in data for testing predictions and actual hiatuses
    actual_test = np.genfromtxt(directorydata + 'obsActualLabels_' + savename + '.txt')
    predict_test = np.genfromtxt(directorydata + 'obsLabels_' + savename+ '.txt')
    ### Reshape arrays for [ensemble,year]
    act_re = actual_test
    pre_re = predict_test
    ### Slice ensembles for testing data
    ohcready = models_slice[:,:,:].squeeze()
    ### Pick all hiatuses
    # Select which years enter the composite, depending on `accurate`:
    #   True     -> correctly predicted hiatuses (hits)
    #   False    -> every predicted hiatus (hits + false alarms)
    #   'WRONG'  -> predicted hiatus but none occurred (false alarms)
    #   'HIATUS' -> every actual hiatus regardless of the prediction
    if accurate == True: ### correct predictions
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (pre_re[yr]) == 1 and (act_re[yr] == 1):
                ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
    elif accurate == False: ### picks all hiatus predictions
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if pre_re[yr] == 1:
                ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
    elif accurate == 'WRONG': ### picks hiatus but is wrong
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (pre_re[yr]) == 1 and (act_re[yr] == 0):
                ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
    elif accurate == 'HIATUS': ### accurate climate change
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (act_re[yr] == 1):
                ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
    else:
        print(ValueError('SOMETHING IS WRONG WITH ACCURACY COMPOSITES!'))
        sys.exit()
    ### Composite across all years to get hiatuses
    ohcHIATUSlag[vvv,:,:] = np.nanmean(np.asarray(ohc_allenscomp),axis=0)
### Composite all for plotting
# Stack the onset composites (ohcHIATUS, computed earlier in the script)
# on top of the lagged composites so both can be plotted together.
ohc_allcomp = np.append(ohcHIATUS,ohcHIATUSlag,axis=0)
###############################################################################
###############################################################################
### Plot subplot of obser+++++++++++++++vations
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n"]
# Subplot slots: onset composites fill the left column, lagged the right.
plotloc = [1,3,5,7,2,4,6,8]
if rm_ensemble_mean == False:
    limit = np.arange(-1.5,1.51,0.02)
    barlim = np.round(np.arange(-1.5,1.6,0.5),2)
elif rm_ensemble_mean == True:
    limit = np.arange(-1.5,1.6,0.02)
    barlim = np.round(np.arange(-1.5,1.6,0.5),2)
cmap = cmocean.cm.balance
label = r'\textbf{[ HIATUS COMPOSITE ]}'
fig = plt.figure(figsize=(8,10))
###############################################################################
for ppp in range(ohc_allcomp.shape[0]):
    ax1 = plt.subplot(ohc_allcomp.shape[0]//2,2,plotloc[ppp])
    m = Basemap(projection='robin',lon_0=-180,resolution='l',area_thresh=10000)
    m.drawcoastlines(color='darkgrey',linewidth=0.27)
    ### Variable
    varn = ohc_allcomp[ppp]
    # Shift longitudes to [-180, 180] once, on the first panel only.
    if ppp == 0:
        lons = np.where(lons >180,lons-360,lons)
        x, y = np.meshgrid(lons,lats)
    circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
                               linewidth=0.7)
    circle.set_clip_on(False)
    cs1 = m.contourf(x,y,varn,limit,extend='both',latlon=True)
    cs1.set_cmap(cmap)
    m.fillcontinents(color='dimgrey',lake_color='dimgrey')
    ax1.annotate(r'\textbf{[%s]}' % letters[ppp],xy=(0,0),xytext=(0.95,0.93),
                  textcoords='axes fraction',color='k',fontsize=10,
                  rotation=0,ha='center',va='center')
    # Row labels (variable names) only on the left column of panels.
    if ppp < 4:
        ax1.annotate(r'\textbf{%s}' % vari_predict[ppp],xy=(0,0),xytext=(-0.08,0.5),
                      textcoords='axes fraction',color='dimgrey',fontsize=20,
                      rotation=90,ha='center',va='center')
    if ppp == 0:
        plt.title(r'\textbf{Onset}',fontsize=15,color='k')
    if ppp == 4:
        plt.title(r'\textbf{%s-Year Composite}' % lag,fontsize=15,color='k')
###############################################################################
# Single shared horizontal colorbar under both columns.
cbar_ax1 = fig.add_axes([0.38,0.05,0.3,0.02])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
                    extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=6,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=4)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(bottom=0.08,wspace=0.01)
if rm_ensemble_mean == True:
    plt.savefig(directoryfigure + 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s_rmENSEMBLEmean.png' % (lag,accurate,accurate),dpi=300)
else:
    plt.savefig(directoryfigure + 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s.png' % (lag,accurate,accurate),dpi=300)
"calc_dataFunctions.getRegion",
"numpy.nanmean",
"sys.exit",
"numpy.genfromtxt",
"numpy.arange",
"numpy.where",
"numpy.asarray",
"numpy.meshgrid",
"calc_Utilities.regions",
"numpy.nanstd",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"calc_Stats.remove_trend_obs",
"matplotlib.py... | [((566, 593), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (572, 593), True, 'import matplotlib.pyplot as plt\n'), ((593, 666), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})\n", (599, 666), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1755), 'numpy.arange', 'np.arange', (['(1951)', '(1980 + 1)', '(1)'], {}), '(1951, 1980 + 1, 1)\n', (1736, 1755), True, 'import numpy as np\n'), ((2118, 2155), 'numpy.arange', 'np.arange', (['(1979 + window)', '(2099 + 1)', '(1)'], {}), '(1979 + window, 2099 + 1, 1)\n', (2127, 2155), True, 'import numpy as np\n'), ((2161, 2198), 'numpy.arange', 'np.arange', (['(1979 + window)', '(2020 + 1)', '(1)'], {}), '(1979 + window, 2020 + 1, 1)\n', (2170, 2198), True, 'import numpy as np\n'), ((2575, 2595), 'calc_Utilities.regions', 'UT.regions', (['reg_name'], {}), '(reg_name)\n', (2585, 2595), True, 'import calc_Utilities as UT\n'), ((3694, 3734), 'numpy.arange', 'np.arange', (['AGWstart', '(yearsall[-1] - 8)', '(1)'], {}), '(AGWstart, yearsall[-1] - 8, 1)\n', (3703, 3734), True, 'import numpy as np\n'), ((3746, 3786), 'numpy.arange', 'np.arange', (['AGWstart', '(yearsobs[-1] - 8)', '(1)'], {}), '(AGWstart, yearsobs[-1] - 8, 1)\n', (3755, 3786), True, 'import numpy as np\n'), ((15455, 15497), 'numpy.append', 'np.append', (['ohcHIATUS', 'ohcHIATUSlag'], {'axis': '(0)'}), '(ohcHIATUS, ohcHIATUSlag, axis=0)\n', (15464, 15497), True, 'import numpy as np\n'), ((16109, 16136), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 10)'}), '(figsize=(8, 10))\n', (16119, 16136), True, 'import matplotlib.pyplot as plt\n'), ((18042, 18060), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18058, 18060), True, 'import matplotlib.pyplot as plt\n'), ((18061, 18106), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.08)', 'wspace': 
'(0.01)'}), '(bottom=0.08, wspace=0.01)\n', (18080, 18106), True, 'import matplotlib.pyplot as plt\n'), ((4458, 4590), 'calc_dataFunctions.readFiles', 'df.readFiles', (['variq', 'dataset', 'monthlychoice', 'numOfEns', 'lensalso', 'randomalso', 'ravelyearsbinary', 'ravelbinary', 'shuffletype', 'timeper'], {}), '(variq, dataset, monthlychoice, numOfEns, lensalso, randomalso,\n ravelyearsbinary, ravelbinary, shuffletype, timeper)\n', (4470, 4590), True, 'import calc_dataFunctions as df\n'), ((4600, 4654), 'calc_dataFunctions.getRegion', 'df.getRegion', (['data', 'lats', 'lons', 'lat_bounds', 'lon_bounds'], {}), '(data, lats, lons, lat_bounds, lon_bounds)\n', (4612, 4654), True, 'import calc_dataFunctions as df\n'), ((4930, 5066), 'calc_dataFunctions.readFiles', 'df.readFiles', (['variq', 'dataset_obs', 'monthlychoice', 'numOfEns', 'lensalso', 'randomalso', 'ravelyearsbinary', 'ravelbinary', 'shuffletype', 'timeper'], {}), '(variq, dataset_obs, monthlychoice, numOfEns, lensalso,\n randomalso, ravelyearsbinary, ravelbinary, shuffletype, timeper)\n', (4942, 5066), True, 'import calc_dataFunctions as df\n'), ((5087, 5153), 'calc_dataFunctions.getRegion', 'df.getRegion', (['data_obs', 'lats_obs', 'lons_obs', 'lat_bounds', 'lon_bounds'], {}), '(data_obs, lats_obs, lons_obs, lat_bounds, lon_bounds)\n', (5099, 5153), True, 'import calc_dataFunctions as df\n'), ((6427, 6462), 'numpy.nanmean', 'np.nanmean', (['models_varravel'], {'axis': '(0)'}), '(models_varravel, axis=0)\n', (6437, 6462), True, 'import numpy as np\n'), ((6475, 6509), 'numpy.nanstd', 'np.nanstd', (['models_varravel'], {'axis': '(0)'}), '(models_varravel, axis=0)\n', (6484, 6509), True, 'import numpy as np\n'), ((8722, 8791), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'obsActualLabels_' + savename + '.txt')"], {}), "(directorydata + 'obsActualLabels_' + savename + '.txt')\n", (8735, 8791), True, 'import numpy as np\n'), ((8811, 8874), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 
'obsLabels_' + savename + '.txt')"], {}), "(directorydata + 'obsLabels_' + savename + '.txt')\n", (8824, 8874), True, 'import numpy as np\n'), ((11457, 11492), 'numpy.nanmean', 'np.nanmean', (['models_varravel'], {'axis': '(0)'}), '(models_varravel, axis=0)\n', (11467, 11492), True, 'import numpy as np\n'), ((11505, 11539), 'numpy.nanstd', 'np.nanstd', (['models_varravel'], {'axis': '(0)'}), '(models_varravel, axis=0)\n', (11514, 11539), True, 'import numpy as np\n'), ((13752, 13821), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'obsActualLabels_' + savename + '.txt')"], {}), "(directorydata + 'obsActualLabels_' + savename + '.txt')\n", (13765, 13821), True, 'import numpy as np\n'), ((13841, 13904), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'obsLabels_' + savename + '.txt')"], {}), "(directorydata + 'obsLabels_' + savename + '.txt')\n", (13854, 13904), True, 'import numpy as np\n'), ((15843, 15870), 'numpy.arange', 'np.arange', (['(-1.5)', '(1.51)', '(0.02)'], {}), '(-1.5, 1.51, 0.02)\n', (15852, 15870), True, 'import numpy as np\n'), ((16266, 16321), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(ohc_allcomp.shape[0] // 2)', '(2)', 'plotloc[ppp]'], {}), '(ohc_allcomp.shape[0] // 2, 2, plotloc[ppp])\n', (16277, 16321), True, 'import matplotlib.pyplot as plt\n'), ((16326, 16400), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""robin"""', 'lon_0': '(-180)', 'resolution': '"""l"""', 'area_thresh': '(10000)'}), "(projection='robin', lon_0=-180, resolution='l', area_thresh=10000)\n", (16333, 16400), False, 'from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid\n'), ((18139, 18305), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + \n 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s_rmENSEMBLEmean.png'\n % (lag, accurate, accurate))"], {'dpi': '(300)'}), "(directoryfigure + \n 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s_rmENSEMBLEmean.png'\n % (lag, 
accurate, accurate), dpi=300)\n", (18150, 18305), True, 'import matplotlib.pyplot as plt\n'), ((18303, 18453), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + \n 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s.png' %\n (lag, accurate, accurate))"], {'dpi': '(300)'}), "(directoryfigure + \n 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s.png' %\n (lag, accurate, accurate), dpi=300)\n", (18314, 18453), True, 'import matplotlib.pyplot as plt\n'), ((6191, 6234), 'calc_Stats.remove_trend_obs', 'dSS.remove_trend_obs', (['models_var', '"""surface"""'], {}), "(models_var, 'surface')\n", (6211, 6234), True, 'import calc_Stats as dSS\n'), ((6712, 6742), 'numpy.where', 'np.where', (['(yearsobs >= AGWstart)'], {}), '(yearsobs >= AGWstart)\n', (6720, 6742), True, 'import numpy as np\n'), ((10213, 10239), 'numpy.asarray', 'np.asarray', (['ohc_allenscomp'], {}), '(ohc_allenscomp)\n', (10223, 10239), True, 'import numpy as np\n'), ((11221, 11264), 'calc_Stats.remove_trend_obs', 'dSS.remove_trend_obs', (['models_var', '"""surface"""'], {}), "(models_var, 'surface')\n", (11241, 11264), True, 'import calc_Stats as dSS\n'), ((11742, 11772), 'numpy.where', 'np.where', (['(yearsobs >= AGWstart)'], {}), '(yearsobs >= AGWstart)\n', (11750, 11772), True, 'import numpy as np\n'), ((15374, 15400), 'numpy.asarray', 'np.asarray', (['ohc_allenscomp'], {}), '(ohc_allenscomp)\n', (15384, 15400), True, 'import numpy as np\n'), ((15891, 15916), 'numpy.arange', 'np.arange', (['(-1.5)', '(1.6)', '(0.5)'], {}), '(-1.5, 1.6, 0.5)\n', (15900, 15916), True, 'import numpy as np\n'), ((15961, 15987), 'numpy.arange', 'np.arange', (['(-1.5)', '(1.6)', '(0.02)'], {}), '(-1.5, 1.6, 0.02)\n', (15970, 15987), True, 'import numpy as np\n'), ((16538, 16576), 'numpy.where', 'np.where', (['(lons > 180)', '(lons - 360)', 'lons'], {}), '(lons > 180, lons - 360, lons)\n', (16546, 16576), True, 'import numpy as np\n'), ((16587, 16610), 'numpy.meshgrid', 
'np.meshgrid', (['lons', 'lats'], {}), '(lons, lats)\n', (16598, 16610), True, 'import numpy as np\n'), ((17381, 17433), 'matplotlib.pyplot.title', 'plt.title', (['"""\\\\textbf{Onset}"""'], {'fontsize': '(15)', 'color': '"""k"""'}), "('\\\\textbf{Onset}', fontsize=15, color='k')\n", (17390, 17433), True, 'import matplotlib.pyplot as plt\n'), ((17458, 17528), 'matplotlib.pyplot.title', 'plt.title', (["('\\\\textbf{%s-Year Composite}' % lag)"], {'fontsize': '(15)', 'color': '"""k"""'}), "('\\\\textbf{%s-Year Composite}' % lag, fontsize=15, color='k')\n", (17467, 17528), True, 'import matplotlib.pyplot as plt\n'), ((6070, 6092), 'numpy.asarray', 'np.asarray', (['models_var'], {}), '(models_var)\n', (6080, 6092), True, 'import numpy as np\n'), ((6905, 7025), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt"""'], {'unpack': '(True)'}), "(\n '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt'\n , unpack=True)\n", (6918, 7025), True, 'import numpy as np\n'), ((7745, 7755), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7753, 7755), False, 'import sys\n'), ((11100, 11122), 'numpy.asarray', 'np.asarray', (['models_var'], {}), '(models_var)\n', (11110, 11122), True, 'import numpy as np\n'), ((11935, 12055), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt"""'], {'unpack': '(True)'}), "(\n '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt'\n , unpack=True)\n", (11948, 12055), True, 'import numpy as np\n'), ((12775, 12785), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12783, 12785), False, 'import sys\n'), ((16008, 16033), 'numpy.arange', 'np.arange', (['(-1.5)', '(1.6)', '(0.5)'], {}), '(-1.5, 1.6, 0.5)\n', (16017, 16033), True, 'import numpy as np\n'), ((7336, 7456), 'numpy.genfromtxt', 'np.genfromtxt', 
(['"""/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt"""'], {'unpack': '(True)'}), "(\n '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt'\n , unpack=True)\n", (7349, 7456), True, 'import numpy as np\n'), ((12366, 12486), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt"""'], {'unpack': '(True)'}), "(\n '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt'\n , unpack=True)\n", (12379, 12486), True, 'import numpy as np\n'), ((10106, 10116), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10114, 10116), False, 'import sys\n'), ((14340, 14395), 'numpy.nanmean', 'np.nanmean', (['ohcready[yr + lag1:yr + lag2, :, :]'], {'axis': '(0)'}), '(ohcready[yr + lag1:yr + lag2, :, :], axis=0)\n', (14350, 14395), True, 'import numpy as np\n'), ((15264, 15274), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15272, 15274), False, 'import sys\n'), ((14593, 14648), 'numpy.nanmean', 'np.nanmean', (['ohcready[yr + lag1:yr + lag2, :, :]'], {'axis': '(0)'}), '(ohcready[yr + lag1:yr + lag2, :, :], axis=0)\n', (14603, 14648), True, 'import numpy as np\n'), ((14869, 14924), 'numpy.nanmean', 'np.nanmean', (['ohcready[yr + lag1:yr + lag2, :, :]'], {'axis': '(0)'}), '(ohcready[yr + lag1:yr + lag2, :, :], axis=0)\n', (14879, 14924), True, 'import numpy as np\n'), ((15122, 15177), 'numpy.nanmean', 'np.nanmean', (['ohcready[yr + lag1:yr + lag2, :, :]'], {'axis': '(0)'}), '(ohcready[yr + lag1:yr + lag2, :, :], axis=0)\n', (15132, 15177), True, 'import numpy as np\n')] |
from setuptools import setup, find_packages
from os import path
from time import time
here = path.abspath(path.dirname(__file__))
if path.exists("VERSION.txt"):
# this file can be written by CI tools (e.g. Travis)
with open("VERSION.txt") as version_file:
version = version_file.read().strip().strip("v")
else:
version = str(time())
setup(
name='ckan_cloud_operator',
version=version,
description='''CKAN Cloud Kubernetes operator''',
url='https://github.com/datopian/ckan-cloud-operator',
author='''Viderum''',
license='MIT',
packages=find_packages(exclude=['examples', 'tests', '.tox']),
install_requires=[
'httpagentparser',
'boto3',
'coverage',
'psycopg2',
# 'pyyaml<5.2,>=3.10',
'kubernetes',
'click',
'toml',
# 'dataflows>=0.0.37',
# 'dataflows-shell>=0.0.8',
# 'jupyterlab',
'awscli',
'urllib3<1.25',
'ruamel.yaml<1',
'requests==2.21',
# 'python-dateutil<2.8.1',
'botocore',
],
entry_points={
'console_scripts': [
'ckan-cloud-operator = ckan_cloud_operator.cli:main',
]
},
)
| [
"os.path.dirname",
"os.path.exists",
"setuptools.find_packages",
"time.time"
] | [((135, 161), 'os.path.exists', 'path.exists', (['"""VERSION.txt"""'], {}), "('VERSION.txt')\n", (146, 161), False, 'from os import path\n'), ((107, 129), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (119, 129), False, 'from os import path\n'), ((347, 353), 'time.time', 'time', ([], {}), '()\n', (351, 353), False, 'from time import time\n'), ((587, 639), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['examples', 'tests', '.tox']"}), "(exclude=['examples', 'tests', '.tox'])\n", (600, 639), False, 'from setuptools import setup, find_packages\n')] |
import os
import os.path
def activate(ipython, venv):
    """Activate the virtualenv at *venv* inside the running interpreter.

    Executes ``<venv>/bin/activate_this.py`` (the script virtualenv ships
    for in-process activation) and exports VIRTUAL_ENV, mirroring what
    ``source bin/activate`` does in a shell. Prints a message and returns
    without side effects when *venv* is not a virtualenv.
    """
    venv = os.path.abspath(venv)
    venv_activate = os.path.join(venv, 'bin', 'activate_this.py')
    if not os.path.exists(venv_activate):
        print('Not a virtualenv: {}'.format(venv))
        return
    # activate_this.py doesn't set VIRTUAL_ENV, so we must set it here
    os.environ['VIRTUAL_ENV'] = venv
    os.putenv('VIRTUAL_ENV', venv)
    # execfile() was removed in Python 3; exec the compiled source instead,
    # keeping __file__ pointing at the activation script as before.
    with open(venv_activate) as fp:
        source = fp.read()
    exec(compile(source, venv_activate, 'exec'), {'__file__': venv_activate})
    print('Activated: {}'.format(venv))
def load(ipython):
    """Register the ``%activate`` magic with the given IPython shell.

    NOTE(review): ``define_magic`` is the legacy pre-0.13 IPython API —
    confirm the targeted IPython version still provides it.
    """
    ipython.define_magic('activate', activate)
| [
"os.path.abspath",
"os.path.exists",
"os.putenv",
"os.path.join"
] | [((145, 166), 'os.path.abspath', 'os.path.abspath', (['venv'], {}), '(venv)\n', (160, 166), False, 'import os\n'), ((187, 232), 'os.path.join', 'os.path.join', (['venv', '"""bin"""', '"""activate_this.py"""'], {}), "(venv, 'bin', 'activate_this.py')\n", (199, 232), False, 'import os\n'), ((455, 485), 'os.putenv', 'os.putenv', (['"""VIRTUAL_ENV"""', 'venv'], {}), "('VIRTUAL_ENV', venv)\n", (464, 485), False, 'import os\n'), ((245, 274), 'os.path.exists', 'os.path.exists', (['venv_activate'], {}), '(venv_activate)\n', (259, 274), False, 'import os\n')] |
from django.test import TestCase
from django_hosts import reverse
from util.test_utils import Get, assert_requesting_paths_succeeds
class UrlTests(TestCase):
    """Smoke-tests that the app's GET endpoints respond successfully."""

    def test_all_get_request_paths_succeed(self):
        # Each entry pairs a resolved URL with whether it should be
        # reachable without authentication.
        expected_requests = [
            Get(reverse('skills_present_list'), public=True),
            Get(reverse('profile'), public=False),
            Get(reverse('suggest'), public=False),
        ]
        assert_requesting_paths_succeeds(self, expected_requests)
| [
"django_hosts.reverse",
"util.test_utils.assert_requesting_paths_succeeds"
] | [((422, 477), 'util.test_utils.assert_requesting_paths_succeeds', 'assert_requesting_paths_succeeds', (['self', 'path_predicates'], {}), '(self, path_predicates)\n', (454, 477), False, 'from util.test_utils import Get, assert_requesting_paths_succeeds\n'), ((256, 286), 'django_hosts.reverse', 'reverse', (['"""skills_present_list"""'], {}), "('skills_present_list')\n", (263, 286), False, 'from django_hosts import reverse\n'), ((318, 336), 'django_hosts.reverse', 'reverse', (['"""profile"""'], {}), "('profile')\n", (325, 336), False, 'from django_hosts import reverse\n'), ((369, 387), 'django_hosts.reverse', 'reverse', (['"""suggest"""'], {}), "('suggest')\n", (376, 387), False, 'from django_hosts import reverse\n')] |
from django.contrib import admin
# Register your models here.
from .models import WorkOrder
# Expose WorkOrder in the Django admin with the default ModelAdmin options.
admin.site.register(WorkOrder)
| [
"django.contrib.admin.site.register"
] | [((94, 124), 'django.contrib.admin.site.register', 'admin.site.register', (['WorkOrder'], {}), '(WorkOrder)\n', (113, 124), False, 'from django.contrib import admin\n')] |
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.clip_grad import clip_grad_norm_
from mpi_utils.mpi_utils import sync_grads
def update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg):
    """Take one gradient step on the SAC entropy temperature.

    When ``cfg.automatic_entropy_tuning`` is off this is a no-op that simply
    wraps the fixed temperature in tensors. Otherwise it minimizes
    ``-log_alpha * (log_pi + target_entropy)`` and returns the loss together
    with a detached copy of the updated temperature for logging.
    """
    if not cfg.automatic_entropy_tuning:
        # Fixed temperature: zero loss, log the configured value unchanged.
        return torch.tensor(0.), torch.tensor(alpha)
    entropy_gap = (log_pi + target_entropy).detach()
    alpha_loss = -(log_alpha * entropy_gap).mean()
    alpha_optim.zero_grad()
    alpha_loss.backward()
    alpha_optim.step()
    alpha = log_alpha.exp()
    return alpha_loss, alpha.clone()
def update_flat(actor_network, critic_network, critic_target_network, policy_optim, critic_optim, alpha, log_alpha,
                target_entropy, alpha_optim, obs_norm, ag_norm, g_norm, obs_next_norm, actions, rewards, cfg):
    """Run one SAC gradient update for the goal-conditioned ("flat") agent.

    Network inputs are the normalized observation concatenated with the
    achieved goal and desired goal. Gradients are averaged across MPI
    workers via ``sync_grads`` before each optimizer step. Returns a dict
    of scalar training metrics.
    """
    # Build current and next-step inputs: [obs | achieved goal | goal].
    # NOTE(review): assumes all arrays are batch-major 2-D numpy arrays — confirm.
    inputs_norm = np.concatenate([obs_norm, ag_norm, g_norm], axis=1)
    inputs_next_norm = np.concatenate([obs_next_norm, ag_norm, g_norm], axis=1)
    inputs_norm_tensor = torch.tensor(inputs_norm, dtype=torch.float32)
    inputs_next_norm_tensor = torch.tensor(inputs_next_norm, dtype=torch.float32)
    actions_tensor = torch.tensor(actions, dtype=torch.float32)
    r_tensor = torch.tensor(rewards, dtype=torch.float32).reshape(rewards.shape[0], 1)
    if cfg.cuda:
        inputs_norm_tensor = inputs_norm_tensor.cuda()
        inputs_next_norm_tensor = inputs_next_norm_tensor.cuda()
        actions_tensor = actions_tensor.cuda()
        r_tensor = r_tensor.cuda()
    # Soft Bellman target: r + gamma * (min_i Q_i(s', a') - alpha * log pi(a'|s')),
    # computed without tracking gradients through the target networks.
    with torch.no_grad():
        actions_next, log_pi_next, _ = actor_network.sample(inputs_next_norm_tensor)
        qf_next_target = critic_target_network(inputs_next_norm_tensor, actions_next)
        min_qf_next_target = torch.min(qf_next_target, dim=0).values - alpha * log_pi_next
        next_q_value = r_tensor + cfg.gamma * min_qf_next_target
    # Critic loss: mean MSE between each Q head and the shared backup target.
    qf = critic_network(inputs_norm_tensor, actions_tensor)
    qf_loss = torch.stack([F.mse_loss(_qf, next_q_value) for _qf in qf]).mean()
    # Actor loss: push min-Q of freshly sampled actions up, minus entropy bonus.
    pi, log_pi, _ = actor_network.sample(inputs_norm_tensor)
    qf_pi = critic_network(inputs_norm_tensor, pi)
    min_qf_pi = torch.min(qf_pi, dim=0).values
    policy_loss = ((alpha * log_pi) - min_qf_pi).mean()
    # Update the actor network (gradients synced across MPI workers first).
    policy_optim.zero_grad()
    policy_loss.backward()
    sync_grads(actor_network)
    policy_optim.step()
    # Update the critic network, optionally clipping the gradient norm.
    critic_optim.zero_grad()
    qf_loss.backward()
    if cfg.clip_grad_norm:
        clip_grad_norm_(critic_network.parameters(), cfg.max_norm)
    sync_grads(critic_network)
    critic_optim.step()
    # Adjust the entropy temperature (no-op unless automatic tuning is on).
    alpha_loss, alpha_tlogs = update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg)
    train_metrics = dict(q_loss=qf_loss.item(),
                         next_q=next_q_value.mean().item(),
                         policy_loss=policy_loss.item(),
                         alpha_loss=alpha_loss.item(),
                         alpha_tlogs=alpha_tlogs.item())
    # Per-head diagnostics for each critic / target-critic pair.
    for idx, (_qf, _qtarget) in enumerate(zip(qf, qf_next_target)):
        train_metrics[f'q_{idx}'] = _qf.mean().item()
        train_metrics[f'q_target_{idx}'] = _qtarget.mean().item()
    return train_metrics
def update_language(actor_network, critic_network, critic_target_network, policy_optim, critic_optim, alpha, log_alpha,
                    target_entropy, alpha_optim, obs_norm, instruction, obs_next_norm, actions, rewards, cfg):
    """Run one SAC gradient update for the language-conditioned agent.

    Same structure as ``update_flat`` but the actor and critics are
    conditioned on an integer-encoded instruction instead of goal vectors.
    Gradients are averaged across MPI workers via ``sync_grads`` before
    each optimizer step. Returns a dict of scalar training metrics.
    """
    inputs_norm = obs_norm
    inputs_next_norm = obs_next_norm
    inputs_norm_tensor = torch.tensor(inputs_norm, dtype=torch.float32)
    inputs_next_norm_tensor = torch.tensor(inputs_next_norm, dtype=torch.float32)
    actions_tensor = torch.tensor(actions, dtype=torch.float32)
    r_tensor = torch.tensor(rewards, dtype=torch.float32).reshape(rewards.shape[0], 1)
    # NOTE(review): assumes `instruction` holds integer token ids — confirm.
    instruction_tensor = torch.tensor(instruction, dtype=torch.long)
    if cfg.cuda:
        inputs_norm_tensor = inputs_norm_tensor.cuda()
        inputs_next_norm_tensor = inputs_next_norm_tensor.cuda()
        actions_tensor = actions_tensor.cuda()
        r_tensor = r_tensor.cuda()
        instruction_tensor = instruction_tensor.cuda()
    # Soft Bellman target: r + gamma * (min_i Q_i(s', a') - alpha * log pi(a'|s')),
    # computed without tracking gradients through the target networks.
    with torch.no_grad():
        actions_next, log_pi_next, _ = actor_network.sample(inputs_next_norm_tensor, instruction_tensor)
        qf_next_target = critic_target_network(inputs_next_norm_tensor, actions_next, instruction_tensor)
        min_qf_next_target = torch.min(qf_next_target, dim=0).values - alpha * log_pi_next
        next_q_value = r_tensor + cfg.gamma * min_qf_next_target
    # Critic loss: mean MSE between each Q head and the shared backup target.
    qf = critic_network(inputs_norm_tensor, actions_tensor, instruction_tensor)
    qf_loss = torch.stack([F.mse_loss(_qf, next_q_value) for _qf in qf]).mean()
    # Actor loss: push min-Q of freshly sampled actions up, minus entropy bonus.
    pi, log_pi, _ = actor_network.sample(inputs_norm_tensor, instruction_tensor)
    qf_pi = critic_network(inputs_norm_tensor, pi, instruction_tensor)
    min_qf_pi = torch.min(qf_pi, dim=0).values
    policy_loss = ((alpha * log_pi) - min_qf_pi).mean()
    # Update the actor network (gradients synced across MPI workers first).
    policy_optim.zero_grad()
    policy_loss.backward()
    sync_grads(actor_network)
    policy_optim.step()
    # Update the critic network, optionally clipping the gradient norm.
    critic_optim.zero_grad()
    qf_loss.backward()
    if cfg.clip_grad_norm:
        clip_grad_norm_(critic_network.parameters(), cfg.max_norm)
    sync_grads(critic_network)
    critic_optim.step()
    # Adjust the entropy temperature (no-op unless automatic tuning is on).
    alpha_loss, alpha_tlogs = update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg)
    train_metrics = dict(q_loss=qf_loss.item(),
                         next_q=next_q_value.mean().item(),
                         policy_loss=policy_loss.item(),
                         alpha_loss=alpha_loss.item(),
                         alpha_tlogs=alpha_tlogs.item())
    # Per-head diagnostics for each critic / target-critic pair.
    for idx, (_qf, _qtarget) in enumerate(zip(qf, qf_next_target)):
        train_metrics[f'q_{idx}'] = _qf.mean().item()
        train_metrics[f'q_target_{idx}'] = _qtarget.mean().item()
    return train_metrics
| [
"torch.nn.functional.mse_loss",
"torch.min",
"torch.tensor",
"numpy.concatenate",
"mpi_utils.mpi_utils.sync_grads",
"torch.no_grad"
] | [((889, 940), 'numpy.concatenate', 'np.concatenate', (['[obs_norm, ag_norm, g_norm]'], {'axis': '(1)'}), '([obs_norm, ag_norm, g_norm], axis=1)\n', (903, 940), True, 'import numpy as np\n'), ((964, 1020), 'numpy.concatenate', 'np.concatenate', (['[obs_next_norm, ag_norm, g_norm]'], {'axis': '(1)'}), '([obs_next_norm, ag_norm, g_norm], axis=1)\n', (978, 1020), True, 'import numpy as np\n'), ((1047, 1093), 'torch.tensor', 'torch.tensor', (['inputs_norm'], {'dtype': 'torch.float32'}), '(inputs_norm, dtype=torch.float32)\n', (1059, 1093), False, 'import torch\n'), ((1124, 1175), 'torch.tensor', 'torch.tensor', (['inputs_next_norm'], {'dtype': 'torch.float32'}), '(inputs_next_norm, dtype=torch.float32)\n', (1136, 1175), False, 'import torch\n'), ((1197, 1239), 'torch.tensor', 'torch.tensor', (['actions'], {'dtype': 'torch.float32'}), '(actions, dtype=torch.float32)\n', (1209, 1239), False, 'import torch\n'), ((2383, 2408), 'mpi_utils.mpi_utils.sync_grads', 'sync_grads', (['actor_network'], {}), '(actor_network)\n', (2393, 2408), False, 'from mpi_utils.mpi_utils import sync_grads\n'), ((2616, 2642), 'mpi_utils.mpi_utils.sync_grads', 'sync_grads', (['critic_network'], {}), '(critic_network)\n', (2626, 2642), False, 'from mpi_utils.mpi_utils import sync_grads\n'), ((3588, 3634), 'torch.tensor', 'torch.tensor', (['inputs_norm'], {'dtype': 'torch.float32'}), '(inputs_norm, dtype=torch.float32)\n', (3600, 3634), False, 'import torch\n'), ((3665, 3716), 'torch.tensor', 'torch.tensor', (['inputs_next_norm'], {'dtype': 'torch.float32'}), '(inputs_next_norm, dtype=torch.float32)\n', (3677, 3716), False, 'import torch\n'), ((3738, 3780), 'torch.tensor', 'torch.tensor', (['actions'], {'dtype': 'torch.float32'}), '(actions, dtype=torch.float32)\n', (3750, 3780), False, 'import torch\n'), ((3893, 3936), 'torch.tensor', 'torch.tensor', (['instruction'], {'dtype': 'torch.long'}), '(instruction, dtype=torch.long)\n', (3905, 3936), False, 'import torch\n'), ((5149, 5174), 
'mpi_utils.mpi_utils.sync_grads', 'sync_grads', (['actor_network'], {}), '(actor_network)\n', (5159, 5174), False, 'from mpi_utils.mpi_utils import sync_grads\n'), ((5382, 5408), 'mpi_utils.mpi_utils.sync_grads', 'sync_grads', (['critic_network'], {}), '(critic_network)\n', (5392, 5408), False, 'from mpi_utils.mpi_utils import sync_grads\n'), ((547, 564), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (559, 564), False, 'import torch\n'), ((586, 605), 'torch.tensor', 'torch.tensor', (['alpha'], {}), '(alpha)\n', (598, 605), False, 'import torch\n'), ((1557, 1572), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1570, 1572), False, 'import torch\n'), ((2208, 2231), 'torch.min', 'torch.min', (['qf_pi'], {'dim': '(0)'}), '(qf_pi, dim=0)\n', (2217, 2231), False, 'import torch\n'), ((4222, 4237), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4235, 4237), False, 'import torch\n'), ((4974, 4997), 'torch.min', 'torch.min', (['qf_pi'], {'dim': '(0)'}), '(qf_pi, dim=0)\n', (4983, 4997), False, 'import torch\n'), ((1255, 1297), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float32'}), '(rewards, dtype=torch.float32)\n', (1267, 1297), False, 'import torch\n'), ((3796, 3838), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float32'}), '(rewards, dtype=torch.float32)\n', (3808, 3838), False, 'import torch\n'), ((1774, 1806), 'torch.min', 'torch.min', (['qf_next_target'], {'dim': '(0)'}), '(qf_next_target, dim=0)\n', (1783, 1806), False, 'import torch\n'), ((4479, 4511), 'torch.min', 'torch.min', (['qf_next_target'], {'dim': '(0)'}), '(qf_next_target, dim=0)\n', (4488, 4511), False, 'import torch\n'), ((2006, 2035), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['_qf', 'next_q_value'], {}), '(_qf, next_q_value)\n', (2016, 2035), True, 'import torch.nn.functional as F\n'), ((4731, 4760), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['_qf', 'next_q_value'], {}), '(_qf, next_q_value)\n', (4741, 4760), True, 'import 
torch.nn.functional as F\n')] |
# -*- coding: utf-8 -*-
"""
plot acc loss

Reads a training log (CSV with a header row followed by
"index,loss,accuracy" rows) and plots loss and accuracy curves.
@author: atpandey
"""
#%%
import matplotlib.pyplot as plt
#%%
ff = './to_laptop/trg_file.txt'

listidx = []
listloss = []
listacc = []
with open(ff, 'r') as trgf:
    for ctr, line in enumerate(trgf):
        if ctr == 0:
            continue  # skip the header row
        ll = line.split(',')
        listidx.append(ll[0])
        # Convert to float so matplotlib plots numeric values instead of
        # treating the raw strings as categorical labels.
        listloss.append(float(ll[1]))
        listacc.append(float(ll[2]))

# Make a figure
fig = plt.figure()
plt.subplots_adjust(top=0.99, bottom=0.05, hspace=0.5, wspace=0.4)

# The axes
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)

# plots
ax1.plot(listloss, 'bo-', label='loss')
ax2.plot(listacc, 'go-', label='accuracy')

ax1.set_xlabel('training idx')
ax1.set_ylabel('Loss')
ax1.set_title('loss data set')
ax1.legend()

ax2.set_xlabel('training idx')
ax2.set_ylabel('accuracy')
ax2.set_title('accuracydata set')
ax2.legend()

# Save BEFORE show(): show() blocks, and once its window is closed the
# canvas may be empty, so saving afterwards can write a blank image.
plt.savefig('./outputs/loss_accuracy.png')
plt.show()
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((592, 604), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (602, 604), True, 'import matplotlib.pyplot as plt\n'), ((606, 672), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.99)', 'bottom': '(0.05)', 'hspace': '(0.5)', 'wspace': '(0.4)'}), '(top=0.99, bottom=0.05, hspace=0.5, wspace=0.4)\n', (625, 672), True, 'import matplotlib.pyplot as plt\n'), ((1044, 1054), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1052, 1054), True, 'import matplotlib.pyplot as plt\n'), ((1055, 1097), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./outputs/loss_accuracy.png"""'], {}), "('./outputs/loss_accuracy.png')\n", (1066, 1097), True, 'import matplotlib.pyplot as plt\n')] |
##--------------------------------Main file------------------------------------
##
## Copyright (C) 2020 by <NAME> (<EMAIL>)
## June, 2020
## <EMAIL>
##-----------------------------------------------------------------------------
# Variables aleatorias múltiples
# Se consideran dos bases de datos las cuales contienen los descrito
# a continuación:
# 1. ****** Registro de la frecuencia relativa de dos variables aleatorias
# conjuntas en forma de tabla: xy.csv
# 2. ****** Pares (x, y) y su probabilidad asociada: xyp.csv
# Recordando que variable aleatoria es una función determinista.
#### **************** Algoritmo **************** ####
#******************************************************
# IMPORTANDO PAQUETES
#******************************************************
# Es importante considerar que notas son necesarias pero si
# fueron usadas durante el desarrollo de la tarea por diversas
# razones por lo cual se mantiene dentro del algortimo en forma
# comentario.
# from __future__ import division
# from pylab import *
# from sklearn import *
# from sklearn.preprocessing import PolynomialFeatures
# import math
# import decimal
# import pandas as pd
# from scipy.stats import norm
# from scipy.stats import rayleigh
# import csv
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.mplot3d import axes3d
from numpy import *
import numpy as np
from matplotlib import cm
import scipy.stats as stats
from scipy.optimize import curve_fit
#******************************************************
# DEFINICIONES
#******************************************************
def distribucion_normal(va, mu, sigma):
dist_normal = 1/(np.sqrt(2*np.pi*sigma**2)) * np.exp(-(va-mu)**2/(2*sigma**2))
return dist_normal
def densidad_conjunta(va0,va1,mu0,sigma0,mu1,sigma1):
val_conjunto = 1/((np.sqrt(2*np.pi*sigma0**2)) * np.exp(-(va0-mu0)**2/(2*sigma0**2)) * (1/(np.sqrt(2*np.pi*sigma1**2)) * np.exp(-(va1-mu1)**2/(2*sigma1**2))))
return val_conjunto
def ajuste_curva(marginal, par1, par2, distri_norm, graph_label_dis, distri_x_name_img, func_graph_label, function_va_img):
va = np.linspace(par1,par2,len(marginal))
plt.bar(va, marginal, label= graph_label_dis)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + distri_x_name_img + ".png")
parametros_va, _ = curve_fit(distri_norm, va, marginal)
mu, sigma = parametros_va[0], parametros_va[1]
print("\n\nMu " + distri_x_name_img + " = ", mu)
print("Sigma " + distri_x_name_img + " = ", sigma)
va_function = stats.norm(mu,sigma)
curva_ajustada = np.linspace(va_function.ppf(0.01), va_function.ppf(0.99), 100)
plt.plot(curva_ajustada,va_function.pdf(curva_ajustada),label=func_graph_label)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + function_va_img+".png")
# # Limpia el area de graficacion
plt.cla()
return curva_ajustada, mu, sigma
def valor_esperado(marginal,lim_inferior,lim_superior, de_quien_v_valor_esperado):
dominio = []
valor_esperado_marginal = 0
for k in range (5, lim_superior +1):
dominio.append(k)
dominio = list(OrderedDict.fromkeys(dominio))
print("\n\nEl dominio es de: ", dominio)
for i in range (0,len(marginal)):
valor_esperado_marginal = valor_esperado_marginal + dominio[i]*marginal[i]
print("\n" +de_quien_v_valor_esperado +" tiene un valor de: ", valor_esperado_marginal)
return valor_esperado_marginal
def grafica_en2d(mu_va, sigma_va, par1_modelo, nombre2d):
va_funcion_distri = stats.norm(mu_va,sigma_va)
curve = np.linspace(va_funcion_distri.ppf(0.01), va_funcion_distri.ppf(0.99), par1_modelo)
plt.plot(curve,va_funcion_distri.pdf(curve),label=nombre2d)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + nombre2d+".png")
# # Limpia el area de graficacion
plt.cla()
return
def grafica_en3d(VA0_modelo, VA1_modelo, VA0, VA1, nombre):
Z = []
for i in VA0:
XY = []
for j in VA1:
XY.append(i*j)
Z.append(XY)
fig = plt.figure()
eje_x= plt.axes(projection='3d')
VA0,VA1 = np.meshgrid(VA0_modelo,VA1_modelo)
eje_x.plot_surface(VA0,VA1,np.array(Z),cmap=cm.coolwarm)
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + nombre+".png")
return
#******************************************************
# OBTENIENDO VALORES
# DE LOS CSV
#******************************************************
data = pd.read_csv("/Users/belindabrown/Desktop/VA_multiples/data_base/xy.csv", index_col=0)
data_xyp = pd.read_csv("/Users/belindabrown/Desktop/VA_multiples/data_base/xyp.csv")
#******************************************************
# CURVA DE MEJOR AJUSTE
# DE LAS FUNCIONES DE
# DENSIDAD MARGINALES X & Y
#******************************************************
# Se requieren los valores marginales tanto de x como de y
# Columna con la sumatoria de todas las columnas es la probabilidad marginal de X
marg_value_x = [n for n in data.sum(axis=1, numeric_only=True)]
# Fila con la sumatoria de todas las filas es la probabilidad marginal de Y
marg_value_y = [n for n in data.sum(axis=0, numeric_only=True)]
print("\nValor marginal de X: ", marg_value_x)
print("\nValor marginal de Y: ", marg_value_y)
x_curva_modelo, x_mu, x_sigma = ajuste_curva(marg_value_x, 5, 15, distribucion_normal, "Datos que pertenencen a X","Datos_de_X", "Modelos de X(x)", "Modelado_X(x)")
y_curva_modelo, y_mu, y_sigma = ajuste_curva(marg_value_y, 5, 25, distribucion_normal, "Datos que pertenencen a Y","Datos_de_Y", "Modelos de Y(y)", "Modelado_Y(y)")
#******************************************************
# FUNCION DE DENSIDAD
# CONJUNTA DE
# X & Y
#******************************************************
probabi_conjuntaX = distribucion_normal(x_curva_modelo,x_mu,x_sigma)
probabi_conjuntaY = distribucion_normal(y_curva_modelo,y_mu,y_sigma)
#******************************************************
# VALORES DE CORRELACION, COVARIANZA
# COEFICIENTE DE CORRELACION (PEARSON)
# Y SIGNIFICADO
#******************************************************
###### OBTENIDOS CON XY.CSV
# Se requieren los valores anteriormente calculados. Para calcular
# E[X] & E[Y] lo que se conoce como los valores.
# Valores inicializados de los valores de X y Y (E[X] y E[Y])
# Este rango es de [x0, x1], es decir, incluye los limites
e_x = valor_esperado(marg_value_x,5,15, "X")
e_y = valor_esperado(marg_value_y,5,25, "Y")
multi_valor_esperados = e_x*e_y
# Se calcula E[X]*E[Y]
print("\n\nEl valor de E[X]E[Y] es de: ", multi_valor_esperados)
###### OBTENIDOS CON XYP.CSV
# Dado que la primera fila contiene las etiquetas de x, y, p
todos_mu_sum = data_xyp.x * data_xyp.y * data_xyp.p
# La sumatoria de E[XY] nos brinda su correlación
correlacion = todos_mu_sum.sum()
# Ahora para la covarianza, de acuerdo a lo visto en clase la
# covarianza es la correlacion menos la multiplicacion de los
# valores.
covarianza = correlacion - multi_valor_esperados
# Se requiere calcular el coeficiente de correlacion de
# Pearson en el cual se utilizan los valores de la data brindada de
# obtenidos entonces ...
# De acuerdo a los resultados obtenidos al correr el programa
# se ve que:
# SigmaDatos_de_X = 3.2994428707078436
# SigmaDatos_de_Y = 6.0269377486808775
# Para el coeficiente pearson se calcula como la covarianza
# divida entre la multiplicacion de los sigmas
coef_pearson = covarianza/(3.2994428707078436*6.0269377486808775)
print("\nEl resultado de la correlación es de: ", correlacion)
print("\nEl resultado de la covarianza es de: ",covarianza)
print("\nDe acuerdo a los datos obtenidos y considerando todo sus decimales se tiene que el coeficiente de Pearson es de: ", coef_pearson)
#******************************************************
# GRAFICA EN 2D DE LAS FUNCIONES
# DE DENSIDAD MARGINALES
# &
# GRAFICA EN 3D DE LA FUNCION
# DE DENSIDAD CONJUNTA
#******************************************************
# Dado que se requiere redondear los valores para la gráfica se toma en
# cuenta que los parámetros completos para el modelo serían los ya calculados
distribucion_de_x = grafica_en2d(x_mu, x_sigma, 100,"Distribucion_de_X")
distribucion_de_y = grafica_en2d(y_mu, y_sigma, 100,"Distribucion_de_Y")
dis_cojun3d = grafica_en3d(x_curva_modelo, y_curva_modelo, probabi_conjuntaX, probabi_conjuntaY, "Distribucion_en_3D")
| [
"scipy.optimize.curve_fit",
"matplotlib.pyplot.savefig",
"collections.OrderedDict.fromkeys",
"pandas.read_csv",
"numpy.sqrt",
"scipy.stats.norm",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"numpy.meshgrid",
"matplotlib.pyplot.cl... | [((4558, 4647), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/belindabrown/Desktop/VA_multiples/data_base/xy.csv"""'], {'index_col': '(0)'}), "('/Users/belindabrown/Desktop/VA_multiples/data_base/xy.csv',\n index_col=0)\n", (4569, 4647), True, 'import pandas as pd\n'), ((4655, 4728), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/belindabrown/Desktop/VA_multiples/data_base/xyp.csv"""'], {}), "('/Users/belindabrown/Desktop/VA_multiples/data_base/xyp.csv')\n", (4666, 4728), True, 'import pandas as pd\n'), ((2280, 2324), 'matplotlib.pyplot.bar', 'plt.bar', (['va', 'marginal'], {'label': 'graph_label_dis'}), '(va, marginal, label=graph_label_dis)\n', (2287, 2324), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2339), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2337, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2438), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/belindabrown/Desktop/VA_multiples/results/' + distri_x_name_img +\n '.png')"], {}), "('/Users/belindabrown/Desktop/VA_multiples/results/' +\n distri_x_name_img + '.png')\n", (2352, 2438), True, 'import matplotlib.pyplot as plt\n'), ((2457, 2493), 'scipy.optimize.curve_fit', 'curve_fit', (['distri_norm', 'va', 'marginal'], {}), '(distri_norm, va, marginal)\n', (2466, 2493), False, 'from scipy.optimize import curve_fit\n'), ((2663, 2684), 'scipy.stats.norm', 'stats.norm', (['mu', 'sigma'], {}), '(mu, sigma)\n', (2673, 2684), True, 'import scipy.stats as stats\n'), ((2847, 2859), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2857, 2859), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2956), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/belindabrown/Desktop/VA_multiples/results/' + function_va_img + '.png'\n )"], {}), "('/Users/belindabrown/Desktop/VA_multiples/results/' +\n function_va_img + '.png')\n", (2872, 2956), True, 'import matplotlib.pyplot as plt\n'), ((3007, 3016), 'matplotlib.pyplot.cla', 'plt.cla', 
([], {}), '()\n', (3014, 3016), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3669), 'scipy.stats.norm', 'stats.norm', (['mu_va', 'sigma_va'], {}), '(mu_va, sigma_va)\n', (3652, 3669), True, 'import scipy.stats as stats\n'), ((3823, 3835), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3833, 3835), True, 'import matplotlib.pyplot as plt\n'), ((3837, 3925), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/belindabrown/Desktop/VA_multiples/results/' + nombre2d + '.png')"], {}), "('/Users/belindabrown/Desktop/VA_multiples/results/' + nombre2d +\n '.png')\n", (3848, 3925), True, 'import matplotlib.pyplot as plt\n'), ((3976, 3985), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3983, 3985), True, 'import matplotlib.pyplot as plt\n'), ((4144, 4156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4154, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4165, 4190), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (4173, 4190), True, 'import matplotlib.pyplot as plt\n'), ((4202, 4237), 'numpy.meshgrid', 'np.meshgrid', (['VA0_modelo', 'VA1_modelo'], {}), '(VA0_modelo, VA1_modelo)\n', (4213, 4237), True, 'import numpy as np\n'), ((4296, 4382), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('/Users/belindabrown/Desktop/VA_multiples/results/' + nombre + '.png')"], {}), "('/Users/belindabrown/Desktop/VA_multiples/results/' + nombre +\n '.png')\n", (4307, 4382), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1863), 'numpy.exp', 'np.exp', (['(-(va - mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(va - mu) ** 2 / (2 * sigma ** 2))\n', (1827, 1863), True, 'import numpy as np\n'), ((3252, 3281), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['dominio'], {}), '(dominio)\n', (3272, 3281), False, 'from collections import OrderedDict\n'), ((4265, 4276), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (4273, 4276), True, 'import numpy as np\n'), ((1792, 1823), 'numpy.sqrt', 
'np.sqrt', (['(2 * np.pi * sigma ** 2)'], {}), '(2 * np.pi * sigma ** 2)\n', (1799, 1823), True, 'import numpy as np\n'), ((1949, 1981), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma0 ** 2)'], {}), '(2 * np.pi * sigma0 ** 2)\n', (1956, 1981), True, 'import numpy as np\n'), ((1979, 2024), 'numpy.exp', 'np.exp', (['(-(va0 - mu0) ** 2 / (2 * sigma0 ** 2))'], {}), '(-(va0 - mu0) ** 2 / (2 * sigma0 ** 2))\n', (1985, 2024), True, 'import numpy as np\n'), ((2051, 2096), 'numpy.exp', 'np.exp', (['(-(va1 - mu1) ** 2 / (2 * sigma1 ** 2))'], {}), '(-(va1 - mu1) ** 2 / (2 * sigma1 ** 2))\n', (2057, 2096), True, 'import numpy as np\n'), ((2021, 2053), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * sigma1 ** 2)'], {}), '(2 * np.pi * sigma1 ** 2)\n', (2028, 2053), True, 'import numpy as np\n')] |
# -*- coding:UTF-8 -*-
import pandas as pd
from minepy import MINE
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
import xgboost as xgb
import operator
from sklearn.utils import shuffle
from Common.ModelCommon import ModelCV
from sklearn import svm
import numpy as np
class NAClass(object):
def __init__(self):
pass
# 获取存在NA值的特征列表
def GetNAFeatures(self, df):
return df.columns[df.isnull().sum() != 0].tolist()
# 缺失特征按从多到少排序进行展示
def ShowNAInfo(self, df, NAlist):
NA_count = df[NAlist].isnull().sum().sort_values(ascending=False)
NAInfo = pd.DataFrame({'NA_count': NA_count, 'NA_percent': NA_count/df.shape[0]})
print(NAInfo)
# 含缺失值特征处理的通用接口,strategy为处理策略
def HandleNA(self, df, NAfeaturesList, strategy='mean'):
if strategy == 'mean':
for feature in NAfeaturesList:
if df[feature].dtypes == 'object':
raise ValueError('Nonnumeric feature!')
df[feature].fillna(df[feature].mean(), inplace=True)
elif strategy == 'mode':
for feature in NAfeaturesList:
df[feature].fillna(df[feature].mode()[0], inplace=True)
elif strategy == 'drop':
df.drop(NAfeaturesList, axis=1, inplace=True)
else:
for feature in NAfeaturesList:
if (df[feature].dtypes == 'object' and type(strategy) != str) or (
df[feature].dtypes != 'object' and type(strategy) == str):
raise ValueError('Mismatched type!')
df[feature].fillna(strategy, inplace=True)
def checkNA(self, df):
return df.isnull().sum().max()
def CategoricalList(df):
return [attr for attr in df.columns if df.dtypes[attr] == 'object']
def NumericalList(df):
return [attr for attr in df.columns if df.dtypes[attr] != 'object']
def GetTargetDf(df, target):
targetdf = pd.DataFrame(df[target].value_counts())
targetdf['Percent'] = targetdf[target]/df.shape[0]
return targetdf
def GetZeroDf(df):
zerodf = pd.DataFrame(df[df == 0].count())
zerodf['Percent'] = zerodf[0]/df.shape[0]
zerodf.rename(columns={0: 'Count'}, inplace=True)
return zerodf
def GetValueCountDf(df):
valueCountList = []
for feat in df.columns:
valueCountList.append(df[feat].value_counts().shape[0])
valueCountDf = pd.DataFrame({'feat': df.columns, 'valueCount': valueCountList})
return valueCountDf
def GetZeroColumns(df):
zeros = df[df != 0].count()
return zeros[zeros == 0].index
def mic(x, y):
m = MINE()
m.compute_score(x, y)
return m.mic()
def featShow(train_data, feat):
plt.scatter(range(train_data.shape[0]), train_data[feat].values, s=20)
plt.xlabel('index')
plt.ylabel(feat)
plt.show()
def TypeShow(train_data):
dtype_df = train_data.dtypes.reset_index()
dtype_df.columns = ["Count", "Column Type"]
print(dtype_df.groupby("Column Type").aggregate('count').reset_index())
# 通过决策树获取特征重要性
def TreeImportanceShow(train_data):
x = train_data[train_data.columns[:-1]]
y = train_data['TARGET']
clf = ExtraTreesClassifier()
clf.fit(x, y.astype('int'))
imptdf = pd.DataFrame({'feat': x.columns, 'importance': clf.feature_importances_})
imptdf_sort = imptdf.sort_values(by='importance', ascending=False)
# print("decision tree importance:\n", imptdf_sort)
sns.barplot(data=imptdf_sort, x='feat', y='importance')
plt.xticks(rotation='vertical')
# plt.show()
return imptdf_sort
def xgbImportanceShow(train_data):
x = train_data[train_data.columns[:-1]]
y = train_data['TARGET']
dtrain = xgb.DMatrix(x, y)
xgb_params = {"objective": "binary:logistic", "eta": 0.01, "max_depth": 8, "seed": 42, "silent": 1}
model = xgb.train(xgb_params, dtrain, num_boost_round=100)
impt = model.get_fscore()
impt = sorted(impt.items(), key=operator.itemgetter(1))
imptdf = pd.DataFrame(impt, columns=['feature', 'fscore'])
imptdf_sort = imptdf.sort_values(by='fscore', ascending=False)
# print("xgb importance:\n", imptdf_sort)
imptdf_sort.to_csv('../tmp/xgb_importance.csv', index=False)
xgb.plot_importance(model, max_num_features=400, height=0.8)
# plt.show()
return imptdf_sort
def valueCountsShow(train_data, featlist):
for feat in featlist:
print(train_data[feat].value_counts())
# rate为希望采样后的0样本的个数为rate*1样本
def underSampling(train, rate):
idx_0 = train[train['TARGET'] == 0].index
idx_1 = train[train['TARGET'] == 1].index
len_1 = len(train.loc[idx_1])
undersample_idx_0 = shuffle(idx_0, random_state=37, n_samples=int(len_1*rate))
idx_list = list(undersample_idx_0) + list(idx_1)
train = train.loc[idx_list].reset_index(drop=True)
return train
# repeat为重复样本1的次数
def overSampling(train, repeat):
idx_1 = train[train['TARGET'] == 1].index
i = 0
while i < repeat:
train = pd.concat([train, train.iloc[idx_1, :]], axis=0).reset_index(drop=True)
i += 1
return train
# 通过train_data的cv分数来作为评判标准,但是每种不同比率的sample,最终的样本数有一定不同,是否影响指标的客观准确性?
def getBestUnSamplingRate(train, ratelist):
bestscore = 0
bestrate = 0
for rate in ratelist:
svc = svm.LinearSVC()
train_data = underSampling(train, rate)
score = ModelCV(svc, 'svm', train_data, 5)
print("rate :%f, score:%f" % (rate, score))
if score > bestscore:
bestscore = score
bestrate = rate
print("best rate :%f, best score:%f" % (bestrate, bestscore))
return bestrate
def corr_heatmap(train, v):
correlations = train[v].corr()
# Create color map ranging between two colors
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(correlations, cmap=cmap, vmax=1.0, center=0, fmt='.2f',
square=True, linewidths=.5, annot=True, cbar_kws={"shrink": .75})
plt.show()
def typeShow(train_data):
print(train_data.dtypes.value_counts())
def getTypeMap(train_data):
typeMap = {}
typeMap['int64'] = train_data.dtypes[train_data.dtypes == 'int64'].index
typeMap['float64'] = train_data.dtypes[train_data.dtypes == 'float64'].index
return typeMap
# iswhole为True时代表是完整的数据集,需要将TARGET去除再求相关性,为False时代表已经是筛选后的列,不包含TARGET
def getHighCorrList(df, thres, iswhole):
if iswhole:
x = df.iloc[:, :-1]
else:
x = df
corr = x.corr()
index = corr.index[np.where(corr > thres)[0]]
columns = corr.columns[np.where(corr > thres)[1]]
highCorrList = [[index[i], columns[i]] for i in range(len(index)) if index[i] != columns[i]]
uniqList = [[0, 0]]
for i in range(len(highCorrList)):
uniqCount = 0
for j in range(len(uniqList)):
if highCorrList[i][0] == uniqList[j][1] and highCorrList[i][1] == uniqList[j][0]:
uniqCount += 1
if uniqCount == 0:
uniqList.append(highCorrList[i])
del uniqList[0]
return uniqList
def getDropHighCorrList(highList):
dropList = []
for item in highList:
if item[0] in dropList:
break
if item[1] in dropList:
break
else:
dropList.append(item[1])
return dropList
def getUinqueCorrDf(train, threshold):
cor_mat = train.corr()
important_corrs = (cor_mat[abs(cor_mat) > threshold][cor_mat != 1.0]).unstack().dropna().to_dict()
unique_important_corrs = pd.DataFrame(
list(set([(tuple(sorted(key)), important_corrs[key]) for key in important_corrs])),
columns=['attribute pair', 'correlation'])
unique_important_corrs = unique_important_corrs.ix[abs(unique_important_corrs['correlation']).argsort()[::-1]]
return unique_important_corrs | [
"xgboost.DMatrix",
"sklearn.ensemble.ExtraTreesClassifier",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"xgboost.train",
"matplotlib.pyplot.xlabel",
"xgboost.plot_importance",
"seaborn.diverging_palette",
"sklearn.svm.LinearSVC",
"seaborn.heatmap",
"operator.itemgetter",
"numpy.whe... | [((2446, 2510), 'pandas.DataFrame', 'pd.DataFrame', (["{'feat': df.columns, 'valueCount': valueCountList}"], {}), "({'feat': df.columns, 'valueCount': valueCountList})\n", (2458, 2510), True, 'import pandas as pd\n'), ((2653, 2659), 'minepy.MINE', 'MINE', ([], {}), '()\n', (2657, 2659), False, 'from minepy import MINE\n'), ((2818, 2837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""index"""'], {}), "('index')\n", (2828, 2837), True, 'import matplotlib.pyplot as plt\n'), ((2842, 2858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['feat'], {}), '(feat)\n', (2852, 2858), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2873), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2871, 2873), True, 'import matplotlib.pyplot as plt\n'), ((3209, 3231), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {}), '()\n', (3229, 3231), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((3277, 3350), 'pandas.DataFrame', 'pd.DataFrame', (["{'feat': x.columns, 'importance': clf.feature_importances_}"], {}), "({'feat': x.columns, 'importance': clf.feature_importances_})\n", (3289, 3350), True, 'import pandas as pd\n'), ((3482, 3537), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'imptdf_sort', 'x': '"""feat"""', 'y': '"""importance"""'}), "(data=imptdf_sort, x='feat', y='importance')\n", (3493, 3537), True, 'import seaborn as sns\n'), ((3542, 3573), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (3552, 3573), True, 'import matplotlib.pyplot as plt\n'), ((3737, 3754), 'xgboost.DMatrix', 'xgb.DMatrix', (['x', 'y'], {}), '(x, y)\n', (3748, 3754), True, 'import xgboost as xgb\n'), ((3871, 3921), 'xgboost.train', 'xgb.train', (['xgb_params', 'dtrain'], {'num_boost_round': '(100)'}), '(xgb_params, dtrain, num_boost_round=100)\n', (3880, 3921), True, 'import xgboost as xgb\n'), ((4025, 4074), 'pandas.DataFrame', 'pd.DataFrame', (['impt'], {'columns': "['feature', 
'fscore']"}), "(impt, columns=['feature', 'fscore'])\n", (4037, 4074), True, 'import pandas as pd\n'), ((4257, 4317), 'xgboost.plot_importance', 'xgb.plot_importance', (['model'], {'max_num_features': '(400)', 'height': '(0.8)'}), '(model, max_num_features=400, height=0.8)\n', (4276, 4317), True, 'import xgboost as xgb\n'), ((5781, 5825), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(220)', '(10)'], {'as_cmap': '(True)'}), '(220, 10, as_cmap=True)\n', (5802, 5825), True, 'import seaborn as sns\n'), ((5830, 5970), 'seaborn.heatmap', 'sns.heatmap', (['correlations'], {'cmap': 'cmap', 'vmax': '(1.0)', 'center': '(0)', 'fmt': '""".2f"""', 'square': '(True)', 'linewidths': '(0.5)', 'annot': '(True)', 'cbar_kws': "{'shrink': 0.75}"}), "(correlations, cmap=cmap, vmax=1.0, center=0, fmt='.2f', square=\n True, linewidths=0.5, annot=True, cbar_kws={'shrink': 0.75})\n", (5841, 5970), True, 'import seaborn as sns\n'), ((5984, 5994), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5992, 5994), True, 'import matplotlib.pyplot as plt\n'), ((651, 725), 'pandas.DataFrame', 'pd.DataFrame', (["{'NA_count': NA_count, 'NA_percent': NA_count / df.shape[0]}"], {}), "({'NA_count': NA_count, 'NA_percent': NA_count / df.shape[0]})\n", (663, 725), True, 'import pandas as pd\n'), ((5314, 5329), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {}), '()\n', (5327, 5329), False, 'from sklearn import svm\n'), ((5394, 5428), 'Common.ModelCommon.ModelCV', 'ModelCV', (['svc', '"""svm"""', 'train_data', '(5)'], {}), "(svc, 'svm', train_data, 5)\n", (5401, 5428), False, 'from Common.ModelCommon import ModelCV\n'), ((3988, 4010), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4007, 4010), False, 'import operator\n'), ((6517, 6539), 'numpy.where', 'np.where', (['(corr > thres)'], {}), '(corr > thres)\n', (6525, 6539), True, 'import numpy as np\n'), ((6571, 6593), 'numpy.where', 'np.where', (['(corr > thres)'], {}), '(corr > thres)\n', (6579, 6593), True, 
'import numpy as np\n'), ((5020, 5068), 'pandas.concat', 'pd.concat', (['[train, train.iloc[idx_1, :]]'], {'axis': '(0)'}), '([train, train.iloc[idx_1, :]], axis=0)\n', (5029, 5068), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Perform Bluetooth LE Scan.
Based on https://github.com/hbldh/bleak/blob/master/bleak/backends/dotnet/discovery.py by
Created by hbldh <<EMAIL>>
"""
import logging
logger = logging.getLogger('bleak_scanner')
import asyncio
import queue
from bleak.backends.device import BLEDevice
# Import of Bleak CLR->UWP Bridge. It is not needed here, but it enables loading of Windows.Devices
from BleakBridge import Bridge
from System import Array, Byte
from Windows.Devices.Bluetooth.Advertisement import \
BluetoothLEAdvertisementWatcher, BluetoothLEScanningMode
from Windows.Storage.Streams import DataReader, IBuffer
QUEUE_SIZE = 100
###############################################################################
async def scanner(
outqueue: asyncio.Queue,
stopevent: asyncio.Event,
**kwargs
):
"""Perform a continuous Bluetooth LE Scan using Windows.Devices.Bluetooth.Advertisement
Args:
outqueue: outgoing queue
stopevent: stop event
"""
logger.info(f'>>> scanner:windows')
watcher = BluetoothLEAdvertisementWatcher()
q = queue.Queue(QUEUE_SIZE)
# -----------------------------------------------------------------------------
def _format_bdaddr(a):
return ":".join("{:02X}".format(x) for x in a.to_bytes(6, byteorder="big"))
# -----------------------------------------------------------------------------
def AdvertisementWatcher_Received(sender, e):
if sender == watcher:
# logger.debug("Received {0}.".format(_format_event_args(e)))
l_bdaddr = _format_bdaddr(e.BluetoothAddress)
l_uuids = []
for l_u in e.Advertisement.ServiceUuids:
l_uuids.append(l_u.ToString())
l_data = {}
for l_m in e.Advertisement.ManufacturerData:
l_md = IBuffer(l_m.Data)
l_b = Array.CreateInstance(Byte, l_md.Length)
l_reader = DataReader.FromBuffer(l_md)
l_reader.ReadBytes(l_b)
l_data[l_m.CompanyId] = bytes(l_b)
local_name = e.Advertisement.LocalName
logger.debug(f'>>> bdaddr:{l_bdaddr} local_name:{local_name} mfdata:{l_data}')
if q:
q.put(BLEDevice(
l_bdaddr,
local_name,
e,
uuids=l_uuids,
manufacturer_data=l_data,
))
def AdvertisementWatcher_Stopped(sender, e):
if sender == watcher:
logger.info(f'>>> stopped')
# -----------------------------------------------------------------------------
watcher.Received += AdvertisementWatcher_Received
watcher.Stopped += AdvertisementWatcher_Stopped
watcher.ScanningMode = BluetoothLEScanningMode.Active
# Watcher works outside of the Python process.
watcher.Start()
# communication loop
while not stopevent.is_set():
try:
l_data = q.get_nowait()
if l_data and outqueue:
await outqueue.put(l_data)
except queue.Empty:
try:
await asyncio.sleep(0.1)
except asyncio.CancelledError:
logger.warning(f'>>> CancelledError')
break
except:
logger.exception(f'>>> exception')
watcher.Stop()
await asyncio.sleep(0.1)
try:
watcher.Received -= AdvertisementWatcher_Received
watcher.Stopped -= AdvertisementWatcher_Stopped
logger.info(f'>>> Event handlers removed')
except:
logger.warning(f'>>> Could not remove event handlers')
| [
"logging.getLogger",
"Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementWatcher",
"Windows.Storage.Streams.IBuffer",
"asyncio.sleep",
"bleak.backends.device.BLEDevice",
"queue.Queue",
"System.Array.CreateInstance",
"Windows.Storage.Streams.DataReader.FromBuffer"
] | [((202, 236), 'logging.getLogger', 'logging.getLogger', (['"""bleak_scanner"""'], {}), "('bleak_scanner')\n", (219, 236), False, 'import logging\n'), ((1065, 1098), 'Windows.Devices.Bluetooth.Advertisement.BluetoothLEAdvertisementWatcher', 'BluetoothLEAdvertisementWatcher', ([], {}), '()\n', (1096, 1098), False, 'from Windows.Devices.Bluetooth.Advertisement import BluetoothLEAdvertisementWatcher, BluetoothLEScanningMode\n'), ((1107, 1130), 'queue.Queue', 'queue.Queue', (['QUEUE_SIZE'], {}), '(QUEUE_SIZE)\n', (1118, 1130), False, 'import queue\n'), ((3372, 3390), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (3385, 3390), False, 'import asyncio\n'), ((1846, 1863), 'Windows.Storage.Streams.IBuffer', 'IBuffer', (['l_m.Data'], {}), '(l_m.Data)\n', (1853, 1863), False, 'from Windows.Storage.Streams import DataReader, IBuffer\n'), ((1886, 1925), 'System.Array.CreateInstance', 'Array.CreateInstance', (['Byte', 'l_md.Length'], {}), '(Byte, l_md.Length)\n', (1906, 1925), False, 'from System import Array, Byte\n'), ((1953, 1980), 'Windows.Storage.Streams.DataReader.FromBuffer', 'DataReader.FromBuffer', (['l_md'], {}), '(l_md)\n', (1974, 1980), False, 'from Windows.Storage.Streams import DataReader, IBuffer\n'), ((2254, 2329), 'bleak.backends.device.BLEDevice', 'BLEDevice', (['l_bdaddr', 'local_name', 'e'], {'uuids': 'l_uuids', 'manufacturer_data': 'l_data'}), '(l_bdaddr, local_name, e, uuids=l_uuids, manufacturer_data=l_data)\n', (2263, 2329), False, 'from bleak.backends.device import BLEDevice\n'), ((3142, 3160), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (3155, 3160), False, 'import asyncio\n')] |
#Desafio019 ( aplicação randomica para determinar que aluno vai no quadro.
import random
al01 = str('joao'),('maria'),('pédro'),('paula')
print(random.choice(al01))
| [
"random.choice"
] | [((145, 164), 'random.choice', 'random.choice', (['al01'], {}), '(al01)\n', (158, 164), False, 'import random\n')] |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.networkmanagement_v1beta1.services.reachability_service import pagers
from google.cloud.networkmanagement_v1beta1.types import connectivity_test
from google.cloud.networkmanagement_v1beta1.types import reachability
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import ReachabilityServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ReachabilityServiceGrpcAsyncIOTransport
from .client import ReachabilityServiceClient
class ReachabilityServiceAsyncClient:
    """The Reachability service in the Google Cloud Network
    Management API provides services that analyze the reachability
    within a single Google Virtual Private Cloud (VPC) network,
    between peered VPC networks, between VPC and on-premises
    networks, or between VPC networks and internet hosts. A
    reachability analysis is based on Google Cloud network
    configurations.
    You can use the analysis results to verify these configurations
    and to troubleshoot connectivity issues.
    """

    # All behavior is delegated to the synchronous client; this async
    # wrapper holds one instance of it (see __init__).
    _client: ReachabilityServiceClient

    DEFAULT_ENDPOINT = ReachabilityServiceClient.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = ReachabilityServiceClient.DEFAULT_MTLS_ENDPOINT

    # Resource-path helpers are re-exported from the sync client so both
    # clients expose an identical path-building/parsing surface.
    connectivity_test_path = staticmethod(ReachabilityServiceClient.connectivity_test_path)
    parse_connectivity_test_path = staticmethod(ReachabilityServiceClient.parse_connectivity_test_path)
    common_billing_account_path = staticmethod(ReachabilityServiceClient.common_billing_account_path)
    parse_common_billing_account_path = staticmethod(ReachabilityServiceClient.parse_common_billing_account_path)
    common_folder_path = staticmethod(ReachabilityServiceClient.common_folder_path)
    parse_common_folder_path = staticmethod(ReachabilityServiceClient.parse_common_folder_path)
    common_organization_path = staticmethod(ReachabilityServiceClient.common_organization_path)
    parse_common_organization_path = staticmethod(ReachabilityServiceClient.parse_common_organization_path)
    common_project_path = staticmethod(ReachabilityServiceClient.common_project_path)
    parse_common_project_path = staticmethod(ReachabilityServiceClient.parse_common_project_path)
    common_location_path = staticmethod(ReachabilityServiceClient.common_location_path)
    parse_common_location_path = staticmethod(ReachabilityServiceClient.parse_common_location_path)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ReachabilityServiceAsyncClient: The constructed client.
        """
        # Reuse the sync client's implementation but bind it to this async
        # class (via __func__) so the constructed client is async.
        return ReachabilityServiceClient.from_service_account_info.__func__(ReachabilityServiceAsyncClient, info, *args, **kwargs)  # type: ignore
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ReachabilityServiceAsyncClient: The constructed client.
        """
        # Reuse the sync client's implementation but bind it to this async
        # class (via __func__) so the constructed client is async.
        return ReachabilityServiceClient.from_service_account_file.__func__(ReachabilityServiceAsyncClient, filename, *args, **kwargs)  # type: ignore

    # Alias kept for backward compatibility with older call sites.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> ReachabilityServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            ReachabilityServiceTransport: The transport used by the client instance.
        """
        return self._client.transport

    # Resolve the transport class the same way the sync client does, so a
    # string such as "grpc_asyncio" maps to the right transport type.
    get_transport_class = functools.partial(type(ReachabilityServiceClient).get_transport_class, type(ReachabilityServiceClient))
    def __init__(self, *,
            credentials: ga_credentials.Credentials = None,
            transport: Union[str, ReachabilityServiceTransport] = "grpc_asyncio",
            client_options: ClientOptions = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiates the reachability service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.ReachabilityServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # All endpoint/mTLS/credential resolution happens inside the sync
        # client; this wrapper simply forwards every constructor argument.
        self._client = ReachabilityServiceClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )
    async def list_connectivity_tests(self,
            request: reachability.ListConnectivityTestsRequest = None,
            *,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListConnectivityTestsAsyncPager:
        r"""Lists all Connectivity Tests owned by a project.

        Args:
            request (:class:`google.cloud.networkmanagement_v1beta1.types.ListConnectivityTestsRequest`):
                The request object. Request for the
                `ListConnectivityTests` method.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.networkmanagement_v1beta1.services.reachability_service.pagers.ListConnectivityTestsAsyncPager:
                Response for the ListConnectivityTests method.
                Iterating over this object will yield results and
                resolve additional pages automatically.
        """
        # Create or coerce a protobuf request object (accepts None, a dict,
        # or an already-built request message).
        request = reachability.ListConnectivityTestsRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_connectivity_tests,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (the routing header tells the backend which
        # parent resource the request targets).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListConnectivityTestsAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response
    async def get_connectivity_test(self,
            request: reachability.GetConnectivityTestRequest = None,
            *,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> connectivity_test.ConnectivityTest:
        r"""Gets the details of a specific Connectivity Test.

        Args:
            request (:class:`google.cloud.networkmanagement_v1beta1.types.GetConnectivityTestRequest`):
                The request object. Request for the
                `GetConnectivityTest` method.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.networkmanagement_v1beta1.types.ConnectivityTest:
                A Connectivity Test for a network
                reachability analysis.
        """
        # Create or coerce a protobuf request object (accepts None, a dict,
        # or an already-built request message).
        request = reachability.GetConnectivityTestRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_connectivity_test,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
    async def create_connectivity_test(self,
            request: reachability.CreateConnectivityTestRequest = None,
            *,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Creates a new Connectivity Test. After you create a test, the
        reachability analysis is performed as part of the long running
        operation, which completes when the analysis completes.

        If the endpoint specifications in ``ConnectivityTest`` are
        invalid (for example, containing non-existent resources in the
        network, or you don't have read permissions to the network
        configurations of listed projects), then the reachability result
        returns a value of ``UNKNOWN``.

        If the endpoint specifications in ``ConnectivityTest`` are
        incomplete, the reachability result returns a value of
        AMBIGUOUS. For more information, see the Connectivity Test
        documentation.

        Args:
            request (:class:`google.cloud.networkmanagement_v1beta1.types.CreateConnectivityTestRequest`):
                The request object. Request for the
                `CreateConnectivityTest` method.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.networkmanagement_v1beta1.types.ConnectivityTest`
                A Connectivity Test for a network reachability analysis.
        """
        # Create or coerce a protobuf request object (accepts None, a dict,
        # or an already-built request message).
        request = reachability.CreateConnectivityTestRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_connectivity_test,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future so callers can await the
        # final ConnectivityTest result of the long-running operation.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            connectivity_test.ConnectivityTest,
            metadata_type=reachability.OperationMetadata,
        )

        # Done; return the response.
        return response
    async def update_connectivity_test(self,
            request: reachability.UpdateConnectivityTestRequest = None,
            *,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Updates the configuration of an existing ``ConnectivityTest``.
        After you update a test, the reachability analysis is performed
        as part of the long running operation, which completes when the
        analysis completes. The Reachability state in the test resource
        is updated with the new result.

        If the endpoint specifications in ``ConnectivityTest`` are
        invalid (for example, they contain non-existent resources in the
        network, or the user does not have read permissions to the
        network configurations of listed projects), then the
        reachability result returns a value of UNKNOWN.

        If the endpoint specifications in ``ConnectivityTest`` are
        incomplete, the reachability result returns a value of
        ``AMBIGUOUS``. See the documentation in ``ConnectivityTest``
        for more details.

        Args:
            request (:class:`google.cloud.networkmanagement_v1beta1.types.UpdateConnectivityTestRequest`):
                The request object. Request for the
                `UpdateConnectivityTest` method.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.networkmanagement_v1beta1.types.ConnectivityTest`
                A Connectivity Test for a network reachability analysis.
        """
        # Create or coerce a protobuf request object (accepts None, a dict,
        # or an already-built request message).
        request = reachability.UpdateConnectivityTestRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.update_connectivity_test,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routing on the nested resource's name field).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("resource.name", request.resource.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future so callers can await the
        # final ConnectivityTest result of the long-running operation.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            connectivity_test.ConnectivityTest,
            metadata_type=reachability.OperationMetadata,
        )

        # Done; return the response.
        return response
    async def rerun_connectivity_test(self,
            request: reachability.RerunConnectivityTestRequest = None,
            *,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Rerun an existing ``ConnectivityTest``. After the user triggers
        the rerun, the reachability analysis is performed as part of the
        long running operation, which completes when the analysis
        completes.

        Even though the test configuration remains the same, the
        reachability result may change due to underlying network
        configuration changes.

        If the endpoint specifications in ``ConnectivityTest`` become
        invalid (for example, specified resources are deleted in the
        network, or you lost read permissions to the network
        configurations of listed projects), then the reachability result
        returns a value of ``UNKNOWN``.

        Args:
            request (:class:`google.cloud.networkmanagement_v1beta1.types.RerunConnectivityTestRequest`):
                The request object. Request for the
                `RerunConnectivityTest` method.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.networkmanagement_v1beta1.types.ConnectivityTest`
                A Connectivity Test for a network reachability analysis.
        """
        # Create or coerce a protobuf request object (accepts None, a dict,
        # or an already-built request message).
        request = reachability.RerunConnectivityTestRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.rerun_connectivity_test,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future so callers can await the
        # final ConnectivityTest result of the long-running operation.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            connectivity_test.ConnectivityTest,
            metadata_type=reachability.OperationMetadata,
        )

        # Done; return the response.
        return response
    async def delete_connectivity_test(self,
            request: reachability.DeleteConnectivityTestRequest = None,
            *,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Deletes a specific ``ConnectivityTest``.

        Args:
            request (:class:`google.cloud.networkmanagement_v1beta1.types.DeleteConnectivityTestRequest`):
                The request object. Request for the
                `DeleteConnectivityTest` method.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:

                service Foo {
                    rpc Bar(google.protobuf.Empty) returns
                    (google.protobuf.Empty);

                }

                The JSON representation for Empty is empty JSON
                object {}.
        """
        # Create or coerce a protobuf request object (accepts None, a dict,
        # or an already-built request message).
        request = reachability.DeleteConnectivityTestRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_connectivity_test,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future; a successful delete
        # resolves to Empty rather than a ConnectivityTest.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            empty_pb2.Empty,
            metadata_type=reachability.OperationMetadata,
        )

        # Done; return the response.
        return response
    async def __aenter__(self):
        # Support `async with client:` usage; nothing to acquire on entry.
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Close the underlying transport when leaving the context so the
        # channel and its resources are released.
        await self.transport.close()
# Report the installed package version in the client info header when the
# distribution can be found; otherwise fall back to a bare ClientInfo.
try:
    _dist = pkg_resources.get_distribution(
        "google-cloud-networkmanagement",
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
else:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=_dist.version,
    )

__all__ = (
    "ReachabilityServiceAsyncClient",
)
| [
"google.cloud.networkmanagement_v1beta1.types.reachability.RerunConnectivityTestRequest",
"google.cloud.networkmanagement_v1beta1.types.reachability.CreateConnectivityTestRequest",
"google.cloud.networkmanagement_v1beta1.types.reachability.GetConnectivityTestRequest",
"google.api_core.gapic_v1.method_async.wr... | [((9369, 9419), 'google.cloud.networkmanagement_v1beta1.types.reachability.ListConnectivityTestsRequest', 'reachability.ListConnectivityTestsRequest', (['request'], {}), '(request)\n', (9410, 9419), False, 'from google.cloud.networkmanagement_v1beta1.types import reachability\n'), ((9546, 9693), 'google.api_core.gapic_v1.method_async.wrap_method', 'gapic_v1.method_async.wrap_method', (['self._client._transport.list_connectivity_tests'], {'default_timeout': 'None', 'client_info': 'DEFAULT_CLIENT_INFO'}), '(self._client._transport.\n list_connectivity_tests, default_timeout=None, client_info=\n DEFAULT_CLIENT_INFO)\n', (9579, 9693), False, 'from google.api_core import gapic_v1\n'), ((10311, 10420), 'google.cloud.networkmanagement_v1beta1.services.reachability_service.pagers.ListConnectivityTestsAsyncPager', 'pagers.ListConnectivityTestsAsyncPager', ([], {'method': 'rpc', 'request': 'request', 'response': 'response', 'metadata': 'metadata'}), '(method=rpc, request=request,\n response=response, metadata=metadata)\n', (10349, 10420), False, 'from google.cloud.networkmanagement_v1beta1.services.reachability_service import pagers\n'), ((11723, 11771), 'google.cloud.networkmanagement_v1beta1.types.reachability.GetConnectivityTestRequest', 'reachability.GetConnectivityTestRequest', (['request'], {}), '(request)\n', (11762, 11771), False, 'from google.cloud.networkmanagement_v1beta1.types import reachability\n'), ((11898, 12043), 'google.api_core.gapic_v1.method_async.wrap_method', 'gapic_v1.method_async.wrap_method', (['self._client._transport.get_connectivity_test'], {'default_timeout': 'None', 'client_info': 'DEFAULT_CLIENT_INFO'}), '(self._client._transport.\n get_connectivity_test, default_timeout=None, client_info=\n DEFAULT_CLIENT_INFO)\n', (11931, 12043), False, 'from google.api_core import gapic_v1\n'), ((14637, 14688), 'google.cloud.networkmanagement_v1beta1.types.reachability.CreateConnectivityTestRequest', 
'reachability.CreateConnectivityTestRequest', (['request'], {}), '(request)\n', (14679, 14688), False, 'from google.cloud.networkmanagement_v1beta1.types import reachability\n'), ((14815, 14963), 'google.api_core.gapic_v1.method_async.wrap_method', 'gapic_v1.method_async.wrap_method', (['self._client._transport.create_connectivity_test'], {'default_timeout': 'None', 'client_info': 'DEFAULT_CLIENT_INFO'}), '(self._client._transport.\n create_connectivity_test, default_timeout=None, client_info=\n DEFAULT_CLIENT_INFO)\n', (14848, 14963), False, 'from google.api_core import gapic_v1\n'), ((15511, 15682), 'google.api_core.operation_async.from_gapic', 'operation_async.from_gapic', (['response', 'self._client._transport.operations_client', 'connectivity_test.ConnectivityTest'], {'metadata_type': 'reachability.OperationMetadata'}), '(response, self._client._transport.\n operations_client, connectivity_test.ConnectivityTest, metadata_type=\n reachability.OperationMetadata)\n', (15537, 15682), False, 'from google.api_core import operation_async\n'), ((17991, 18042), 'google.cloud.networkmanagement_v1beta1.types.reachability.UpdateConnectivityTestRequest', 'reachability.UpdateConnectivityTestRequest', (['request'], {}), '(request)\n', (18033, 18042), False, 'from google.cloud.networkmanagement_v1beta1.types import reachability\n'), ((18169, 18317), 'google.api_core.gapic_v1.method_async.wrap_method', 'gapic_v1.method_async.wrap_method', (['self._client._transport.update_connectivity_test'], {'default_timeout': 'None', 'client_info': 'DEFAULT_CLIENT_INFO'}), '(self._client._transport.\n update_connectivity_test, default_timeout=None, client_info=\n DEFAULT_CLIENT_INFO)\n', (18202, 18317), False, 'from google.api_core import gapic_v1\n'), ((18879, 19050), 'google.api_core.operation_async.from_gapic', 'operation_async.from_gapic', (['response', 'self._client._transport.operations_client', 'connectivity_test.ConnectivityTest'], {'metadata_type': 
'reachability.OperationMetadata'}), '(response, self._client._transport.\n operations_client, connectivity_test.ConnectivityTest, metadata_type=\n reachability.OperationMetadata)\n', (18905, 19050), False, 'from google.api_core import operation_async\n'), ((21179, 21229), 'google.cloud.networkmanagement_v1beta1.types.reachability.RerunConnectivityTestRequest', 'reachability.RerunConnectivityTestRequest', (['request'], {}), '(request)\n', (21220, 21229), False, 'from google.cloud.networkmanagement_v1beta1.types import reachability\n'), ((21356, 21503), 'google.api_core.gapic_v1.method_async.wrap_method', 'gapic_v1.method_async.wrap_method', (['self._client._transport.rerun_connectivity_test'], {'default_timeout': 'None', 'client_info': 'DEFAULT_CLIENT_INFO'}), '(self._client._transport.\n rerun_connectivity_test, default_timeout=None, client_info=\n DEFAULT_CLIENT_INFO)\n', (21389, 21503), False, 'from google.api_core import gapic_v1\n'), ((22047, 22218), 'google.api_core.operation_async.from_gapic', 'operation_async.from_gapic', (['response', 'self._client._transport.operations_client', 'connectivity_test.ConnectivityTest'], {'metadata_type': 'reachability.OperationMetadata'}), '(response, self._client._transport.\n operations_client, connectivity_test.ConnectivityTest, metadata_type=\n reachability.OperationMetadata)\n', (22073, 22218), False, 'from google.api_core import operation_async\n'), ((24107, 24158), 'google.cloud.networkmanagement_v1beta1.types.reachability.DeleteConnectivityTestRequest', 'reachability.DeleteConnectivityTestRequest', (['request'], {}), '(request)\n', (24149, 24158), False, 'from google.cloud.networkmanagement_v1beta1.types import reachability\n'), ((24285, 24433), 'google.api_core.gapic_v1.method_async.wrap_method', 'gapic_v1.method_async.wrap_method', (['self._client._transport.delete_connectivity_test'], {'default_timeout': 'None', 'client_info': 'DEFAULT_CLIENT_INFO'}), '(self._client._transport.\n delete_connectivity_test, 
default_timeout=None, client_info=\n DEFAULT_CLIENT_INFO)\n', (24318, 24433), False, 'from google.api_core import gapic_v1\n'), ((24977, 25129), 'google.api_core.operation_async.from_gapic', 'operation_async.from_gapic', (['response', 'self._client._transport.operations_client', 'empty_pb2.Empty'], {'metadata_type': 'reachability.OperationMetadata'}), '(response, self._client._transport.\n operations_client, empty_pb2.Empty, metadata_type=reachability.\n OperationMetadata)\n', (25003, 25129), False, 'from google.api_core import operation_async\n'), ((25641, 25674), 'google.api_core.gapic_v1.client_info.ClientInfo', 'gapic_v1.client_info.ClientInfo', ([], {}), '()\n', (25672, 25674), False, 'from google.api_core import gapic_v1\n'), ((9881, 9952), 'google.api_core.gapic_v1.routing_header.to_grpc_metadata', 'gapic_v1.routing_header.to_grpc_metadata', (["(('parent', request.parent),)"], {}), "((('parent', request.parent),))\n", (9921, 9952), False, 'from google.api_core import gapic_v1\n'), ((12231, 12298), 'google.api_core.gapic_v1.routing_header.to_grpc_metadata', 'gapic_v1.routing_header.to_grpc_metadata', (["(('name', request.name),)"], {}), "((('name', request.name),))\n", (12271, 12298), False, 'from google.api_core import gapic_v1\n'), ((15151, 15222), 'google.api_core.gapic_v1.routing_header.to_grpc_metadata', 'gapic_v1.routing_header.to_grpc_metadata', (["(('parent', request.parent),)"], {}), "((('parent', request.parent),))\n", (15191, 15222), False, 'from google.api_core import gapic_v1\n'), ((18505, 18595), 'google.api_core.gapic_v1.routing_header.to_grpc_metadata', 'gapic_v1.routing_header.to_grpc_metadata', (["(('resource.name', request.resource.name),)"], {}), "((('resource.name', request.\n resource.name),))\n", (18545, 18595), False, 'from google.api_core import gapic_v1\n'), ((21691, 21758), 'google.api_core.gapic_v1.routing_header.to_grpc_metadata', 'gapic_v1.routing_header.to_grpc_metadata', (["(('name', request.name),)"], {}), "((('name', 
request.name),))\n", (21731, 21758), False, 'from google.api_core import gapic_v1\n'), ((24621, 24688), 'google.api_core.gapic_v1.routing_header.to_grpc_metadata', 'gapic_v1.routing_header.to_grpc_metadata', (["(('name', request.name),)"], {}), "((('name', request.name),))\n", (24661, 24688), False, 'from google.api_core import gapic_v1\n'), ((25469, 25533), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""google-cloud-networkmanagement"""'], {}), "('google-cloud-networkmanagement')\n", (25499, 25533), False, 'import pkg_resources\n')] |